file_path | content | size | lang | avg_line_length | max_line_length | alphanum_fraction
---|---|---|---|---|---|---
stringlengths 22-162 | stringlengths 19-501k | int64 19-501k | stringclasses 1 value | float64 6.33-100 | int64 18-935 | float64 0.34-0.93
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/pydev_monkey_qt.py | from __future__ import nested_scopes
from _pydev_bundle._pydev_saved_modules import threading
import os
from _pydev_bundle import pydev_log
def set_trace_in_qt():
from _pydevd_bundle.pydevd_comm import get_global_debugger
py_db = get_global_debugger()
if py_db is not None:
threading.current_thread() # Create the dummy thread for qt.
py_db.enable_tracing()
_patched_qt = False
def patch_qt(qt_support_mode):
'''
This method patches qt (PySide2, PySide, PyQt4, PyQt5) so that we have hooks to set the tracing for QThread.
'''
if not qt_support_mode:
return
if qt_support_mode is True or qt_support_mode == 'True':
# do not break backward compatibility
qt_support_mode = 'auto'
if qt_support_mode == 'auto':
qt_support_mode = os.getenv('PYDEVD_PYQT_MODE', 'auto')
# Avoid patching more than once
global _patched_qt
if _patched_qt:
return
pydev_log.debug('Qt support mode: %s', qt_support_mode)
_patched_qt = True
if qt_support_mode == 'auto':
patch_qt_on_import = None
try:
import PySide2 # @UnresolvedImport @UnusedImport
qt_support_mode = 'pyside2'
except:
try:
import PySide # @UnresolvedImport @UnusedImport
qt_support_mode = 'pyside'
except:
try:
import PyQt5 # @UnresolvedImport @UnusedImport
qt_support_mode = 'pyqt5'
except:
try:
import PyQt4 # @UnresolvedImport @UnusedImport
qt_support_mode = 'pyqt4'
except:
return
if qt_support_mode == 'pyside2':
try:
import PySide2.QtCore # @UnresolvedImport
_internal_patch_qt(PySide2.QtCore, qt_support_mode)
except:
return
elif qt_support_mode == 'pyside':
try:
import PySide.QtCore # @UnresolvedImport
_internal_patch_qt(PySide.QtCore, qt_support_mode)
except:
return
elif qt_support_mode == 'pyqt5':
try:
import PyQt5.QtCore # @UnresolvedImport
_internal_patch_qt(PyQt5.QtCore)
except:
return
elif qt_support_mode == 'pyqt4':
# Ok, we have an issue here:
# PyDev-452: Selecting PyQT API version using sip.setapi fails in debug mode
# http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html
# Mostly, if the user uses a different API version (i.e.: v2 instead of v1),
# that has to be done before importing PyQt4 modules (PySide/PyQt5 don't have this issue
# as they only implement v2).
patch_qt_on_import = 'PyQt4'
def get_qt_core_module():
import PyQt4.QtCore # @UnresolvedImport
return PyQt4.QtCore
_patch_import_to_patch_pyqt_on_import(patch_qt_on_import, get_qt_core_module)
else:
raise ValueError('Unexpected qt support mode: %s' % (qt_support_mode,))
def _patch_import_to_patch_pyqt_on_import(patch_qt_on_import, get_qt_core_module):
# I don't like this approach very much as we have to patch __import__, but I like even less
# asking the user to configure something in the client side...
# So, our approach is to patch PyQt4 right before the user tries to import it (at which
# point he should've set the sip api version properly already anyways).
pydev_log.debug('Setting up Qt post-import monkeypatch.')
dotted = patch_qt_on_import + '.'
original_import = __import__
from _pydev_bundle._pydev_sys_patch import patch_sys_module, patch_reload, cancel_patches_in_sys_module
patch_sys_module()
patch_reload()
def patched_import(name, *args, **kwargs):
if patch_qt_on_import == name or name.startswith(dotted):
builtins.__import__ = original_import
cancel_patches_in_sys_module()
_internal_patch_qt(get_qt_core_module()) # Patch it only when the user would import the qt module
return original_import(name, *args, **kwargs)
import builtins # Py3
builtins.__import__ = patched_import
def _internal_patch_qt(QtCore, qt_support_mode='auto'):
pydev_log.debug('Patching Qt: %s', QtCore)
_original_thread_init = QtCore.QThread.__init__
_original_runnable_init = QtCore.QRunnable.__init__
_original_QThread = QtCore.QThread
class FuncWrapper:
def __init__(self, original):
self._original = original
def __call__(self, *args, **kwargs):
set_trace_in_qt()
return self._original(*args, **kwargs)
class StartedSignalWrapper(QtCore.QObject): # Wrapper for the QThread.started signal
try:
_signal = QtCore.Signal() # @UndefinedVariable
except:
_signal = QtCore.pyqtSignal() # @UndefinedVariable
def __init__(self, thread, original_started):
QtCore.QObject.__init__(self)
self.thread = thread
self.original_started = original_started
if qt_support_mode in ('pyside', 'pyside2'):
self._signal = original_started
else:
self._signal.connect(self._on_call)
self.original_started.connect(self._signal)
def connect(self, func, *args, **kwargs):
if qt_support_mode in ('pyside', 'pyside2'):
return self._signal.connect(FuncWrapper(func), *args, **kwargs)
else:
return self._signal.connect(func, *args, **kwargs)
def disconnect(self, *args, **kwargs):
return self._signal.disconnect(*args, **kwargs)
def emit(self, *args, **kwargs):
return self._signal.emit(*args, **kwargs)
def _on_call(self, *args, **kwargs):
set_trace_in_qt()
class ThreadWrapper(QtCore.QThread): # Wrapper for QThread
def __init__(self, *args, **kwargs):
_original_thread_init(self, *args, **kwargs)
# In PyQt5 the program hangs when we try to call original run method of QThread class.
# So we need to distinguish instances of QThread class and instances of QThread inheritors.
if self.__class__.run == _original_QThread.run:
self.run = self._exec_run
else:
self._original_run = self.run
self.run = self._new_run
self._original_started = self.started
self.started = StartedSignalWrapper(self, self.started)
def _exec_run(self):
set_trace_in_qt()
self.exec_()
return None
def _new_run(self):
set_trace_in_qt()
return self._original_run()
class RunnableWrapper(QtCore.QRunnable): # Wrapper for QRunnable
def __init__(self, *args, **kwargs):
_original_runnable_init(self, *args, **kwargs)
self._original_run = self.run
self.run = self._new_run
def _new_run(self):
set_trace_in_qt()
return self._original_run()
QtCore.QThread = ThreadWrapper
QtCore.QRunnable = RunnableWrapper
| 7,306 | Python | 32.672811 | 112 | 0.588968 |
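The patch above captures the original `__init__`/`run` of `QThread`, then swaps `run` on each instance so `set_trace_in_qt()` fires before any user code executes on the new thread. A minimal standalone sketch of that same wrap-the-entry-point pattern, using `threading.Thread` instead of `QThread` so it runs without Qt installed (all names here are illustrative, not part of pydevd):

```python
import threading

_OriginalThread = threading.Thread
_original_thread_init = threading.Thread.__init__

def _set_trace_stub():
    # Stand-in for set_trace_in_qt(): here a debugger would enable tracing
    # on the freshly started thread.
    print('tracing enabled in %s' % threading.current_thread().name)

class TracedThread(_OriginalThread):  # Mirrors ThreadWrapper above.
    def __init__(self, *args, **kwargs):
        _original_thread_init(self, *args, **kwargs)
        self._original_run = self.run
        self.run = self._new_run  # Swap run on the instance, as the Qt patch does.

    def _new_run(self):
        _set_trace_stub()
        return self._original_run()

threading.Thread = TracedThread  # Analogous to QtCore.QThread = ThreadWrapper.

t = threading.Thread(target=lambda: print('user code runs traced'))
t.start()
t.join()
threading.Thread = _OriginalThread  # Undo the demo patch.
```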
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/pydev_log.py | from _pydevd_bundle.pydevd_constants import DebugInfoHolder, SHOW_COMPILE_CYTHON_COMMAND_LINE, NULL, LOG_TIME
from contextlib import contextmanager
import traceback
import os
import sys
class _LoggingGlobals(object):
_warn_once_map = {}
_debug_stream_filename = None
_debug_stream = sys.stderr
_debug_stream_initialized = False
def initialize_debug_stream(reinitialize=False):
'''
:param bool reinitialize:
Reinitialize is used to update the debug stream after a fork (thus, if it wasn't
initialized, we don't need to do anything).
'''
if reinitialize:
if not _LoggingGlobals._debug_stream_initialized:
return
else:
if _LoggingGlobals._debug_stream_initialized:
return
_LoggingGlobals._debug_stream_initialized = True
# Note: we cannot initialize with sys.stderr because when forking we may end up logging things in 'os' calls.
_LoggingGlobals._debug_stream = NULL
_LoggingGlobals._debug_stream_filename = None
if not DebugInfoHolder.PYDEVD_DEBUG_FILE:
_LoggingGlobals._debug_stream = sys.stderr
else:
# Add pid to the filename.
try:
dirname = os.path.dirname(DebugInfoHolder.PYDEVD_DEBUG_FILE)
basename = os.path.basename(DebugInfoHolder.PYDEVD_DEBUG_FILE)
try:
os.makedirs(dirname)
except:
pass # Ignore error if it already exists.
name, ext = os.path.splitext(basename)
debug_file = os.path.join(dirname, name + '.' + str(os.getpid()) + ext)
_LoggingGlobals._debug_stream = open(debug_file, 'w')
_LoggingGlobals._debug_stream_filename = debug_file
except:
_LoggingGlobals._debug_stream = sys.stderr
# Don't fail when trying to setup logging, just show the exception.
traceback.print_exc()
def list_log_files(pydevd_debug_file):
log_files = []
dirname = os.path.dirname(pydevd_debug_file)
basename = os.path.basename(pydevd_debug_file)
if os.path.isdir(dirname):
name, ext = os.path.splitext(basename)
for f in os.listdir(dirname):
if f.startswith(name) and f.endswith(ext):
log_files.append(os.path.join(dirname, f))
return log_files
@contextmanager
def log_context(trace_level, stream):
'''
To be used to temporarily change the logging settings.
'''
original_trace_level = DebugInfoHolder.DEBUG_TRACE_LEVEL
original_debug_stream = _LoggingGlobals._debug_stream
original_pydevd_debug_file = DebugInfoHolder.PYDEVD_DEBUG_FILE
original_debug_stream_filename = _LoggingGlobals._debug_stream_filename
original_initialized = _LoggingGlobals._debug_stream_initialized
DebugInfoHolder.DEBUG_TRACE_LEVEL = trace_level
_LoggingGlobals._debug_stream = stream
_LoggingGlobals._debug_stream_initialized = True
try:
yield
finally:
DebugInfoHolder.DEBUG_TRACE_LEVEL = original_trace_level
_LoggingGlobals._debug_stream = original_debug_stream
DebugInfoHolder.PYDEVD_DEBUG_FILE = original_pydevd_debug_file
_LoggingGlobals._debug_stream_filename = original_debug_stream_filename
_LoggingGlobals._debug_stream_initialized = original_initialized
import time
_last_log_time = time.time()
def _pydevd_log(level, msg, *args):
'''
Levels are:
0 most serious warnings/errors (always printed)
1 warnings/significant events
2 informational trace
3 verbose mode
'''
if level <= DebugInfoHolder.DEBUG_TRACE_LEVEL:
# Yes, we can get errors while printing if the program's console has already been closed (and we're still trying to print something).
try:
try:
if args:
msg = msg % args
except:
msg = '%s - %s' % (msg, args)
if LOG_TIME:
global _last_log_time
new_log_time = time.time()
time_diff = new_log_time - _last_log_time
_last_log_time = new_log_time
msg = '%.2fs - %s\n' % (time_diff, msg,)
else:
msg = '%s\n' % (msg,)
try:
try:
initialize_debug_stream() # Do it as late as possible
_LoggingGlobals._debug_stream.write(msg)
except TypeError:
if isinstance(msg, bytes):
# Depending on the StringIO flavor, it may only accept unicode.
msg = msg.decode('utf-8', 'replace')
_LoggingGlobals._debug_stream.write(msg)
except UnicodeEncodeError:
# When writing to the stream it's possible that the string can't be represented
# in the expected encoding (in this case, convert it to the stream encoding,
# or to ascii if none is available, using a replace for unencodable characters).
encoding = getattr(_LoggingGlobals._debug_stream, 'encoding', 'ascii')
msg = msg.encode(encoding, 'backslashreplace')
msg = msg.decode(encoding)
_LoggingGlobals._debug_stream.write(msg)
_LoggingGlobals._debug_stream.flush()
except:
pass
return True
def _pydevd_log_exception(msg='', *args):
if msg or args:
_pydevd_log(0, msg, *args)
try:
initialize_debug_stream() # Do it as late as possible
traceback.print_exc(file=_LoggingGlobals._debug_stream)
_LoggingGlobals._debug_stream.flush()
except:
raise
def verbose(msg, *args):
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 3:
_pydevd_log(3, msg, *args)
def debug(msg, *args):
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 2:
_pydevd_log(2, msg, *args)
def info(msg, *args):
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
_pydevd_log(1, msg, *args)
warn = info
def critical(msg, *args):
_pydevd_log(0, msg, *args)
def exception(msg='', *args):
try:
_pydevd_log_exception(msg, *args)
except:
pass # Should never fail (even at interpreter shutdown).
error = exception
def error_once(msg, *args):
try:
if args:
message = msg % args
else:
message = str(msg)
except:
message = '%s - %s' % (msg, args)
if message not in _LoggingGlobals._warn_once_map:
_LoggingGlobals._warn_once_map[message] = True
critical(message)
def exception_once(msg, *args):
try:
if args:
message = msg % args
else:
message = str(msg)
except:
message = '%s - %s' % (msg, args)
if message not in _LoggingGlobals._warn_once_map:
_LoggingGlobals._warn_once_map[message] = True
exception(message)
def debug_once(msg, *args):
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 3:
error_once(msg, *args)
def show_compile_cython_command_line():
if SHOW_COMPILE_CYTHON_COMMAND_LINE:
dirname = os.path.dirname(os.path.dirname(__file__))
error_once("warning: Debugger speedups using cython not found. Run '\"%s\" \"%s\" build_ext --inplace' to build.",
sys.executable, os.path.join(dirname, 'setup_pydevd_cython.py'))
| 7,359 | Python | 31.139738 | 134 | 0.608507 |
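The helpers above all funnel through `_pydevd_log`, gated by `DebugInfoHolder.DEBUG_TRACE_LEVEL`, and `log_context` swaps both the level and the stream temporarily. A self-contained sketch of the same pattern (simplified names, no pydevd imports, so it runs anywhere):

```python
import sys
from contextlib import contextmanager
from io import StringIO

TRACE_LEVEL = 1  # 0=critical, 1=info/warn, 2=debug, 3=verbose -- mirrors the levels above.
_stream = sys.stderr

def _log(level, msg, *args):
    if level <= TRACE_LEVEL:
        try:
            _stream.write((msg % args if args else msg) + '\n')
        except Exception:
            pass  # Logging must never break the debugged program.

@contextmanager
def log_context(level, stream):
    # Temporarily raise the level and redirect the stream, restoring both afterwards.
    global TRACE_LEVEL, _stream
    old = (TRACE_LEVEL, _stream)
    TRACE_LEVEL, _stream = level, stream
    try:
        yield
    finally:
        TRACE_LEVEL, _stream = old

buf = StringIO()
with log_context(3, buf):
    _log(2, 'captured: %s', 'debug message')
print(buf.getvalue(), end='')  # -> captured: debug message
```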
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/pydev_versioncheck.py | import sys
def versionok_for_gui():
''' Return True if running Python is suitable for GUI Event Integration and deeper IPython integration '''
# We require Python 2.6+ ...
if sys.hexversion < 0x02060000:
return False
# Or Python 3.2+
if sys.hexversion >= 0x03000000 and sys.hexversion < 0x03020000:
return False
# Not supported under Jython nor IronPython
if sys.platform.startswith("java") or sys.platform.startswith('cli'):
return False
return True
| 510 | Python | 29.058822 | 110 | 0.678431 |
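`versionok_for_gui()` is just a capability gate built on `sys.hexversion`, which packs the version as `(major << 24) | (minor << 16) | (micro << 8) | ...`, so `0x03020000` is 3.2.0. The equivalent check written inline:

```python
import sys

# 0x02060000 == 2.6.0, 0x03020000 == 3.2.0; Jython reports 'java*', IronPython 'cli'.
supports_gui = (
    sys.hexversion >= 0x02060000
    and not (0x03000000 <= sys.hexversion < 0x03020000)
    and not sys.platform.startswith(('java', 'cli'))
)
print('GUI event-loop integration supported:', supports_gui)
```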
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_completer.py | from collections import namedtuple
from string import ascii_letters, digits
from _pydevd_bundle import pydevd_xml
import pydevconsole
import builtins as __builtin__ # Py3
try:
import java.lang # @UnusedImport
from _pydev_bundle import _pydev_jy_imports_tipper
_pydev_imports_tipper = _pydev_jy_imports_tipper
except ImportError:
IS_JYTHON = False
from _pydev_bundle import _pydev_imports_tipper
dir2 = _pydev_imports_tipper.generate_imports_tip_for_module
#=======================================================================================================================
# _StartsWithFilter
#=======================================================================================================================
class _StartsWithFilter:
'''
Used because we can't create a lambda that'll use an outer scope in jython 2.1
'''
def __init__(self, start_with):
self.start_with = start_with.lower()
def __call__(self, name):
return name.lower().startswith(self.start_with)
#=======================================================================================================================
# Completer
#
# This class was gotten from IPython.completer (dir2 was replaced with the completer already in pydev)
#=======================================================================================================================
class Completer:
def __init__(self, namespace=None, global_namespace=None):
"""Create a new completer for the command line.
Completer([namespace,global_namespace]) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
An optional second namespace can be given. This allows the completer
to handle cases where both the local and global scopes need to be
distinguished.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete)
"""
# Don't bind to namespace quite yet, but flag whether the user wants a
# specific namespace or to use __main__.__dict__. This will allow us
# to bind to __main__.__dict__ at completion time, not now.
if namespace is None:
self.use_main_ns = 1
else:
self.use_main_ns = 0
self.namespace = namespace
# The global namespace, if given, can be bound directly
if global_namespace is None:
self.global_namespace = {}
else:
self.global_namespace = global_namespace
def complete(self, text):
"""Return the next possible completion for 'text'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with 'text'.
"""
if self.use_main_ns:
# In pydev this option should never be used
raise RuntimeError('Namespace must be provided!')
self.namespace = __main__.__dict__ # @UndefinedVariable
if "." in text:
return self.attr_matches(text)
else:
return self.global_matches(text)
def global_matches(self, text):
"""Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace or self.global_namespace that match.
"""
def get_item(obj, attr):
return obj[attr]
a = {}
for dict_with_comps in [__builtin__.__dict__, self.namespace, self.global_namespace]: # @UndefinedVariable
a.update(dict_with_comps)
filter = _StartsWithFilter(text)
return dir2(a, a.keys(), get_item, filter)
def attr_matches(self, text):
"""Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluatable in self.namespace or self.global_namespace, it will be
evaluated and its attributes (as revealed by dir()) are used as
possible completions. (For class instances, class members are
also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.
"""
import re
# Another option, seems to work great. Catches things like ''.<tab>
m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text) # @UndefinedVariable
if not m:
return []
expr, attr = m.group(1, 3)
try:
obj = eval(expr, self.namespace)
except:
try:
obj = eval(expr, self.global_namespace)
except:
return []
filter = _StartsWithFilter(attr)
words = dir2(obj, filter=filter)
return words
def generate_completions(frame, act_tok):
'''
:return list(tuple(method_name, docstring, parameters, completion_type))
method_name: str
docstring: str
parameters: str -- i.e.: "(a, b)"
completion_type is an int
See: _pydev_bundle._pydev_imports_tipper for TYPE_ constants
'''
if frame is None:
return []
# Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
# (Names not resolved in generator expression in method)
# See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals
if pydevconsole.IPYTHON:
completions = pydevconsole.get_completions(act_tok, act_tok, updated_globals, frame.f_locals)
else:
completer = Completer(updated_globals, None)
# list(tuple(name, descr, parameters, type))
completions = completer.complete(act_tok)
return completions
def generate_completions_as_xml(frame, act_tok):
completions = generate_completions(frame, act_tok)
return completions_to_xml(completions)
def completions_to_xml(completions):
valid_xml = pydevd_xml.make_valid_xml_value
quote = pydevd_xml.quote
msg = ["<xml>"]
for comp in completions:
msg.append('<comp p0="')
msg.append(valid_xml(quote(comp[0], '/>_= \t')))
msg.append('" p1="')
msg.append(valid_xml(quote(comp[1], '/>_= \t')))
msg.append('" p2="')
msg.append(valid_xml(quote(comp[2], '/>_= \t')))
msg.append('" p3="')
msg.append(valid_xml(quote(comp[3], '/>_= \t')))
msg.append('"/>')
msg.append("</xml>")
return ''.join(msg)
identifier_start = ascii_letters + '_'
identifier_part = ascii_letters + '_' + digits
identifier_start = set(identifier_start)
identifier_part = set(identifier_part)
def isidentifier(s):
return s.isidentifier()
TokenAndQualifier = namedtuple('TokenAndQualifier', 'token, qualifier')
def extract_token_and_qualifier(text, line=0, column=0):
'''
Extracts the token and qualifier from the text given the line/column
(see test_extract_token_and_qualifier for examples).
:param unicode text:
:param int line: 0-based
:param int column: 0-based
'''
# Note: not using the tokenize module because text should be unicode and
# line/column refer to the unicode text (otherwise we'd have to know
# those ranges after converted to bytes).
if line < 0:
line = 0
if column < 0:
column = 0
if isinstance(text, bytes):
text = text.decode('utf-8')
lines = text.splitlines()
try:
text = lines[line]
except IndexError:
return TokenAndQualifier(u'', u'')
if column >= len(text):
column = len(text)
text = text[:column]
token = u''
qualifier = u''
temp_token = []
for i in range(column - 1, -1, -1):
c = text[i]
if c in identifier_part or isidentifier(c) or c == u'.':
temp_token.append(c)
else:
break
temp_token = u''.join(reversed(temp_token))
if u'.' in temp_token:
temp_token = temp_token.split(u'.')
token = u'.'.join(temp_token[:-1])
qualifier = temp_token[-1]
else:
qualifier = temp_token
return TokenAndQualifier(token, qualifier)
| 8,544 | Python | 30.884328 | 127 | 0.587196 |
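The token/qualifier split above drives attribute completion: everything before the final dot (the token) gets evaluated, and the trailing fragment (the qualifier) feeds `_StartsWithFilter`. A simplified standalone sketch of the same walk-left-then-split logic (the real function additionally accepts any character for which `isidentifier` is true):

```python
import string

IDENT = set(string.ascii_letters + string.digits + '_')

def split_token_and_qualifier(text, column):
    # Walk left from the cursor collecting identifier chars and dots,
    # then split on the last dot (mirrors extract_token_and_qualifier above).
    i = column - 1
    chars = []
    while i >= 0 and (text[i] in IDENT or text[i] == '.'):
        chars.append(text[i])
        i -= 1
    fragment = ''.join(reversed(chars))
    token, _, qualifier = fragment.rpartition('.')
    return token, qualifier

assert split_token_and_qualifier('obj.attr.na', 11) == ('obj.attr', 'na')
assert split_token_and_qualifier('plain', 5) == ('', 'plain')
```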
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_filesystem_encoding.py | import sys
def __getfilesystemencoding():
'''
Note: there's a copy of this method in interpreterInfo.py
'''
try:
ret = sys.getfilesystemencoding()
if not ret:
raise RuntimeError('Unable to get encoding.')
return ret
except:
try:
#Handle Jython
from java.lang import System # @UnresolvedImport
env = System.getProperty("os.name").lower()
if env.find('win') != -1:
return 'ISO-8859-1' #mbcs does not work on Jython, so, use a (hopefully) suitable replacement
return 'utf-8'
except:
pass
#Only available from 2.3 onwards.
if sys.platform == 'win32':
return 'mbcs'
return 'utf-8'
def getfilesystemencoding():
try:
ret = __getfilesystemencoding()
#Check if the encoding is actually there to be used!
if hasattr('', 'encode'):
''.encode(ret)
if hasattr('', 'decode'):
''.decode(ret)
return ret
except:
return 'utf-8'
| 1,095 | Python | 25.095237 | 110 | 0.536073 |
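The outer `getfilesystemencoding()` only returns an encoding after proving it can round-trip; the same defensive idea in a compact standalone form:

```python
import sys

def safe_filesystem_encoding():
    # Fall back to utf-8 when the reported encoding is missing or unusable,
    # the same pattern as getfilesystemencoding() above.
    try:
        enc = sys.getfilesystemencoding() or 'utf-8'
        ''.encode(enc)  # Raises LookupError if the codec doesn't exist.
        return enc
    except Exception:
        return 'utf-8'

print(safe_filesystem_encoding())
```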
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_imports_tipper.py | import inspect
import os.path
import sys
from _pydev_bundle._pydev_tipper_common import do_find
from _pydevd_bundle.pydevd_utils import hasattr_checked, dir_checked
from inspect import getfullargspec
def getargspec(*args, **kwargs):
arg_spec = getfullargspec(*args, **kwargs)
return arg_spec.args, arg_spec.varargs, arg_spec.varkw, arg_spec.defaults, arg_spec.kwonlyargs or [], arg_spec.kwonlydefaults or {}
# completion types.
TYPE_IMPORT = '0'
TYPE_CLASS = '1'
TYPE_FUNCTION = '2'
TYPE_ATTR = '3'
TYPE_BUILTIN = '4'
TYPE_PARAM = '5'
def _imp(name, log=None):
try:
return __import__(name)
except:
if '.' in name:
sub = name[0:name.rfind('.')]
if log is not None:
log.add_content('Unable to import', name, 'trying with', sub)
log.add_exception()
return _imp(sub, log)
else:
s = 'Unable to import module: %s - sys.path: %s' % (str(name), sys.path)
if log is not None:
log.add_content(s)
log.add_exception()
raise ImportError(s)
IS_IPY = False
if sys.platform == 'cli':
IS_IPY = True
_old_imp = _imp
def _imp(name, log=None):
# We must add a reference in clr for .Net
import clr # @UnresolvedImport
initial_name = name
while '.' in name:
try:
clr.AddReference(name)
break # If it worked, that's OK.
except:
name = name[0:name.rfind('.')]
else:
try:
clr.AddReference(name)
except:
pass # That's OK (not dot net module).
return _old_imp(initial_name, log)
def get_file(mod):
f = None
try:
f = inspect.getsourcefile(mod) or inspect.getfile(mod)
except:
try:
f = getattr(mod, '__file__', None)
except:
f = None
if f and f.lower()[-4:] in ['.pyc', '.pyo']: # str.lower() takes no arguments; the original call would raise TypeError.
filename = f[:-4] + '.py'
if os.path.exists(filename):
f = filename
return f
def Find(name, log=None):
f = None
mod = _imp(name, log)
parent = mod
foundAs = ''
if inspect.ismodule(mod):
f = get_file(mod)
components = name.split('.')
old_comp = None
for comp in components[1:]:
try:
# this happens in the following case:
# we have mx.DateTime.mxDateTime.mxDateTime.pyd
# but after importing it, mx.DateTime.mxDateTime shadows access to mxDateTime.pyd
mod = getattr(mod, comp)
except AttributeError:
if old_comp != comp:
raise
if inspect.ismodule(mod):
f = get_file(mod)
else:
if len(foundAs) > 0:
foundAs = foundAs + '.'
foundAs = foundAs + comp
old_comp = comp
return f, mod, parent, foundAs
def search_definition(data):
'''@return file, line, col
'''
data = data.replace('\n', '')
if data.endswith('.'):
data = data.rstrip('.')
f, mod, parent, foundAs = Find(data)
try:
return do_find(f, mod), foundAs
except:
return do_find(f, parent), foundAs
def generate_tip(data, log=None):
data = data.replace('\n', '')
if data.endswith('.'):
data = data.rstrip('.')
f, mod, parent, foundAs = Find(data, log)
# print_ >> open('temp.txt', 'w'), f
tips = generate_imports_tip_for_module(mod)
return f, tips
def check_char(c):
if c == '-' or c == '.':
return '_'
return c
_SENTINEL = object()
def generate_imports_tip_for_module(obj_to_complete, dir_comps=None, getattr=getattr, filter=lambda name:True):
'''
@param obj_to_complete: the object from where we should get the completions
@param dir_comps: if passed, we should not 'dir' the object and should just iterate those passed as kwonly_arg parameter
@param getattr: the way to get kwonly_arg given object from the obj_to_complete (used for the completer)
@param filter: kwonly_arg callable that receives the name and decides if it should be appended or not to the results
@return: list of tuples, so that each tuple represents kwonly_arg completion with:
name, doc, args, type (from the TYPE_* constants)
'''
ret = []
if dir_comps is None:
dir_comps = dir_checked(obj_to_complete)
if hasattr_checked(obj_to_complete, '__dict__'):
dir_comps.append('__dict__')
if hasattr_checked(obj_to_complete, '__class__'):
dir_comps.append('__class__')
get_complete_info = True
if len(dir_comps) > 1000:
# ok, we don't want to let our users wait forever...
# no complete info for you...
get_complete_info = False
dontGetDocsOn = (float, int, str, tuple, list, dict)
dontGetattrOn = (dict, list, set, tuple)
for d in dir_comps:
if d is None:
continue
if not filter(d):
continue
args = ''
try:
try:
if isinstance(obj_to_complete, dontGetattrOn):
raise Exception('Since python 3.9, e.g. "dict[str]" will return'
" a dict that's only supposed to take strings. "
'Interestingly, e.g. dict["val"] is also valid '
'and presumably represents a dict that only takes '
'keys that are "val". This breaks our check for '
'class attributes.')
obj = getattr(obj_to_complete.__class__, d)
except:
obj = getattr(obj_to_complete, d)
except: # just ignore and get it without additional info
ret.append((d, '', args, TYPE_BUILTIN))
else:
if get_complete_info:
try:
retType = TYPE_BUILTIN
# check if we have to get docs
getDoc = True
for class_ in dontGetDocsOn:
if isinstance(obj, class_):
getDoc = False
break
doc = ''
if getDoc:
# no need to get this info... too many constants are defined and
# makes things much slower (passing all that through sockets takes quite some time)
try:
doc = inspect.getdoc(obj)
if doc is None:
doc = ''
except: # may happen on jython when checking java classes (so, just ignore it)
doc = ''
if inspect.ismethod(obj) or inspect.isbuiltin(obj) or inspect.isfunction(obj) or inspect.isroutine(obj):
try:
args, vargs, kwargs, defaults, kwonly_args, kwonly_defaults = getargspec(obj)
args = args[:]
for kwonly_arg in kwonly_args:
default = kwonly_defaults.get(kwonly_arg, _SENTINEL)
if default is not _SENTINEL:
args.append('%s=%s' % (kwonly_arg, default))
else:
args.append(str(kwonly_arg))
args = '(%s)' % (', '.join(args))
except TypeError:
# ok, let's see if we can get the arguments from the doc
args, doc = signature_from_docstring(doc, getattr(obj, '__name__', None))
retType = TYPE_FUNCTION
elif inspect.isclass(obj):
retType = TYPE_CLASS
elif inspect.ismodule(obj):
retType = TYPE_IMPORT
else:
retType = TYPE_ATTR
# add token and doc to return - assure only strings.
ret.append((d, doc, args, retType))
except: # just ignore and get it without additional info
ret.append((d, '', args, TYPE_BUILTIN))
else: # get_complete_info == False
if inspect.ismethod(obj) or inspect.isbuiltin(obj) or inspect.isfunction(obj) or inspect.isroutine(obj):
retType = TYPE_FUNCTION
elif inspect.isclass(obj):
retType = TYPE_CLASS
elif inspect.ismodule(obj):
retType = TYPE_IMPORT
else:
retType = TYPE_ATTR
# ok, no complete info, let's try to do this as fast and clean as possible
# so, no docs for this kind of information, only the signatures
ret.append((d, '', str(args), retType))
return ret
def signature_from_docstring(doc, obj_name):
args = '()'
try:
found = False
if len(doc) > 0:
if IS_IPY:
# Handle case where we have the situation below
# sort(self, object cmp, object key)
# sort(self, object cmp, object key, bool reverse)
# sort(self)
# sort(self, object cmp)
# Or: sort(self: list, cmp: object, key: object)
# sort(self: list, cmp: object, key: object, reverse: bool)
# sort(self: list)
# sort(self: list, cmp: object)
if obj_name:
name = obj_name + '('
# Fix issue where it was appearing sort(aa)sort(bb)sort(cc) in the same line.
lines = doc.splitlines()
if len(lines) == 1:
c = doc.count(name)
if c > 1:
doc = ('\n' + name).join(doc.split(name))
major = ''
for line in doc.splitlines():
if line.startswith(name) and line.endswith(')'):
if len(line) > len(major):
major = line
if major:
args = major[major.index('('):]
found = True
if not found:
i = doc.find('->')
if i < 0:
i = doc.find('--')
if i < 0:
i = doc.find('\n')
if i < 0:
i = doc.find('\r')
if i > 0:
s = doc[0:i]
s = s.strip()
# let's see if we have a docstring in the first line
if s[-1] == ')':
start = s.find('(')
if start >= 0:
end = s.find('[')
if end <= 0:
end = s.find(')')
if end <= 0:
end = len(s)
args = s[start:end]
if not args[-1] == ')':
args = args + ')'
# now, get rid of unwanted chars
l = len(args) - 1
r = []
for i in range(len(args)):
if i == 0 or i == l:
r.append(args[i])
else:
r.append(check_char(args[i]))
args = ''.join(r)
if IS_IPY:
if args.startswith('(self:'):
i = args.find(',')
if i >= 0:
args = '(self' + args[i:]
else:
args = '(self)'
i = args.find(')')
if i > 0:
args = args[:i + 1]
except:
pass
return args, doc
| 12,350 | Python | 32.024064 | 135 | 0.453279 |
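Each completion produced by `generate_imports_tip_for_module` is a `(name, doc, args, type)` tuple where `type` is one of the `TYPE_*` string constants. A hedged usage sketch against a stdlib module (assumes the vendored `_pydev_bundle` package is importable, as it is inside pydevd's tree; the printed output is illustrative):

```python
from _pydev_bundle._pydev_imports_tipper import (
    generate_imports_tip_for_module, TYPE_FUNCTION)
import json

tips = generate_imports_tip_for_module(json, filter=lambda name: name.startswith('lo'))
for name, doc, args, comp_type in tips:
    kind = 'function' if comp_type == TYPE_FUNCTION else comp_type
    print(name, args, kind)
# Expected to include entries such as: load (...) function / loads (...) function
```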
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/pydev_imports.py | from _pydev_bundle._pydev_saved_modules import xmlrpclib
from _pydev_bundle._pydev_saved_modules import xmlrpcserver
SimpleXMLRPCServer = xmlrpcserver.SimpleXMLRPCServer
from _pydev_bundle._pydev_execfile import execfile
from _pydev_bundle._pydev_saved_modules import _queue
from _pydevd_bundle.pydevd_exec2 import Exec
from urllib.parse import quote, quote_plus, unquote_plus # @UnresolvedImport
| 404 | Python | 27.928569 | 77 | 0.814356 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_log.py | import traceback
import sys
from io import StringIO
class Log:
def __init__(self):
self._contents = []
def add_content(self, *content):
self._contents.append(' '.join(content))
def add_exception(self):
s = StringIO()
exc_info = sys.exc_info()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], limit=None, file=s)
self._contents.append(s.getvalue())
def get_contents(self):
return '\n'.join(self._contents)
def clear_log(self):
del self._contents[:]
| 555 | Python | 21.239999 | 92 | 0.598198 |
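A short usage sketch of the `Log` accumulator above (assumes the vendored tree is on `sys.path`, as inside pydevd):

```python
from _pydev_bundle._pydev_log import Log

log = Log()
log.add_content('Unable to import', 'foo.bar')  # add_content joins its parts with spaces.
try:
    raise ValueError('boom')
except ValueError:
    log.add_exception()  # Captures the current traceback into the buffer.
print(log.get_contents())
log.clear_log()
```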
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_calltip_util.py | '''
License: Apache 2.0
Author: Yuli Fitterman
'''
import types
from _pydevd_bundle.pydevd_constants import IS_JYTHON
try:
import inspect
except:
import traceback
traceback.print_exc() # Ok, no inspect available (search will not work)
from _pydev_bundle._pydev_imports_tipper import signature_from_docstring
def is_bound_method(obj):
if isinstance(obj, types.MethodType):
return getattr(obj, '__self__', getattr(obj, 'im_self', None)) is not None
else:
return False
def get_class_name(instance):
return getattr(getattr(instance, "__class__", None), "__name__", None)
def get_bound_class_name(obj):
my_self = getattr(obj, '__self__', getattr(obj, 'im_self', None))
if my_self is None:
return None
return get_class_name(my_self)
def get_description(obj):
try:
ob_call = obj.__call__
except:
ob_call = None
if isinstance(obj, type) or type(obj).__name__ == 'classobj':
fob = getattr(obj, '__init__', lambda: None)
if not isinstance(fob, (types.FunctionType, types.MethodType)):
fob = obj
elif is_bound_method(ob_call):
fob = ob_call
else:
fob = obj
argspec = ""
fn_name = None
fn_class = None
if isinstance(fob, (types.FunctionType, types.MethodType)):
spec_info = inspect.getfullargspec(fob)
argspec = inspect.formatargspec(*spec_info)
fn_name = getattr(fob, '__name__', None)
if isinstance(obj, type) or type(obj).__name__ == 'classobj':
fn_name = "__init__"
fn_class = getattr(obj, "__name__", "UnknownClass")
elif is_bound_method(obj) or is_bound_method(ob_call):
fn_class = get_bound_class_name(obj) or "UnknownClass"
else:
fn_name = getattr(fob, '__name__', None)
fn_self = getattr(fob, '__self__', None)
if fn_self is not None and not isinstance(fn_self, types.ModuleType):
fn_class = get_class_name(fn_self)
doc_string = get_docstring(ob_call) if is_bound_method(ob_call) else get_docstring(obj)
return create_method_stub(fn_name, fn_class, argspec, doc_string)
def create_method_stub(fn_name, fn_class, argspec, doc_string):
if fn_name and argspec:
doc_string = "" if doc_string is None else doc_string
fn_stub = create_function_stub(fn_name, argspec, doc_string, indent=1 if fn_class else 0)
if fn_class:
expr = fn_class if fn_name == '__init__' else fn_class + '().' + fn_name
return create_class_stub(fn_class, fn_stub) + "\n" + expr
else:
expr = fn_name
return fn_stub + "\n" + expr
elif doc_string:
if fn_name:
restored_signature, _ = signature_from_docstring(doc_string, fn_name)
if restored_signature:
return create_method_stub(fn_name, fn_class, restored_signature, doc_string)
return create_function_stub('unknown', '(*args, **kwargs)', doc_string) + '\nunknown'
else:
return ''
def get_docstring(obj):
if obj is not None:
try:
if IS_JYTHON:
# Jython
doc = obj.__doc__
if doc is not None:
return doc
from _pydev_bundle import _pydev_jy_imports_tipper
is_method, infos = _pydev_jy_imports_tipper.ismethod(obj)
ret = ''
if is_method:
for info in infos:
ret += info.get_as_doc()
return ret
else:
doc = inspect.getdoc(obj)
if doc is not None:
return doc
except:
pass
else:
return ''
try:
# if no attempt succeeded, try to return repr()...
return repr(obj)
except:
try:
# otherwise the class
return str(obj.__class__)
except:
# if all fails, go to an empty string
return ''
def create_class_stub(class_name, contents):
return "class %s(object):\n%s" % (class_name, contents)
def create_function_stub(fn_name, fn_argspec, fn_docstring, indent=0):
def shift_right(string, prefix):
return ''.join(prefix + line for line in string.splitlines(True))
fn_docstring = shift_right(inspect.cleandoc(fn_docstring), " " * (indent + 1))
ret = '''
def %s%s:
"""%s"""
pass
''' % (fn_name, fn_argspec, fn_docstring)
ret = ret[1:] # remove first /n
ret = ret.replace('\t', " ")
if indent:
prefix = " " * indent
ret = shift_right(ret, prefix)
return ret
| 4,687 | Python | 29.051282 | 97 | 0.56326 |
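`create_method_stub` builds a fake-but-parseable Python snippet that an IDE can analyze for calltips. A hedged sketch of what it produces (assumes the vendored tree is importable; indentation in the expected output is approximate):

```python
from _pydev_bundle._pydev_calltip_util import create_method_stub

print(create_method_stub('split', 'str', '(self, sep)', 'Split at sep.'))
# Expected shape:
# class str(object):
#     def split(self, sep):
#         """Split at sep."""
#         pass
# str().split
```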
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/pydev_console_utils.py | import os
import sys
import traceback
from _pydev_bundle.pydev_imports import xmlrpclib, _queue, Exec
from _pydev_bundle._pydev_calltip_util import get_description
from _pydevd_bundle import pydevd_vars
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle.pydevd_constants import (IS_JYTHON, NEXT_VALUE_SEPARATOR, get_global_debugger,
silence_warnings_decorator)
from contextlib import contextmanager
from _pydev_bundle import pydev_log
from _pydevd_bundle.pydevd_utils import interrupt_main_thread
from io import StringIO
# =======================================================================================================================
# BaseStdIn
# =======================================================================================================================
class BaseStdIn:
def __init__(self, original_stdin=sys.stdin, *args, **kwargs):
try:
self.encoding = sys.stdin.encoding
except:
# Not sure if it's available in all Python versions...
pass
self.original_stdin = original_stdin
try:
self.errors = sys.stdin.errors # Who knew? sys streams have an errors attribute!
except:
# Not sure if it's available in all Python versions...
pass
def readline(self, *args, **kwargs):
# sys.stderr.write('Cannot readline out of the console evaluation\n') -- don't show anything
# This could happen if the user had done input('enter number).<-- upon entering this, that message would appear,
# which is not something we want.
return '\n'
def write(self, *args, **kwargs):
pass # not available StdIn (but it can be expected to be in the stream interface)
def flush(self, *args, **kwargs):
pass # not available StdIn (but it can be expected to be in the stream interface)
def read(self, *args, **kwargs):
# in the interactive interpreter, a read and a readline are the same.
return self.readline()
def close(self, *args, **kwargs):
pass # expected in StdIn
def __iter__(self):
# BaseStdIn would not be considered as Iterable in Python 3 without explicit `__iter__` implementation
return self.original_stdin.__iter__()
def __getattr__(self, item):
# it's called if the attribute wasn't found
if hasattr(self.original_stdin, item):
return getattr(self.original_stdin, item)
raise AttributeError("%s has no attribute %s" % (self.original_stdin, item))
# =======================================================================================================================
# StdIn
# =======================================================================================================================
class StdIn(BaseStdIn):
'''
Object to be added to stdin (to emulate it as non-blocking while the next line arrives)
'''
def __init__(self, interpreter, host, client_port, original_stdin=sys.stdin):
BaseStdIn.__init__(self, original_stdin)
self.interpreter = interpreter
self.client_port = client_port
self.host = host
def readline(self, *args, **kwargs):
# Ok, callback into the client to get the new input
try:
server = xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port))
requested_input = server.RequestInput()
if not requested_input:
return '\n' # Yes, a readline must return something (otherwise we can get an EOFError on the input() call).
else:
# readline should end with '\n' (not doing so makes IPython 5 remove the last *valid* character).
requested_input += '\n'
return requested_input
except KeyboardInterrupt:
raise # Let KeyboardInterrupt go through -- #PyDev-816: Interrupting infinite loop in the Interactive Console
except:
return '\n'
def close(self, *args, **kwargs):
pass # expected in StdIn
#=======================================================================================================================
# DebugConsoleStdIn
#=======================================================================================================================
class DebugConsoleStdIn(BaseStdIn):
'''
Object to be added to stdin (to emulate it as non-blocking while the next line arrives)
'''
def __init__(self, py_db, original_stdin):
'''
:param py_db:
If None, get_global_debugger() is used.
'''
BaseStdIn.__init__(self, original_stdin)
self._py_db = py_db
self._in_notification = 0
def __send_input_requested_message(self, is_started):
try:
py_db = self._py_db
if py_db is None:
py_db = get_global_debugger()
if py_db is None:
return
cmd = py_db.cmd_factory.make_input_requested_message(is_started)
py_db.writer.add_command(cmd)
except Exception:
pydev_log.exception()
@contextmanager
def notify_input_requested(self):
self._in_notification += 1
if self._in_notification == 1:
self.__send_input_requested_message(True)
try:
yield
finally:
self._in_notification -= 1
if self._in_notification == 0:
self.__send_input_requested_message(False)
def readline(self, *args, **kwargs):
with self.notify_input_requested():
return self.original_stdin.readline(*args, **kwargs)
def read(self, *args, **kwargs):
with self.notify_input_requested():
return self.original_stdin.read(*args, **kwargs)
class CodeFragment:
def __init__(self, text, is_single_line=True):
self.text = text
self.is_single_line = is_single_line
def append(self, code_fragment):
self.text = self.text + "\n" + code_fragment.text
if not code_fragment.is_single_line:
self.is_single_line = False
# =======================================================================================================================
# BaseInterpreterInterface
# =======================================================================================================================
class BaseInterpreterInterface:
def __init__(self, mainThread, connect_status_queue=None):
self.mainThread = mainThread
self.interruptable = False
self.exec_queue = _queue.Queue(0)
self.buffer = None
self.banner_shown = False
self.connect_status_queue = connect_status_queue
self.mpl_modules_for_patching = {}
self.init_mpl_modules_for_patching()
self._input_error_printed = False # Referenced in add_exec(); must be initialized here.
def build_banner(self):
return 'print({0})\n'.format(repr(self.get_greeting_msg()))
def get_greeting_msg(self):
return 'PyDev console: starting.\n'
def init_mpl_modules_for_patching(self):
from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot
self.mpl_modules_for_patching = {
"matplotlib": lambda: activate_matplotlib(self.enableGui),
"matplotlib.pyplot": activate_pyplot,
"pylab": activate_pylab
}
def need_more_for_code(self, source):
# PyDev-502: PyDev 3.9 F2 doesn't support backslash continuations
# Strangely even the IPython console is_complete said it was complete
# even with a continuation char at the end.
if source.endswith('\\'):
return True
if hasattr(self.interpreter, 'is_complete'):
return not self.interpreter.is_complete(source)
try:
# At this point, it should always be single.
# If we don't do this, things as:
#
# for i in range(10): print(i)
#
# (in a single line) don't work.
# Note that it won't give an error and code will be None (so, it'll
# use execMultipleLines in the next call in this case).
symbol = 'single'
code = self.interpreter.compile(source, '<input>', symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
return False
if code is None:
# Case 2
return True
# Case 3
return False
def need_more(self, code_fragment):
if self.buffer is None:
self.buffer = code_fragment
else:
self.buffer.append(code_fragment)
return self.need_more_for_code(self.buffer.text)
def create_std_in(self, debugger=None, original_std_in=None):
if debugger is None:
return StdIn(self, self.host, self.client_port, original_stdin=original_std_in)
else:
return DebugConsoleStdIn(py_db=debugger, original_stdin=original_std_in)
def add_exec(self, code_fragment, debugger=None):
# In case sys.excepthook called, use original excepthook #PyDev-877: Debug console freezes with Python 3.5+
# (showtraceback does it on python 3.5 onwards)
sys.excepthook = sys.__excepthook__
try:
original_in = sys.stdin
try:
help = None
if 'pydoc' in sys.modules:
pydoc = sys.modules['pydoc'] # Don't import it if it still is not there.
if hasattr(pydoc, 'help'):
# You never know how will the API be changed, so, let's code defensively here
help = pydoc.help
if not hasattr(help, 'input'):
help = None
except:
# Just ignore any error here
pass
more = False
try:
sys.stdin = self.create_std_in(debugger, original_in)
try:
if help is not None:
# This will enable the help() function to work.
try:
try:
help.input = sys.stdin
except AttributeError:
help._input = sys.stdin
except:
help = None
if not self._input_error_printed:
self._input_error_printed = True
sys.stderr.write('\nError when trying to update pydoc.help.input\n')
sys.stderr.write('(help() may not work -- please report this as a bug in the pydev bugtracker).\n\n')
traceback.print_exc()
try:
self.start_exec()
if hasattr(self, 'debugger'):
self.debugger.enable_tracing()
more = self.do_add_exec(code_fragment)
if hasattr(self, 'debugger'):
self.debugger.disable_tracing()
self.finish_exec(more)
finally:
if help is not None:
try:
try:
help.input = original_in
except AttributeError:
help._input = original_in
except:
pass
finally:
sys.stdin = original_in
except SystemExit:
raise
except:
traceback.print_exc()
finally:
sys.__excepthook__ = sys.excepthook
return more
def do_add_exec(self, codeFragment):
'''
Subclasses should override.
@return: more (True if more input is needed to complete the statement and False if the statement is complete).
'''
raise NotImplementedError()
def get_namespace(self):
'''
Subclasses should override.
@return: dict with namespace.
'''
raise NotImplementedError()
def __resolve_reference__(self, text):
"""
:type text: str
"""
obj = None
if '.' not in text:
try:
obj = self.get_namespace()[text]
except KeyError:
pass
if obj is None:
try:
obj = self.get_namespace()['__builtins__'][text]
except:
pass
if obj is None:
try:
obj = getattr(self.get_namespace()['__builtins__'], text, None)
except:
pass
else:
try:
last_dot = text.rindex('.')
parent_context = text[0:last_dot]
res = pydevd_vars.eval_in_context(parent_context, self.get_namespace(), self.get_namespace())
obj = getattr(res, text[last_dot + 1:])
except:
pass
return obj
def getDescription(self, text):
try:
obj = self.__resolve_reference__(text)
if obj is None:
return ''
return get_description(obj)
except:
return ''
def do_exec_code(self, code, is_single_line):
try:
code_fragment = CodeFragment(code, is_single_line)
more = self.need_more(code_fragment)
if not more:
code_fragment = self.buffer
self.buffer = None
self.exec_queue.put(code_fragment)
return more
except:
traceback.print_exc()
return False
def execLine(self, line):
return self.do_exec_code(line, True)
def execMultipleLines(self, lines):
if IS_JYTHON:
more = False
for line in lines.split('\n'):
more = self.do_exec_code(line, True)
return more
else:
return self.do_exec_code(lines, False)
def interrupt(self):
self.buffer = None # Also clear the buffer when it's interrupted.
try:
if self.interruptable:
# Fix for #PyDev-500: Console interrupt can't interrupt on sleep
interrupt_main_thread(self.mainThread)
self.finish_exec(False)
return True
except:
traceback.print_exc()
return False
def close(self):
sys.exit(0)
def start_exec(self):
self.interruptable = True
def get_server(self):
if getattr(self, 'host', None) is not None:
return xmlrpclib.Server('http://%s:%s' % (self.host, self.client_port))
else:
return None
server = property(get_server)
def ShowConsole(self):
server = self.get_server()
if server is not None:
server.ShowConsole()
def finish_exec(self, more):
self.interruptable = False
server = self.get_server()
if server is not None:
return server.NotifyFinished(more)
else:
return True
def getFrame(self):
xml = StringIO()
hidden_ns = self.get_ipython_hidden_vars_dict()
xml.write("<xml>")
xml.write(pydevd_xml.frame_vars_to_xml(self.get_namespace(), hidden_ns))
xml.write("</xml>")
return xml.getvalue()
@silence_warnings_decorator
def getVariable(self, attributes):
xml = StringIO()
xml.write("<xml>")
val_dict = pydevd_vars.resolve_compound_var_object_fields(self.get_namespace(), attributes)
if val_dict is None:
val_dict = {}
for k, val in val_dict.items():
val = val_dict[k]
evaluate_full_value = pydevd_xml.should_evaluate_full_value(val)
xml.write(pydevd_vars.var_to_xml(val, k, evaluate_full_value=evaluate_full_value))
xml.write("</xml>")
return xml.getvalue()
def getArray(self, attr, roffset, coffset, rows, cols, format):
name = attr.split("\t")[-1]
array = pydevd_vars.eval_in_context(name, self.get_namespace(), self.get_namespace())
return pydevd_vars.table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format)
def evaluate(self, expression):
xml = StringIO()
xml.write("<xml>")
result = pydevd_vars.eval_in_context(expression, self.get_namespace(), self.get_namespace())
xml.write(pydevd_vars.var_to_xml(result, expression))
xml.write("</xml>")
return xml.getvalue()
@silence_warnings_decorator
def loadFullValue(self, seq, scope_attrs):
"""
Evaluate full value for async Console variables in a separate thread and send results to IDE side
:param seq: id of command
:param scope_attrs: a sequence of variables with their attributes separated by NEXT_VALUE_SEPARATOR
(i.e.: obj\tattr1\tattr2NEXT_VALUE_SEPARATORobj2\tattr1\tattr2)
:return:
"""
frame_variables = self.get_namespace()
var_objects = []
vars = scope_attrs.split(NEXT_VALUE_SEPARATOR)
for var_attrs in vars:
if '\t' in var_attrs:
name, attrs = var_attrs.split('\t', 1)
else:
name = var_attrs
attrs = None
if name in frame_variables:
var_object = pydevd_vars.resolve_var_object(frame_variables[name], attrs)
var_objects.append((var_object, name))
else:
var_object = pydevd_vars.eval_in_context(name, frame_variables, frame_variables)
var_objects.append((var_object, name))
from _pydevd_bundle.pydevd_comm import GetValueAsyncThreadConsole
py_db = getattr(self, 'debugger', None)
if py_db is None:
py_db = get_global_debugger()
if py_db is None:
from pydevd import PyDB
py_db = PyDB()
t = GetValueAsyncThreadConsole(py_db, self.get_server(), seq, var_objects)
t.start()
def changeVariable(self, attr, value):
def do_change_variable():
Exec('%s=%s' % (attr, value), self.get_namespace(), self.get_namespace())
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_change_variable)
def connectToDebugger(self, debuggerPort, debugger_options=None):
'''
Used to show console with variables connection.
Mainly, monkey-patches things in the debugger structure so that the debugger protocol works.
'''
if debugger_options is None:
debugger_options = {}
env_key = "PYDEVD_EXTRA_ENVS"
if env_key in debugger_options:
for (env_name, value) in debugger_options[env_key].items():
existing_value = os.environ.get(env_name, None)
if existing_value:
os.environ[env_name] = "%s%c%s" % (existing_value, os.path.pathsep, value)
else:
os.environ[env_name] = value
if env_name == "PYTHONPATH":
sys.path.append(value)
del debugger_options[env_key]
def do_connect_to_debugger():
try:
# Try to import the packages needed to attach the debugger
import pydevd
from _pydev_bundle._pydev_saved_modules import threading
except:
# This happens on Jython embedded in host eclipse
traceback.print_exc()
sys.stderr.write('pydevd is not available, cannot connect\n')
from _pydevd_bundle.pydevd_constants import set_thread_id
from _pydev_bundle import pydev_localhost
set_thread_id(threading.current_thread(), "console_main")
VIRTUAL_FRAME_ID = "1" # matches PyStackFrameConsole.java
VIRTUAL_CONSOLE_ID = "console_main" # matches PyThreadConsole.java
f = FakeFrame()
f.f_back = None
f.f_globals = {} # As globals=locals here, let's simply let it empty (and save a bit of network traffic).
f.f_locals = self.get_namespace()
self.debugger = pydevd.PyDB()
self.debugger.add_fake_frame(thread_id=VIRTUAL_CONSOLE_ID, frame_id=VIRTUAL_FRAME_ID, frame=f)
try:
pydevd.apply_debugger_options(debugger_options)
self.debugger.connect(pydev_localhost.get_localhost(), debuggerPort)
self.debugger.prepare_to_run()
self.debugger.disable_tracing()
except:
traceback.print_exc()
sys.stderr.write('Failed to connect to target debugger.\n')
# Register to process commands when idle
self.debugrunning = False
try:
import pydevconsole
pydevconsole.set_debug_hook(self.debugger.process_internal_commands)
except:
traceback.print_exc()
sys.stderr.write('Version of Python does not support debuggable Interactive Console.\n')
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_connect_to_debugger)
return ('connect complete',)
def handshake(self):
if self.connect_status_queue is not None:
self.connect_status_queue.put(True)
return "PyCharm"
def get_connect_status_queue(self):
return self.connect_status_queue
def hello(self, input_str):
# Don't care what the input string is
return ("Hello eclipse",)
def enableGui(self, guiname):
''' Enable the GUI specified in guiname (see inputhook for list).
As with IPython, enabling multiple GUIs isn't an error, but
only the last one's main loop runs and it may not work
'''
def do_enable_gui():
from _pydev_bundle.pydev_versioncheck import versionok_for_gui
if versionok_for_gui():
try:
from pydev_ipython.inputhook import enable_gui
enable_gui(guiname)
except:
sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
traceback.print_exc()
elif guiname not in ['none', '', None]:
# Only print a warning if the guiname was going to do something
sys.stderr.write("PyDev console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
# Return value does not matter, so return back what was sent
return guiname
# Important: it has to be really enabled in the main thread, so, schedule
# it to run in the main thread.
self.exec_queue.put(do_enable_gui)
def get_ipython_hidden_vars_dict(self):
return None
# =======================================================================================================================
# FakeFrame
# =======================================================================================================================
class FakeFrame:
'''
Used to show console with variables connection.
A class to be used as a mock of a frame.
'''
| 23,769 | Python | 36.140625 | 133 | 0.531869 |
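`need_more_for_code` above leans on the convention that compiling with the `'single'` symbol returns `None` for syntactically incomplete input. The stdlib exposes the same three-way contract through `codeop`, shown standalone:

```python
import codeop

compiler = codeop.CommandCompiler()
# None -> statement incomplete (keep buffering), code object -> ready to exec,
# SyntaxError raised -> report the error. Same three cases as need_more_for_code.
print(compiler('for i in range(3):') is None)           # True: needs more input
print(compiler('for i in range(3): print(i)') is None)  # False: complete
```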
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_sys_patch.py | import sys
def patch_sys_module():
def patched_exc_info(fun):
def pydev_debugger_exc_info():
type, value, traceback = fun()
if type == ImportError:
# we should not show frame added by plugin_import call
if traceback and hasattr(traceback, "tb_next"):
return type, value, traceback.tb_next
return type, value, traceback
return pydev_debugger_exc_info
system_exc_info = sys.exc_info
sys.exc_info = patched_exc_info(system_exc_info)
if not hasattr(sys, "system_exc_info"):
sys.system_exc_info = system_exc_info
def patched_reload(orig_reload):
def pydev_debugger_reload(module):
orig_reload(module)
if module.__name__ == "sys":
# if sys module was reloaded we should patch it again
patch_sys_module()
return pydev_debugger_reload
def patch_reload():
import builtins # Py3
if hasattr(builtins, "reload"):
sys.builtin_orig_reload = builtins.reload
builtins.reload = patched_reload(sys.builtin_orig_reload) # @UndefinedVariable
try:
import imp
sys.imp_orig_reload = imp.reload
imp.reload = patched_reload(sys.imp_orig_reload) # @UndefinedVariable
except:
pass
else:
try:
import importlib
sys.importlib_orig_reload = importlib.reload # @UndefinedVariable
importlib.reload = patched_reload(sys.importlib_orig_reload) # @UndefinedVariable
except:
pass
del builtins
def cancel_patches_in_sys_module():
sys.exc_info = sys.system_exc_info # @UndefinedVariable
import builtins # Py3
if hasattr(sys, "builtin_orig_reload"):
builtins.reload = sys.builtin_orig_reload
if hasattr(sys, "imp_orig_reload"):
import imp
imp.reload = sys.imp_orig_reload
if hasattr(sys, "importlib_orig_reload"):
import importlib
importlib.reload = sys.importlib_orig_reload
del builtins
| 2,076 | Python | 27.067567 | 94 | 0.611753 |
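The `sys.exc_info` patch above wraps the original callable and rewrites its result to hide the frame injected by the plugin import machinery. A minimal standalone demonstration of the same hide-one-frame idea (no pydevd imports; names are illustrative):

```python
import sys

def _strip_top_frame(fun):
    def wrapper():
        etype, value, tb = fun()
        if etype is ImportError and tb is not None and tb.tb_next is not None:
            return etype, value, tb.tb_next  # Hide the injected import frame.
        return etype, value, tb
    return wrapper

original = sys.exc_info
sys.exc_info = _strip_top_frame(original)
try:
    try:
        raise ImportError('demo')
    except ImportError:
        etype, value, tb = sys.exc_info()
        print(etype.__name__, value)
finally:
    sys.exc_info = original  # Always undo monkeypatches in a demo.
```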
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_jy_imports_tipper.py | import traceback
from io import StringIO
from java.lang import StringBuffer # @UnresolvedImport
from java.lang import String # @UnresolvedImport
import java.lang # @UnresolvedImport
import sys
from _pydev_bundle._pydev_tipper_common import do_find
from org.python.core import PyReflectedFunction # @UnresolvedImport
from org.python import core # @UnresolvedImport
from org.python.core import PyClass # @UnresolvedImport
# completion types.
TYPE_IMPORT = '0'
TYPE_CLASS = '1'
TYPE_FUNCTION = '2'
TYPE_ATTR = '3'
TYPE_BUILTIN = '4'
TYPE_PARAM = '5'
def _imp(name):
try:
return __import__(name)
except:
if '.' in name:
sub = name[0:name.rfind('.')]
return _imp(sub)
else:
s = 'Unable to import module: %s - sys.path: %s' % (str(name), sys.path)
raise RuntimeError(s)
import java.util
_java_rt_file = getattr(java.util, '__file__', None)
def Find(name):
f = None
if name.startswith('__builtin__'):
if name == '__builtin__.str':
name = 'org.python.core.PyString'
elif name == '__builtin__.dict':
name = 'org.python.core.PyDictionary'
mod = _imp(name)
parent = mod
foundAs = ''
try:
f = getattr(mod, '__file__', None)
except:
f = None
components = name.split('.')
old_comp = None
for comp in components[1:]:
try:
# this happens in the following case:
# we have mx.DateTime.mxDateTime.mxDateTime.pyd
# but after importing it, mx.DateTime.mxDateTime shadows access to mxDateTime.pyd
mod = getattr(mod, comp)
except AttributeError:
if old_comp != comp:
raise
if hasattr(mod, '__file__'):
f = mod.__file__
else:
if len(foundAs) > 0:
foundAs = foundAs + '.'
foundAs = foundAs + comp
old_comp = comp
if f is None and name.startswith('java.lang'):
# Hack: java.lang.__file__ is None on Jython 2.7 (whereas it pointed to rt.jar on Jython 2.5).
f = _java_rt_file
if f is not None:
if f.endswith('.pyc'):
f = f[:-1]
elif f.endswith('$py.class'):
f = f[:-len('$py.class')] + '.py'
return f, mod, parent, foundAs
def format_param_class_name(paramClassName):
if paramClassName.startswith('<type \'') and paramClassName.endswith('\'>'):
paramClassName = paramClassName[len('<type \''):-2]
if paramClassName.startswith('['):
if paramClassName == '[C':
paramClassName = 'char[]'
elif paramClassName == '[B':
paramClassName = 'byte[]'
elif paramClassName == '[I':
paramClassName = 'int[]'
elif paramClassName.startswith('[L') and paramClassName.endswith(';'):
paramClassName = paramClassName[2:-1]
paramClassName += '[]'
return paramClassName
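# Illustrative sketch (hypothetical helper, not part of pydevd): how the
# JVM type descriptors handled above are normalized.
def _example_format_param_class_name():
    assert format_param_class_name('[C') == 'char[]'
    assert format_param_class_name('[Ljava.lang.String;') == 'java.lang.String[]'
    assert format_param_class_name("<type 'int'>") == 'int'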
def generate_tip(data, log=None):
data = data.replace('\n', '')
if data.endswith('.'):
data = data.rstrip('.')
f, mod, parent, foundAs = Find(data)
tips = generate_imports_tip_for_module(mod)
return f, tips
#=======================================================================================================================
# Info
#=======================================================================================================================
class Info:
def __init__(self, name, **kwargs):
self.name = name
self.doc = kwargs.get('doc', None)
self.args = kwargs.get('args', ()) # tuple of strings
self.varargs = kwargs.get('varargs', None) # string
self.kwargs = kwargs.get('kwargs', None) # string
self.ret = kwargs.get('ret', None) # string
def basic_as_str(self):
'''@returns this class information as a string (just basic format)
'''
args = self.args
s = 'function:%s args=%s, varargs=%s, kwargs=%s, docs:%s' % \
(self.name, args, self.varargs, self.kwargs, self.doc)
return s
def get_as_doc(self):
s = str(self.name)
if self.doc:
s += '\n@doc %s\n' % str(self.doc)
if self.args:
s += '\n@params '
for arg in self.args:
s += str(format_param_class_name(arg))
s += ' '
if self.varargs:
s += '\n@varargs '
s += str(self.varargs)
if self.kwargs:
s += '\n@kwargs '
s += str(self.kwargs)
if self.ret:
s += '\n@return '
s += str(format_param_class_name(str(self.ret)))
return str(s)
def isclass(cls):
return isinstance(cls, core.PyClass) or type(cls) == java.lang.Class
def ismethod(func):
'''this function should return the information gathered on a function
@param func: this is the function we want to get info on
@return a tuple where:
0 = indicates whether the parameter passed is a method or not
1 = a list of classes 'Info', with the info gathered from the function
this is a list because when we have methods from java with the same name and different signatures,
we actually have many methods, each with its own set of arguments
'''
try:
if isinstance(func, core.PyFunction):
# ok, this is from python, created by jython
# print_ ' PyFunction'
def getargs(func_code):
"""Get information about the arguments accepted by a code object.
Three things are returned: (args, varargs, varkw), where 'args' is
a list of argument names (possibly containing nested lists), and
'varargs' and 'varkw' are the names of the * and ** arguments or None."""
nargs = func_code.co_argcount
names = func_code.co_varnames
args = list(names[:nargs])
step = 0
if not hasattr(func_code, 'CO_VARARGS'):
from org.python.core import CodeFlag # @UnresolvedImport
co_varargs_flag = CodeFlag.CO_VARARGS.flag
co_varkeywords_flag = CodeFlag.CO_VARKEYWORDS.flag
else:
co_varargs_flag = func_code.CO_VARARGS
co_varkeywords_flag = func_code.CO_VARKEYWORDS
varargs = None
if func_code.co_flags & co_varargs_flag:
varargs = func_code.co_varnames[nargs]
nargs = nargs + 1
varkw = None
if func_code.co_flags & co_varkeywords_flag:
varkw = func_code.co_varnames[nargs]
return args, varargs, varkw
args = getargs(func.func_code)
return 1, [Info(func.func_name, args=args[0], varargs=args[1], kwargs=args[2], doc=func.func_doc)]
if isinstance(func, core.PyMethod):
# this is something from java itself, and jython just wrapped it...
# things to play in func:
# ['__call__', '__class__', '__cmp__', '__delattr__', '__dir__', '__doc__', '__findattr__', '__name__', '_doget', 'im_class',
# 'im_func', 'im_self', 'toString']
# print_ ' PyMethod'
# that's the PyReflectedFunction... keep going to get it
func = func.im_func
if isinstance(func, PyReflectedFunction):
# this is something from java itself, and jython just wrapped it...
# print_ ' PyReflectedFunction'
infos = []
for i in range(len(func.argslist)):
# things to play in func.argslist[i]:
# 'PyArgsCall', 'PyArgsKeywordsCall', 'REPLACE', 'StandardCall', 'args', 'compare', 'compareTo', 'data', 'declaringClass'
# 'flags', 'isStatic', 'matches', 'precedence']
# print_ ' ', func.argslist[i].data.__class__
# func.argslist[i].data.__class__ == java.lang.reflect.Method
if func.argslist[i]:
met = func.argslist[i].data
name = met.getName()
try:
ret = met.getReturnType()
except AttributeError:
ret = ''
parameterTypes = met.getParameterTypes()
args = []
for j in range(len(parameterTypes)):
paramTypesClass = parameterTypes[j]
try:
try:
paramClassName = paramTypesClass.getName()
except:
paramClassName = paramTypesClass.getName(paramTypesClass)
except AttributeError:
try:
paramClassName = repr(paramTypesClass) # should be something like <type 'object'>
paramClassName = paramClassName.split('\'')[1]
except:
paramClassName = repr(paramTypesClass) # just in case something else happens... it will at least be visible
                        # if the parameter equals [C, it means it is a char array, so let's change it
a = format_param_class_name(paramClassName)
# a = a.replace('[]','Array')
# a = a.replace('Object', 'obj')
# a = a.replace('String', 's')
# a = a.replace('Integer', 'i')
# a = a.replace('Char', 'c')
# a = a.replace('Double', 'd')
args.append(a) # so we don't leave invalid code
info = Info(name, args=args, ret=ret)
# print_ info.basic_as_str()
infos.append(info)
return 1, infos
except Exception:
s = StringIO()
traceback.print_exc(file=s)
return 1, [Info(str('ERROR'), doc=s.getvalue())]
return 0, None
def ismodule(mod):
    # java modules... do we have another way to know that?
if not hasattr(mod, 'getClass') and not hasattr(mod, '__class__') \
and hasattr(mod, '__name__'):
return 1
return isinstance(mod, core.PyModule)
def dir_obj(obj):
ret = []
found = java.util.HashMap()
original = obj
if hasattr(obj, '__class__'):
if obj.__class__ == java.lang.Class:
# get info about superclasses
classes = []
classes.append(obj)
try:
c = obj.getSuperclass()
except TypeError:
# may happen on jython when getting the java.lang.Class class
c = obj.getSuperclass(obj)
            while c is not None:
classes.append(c)
c = c.getSuperclass()
# get info about interfaces
interfs = []
for obj in classes:
try:
interfs.extend(obj.getInterfaces())
except TypeError:
interfs.extend(obj.getInterfaces(obj))
classes.extend(interfs)
# now is the time when we actually get info on the declared methods and fields
for obj in classes:
try:
declaredMethods = obj.getDeclaredMethods()
except TypeError:
declaredMethods = obj.getDeclaredMethods(obj)
try:
declaredFields = obj.getDeclaredFields()
except TypeError:
declaredFields = obj.getDeclaredFields(obj)
for i in range(len(declaredMethods)):
name = declaredMethods[i].getName()
ret.append(name)
found.put(name, 1)
for i in range(len(declaredFields)):
name = declaredFields[i].getName()
ret.append(name)
found.put(name, 1)
elif isclass(obj.__class__):
d = dir(obj.__class__)
for name in d:
ret.append(name)
found.put(name, 1)
# this simple dir does not always get all the info, that's why we have the part before
# (e.g.: if we do a dir on String, some methods that are from other interfaces such as
# charAt don't appear)
d = dir(original)
for name in d:
if found.get(name) != 1:
ret.append(name)
return ret
def format_arg(arg):
'''formats an argument to be shown
'''
s = str(arg)
dot = s.rfind('.')
if dot >= 0:
s = s[dot + 1:]
s = s.replace(';', '')
s = s.replace('[]', 'Array')
if len(s) > 0:
c = s[0].lower()
s = c + s[1:]
return s
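# Illustrative sketch (hypothetical helper, not part of pydevd): format_arg
# shortens a fully-qualified Java type into a parameter-style name.
def _example_format_arg():
    assert format_arg('java.lang.String') == 'string'
    assert format_arg('java.lang.Object[]') == 'objectArray'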
def search_definition(data):
'''@return file, line, col
'''
data = data.replace('\n', '')
if data.endswith('.'):
data = data.rstrip('.')
f, mod, parent, foundAs = Find(data)
try:
return do_find(f, mod), foundAs
except:
return do_find(f, parent), foundAs
def generate_imports_tip_for_module(obj_to_complete, dir_comps=None, getattr=getattr, filter=lambda name:True):
'''
@param obj_to_complete: the object from where we should get the completions
@param dir_comps: if passed, we should not 'dir' the object and should just iterate those passed as a parameter
@param getattr: the way to get a given object from the obj_to_complete (used for the completer)
@param filter: a callable that receives the name and decides if it should be appended or not to the results
@return: list of tuples, so that each tuple represents a completion with:
name, doc, args, type (from the TYPE_* constants)
'''
ret = []
if dir_comps is None:
dir_comps = dir_obj(obj_to_complete)
for d in dir_comps:
if d is None:
continue
if not filter(d):
continue
args = ''
doc = ''
retType = TYPE_BUILTIN
try:
obj = getattr(obj_to_complete, d)
except (AttributeError, java.lang.NoClassDefFoundError):
# jython has a bug in its custom classloader that prevents some things from working correctly, so, let's see if
# we can fix that... (maybe fixing it in jython itself would be a better idea, as this is clearly a bug)
# for that we need a custom classloader... we have references from it in the below places:
#
# http://mindprod.com/jgloss/classloader.html
# http://www.javaworld.com/javaworld/jw-03-2000/jw-03-classload-p2.html
# http://freshmeat.net/articles/view/1643/
#
# note: this only happens when we add things to the sys.path at runtime, if they are added to the classpath
# before the run, everything goes fine.
#
            # The code below illustrates what I mean...
#
# import sys
# sys.path.insert(1, r"C:\bin\eclipse310\plugins\org.junit_3.8.1\junit.jar" )
#
# import junit.framework
# print_ dir(junit.framework) #shows the TestCase class here
#
# import junit.framework.TestCase
#
# raises the error:
# Traceback (innermost last):
# File "<console>", line 1, in ?
# ImportError: No module named TestCase
#
# whereas if we had added the jar to the classpath before, everything would be fine by now...
ret.append((d, '', '', retType))
# that's ok, private things cannot be gotten...
continue
else:
isMet = ismethod(obj)
if isMet[0] and isMet[1]:
info = isMet[1][0]
try:
args, vargs, kwargs = info.args, info.varargs, info.kwargs
doc = info.get_as_doc()
r = ''
                    for a in args:
if len(r) > 0:
r += ', '
r += format_arg(a)
args = '(%s)' % (r)
except TypeError:
traceback.print_exc()
args = '()'
retType = TYPE_FUNCTION
elif isclass(obj):
retType = TYPE_CLASS
elif ismodule(obj):
retType = TYPE_IMPORT
# add token and doc to return - assure only strings.
ret.append((d, doc, args, retType))
return ret
if __name__ == "__main__":
sys.path.append(r'D:\dev_programs\eclipse_3\310\eclipse\plugins\org.junit_3.8.1\junit.jar')
sys.stdout.write('%s\n' % Find('junit.framework.TestCase'))
| 17,063 | Python | 33.612576 | 140 | 0.515033 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_execfile.py | # We must redefine it in Py3k if it's not already there
def execfile(file, glob=None, loc=None):
if glob is None:
import sys
glob = sys._getframe().f_back.f_globals
if loc is None:
loc = glob
import tokenize
with tokenize.open(file) as stream:
contents = stream.read()
# execute the script (note: it's important to compile first to have the filename set in debug mode)
exec(compile(contents + "\n", file, 'exec'), glob, loc)
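# Minimal usage sketch (illustration only): execute a throwaway script and
# read the resulting globals back from the dict passed as glob.
if __name__ == '__main__':
    import os
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as f:
        f.write('x = 40 + 2\n')
        script = f.name
    try:
        scope = {}
        execfile(script, scope)
        assert scope['x'] == 42
    finally:
        os.remove(script)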
| 483 | Python | 31.266665 | 103 | 0.643892 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/pydev_monkey.py | # License: EPL
import os
import re
import sys
from _pydev_bundle._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_constants import get_global_debugger, IS_WINDOWS, IS_JYTHON, get_current_thread_id, \
sorted_dict_repr
from _pydev_bundle import pydev_log
from contextlib import contextmanager
from _pydevd_bundle import pydevd_constants
from _pydevd_bundle.pydevd_defaults import PydevdCustomization
import ast
try:
from pathlib import Path
except ImportError:
Path = None
#===============================================================================
# Things that are dependent on having the pydevd debugger
#===============================================================================
pydev_src_dir = os.path.dirname(os.path.dirname(__file__))
_arg_patch = threading.local()
@contextmanager
def skip_subprocess_arg_patch():
_arg_patch.apply_arg_patching = False
try:
yield
finally:
_arg_patch.apply_arg_patching = True
def _get_apply_arg_patching():
return getattr(_arg_patch, 'apply_arg_patching', True)
def _get_setup_updated_with_protocol_and_ppid(setup, is_exec=False):
if setup is None:
setup = {}
setup = setup.copy()
    # Discard anything related to the protocol (we'll set the protocol based on the one
# currently set).
setup.pop(pydevd_constants.ARGUMENT_HTTP_JSON_PROTOCOL, None)
setup.pop(pydevd_constants.ARGUMENT_JSON_PROTOCOL, None)
setup.pop(pydevd_constants.ARGUMENT_QUOTED_LINE_PROTOCOL, None)
if not is_exec:
# i.e.: The ppid for the subprocess is the current pid.
# If it's an exec, keep it what it was.
setup[pydevd_constants.ARGUMENT_PPID] = os.getpid()
protocol = pydevd_constants.get_protocol()
if protocol == pydevd_constants.HTTP_JSON_PROTOCOL:
setup[pydevd_constants.ARGUMENT_HTTP_JSON_PROTOCOL] = True
elif protocol == pydevd_constants.JSON_PROTOCOL:
setup[pydevd_constants.ARGUMENT_JSON_PROTOCOL] = True
elif protocol == pydevd_constants.QUOTED_LINE_PROTOCOL:
setup[pydevd_constants.ARGUMENT_QUOTED_LINE_PROTOCOL] = True
elif protocol == pydevd_constants.HTTP_PROTOCOL:
setup[pydevd_constants.ARGUMENT_HTTP_PROTOCOL] = True
else:
pydev_log.debug('Unexpected protocol: %s', protocol)
return setup
class _LastFutureImportFinder(ast.NodeVisitor):
def __init__(self):
self.last_future_import_found = None
def visit_ImportFrom(self, node):
if node.module == '__future__':
self.last_future_import_found = node
def _get_offset_from_line_col(code, line, col):
offset = 0
for i, line_contents in enumerate(code.splitlines(True)):
if i == line:
offset += col
return offset
else:
offset += len(line_contents)
return -1
def _separate_future_imports(code):
'''
:param code:
The code from where we want to get the __future__ imports (note that it's possible that
there's no such entry).
:return tuple(str, str):
The return is a tuple(future_import, code).
If the future import is not available a return such as ('', code) is given, otherwise, the
future import will end with a ';' (so that it can be put right before the pydevd attach
code).
'''
try:
node = ast.parse(code, '<string>', 'exec')
visitor = _LastFutureImportFinder()
visitor.visit(node)
if visitor.last_future_import_found is None:
return '', code
node = visitor.last_future_import_found
offset = -1
if hasattr(node, 'end_lineno') and hasattr(node, 'end_col_offset'):
# Python 3.8 onwards has these (so, use when possible).
line, col = node.end_lineno, node.end_col_offset
offset = _get_offset_from_line_col(code, line - 1, col) # ast lines are 1-based, make it 0-based.
else:
# end line/col not available, let's just find the offset and then search
# for the alias from there.
line, col = node.lineno, node.col_offset
offset = _get_offset_from_line_col(code, line - 1, col) # ast lines are 1-based, make it 0-based.
if offset >= 0 and node.names:
from_future_import_name = node.names[-1].name
i = code.find(from_future_import_name, offset)
if i < 0:
offset = -1
else:
offset = i + len(from_future_import_name)
if offset >= 0:
for i in range(offset, len(code)):
if code[i] in (' ', '\t', ';', ')', '\n'):
offset += 1
else:
break
future_import = code[:offset]
code_remainder = code[offset:]
# Now, put '\n' lines back into the code remainder (we had to search for
# `\n)`, but in case we just got the `\n`, it should be at the remainder,
# not at the future import.
while future_import.endswith('\n'):
future_import = future_import[:-1]
code_remainder = '\n' + code_remainder
if not future_import.endswith(';'):
future_import += ';'
return future_import, code_remainder
# This shouldn't happen...
pydev_log.info('Unable to find line %s in code:\n%r', line, code)
return '', code
except:
pydev_log.exception('Error getting from __future__ imports from: %r', code)
return '', code
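# Illustrative sketch (hypothetical helper, not part of pydevd; traced on
# CPython 3.8+, where ast nodes carry end_lineno/end_col_offset): the future
# import comes back terminated with ';' so attach code can sit between it
# and the rest of the snippet.
def _example_separate_future_imports():
    future, rest = _separate_future_imports(
        'from __future__ import print_function\nprint("hi")')
    assert future == 'from __future__ import print_function;'
    assert rest == '\nprint("hi")'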
def _get_python_c_args(host, port, code, args, setup):
setup = _get_setup_updated_with_protocol_and_ppid(setup)
# i.e.: We want to make the repr sorted so that it works in tests.
setup_repr = setup if setup is None else (sorted_dict_repr(setup))
future_imports = ''
if '__future__' in code:
# If the code has a __future__ import, we need to be able to strip the __future__
# imports from the code and add them to the start of our code snippet.
future_imports, code = _separate_future_imports(code)
return ("%simport sys; sys.path.insert(0, r'%s'); import pydevd; pydevd.PydevdCustomization.DEFAULT_PROTOCOL=%r; "
"pydevd.settrace(host=%r, port=%s, suspend=False, trace_only_current_thread=False, patch_multiprocessing=True, access_token=%r, client_access_token=%r, __setup_holder__=%s); "
"%s"
) % (
future_imports,
pydev_src_dir,
pydevd_constants.get_protocol(),
host,
port,
setup.get('access-token'),
setup.get('client-access-token'),
setup_repr,
code)
def _get_host_port():
import pydevd
host, port = pydevd.dispatch()
return host, port
def _is_managed_arg(arg):
pydevd_py = _get_str_type_compatible(arg, 'pydevd.py')
if arg.endswith(pydevd_py):
return True
return False
def _on_forked_process(setup_tracing=True):
pydevd_constants.after_fork()
pydev_log.initialize_debug_stream(reinitialize=True)
if setup_tracing:
pydev_log.debug('pydevd on forked process: %s', os.getpid())
import pydevd
pydevd.threadingCurrentThread().__pydevd_main_thread = True
pydevd.settrace_forked(setup_tracing=setup_tracing)
def _on_set_trace_for_new_thread(global_debugger):
if global_debugger is not None:
global_debugger.enable_tracing()
def _get_str_type_compatible(s, args):
'''
    This method converts `args` to bytes/str based on the type of `s`.
'''
if isinstance(args, (list, tuple)):
ret = []
for arg in args:
if type(s) == type(arg):
ret.append(arg)
else:
if isinstance(s, bytes):
ret.append(arg.encode('utf-8'))
else:
ret.append(arg.decode('utf-8'))
return ret
else:
if type(s) == type(args):
return args
else:
if isinstance(s, bytes):
return args.encode('utf-8')
else:
return args.decode('utf-8')
#===============================================================================
# Things related to monkey-patching
#===============================================================================
def is_python(path):
single_quote, double_quote = _get_str_type_compatible(path, ["'", '"'])
if path.endswith(single_quote) or path.endswith(double_quote):
path = path[1:len(path) - 1]
filename = os.path.basename(path).lower()
for name in _get_str_type_compatible(filename, ['python', 'jython', 'pypy']):
if filename.find(name) != -1:
return True
return False
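# Illustrative sketch (hypothetical helper, not part of pydevd): the check
# is purely name-based and also handles quoted paths.
def _example_is_python():
    assert is_python('/usr/bin/python3')
    assert is_python('"C:\\Python39\\python.exe"')
    assert not is_python('/usr/bin/node')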
class InvalidTypeInArgsException(Exception):
pass
def remove_quotes_from_args(args):
if sys.platform == "win32":
new_args = []
for x in args:
if Path is not None and isinstance(x, Path):
x = str(x)
else:
if not isinstance(x, (bytes, str)):
raise InvalidTypeInArgsException(str(type(x)))
double_quote, two_double_quotes = _get_str_type_compatible(x, ['"', '""'])
if x != two_double_quotes:
if len(x) > 1 and x.startswith(double_quote) and x.endswith(double_quote):
x = x[1:-1]
new_args.append(x)
return new_args
else:
new_args = []
for x in args:
if Path is not None and isinstance(x, Path):
x = x.as_posix()
else:
if not isinstance(x, (bytes, str)):
raise InvalidTypeInArgsException(str(type(x)))
new_args.append(x)
return new_args
def quote_arg_win32(arg):
fix_type = lambda x: _get_str_type_compatible(arg, x)
# See if we need to quote at all - empty strings need quoting, as do strings
# with whitespace or quotes in them. Backslashes do not need quoting.
if arg and not set(arg).intersection(fix_type(' "\t\n\v')):
return arg
# Per https://docs.microsoft.com/en-us/windows/desktop/api/shellapi/nf-shellapi-commandlinetoargvw,
# the standard way to interpret arguments in double quotes is as follows:
#
# 2N backslashes followed by a quotation mark produce N backslashes followed by
# begin/end quote. This does not become part of the parsed argument, but toggles
# the "in quotes" mode.
#
# 2N+1 backslashes followed by a quotation mark again produce N backslashes followed
# by a quotation mark literal ("). This does not toggle the "in quotes" mode.
#
# N backslashes not followed by a quotation mark simply produce N backslashes.
#
# This code needs to do the reverse transformation, thus:
#
# N backslashes followed by " produce 2N+1 backslashes followed by "
#
# N backslashes at the end (i.e. where the closing " goes) produce 2N backslashes.
#
# N backslashes in any other position remain as is.
arg = re.sub(fix_type(r'(\\*)\"'), fix_type(r'\1\1\\"'), arg)
arg = re.sub(fix_type(r'(\\*)$'), fix_type(r'\1\1'), arg)
return fix_type('"') + arg + fix_type('"')
def quote_args(args):
if sys.platform == "win32":
return list(map(quote_arg_win32, args))
else:
return args
def patch_args(args, is_exec=False):
'''
:param list args:
Arguments to patch.
:param bool is_exec:
If it's an exec, the current process will be replaced (this means we have
to keep the same ppid).
'''
try:
pydev_log.debug("Patching args: %s", args)
original_args = args
try:
unquoted_args = remove_quotes_from_args(args)
except InvalidTypeInArgsException as e:
pydev_log.info('Unable to monkey-patch subprocess arguments because a type found in the args is invalid: %s', e)
return original_args
# Internally we should reference original_args (if we want to return them) or unquoted_args
        # to add to the list which will then be quoted in the end.
del args
from pydevd import SetupHolder
if not unquoted_args:
return original_args
if not is_python(unquoted_args[0]):
pydev_log.debug("Process is not python, returning.")
return original_args
# Note: we create a copy as string to help with analyzing the arguments, but
# the final list should have items from the unquoted_args as they were initially.
args_as_str = _get_str_type_compatible('', unquoted_args)
params_with_value_in_separate_arg = (
'--check-hash-based-pycs',
'--jit' # pypy option
)
# All short switches may be combined together. The ones below require a value and the
# value itself may be embedded in the arg.
#
# i.e.: Python accepts things as:
#
# python -OQold -qmtest
#
# Which is the same as:
#
# python -O -Q old -q -m test
#
# or even:
#
# python -OQold "-vcimport sys;print(sys)"
#
# Which is the same as:
#
# python -O -Q old -v -c "import sys;print(sys)"
params_with_combinable_arg = set(('W', 'X', 'Q', 'c', 'm'))
module_name = None
before_module_flag = ''
module_name_i_start = -1
module_name_i_end = -1
code = None
code_i = -1
code_i_end = -1
code_flag = ''
filename = None
filename_i = -1
ignore_next = True # start ignoring the first (the first entry is the python executable)
for i, arg_as_str in enumerate(args_as_str):
if ignore_next:
ignore_next = False
continue
if arg_as_str.startswith('-'):
if arg_as_str == '-':
# Contents will be read from the stdin. This is not currently handled.
pydev_log.debug('Unable to fix arguments to attach debugger on subprocess when reading from stdin ("python ... -").')
return original_args
if arg_as_str.startswith(params_with_value_in_separate_arg):
if arg_as_str in params_with_value_in_separate_arg:
ignore_next = True
continue
break_out = False
for j, c in enumerate(arg_as_str):
# i.e.: Python supports -X faulthandler as well as -Xfaulthandler
# (in one case we have to ignore the next and in the other we don't
# have to ignore it).
if c in params_with_combinable_arg:
remainder = arg_as_str[j + 1:]
if not remainder:
ignore_next = True
if c == 'm':
# i.e.: Something as
# python -qm test
# python -m test
# python -qmtest
before_module_flag = arg_as_str[:j] # before_module_flag would then be "-q"
if before_module_flag == '-':
before_module_flag = ''
module_name_i_start = i
if not remainder:
module_name = unquoted_args[i + 1]
module_name_i_end = i + 1
else:
# i.e.: python -qmtest should provide 'test' as the module_name
module_name = unquoted_args[i][j + 1:]
module_name_i_end = module_name_i_start
break_out = True
break
elif c == 'c':
# i.e.: Something as
# python -qc "import sys"
# python -c "import sys"
# python "-qcimport sys"
code_flag = arg_as_str[:j + 1] # code_flag would then be "-qc"
if not remainder:
# arg_as_str is something as "-qc", "import sys"
code = unquoted_args[i + 1]
code_i_end = i + 2
else:
# if arg_as_str is something as "-qcimport sys"
code = remainder # code would be "import sys"
code_i_end = i + 1
code_i = i
break_out = True
break
else:
break
if break_out:
break
else:
# It doesn't start with '-' and we didn't ignore this entry:
# this means that this is the file to be executed.
filename = unquoted_args[i]
# Note that the filename is not validated here.
# There are cases where even a .exe is valid (xonsh.exe):
# https://github.com/microsoft/debugpy/issues/945
# So, we should support whatever runpy.run_path
# supports in this case.
filename_i = i
if _is_managed_arg(filename): # no need to add pydevd twice
pydev_log.debug('Skipped monkey-patching as pydevd.py is in args already.')
return original_args
break
else:
# We didn't find the filename (something is unexpected).
pydev_log.debug('Unable to fix arguments to attach debugger on subprocess (filename not found).')
return original_args
if code_i != -1:
host, port = _get_host_port()
if port is not None:
new_args = []
new_args.extend(unquoted_args[:code_i])
new_args.append(code_flag)
new_args.append(_get_python_c_args(host, port, code, unquoted_args, SetupHolder.setup))
new_args.extend(unquoted_args[code_i_end:])
return quote_args(new_args)
first_non_vm_index = max(filename_i, module_name_i_start)
if first_non_vm_index == -1:
pydev_log.debug('Unable to fix arguments to attach debugger on subprocess (could not resolve filename nor module name).')
return original_args
# Original args should be something as:
# ['X:\\pysrc\\pydevd.py', '--multiprocess', '--print-in-debugger-startup',
# '--vm_type', 'python', '--client', '127.0.0.1', '--port', '56352', '--file', 'x:\\snippet1.py']
from _pydevd_bundle.pydevd_command_line_handling import setup_to_argv
new_args = []
new_args.extend(unquoted_args[:first_non_vm_index])
if before_module_flag:
new_args.append(before_module_flag)
add_module_at = len(new_args) + 1
new_args.extend(setup_to_argv(
_get_setup_updated_with_protocol_and_ppid(SetupHolder.setup, is_exec=is_exec),
skip_names=set(('module', 'cmd-line'))
))
new_args.append('--file')
if module_name is not None:
assert module_name_i_start != -1
assert module_name_i_end != -1
# Always after 'pydevd' (i.e.: pydevd "--module" --multiprocess ...)
new_args.insert(add_module_at, '--module')
new_args.append(module_name)
new_args.extend(unquoted_args[module_name_i_end + 1:])
elif filename is not None:
assert filename_i != -1
new_args.append(filename)
new_args.extend(unquoted_args[filename_i + 1:])
else:
raise AssertionError('Internal error (unexpected condition)')
return quote_args(new_args)
except:
pydev_log.exception('Error patching args (debugger not attached to subprocess).')
return original_args
def str_to_args_windows(args):
# See https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments.
#
    # Implementation ported from DebugPlugin.parseArgumentsWindows:
# https://github.com/eclipse/eclipse.platform.debug/blob/master/org.eclipse.debug.core/core/org/eclipse/debug/core/DebugPlugin.java
result = []
DEFAULT = 0
ARG = 1
IN_DOUBLE_QUOTE = 2
state = DEFAULT
backslashes = 0
buf = ''
args_len = len(args)
    skip_next = False
    for i in range(args_len):
        if skip_next:
            # Skip the second quote of a doubled pair consumed below
            # ("i += 1" inside the loop body would have no effect because
            # the for statement reassigns "i" on each iteration).
            skip_next = False
            continue
        ch = args[i]
if (ch == '\\'):
backslashes += 1
continue
elif (backslashes != 0):
if ch == '"':
while backslashes >= 2:
backslashes -= 2
buf += '\\'
if (backslashes == 1):
if (state == DEFAULT):
state = ARG
buf += '"'
backslashes = 0
continue
# else fall through to switch
else:
# false alarm, treat passed backslashes literally...
if (state == DEFAULT):
state = ARG
while backslashes > 0:
backslashes -= 1
buf += '\\'
# fall through to switch
if ch in (' ', '\t'):
if (state == DEFAULT):
# skip
continue
elif (state == ARG):
state = DEFAULT
result.append(buf)
buf = ''
continue
if state in (DEFAULT, ARG):
if ch == '"':
state = IN_DOUBLE_QUOTE
else:
state = ARG
buf += ch
elif state == IN_DOUBLE_QUOTE:
if ch == '"':
if (i + 1 < args_len and args[i + 1] == '"'):
# Undocumented feature in Windows:
# Two consecutive double quotes inside a double-quoted argument are interpreted as
# a single double quote.
buf += '"'
i += 1
else:
state = ARG
else:
buf += ch
else:
raise RuntimeError('Illegal condition')
if len(buf) > 0 or state != DEFAULT:
result.append(buf)
return result
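# Illustrative sketch (hypothetical helper, not part of pydevd): parsing
# follows the MSVC rules referenced above, including escaped and doubled
# quotes inside a quoted argument.
def _example_str_to_args_windows():
    assert str_to_args_windows('python -c "import sys"') == ['python', '-c', 'import sys']
    assert str_to_args_windows(r'"a\"b"') == ['a"b']
    assert str_to_args_windows('"a""b"') == ['a"b']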
def patch_arg_str_win(arg_str):
args = str_to_args_windows(arg_str)
# Fix https://youtrack.jetbrains.com/issue/PY-9767 (args may be empty)
if not args or not is_python(args[0]):
return arg_str
arg_str = ' '.join(patch_args(args))
pydev_log.debug("New args: %s", arg_str)
return arg_str
def monkey_patch_module(module, funcname, create_func):
if hasattr(module, funcname):
original_name = 'original_' + funcname
if not hasattr(module, original_name):
setattr(module, original_name, getattr(module, funcname))
setattr(module, funcname, create_func(original_name))
def monkey_patch_os(funcname, create_func):
monkey_patch_module(os, funcname, create_func)
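# Minimal sketch (illustration only; the demo module and create_loud_greet
# factory are made up): monkey_patch_module saves the original callable
# under 'original_<name>' and installs the wrapper built by create_func.
def _example_monkey_patch_module():
    import types
    demo = types.ModuleType('demo')
    demo.greet = lambda: 'hi'
    def create_loud_greet(original_name):
        def loud_greet():
            # Delegate to the saved original, then shout.
            return getattr(demo, original_name)().upper()
        return loud_greet
    monkey_patch_module(demo, 'greet', create_loud_greet)
    assert demo.greet() == 'HI'
    assert demo.original_greet() == 'hi'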
def warn_multiproc():
pass # TODO: Provide logging as messages to the IDE.
# pydev_log.error_once(
# "pydev debugger: New process is launching (breakpoints won't work in the new process).\n"
# "pydev debugger: To debug that process please enable 'Attach to subprocess automatically while debugging?' option in the debugger settings.\n")
#
def create_warn_multiproc(original_name):
def new_warn_multiproc(*args, **kwargs):
import os
warn_multiproc()
return getattr(os, original_name)(*args, **kwargs)
return new_warn_multiproc
def create_execl(original_name):
def new_execl(path, *args):
"""
os.execl(path, arg0, arg1, ...)
os.execle(path, arg0, arg1, ..., env)
os.execlp(file, arg0, arg1, ...)
os.execlpe(file, arg0, arg1, ..., env)
"""
if _get_apply_arg_patching():
args = patch_args(args, is_exec=True)
send_process_created_message()
send_process_about_to_be_replaced()
return getattr(os, original_name)(path, *args)
return new_execl
def create_execv(original_name):
def new_execv(path, args):
"""
os.execv(path, args)
os.execvp(file, args)
"""
if _get_apply_arg_patching():
args = patch_args(args, is_exec=True)
send_process_created_message()
send_process_about_to_be_replaced()
return getattr(os, original_name)(path, args)
return new_execv
def create_execve(original_name):
"""
os.execve(path, args, env)
os.execvpe(file, args, env)
"""
def new_execve(path, args, env):
if _get_apply_arg_patching():
args = patch_args(args, is_exec=True)
send_process_created_message()
send_process_about_to_be_replaced()
return getattr(os, original_name)(path, args, env)
return new_execve
def create_spawnl(original_name):
def new_spawnl(mode, path, *args):
"""
os.spawnl(mode, path, arg0, arg1, ...)
os.spawnlp(mode, file, arg0, arg1, ...)
"""
if _get_apply_arg_patching():
args = patch_args(args)
send_process_created_message()
return getattr(os, original_name)(mode, path, *args)
return new_spawnl
def create_spawnv(original_name):
def new_spawnv(mode, path, args):
"""
os.spawnv(mode, path, args)
os.spawnvp(mode, file, args)
"""
if _get_apply_arg_patching():
args = patch_args(args)
send_process_created_message()
return getattr(os, original_name)(mode, path, args)
return new_spawnv
def create_spawnve(original_name):
"""
os.spawnve(mode, path, args, env)
os.spawnvpe(mode, file, args, env)
"""
def new_spawnve(mode, path, args, env):
if _get_apply_arg_patching():
args = patch_args(args)
send_process_created_message()
return getattr(os, original_name)(mode, path, args, env)
return new_spawnve
def create_posix_spawn(original_name):
"""
os.posix_spawn(executable, args, env, **kwargs)
"""
def new_posix_spawn(executable, args, env, **kwargs):
if _get_apply_arg_patching():
args = patch_args(args)
send_process_created_message()
return getattr(os, original_name)(executable, args, env, **kwargs)
return new_posix_spawn
def create_fork_exec(original_name):
"""
_posixsubprocess.fork_exec(args, executable_list, close_fds, ... (13 more))
"""
def new_fork_exec(args, *other_args):
import _posixsubprocess # @UnresolvedImport
if _get_apply_arg_patching():
args = patch_args(args)
send_process_created_message()
return getattr(_posixsubprocess, original_name)(args, *other_args)
return new_fork_exec
def create_warn_fork_exec(original_name):
"""
_posixsubprocess.fork_exec(args, executable_list, close_fds, ... (13 more))
"""
def new_warn_fork_exec(*args):
try:
import _posixsubprocess
warn_multiproc()
return getattr(_posixsubprocess, original_name)(*args)
except:
pass
return new_warn_fork_exec
def create_CreateProcess(original_name):
"""
CreateProcess(*args, **kwargs)
"""
def new_CreateProcess(app_name, cmd_line, *args):
try:
import _subprocess
except ImportError:
import _winapi as _subprocess
if _get_apply_arg_patching():
cmd_line = patch_arg_str_win(cmd_line)
send_process_created_message()
return getattr(_subprocess, original_name)(app_name, cmd_line, *args)
return new_CreateProcess
def create_CreateProcessWarnMultiproc(original_name):
"""
CreateProcess(*args, **kwargs)
"""
def new_CreateProcess(*args):
try:
import _subprocess
except ImportError:
import _winapi as _subprocess
warn_multiproc()
return getattr(_subprocess, original_name)(*args)
return new_CreateProcess
def create_fork(original_name):
def new_fork():
# A simple fork will result in a new python process
is_new_python_process = True
frame = sys._getframe()
apply_arg_patch = _get_apply_arg_patching()
is_subprocess_fork = False
while frame is not None:
if frame.f_code.co_name == '_execute_child' and 'subprocess' in frame.f_code.co_filename:
is_subprocess_fork = True
# If we're actually in subprocess.Popen creating a child, it may
# result in something which is not a Python process, (so, we
# don't want to connect with it in the forked version).
executable = frame.f_locals.get('executable')
if executable is not None:
is_new_python_process = False
if is_python(executable):
is_new_python_process = True
break
frame = frame.f_back
frame = None # Just make sure we don't hold on to it.
protocol = pydevd_constants.get_protocol()
child_process = getattr(os, original_name)() # fork
if not child_process:
if is_new_python_process:
PydevdCustomization.DEFAULT_PROTOCOL = protocol
_on_forked_process(setup_tracing=apply_arg_patch and not is_subprocess_fork)
else:
if is_new_python_process:
send_process_created_message()
return child_process
return new_fork
def send_process_created_message():
py_db = get_global_debugger()
if py_db is not None:
py_db.send_process_created_message()
def send_process_about_to_be_replaced():
py_db = get_global_debugger()
if py_db is not None:
py_db.send_process_about_to_be_replaced()
def patch_new_process_functions():
# os.execl(path, arg0, arg1, ...)
# os.execle(path, arg0, arg1, ..., env)
# os.execlp(file, arg0, arg1, ...)
# os.execlpe(file, arg0, arg1, ..., env)
# os.execv(path, args)
# os.execve(path, args, env)
# os.execvp(file, args)
# os.execvpe(file, args, env)
monkey_patch_os('execl', create_execl)
monkey_patch_os('execle', create_execl)
monkey_patch_os('execlp', create_execl)
monkey_patch_os('execlpe', create_execl)
monkey_patch_os('execv', create_execv)
monkey_patch_os('execve', create_execve)
monkey_patch_os('execvp', create_execv)
monkey_patch_os('execvpe', create_execve)
# os.spawnl(mode, path, ...)
# os.spawnle(mode, path, ..., env)
# os.spawnlp(mode, file, ...)
# os.spawnlpe(mode, file, ..., env)
# os.spawnv(mode, path, args)
# os.spawnve(mode, path, args, env)
# os.spawnvp(mode, file, args)
# os.spawnvpe(mode, file, args, env)
monkey_patch_os('spawnl', create_spawnl)
monkey_patch_os('spawnle', create_spawnl)
monkey_patch_os('spawnlp', create_spawnl)
monkey_patch_os('spawnlpe', create_spawnl)
monkey_patch_os('spawnv', create_spawnv)
monkey_patch_os('spawnve', create_spawnve)
monkey_patch_os('spawnvp', create_spawnv)
monkey_patch_os('spawnvpe', create_spawnve)
monkey_patch_os('posix_spawn', create_posix_spawn)
if not IS_JYTHON:
if not IS_WINDOWS:
monkey_patch_os('fork', create_fork)
try:
import _posixsubprocess
monkey_patch_module(_posixsubprocess, 'fork_exec', create_fork_exec)
except ImportError:
pass
else:
# Windows
try:
import _subprocess
except ImportError:
import _winapi as _subprocess
monkey_patch_module(_subprocess, 'CreateProcess', create_CreateProcess)
def patch_new_process_functions_with_warning():
monkey_patch_os('execl', create_warn_multiproc)
monkey_patch_os('execle', create_warn_multiproc)
monkey_patch_os('execlp', create_warn_multiproc)
monkey_patch_os('execlpe', create_warn_multiproc)
monkey_patch_os('execv', create_warn_multiproc)
monkey_patch_os('execve', create_warn_multiproc)
monkey_patch_os('execvp', create_warn_multiproc)
monkey_patch_os('execvpe', create_warn_multiproc)
monkey_patch_os('spawnl', create_warn_multiproc)
monkey_patch_os('spawnle', create_warn_multiproc)
monkey_patch_os('spawnlp', create_warn_multiproc)
monkey_patch_os('spawnlpe', create_warn_multiproc)
monkey_patch_os('spawnv', create_warn_multiproc)
monkey_patch_os('spawnve', create_warn_multiproc)
monkey_patch_os('spawnvp', create_warn_multiproc)
monkey_patch_os('spawnvpe', create_warn_multiproc)
monkey_patch_os('posix_spawn', create_warn_multiproc)
if not IS_JYTHON:
if not IS_WINDOWS:
monkey_patch_os('fork', create_warn_multiproc)
try:
import _posixsubprocess
monkey_patch_module(_posixsubprocess, 'fork_exec', create_warn_fork_exec)
except ImportError:
pass
else:
# Windows
try:
import _subprocess
except ImportError:
import _winapi as _subprocess
monkey_patch_module(_subprocess, 'CreateProcess', create_CreateProcessWarnMultiproc)
class _NewThreadStartupWithTrace:
def __init__(self, original_func, args, kwargs):
self.original_func = original_func
self.args = args
self.kwargs = kwargs
def __call__(self):
# We monkey-patch the thread creation so that this function is called in the new thread. At this point
# we notify of its creation and start tracing it.
py_db = get_global_debugger()
thread_id = None
if py_db is not None:
            # Note: if this is a thread from threading.py, we're too early in the bootstrap process (because we mocked
# the start_new_thread internal machinery and thread._bootstrap has not finished), so, the code below needs
# to make sure that we use the current thread bound to the original function and not use
# threading.current_thread() unless we're sure it's a dummy thread.
t = getattr(self.original_func, '__self__', getattr(self.original_func, 'im_self', None))
if not isinstance(t, threading.Thread):
# This is not a threading.Thread but a Dummy thread (so, get it as a dummy thread using
# currentThread).
t = threading.current_thread()
if not getattr(t, 'is_pydev_daemon_thread', False):
thread_id = get_current_thread_id(t)
py_db.notify_thread_created(thread_id, t)
_on_set_trace_for_new_thread(py_db)
if getattr(py_db, 'thread_analyser', None) is not None:
try:
from _pydevd_bundle.pydevd_concurrency_analyser.pydevd_concurrency_logger import log_new_thread
log_new_thread(py_db, t)
except:
sys.stderr.write("Failed to detect new thread for visualization")
try:
ret = self.original_func(*self.args, **self.kwargs)
finally:
if thread_id is not None:
if py_db is not None:
# At thread shutdown we only have pydevd-related code running (which shouldn't
# be tracked).
py_db.disable_tracing()
py_db.notify_thread_not_alive(thread_id)
return ret
class _NewThreadStartupWithoutTrace:
def __init__(self, original_func, args, kwargs):
self.original_func = original_func
self.args = args
self.kwargs = kwargs
def __call__(self):
return self.original_func(*self.args, **self.kwargs)
_UseNewThreadStartup = _NewThreadStartupWithTrace
def _get_threading_modules_to_patch():
threading_modules_to_patch = []
try:
import thread as _thread
except:
import _thread
threading_modules_to_patch.append(_thread)
threading_modules_to_patch.append(threading)
return threading_modules_to_patch
threading_modules_to_patch = _get_threading_modules_to_patch()
def patch_thread_module(thread_module):
if getattr(thread_module, '_original_start_new_thread', None) is None:
if thread_module is threading:
if not hasattr(thread_module, '_start_new_thread'):
return # Jython doesn't have it.
_original_start_new_thread = thread_module._original_start_new_thread = thread_module._start_new_thread
else:
_original_start_new_thread = thread_module._original_start_new_thread = thread_module.start_new_thread
else:
_original_start_new_thread = thread_module._original_start_new_thread
class ClassWithPydevStartNewThread:
def pydev_start_new_thread(self, function, args=(), kwargs={}):
'''
We need to replace the original thread_module.start_new_thread with this function so that threads started
through it and not through the threading module are properly traced.
'''
return _original_start_new_thread(_UseNewThreadStartup(function, args, kwargs), ())
# This is a hack for the situation where the thread_module.start_new_thread is declared inside a class, such as the one below
# class F(object):
# start_new_thread = thread_module.start_new_thread
#
# def start_it(self):
# self.start_new_thread(self.function, args, kwargs)
# So, if it's an already bound method, calling self.start_new_thread won't really receive a different 'self' -- it
# does work in the default case because in builtins self isn't passed either.
pydev_start_new_thread = ClassWithPydevStartNewThread().pydev_start_new_thread
try:
# We need to replace the original thread_module.start_new_thread with this function so that threads started through
# it and not through the threading module are properly traced.
if thread_module is threading:
thread_module._start_new_thread = pydev_start_new_thread
else:
thread_module.start_new_thread = pydev_start_new_thread
thread_module.start_new = pydev_start_new_thread
except:
pass
def patch_thread_modules():
for t in threading_modules_to_patch:
patch_thread_module(t)
def undo_patch_thread_modules():
for t in threading_modules_to_patch:
try:
t.start_new_thread = t._original_start_new_thread
except:
pass
try:
t.start_new = t._original_start_new_thread
except:
pass
try:
t._start_new_thread = t._original_start_new_thread
except:
pass
def disable_trace_thread_modules():
'''
Can be used to temporarily stop tracing threads created with thread.start_new_thread.
'''
global _UseNewThreadStartup
_UseNewThreadStartup = _NewThreadStartupWithoutTrace
def enable_trace_thread_modules():
'''
Can be used to start tracing threads created with thread.start_new_thread again.
'''
global _UseNewThreadStartup
_UseNewThreadStartup = _NewThreadStartupWithTrace
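# Illustrative sketch (hypothetical helper, not part of pydevd): run a
# callable on a thread the debugger will not trace by toggling the
# module-level startup class around the thread creation.
def _example_start_untraced(start_new_thread, func):
    disable_trace_thread_modules()
    try:
        start_new_thread(func, ())
    finally:
        enable_trace_thread_modules()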
def get_original_start_new_thread(threading_module):
try:
return threading_module._original_start_new_thread
except:
return threading_module.start_new_thread
| 40,358 | Python | 33.14467 | 187 | 0.567199 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/pydev_ipython_console.py | import sys
from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface
import traceback
# Uncomment to force PyDev standard shell.
# raise ImportError()
from _pydev_bundle.pydev_ipython_console_011 import get_pydev_frontend
#=======================================================================================================================
# InterpreterInterface
#=======================================================================================================================
class InterpreterInterface(BaseInterpreterInterface):
'''
The methods in this class should be registered in the xml-rpc server.
'''
def __init__(self, host, client_port, main_thread, show_banner=True, connect_status_queue=None):
BaseInterpreterInterface.__init__(self, main_thread, connect_status_queue)
self.client_port = client_port
self.host = host
self.interpreter = get_pydev_frontend(host, client_port)
self._input_error_printed = False
self.notification_succeeded = False
self.notification_tries = 0
self.notification_max_tries = 3
self.show_banner = show_banner
self.notify_about_magic()
def get_greeting_msg(self):
if self.show_banner:
self.interpreter.show_banner()
return self.interpreter.get_greeting_msg()
def do_add_exec(self, code_fragment):
self.notify_about_magic()
if code_fragment.text.rstrip().endswith('??'):
print('IPython-->')
try:
res = bool(self.interpreter.add_exec(code_fragment.text))
finally:
if code_fragment.text.rstrip().endswith('??'):
print('<--IPython')
return res
def get_namespace(self):
return self.interpreter.get_namespace()
def getCompletions(self, text, act_tok):
return self.interpreter.getCompletions(text, act_tok)
def close(self):
sys.exit(0)
def notify_about_magic(self):
if not self.notification_succeeded:
self.notification_tries += 1
if self.notification_tries > self.notification_max_tries:
return
completions = self.getCompletions("%", "%")
magic_commands = [x[0] for x in completions]
server = self.get_server()
if server is not None:
try:
server.NotifyAboutMagic(magic_commands, self.interpreter.is_automagic())
self.notification_succeeded = True
except:
self.notification_succeeded = False
def get_ipython_hidden_vars_dict(self):
try:
if hasattr(self.interpreter, 'ipython') and hasattr(self.interpreter.ipython, 'user_ns_hidden'):
user_ns_hidden = self.interpreter.ipython.user_ns_hidden
if isinstance(user_ns_hidden, dict):
# Since IPython 2 dict `user_ns_hidden` contains hidden variables and values
user_hidden_dict = user_ns_hidden.copy()
else:
# In IPython 1.x `user_ns_hidden` used to be a set with names of hidden variables
user_hidden_dict = dict([(key, val) for key, val in self.interpreter.ipython.user_ns.items()
if key in user_ns_hidden])
                # While `_`, `__` and `___` are not yet initialized, they are not present in `user_ns_hidden`
user_hidden_dict.setdefault('_', '')
user_hidden_dict.setdefault('__', '')
user_hidden_dict.setdefault('___', '')
return user_hidden_dict
except:
# Getting IPython variables shouldn't break loading frame variables
traceback.print_exc()
| 3,821 | Python | 38 | 120 | 0.558754 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/_pydev_getopt.py |
#=======================================================================================================================
# getopt code copied since gnu_getopt is not available on jython 2.1
#=======================================================================================================================
class GetoptError(Exception):
opt = ''
msg = ''
def __init__(self, msg, opt=''):
self.msg = msg
self.opt = opt
Exception.__init__(self, msg, opt)
def __str__(self):
return self.msg
def gnu_getopt(args, shortopts, longopts=[]):
"""getopt(args, options[, long_options]) -> opts, args
This function works like getopt(), except that GNU style scanning
mode is used by default. This means that option and non-option
arguments may be intermixed. The getopt() function stops
processing options as soon as a non-option argument is
encountered.
    If the first character of the option string is '+', or if the
environment variable POSIXLY_CORRECT is set, then option
processing stops as soon as a non-option argument is encountered.
"""
opts = []
prog_args = []
    if isinstance(longopts, str):
longopts = [longopts]
else:
longopts = list(longopts)
# Allow options after non-option arguments?
all_options_first = False
if shortopts.startswith('+'):
shortopts = shortopts[1:]
all_options_first = True
while args:
if args[0] == '--':
prog_args += args[1:]
break
if args[0][:2] == '--':
opts, args = do_longs(opts, args[0][2:], longopts, args[1:])
elif args[0][:1] == '-':
opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])
else:
if all_options_first:
prog_args += args
break
else:
prog_args.append(args[0])
args = args[1:]
return opts, prog_args
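# Illustrative sketch (hypothetical helper, not part of pydevd): GNU-style
# scanning lets options appear after positional arguments.
def _example_gnu_getopt():
    opts, rest = gnu_getopt(['file.py', '-v', '--port=80'], 'v', ['port='])
    assert opts == [('-v', ''), ('--port', '80')]
    assert rest == ['file.py']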
def do_longs(opts, opt, longopts, args):
try:
i = opt.index('=')
except ValueError:
optarg = None
else:
opt, optarg = opt[:i], opt[i + 1:]
has_arg, opt = long_has_args(opt, longopts)
if has_arg:
if optarg is None:
if not args:
raise GetoptError('option --%s requires argument' % opt, opt)
optarg, args = args[0], args[1:]
elif optarg:
raise GetoptError('option --%s must not have an argument' % opt, opt)
opts.append(('--' + opt, optarg or ''))
return opts, args
# Return:
# has_arg?
# full option name
def long_has_args(opt, longopts):
possibilities = [o for o in longopts if o.startswith(opt)]
if not possibilities:
raise GetoptError('option --%s not recognized' % opt, opt)
# Is there an exact match?
if opt in possibilities:
return False, opt
elif opt + '=' in possibilities:
return True, opt
# No exact match, so better be unique.
if len(possibilities) > 1:
# XXX since possibilities contains all valid continuations, might be
# nice to work them into the error msg
raise GetoptError('option --%s not a unique prefix' % opt, opt)
assert len(possibilities) == 1
unique_match = possibilities[0]
has_arg = unique_match.endswith('=')
if has_arg:
unique_match = unique_match[:-1]
return has_arg, unique_match
def do_shorts(opts, optstring, shortopts, args):
while optstring != '':
opt, optstring = optstring[0], optstring[1:]
if short_has_arg(opt, shortopts):
if optstring == '':
if not args:
raise GetoptError('option -%s requires argument' % opt,
opt)
optstring, args = args[0], args[1:]
optarg, optstring = optstring, ''
else:
optarg = ''
opts.append(('-' + opt, optarg))
return opts, args
def short_has_arg(opt, shortopts):
for i in range(len(shortopts)):
if opt == shortopts[i] != ':':
return shortopts.startswith(':', i + 1)
raise GetoptError('option -%s not recognized' % opt, opt)
#=======================================================================================================================
# End getopt code
#=======================================================================================================================
| 4,458 | Python | 33.038168 | 120 | 0.506729 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/pydev_ipython_console_011.py | # TODO that would make IPython integration better
# - show output at other times than when enter was pressed
# - support proper exit to allow IPython to cleanup (e.g. temp files created with %edit)
# - support Ctrl-D (Ctrl-Z on Windows)
# - use IPython (numbered) prompts in PyDev
# - better integration of IPython and PyDev completions
# - some of the semantics on handling the code completion are not correct:
#    eg: starting a line with % and then typing c should give %cd as a completion, but it doesn't;
#       however, typing %c and requesting completions does give %cd as an option
#    eg: completing a magic when the user typed it without the leading % causes the % to be inserted
#     to the left of what should be the first colon.
"""Interface to TerminalInteractiveShell for PyDev Interactive Console frontend
for IPython 0.11 to 1.0+.
"""
from __future__ import print_function
import os
import sys
import codeop
import traceback
from IPython.core.error import UsageError
from IPython.core.completer import IPCompleter
from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC
from IPython.core.usage import default_banner_parts
from IPython.utils.strdispatch import StrDispatch
import IPython.core.release as IPythonRelease
from IPython.terminal.interactiveshell import TerminalInteractiveShell
try:
from traitlets import CBool, Unicode
except ImportError:
from IPython.utils.traitlets import CBool, Unicode
from IPython.core import release
from _pydev_bundle.pydev_imports import xmlrpclib
default_pydev_banner_parts = default_banner_parts
default_pydev_banner = ''.join(default_pydev_banner_parts)
def show_in_pager(self, strng, *args, **kwargs):
""" Run a string through pager """
# On PyDev we just output the string, there are scroll bars in the console
# to handle "paging". This is the same behaviour as when TERM==dump (see
# page.py)
# for compatibility with mime-bundle form:
if isinstance(strng, dict):
strng = strng.get('text/plain', strng)
print(strng)
def create_editor_hook(pydev_host, pydev_client_port):
def call_editor(filename, line=0, wait=True):
""" Open an editor in PyDev """
if line is None:
line = 0
        # Make sure to send an absolute path because unlike most editor hooks
# we don't launch a process. This is more like what happens in the zmqshell
filename = os.path.abspath(filename)
# import sys
# sys.__stderr__.write('Calling editor at: %s:%s\n' % (pydev_host, pydev_client_port))
# Tell PyDev to open the editor
server = xmlrpclib.Server('http://%s:%s' % (pydev_host, pydev_client_port))
server.IPythonEditor(filename, str(line))
if wait:
input("Press Enter when done editing:")
return call_editor
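# Minimal sketch (illustration only; the host/port values are made up):
# the returned callable is meant to be installed as IPython's 'editor'
# hook so that %edit is routed back to the IDE.
def _example_install_editor_hook(shell):
    call_editor = create_editor_hook('127.0.0.1', 8099)
    shell.set_hook('editor', call_editor)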
class PyDevIPCompleter(IPCompleter):
def __init__(self, *args, **kwargs):
""" Create a Completer that reuses the advanced completion support of PyDev
in addition to the completion support provided by IPython """
IPCompleter.__init__(self, *args, **kwargs)
# Use PyDev for python matches, see getCompletions below
if self.python_matches in self.matchers:
# `self.python_matches` matches attributes or global python names
self.matchers.remove(self.python_matches)
class PyDevIPCompleter6(IPCompleter):
def __init__(self, *args, **kwargs):
""" Create a Completer that reuses the advanced completion support of PyDev
in addition to the completion support provided by IPython """
IPCompleter.__init__(self, *args, **kwargs)
@property
def matchers(self):
"""All active matcher routines for completion"""
# To remove python_matches we now have to override it as it's now a property in the superclass.
return [
self.file_matches,
self.magic_matches,
self.python_func_kw_matches,
self.dict_key_matches,
]
@matchers.setter
def matchers(self, value):
# To stop the init in IPCompleter raising an AttributeError we now have to specify a setter as it's now a property in the superclass.
return
class PyDevTerminalInteractiveShell(TerminalInteractiveShell):
banner1 = Unicode(default_pydev_banner, config=True,
help="""The part of the banner to be printed before the profile"""
)
# TODO term_title: (can PyDev's title be changed???, see terminal.py for where to inject code, in particular set_term_title as used by %cd)
# for now, just disable term_title
term_title = CBool(False)
# Note in version 0.11 there is no guard in the IPython code about displaying a
# warning, so with 0.11 you get:
# WARNING: Readline services not available or not loaded.
# WARNING: The auto-indent feature requires the readline library
# Disable readline, readline type code is all handled by PyDev (on Java side)
readline_use = CBool(False)
# autoindent has no meaning in PyDev (PyDev always handles that on the Java side),
# and attempting to enable it will print a warning in the absence of readline.
autoindent = CBool(False)
# Force console to not give warning about color scheme choice and default to NoColor.
# TODO It would be nice to enable colors in PyDev but:
# - The PyDev Console (Eclipse Console) does not support the full range of colors, so the
# effect isn't as nice anyway at the command line
# - If done, the color scheme should default to LightBG, but actually be dependent on
# any settings the user has (such as if a dark theme is in use, then Linux is probably
# a better theme).
colors_force = CBool(True)
colors = Unicode("NoColor")
# Since IPython 5 the terminal interface is not compatible with Emacs `inferior-shell` and
# the `simple_prompt` flag is needed
simple_prompt = CBool(True)
    # In the PyDev Console, GUI control is done via a hookable XML-RPC server
@staticmethod
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
"""
# Deferred import
from pydev_ipython.inputhook import enable_gui as real_enable_gui
try:
return real_enable_gui(gui, app)
except ValueError as e:
raise UsageError("%s" % e)
#-------------------------------------------------------------------------
# Things related to hooks
#-------------------------------------------------------------------------
def init_history(self):
# Disable history so that we don't have an additional thread for that
# (and we don't use the history anyways).
self.config.HistoryManager.enabled = False
super(PyDevTerminalInteractiveShell, self).init_history()
def init_hooks(self):
super(PyDevTerminalInteractiveShell, self).init_hooks()
self.set_hook('show_in_pager', show_in_pager)
#-------------------------------------------------------------------------
# Things related to exceptions
#-------------------------------------------------------------------------
def showtraceback(self, exc_tuple=None, *args, **kwargs):
# IPython does a lot of clever stuff with Exceptions. However mostly
# it is related to IPython running in a terminal instead of an IDE.
# (e.g. it prints out snippets of code around the stack trace)
# PyDev does a lot of clever stuff too, so leave exception handling
# with default print_exc that PyDev can parse and do its clever stuff
# with (e.g. it puts links back to the original source code)
try:
if exc_tuple is None:
etype, value, tb = sys.exc_info()
else:
etype, value, tb = exc_tuple
except ValueError:
return
if tb is not None:
traceback.print_exception(etype, value, tb)
#-------------------------------------------------------------------------
# Things related to text completion
#-------------------------------------------------------------------------
# The way to construct an IPCompleter changed in most versions,
# so we have a custom, per version implementation of the construction
def _new_completer_100(self):
completer = PyDevIPCompleter(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
alias_table=self.alias_manager.alias_table,
use_readline=self.has_readline,
parent=self,
)
return completer
def _new_completer_234(self):
# correct for IPython versions 2.x, 3.x, 4.x
completer = PyDevIPCompleter(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
use_readline=self.has_readline,
parent=self,
)
return completer
def _new_completer_500(self):
completer = PyDevIPCompleter(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
use_readline=False,
parent=self
)
return completer
def _new_completer_600(self):
completer = PyDevIPCompleter6(shell=self,
namespace=self.user_ns,
global_namespace=self.user_global_ns,
use_readline=False,
parent=self
)
return completer
def add_completer_hooks(self):
from IPython.core.completerlib import module_completer, magic_run_completer, cd_completer
try:
from IPython.core.completerlib import reset_completer
except ImportError:
# reset_completer was added for rel-0.13
reset_completer = None
self.configurables.append(self.Completer)
# Add custom completers to the basic ones built into IPCompleter
sdisp = self.strdispatchers.get('complete_command', StrDispatch())
self.strdispatchers['complete_command'] = sdisp
self.Completer.custom_completers = sdisp
self.set_hook('complete_command', module_completer, str_key='import')
self.set_hook('complete_command', module_completer, str_key='from')
self.set_hook('complete_command', magic_run_completer, str_key='%run')
self.set_hook('complete_command', cd_completer, str_key='%cd')
if reset_completer:
self.set_hook('complete_command', reset_completer, str_key='%reset')
def init_completer(self):
"""Initialize the completion machinery.
This creates a completer that provides the completions that are
IPython specific. We use this to supplement PyDev's core code
completions.
"""
# PyDev uses its own completer and custom hooks so that it uses
# most completions from PyDev's core completer which provides
# extra information.
# See getCompletions for where the two sets of results are merged
if IPythonRelease._version_major >= 6:
self.Completer = self._new_completer_600()
elif IPythonRelease._version_major >= 5:
self.Completer = self._new_completer_500()
elif IPythonRelease._version_major >= 2:
self.Completer = self._new_completer_234()
elif IPythonRelease._version_major >= 1:
self.Completer = self._new_completer_100()
if hasattr(self.Completer, 'use_jedi'):
self.Completer.use_jedi = False
self.add_completer_hooks()
if IPythonRelease._version_major <= 3:
# Only configure readline if we truly are using readline. IPython can
# do tab-completion over the network, in GUIs, etc, where readline
# itself may be absent
if self.has_readline:
self.set_readline_completer()
#-------------------------------------------------------------------------
# Things related to aliases
#-------------------------------------------------------------------------
def init_alias(self):
        # InteractiveShell defines aliases we want, but TerminalInteractiveShell defines
# ones we don't. So don't use super and instead go right to InteractiveShell
InteractiveShell.init_alias(self)
#-------------------------------------------------------------------------
# Things related to exiting
#-------------------------------------------------------------------------
def ask_exit(self):
""" Ask the shell to exit. Can be overiden and used as a callback. """
# TODO PyDev's console does not have support from the Python side to exit
# the console. If user forces the exit (with sys.exit()) then the console
# simply reports errors. e.g.:
# >>> import sys
# >>> sys.exit()
# Failed to create input stream: Connection refused
# >>>
# Console already exited with value: 0 while waiting for an answer.
# Error stream:
# Output stream:
# >>>
#
# Alternatively if you use the non-IPython shell this is what happens
# >>> exit()
# <type 'exceptions.SystemExit'>:None
# >>>
# <type 'exceptions.SystemExit'>:None
# >>>
#
super(PyDevTerminalInteractiveShell, self).ask_exit()
print('To exit the PyDev Console, terminate the console within IDE.')
#-------------------------------------------------------------------------
# Things related to magics
#-------------------------------------------------------------------------
def init_magics(self):
super(PyDevTerminalInteractiveShell, self).init_magics()
# TODO Any additional magics for PyDev?
InteractiveShellABC.register(PyDevTerminalInteractiveShell) # @UndefinedVariable
#=======================================================================================================================
# _PyDevFrontEnd
#=======================================================================================================================
class _PyDevFrontEnd:
version = release.__version__
def __init__(self):
# Create and initialize our IPython instance.
if hasattr(PyDevTerminalInteractiveShell, '_instance') and PyDevTerminalInteractiveShell._instance is not None:
self.ipython = PyDevTerminalInteractiveShell._instance
else:
self.ipython = PyDevTerminalInteractiveShell.instance()
self._curr_exec_line = 0
self._curr_exec_lines = []
def show_banner(self):
self.ipython.show_banner()
def update(self, globals, locals):
ns = self.ipython.user_ns
for key, value in list(ns.items()):
if key not in locals:
locals[key] = value
self.ipython.user_global_ns.clear()
self.ipython.user_global_ns.update(globals)
self.ipython.user_ns = locals
if hasattr(self.ipython, 'history_manager') and hasattr(self.ipython.history_manager, 'save_thread'):
self.ipython.history_manager.save_thread.pydev_do_not_trace = True # don't trace ipython history saving thread
def complete(self, string):
try:
if string:
                return self.ipython.complete(None, line=string, cursor_pos=len(string))
else:
return self.ipython.complete(string, string, 0)
except:
# Silence completer exceptions
pass
def is_complete(self, string):
# Based on IPython 0.10.1
if string in ('', '\n'):
            # Prefiltering, e.g. through ipython0, may return an empty
# string although some operations have been accomplished. We
# thus want to consider an empty string as a complete
# statement.
return True
else:
try:
# Add line returns here, to make sure that the statement is
# complete (except if '\' was used).
                # This should probably be done in a different place (like
                # maybe a 'prefilter_input' method?). For now, this works.
clean_string = string.rstrip('\n')
if not clean_string.endswith('\\'):
clean_string += '\n\n'
is_complete = codeop.compile_command(
clean_string,
"<string>",
"exec"
)
except Exception:
# XXX: Hack: return True so that the
# code gets executed and the error captured.
is_complete = True
return is_complete
def getCompletions(self, text, act_tok):
# Get completions from IPython and from PyDev and merge the results
        # IPython only gives a context-free list of completions, while PyDev
# gives detailed information about completions.
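        # Each entry appended below is a 4-tuple: (name, documentation, args,
        # completion type), where the type is one of the pydev completion
        # type codes ('11' for plain IPython completions, '12' for magics).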
try:
TYPE_IPYTHON = '11'
TYPE_IPYTHON_MAGIC = '12'
_line, ipython_completions = self.complete(text)
from _pydev_bundle._pydev_completer import Completer
completer = Completer(self.get_namespace(), None)
ret = completer.complete(act_tok)
append = ret.append
ip = self.ipython
pydev_completions = set([f[0] for f in ret])
for ipython_completion in ipython_completions:
# PyCharm was not expecting completions with '%'...
                # Could be fixed in the backend, but it's probably better
                # to fix it in PyCharm.
# if ipython_completion.startswith('%'):
# ipython_completion = ipython_completion[1:]
if ipython_completion not in pydev_completions:
pydev_completions.add(ipython_completion)
inf = ip.object_inspect(ipython_completion)
if inf['type_name'] == 'Magic function':
pydev_type = TYPE_IPYTHON_MAGIC
else:
pydev_type = TYPE_IPYTHON
pydev_doc = inf['docstring']
if pydev_doc is None:
pydev_doc = ''
append((ipython_completion, pydev_doc, '', pydev_type))
return ret
except:
            import traceback
            traceback.print_exc()
return []
def get_namespace(self):
return self.ipython.user_ns
def clear_buffer(self):
del self._curr_exec_lines[:]
def add_exec(self, line):
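        # Returns True when more input is needed (the line was buffered) and
        # False when a complete chunk was handed to IPython's run_cell.
        # A hypothetical session ('fe' being this _PyDevFrontEnd):
        #     fe.add_exec('x = 1')  # -> False: complete, executed right away
        #     fe.add_exec('')       # -> False: empty input counts as complete
        # Lines judged incomplete by is_complete() accumulate in
        # self._curr_exec_lines and the joined buffer is retried on each call.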
if self._curr_exec_lines:
self._curr_exec_lines.append(line)
buf = '\n'.join(self._curr_exec_lines)
if self.is_complete(buf):
self._curr_exec_line += 1
self.ipython.run_cell(buf)
del self._curr_exec_lines[:]
return False # execute complete (no more)
return True # needs more
else:
if not self.is_complete(line):
# Did not execute
self._curr_exec_lines.append(line)
return True # needs more
else:
self._curr_exec_line += 1
self.ipython.run_cell(line, store_history=True)
# hist = self.ipython.history_manager.output_hist_reprs
# rep = hist.get(self._curr_exec_line, None)
# if rep is not None:
# print(rep)
return False # execute complete (no more)
def is_automagic(self):
return self.ipython.automagic
def get_greeting_msg(self):
return 'PyDev console: using IPython %s\n' % self.version
class _PyDevFrontEndContainer:
_instance = None
_last_host_port = None
def get_pydev_frontend(pydev_host, pydev_client_port):
if _PyDevFrontEndContainer._instance is None:
_PyDevFrontEndContainer._instance = _PyDevFrontEnd()
if _PyDevFrontEndContainer._last_host_port != (pydev_host, pydev_client_port):
_PyDevFrontEndContainer._last_host_port = pydev_host, pydev_client_port
# Back channel to PyDev to open editors (in the future other
# info may go back this way. This is the same channel that is
# used to get stdin, see StdIn in pydev_console_utils)
_PyDevFrontEndContainer._instance.ipython.hooks['editor'] = create_editor_hook(pydev_host, pydev_client_port)
        # Note: setting the callback directly because setting it with set_hook
        # would actually create a chain instead of overwriting it at each new call.
# _PyDevFrontEndContainer._instance.ipython.set_hook('editor', create_editor_hook(pydev_host, pydev_client_port))
return _PyDevFrontEndContainer._instance
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydev_bundle/fsnotify/__init__.py
'''
Sample usage to track changes in a thread.
import threading
import time
watcher = fsnotify.Watcher()
watcher.accepted_file_extensions = {'.py', '.pyw'}
# Configure target values to compute throttling.
# Note: internal sleep times will be updated based on
# profiling the actual application runtime to match
# those values.
watcher.target_time_for_single_scan = 2.
watcher.target_time_for_notification = 4.
watcher.set_tracked_paths([target_dir])
def start_watching(): # Called from thread
for change_enum, change_path in watcher.iter_changes():
if change_enum == fsnotify.Change.added:
print('Added: ', change_path)
elif change_enum == fsnotify.Change.modified:
print('Modified: ', change_path)
elif change_enum == fsnotify.Change.deleted:
print('Deleted: ', change_path)
t = threading.Thread(target=start_watching)
t.daemon = True
t.start()
try:
...
finally:
watcher.dispose()
Note: changes are only reported for files (added/modified/deleted), not directories.
'''
import threading
import sys
from os.path import basename
from _pydev_bundle import pydev_log
from os import scandir
try:
from enum import IntEnum
except:
class IntEnum(object):
pass
import time
__author__ = 'Fabio Zadrozny'
__email__ = '[email protected]'
__version__ = '0.1.5' # Version here and in setup.py
class Change(IntEnum):
added = 1
modified = 2
deleted = 3
class _SingleVisitInfo(object):
def __init__(self):
self.count = 0
self.visited_dirs = set()
self.file_to_mtime = {}
self.last_sleep_time = time.time()
class _PathWatcher(object):
'''
Helper to watch a single path.
'''
def __init__(self, root_path, accept_directory, accept_file, single_visit_info, max_recursion_level, sleep_time=.0):
'''
:type root_path: str
:type accept_directory: Callback[str, bool]
:type accept_file: Callback[str, bool]
:type max_recursion_level: int
:type sleep_time: float
'''
self.accept_directory = accept_directory
self.accept_file = accept_file
self._max_recursion_level = max_recursion_level
self._root_path = root_path
# Initial sleep value for throttling, it'll be auto-updated based on the
# Watcher.target_time_for_single_scan.
self.sleep_time = sleep_time
self.sleep_at_elapsed = 1. / 30.
# When created, do the initial snapshot right away!
old_file_to_mtime = {}
self._check(single_visit_info, lambda _change: None, old_file_to_mtime)
def __eq__(self, o):
if isinstance(o, _PathWatcher):
return self._root_path == o._root_path
return False
def __ne__(self, o):
return not self == o
def __hash__(self):
return hash(self._root_path)
def _check_dir(self, dir_path, single_visit_info, append_change, old_file_to_mtime, level):
# This is the actual poll loop
if dir_path in single_visit_info.visited_dirs or level > self._max_recursion_level:
return
single_visit_info.visited_dirs.add(dir_path)
try:
if isinstance(dir_path, bytes):
try:
dir_path = dir_path.decode(sys.getfilesystemencoding())
except UnicodeDecodeError:
try:
dir_path = dir_path.decode('utf-8')
except UnicodeDecodeError:
return # Ignore if we can't deal with the path.
new_files = single_visit_info.file_to_mtime
for entry in scandir(dir_path):
single_visit_info.count += 1
# Throttle if needed inside the loop
# to avoid consuming too much CPU.
if single_visit_info.count % 300 == 0:
if self.sleep_time > 0:
t = time.time()
diff = t - single_visit_info.last_sleep_time
if diff > self.sleep_at_elapsed:
time.sleep(self.sleep_time)
single_visit_info.last_sleep_time = time.time()
if entry.is_dir():
if self.accept_directory(entry.path):
self._check_dir(entry.path, single_visit_info, append_change, old_file_to_mtime, level + 1)
elif self.accept_file(entry.path):
stat = entry.stat()
mtime = (stat.st_mtime_ns, stat.st_size)
path = entry.path
new_files[path] = mtime
old_mtime = old_file_to_mtime.pop(path, None)
if not old_mtime:
append_change((Change.added, path))
elif old_mtime != mtime:
append_change((Change.modified, path))
except OSError:
pass # Directory was removed in the meanwhile.
def _check(self, single_visit_info, append_change, old_file_to_mtime):
self._check_dir(self._root_path, single_visit_info, append_change, old_file_to_mtime, 0)
class Watcher(object):
# By default (if accept_directory is not specified), these will be the
# ignored directories.
ignored_dirs = {u'.git', u'__pycache__', u'.idea', u'node_modules', u'.metadata'}
# By default (if accept_file is not specified), these will be the
# accepted files.
accepted_file_extensions = ()
# Set to the target value for doing full scan of all files (adds a sleep inside the poll loop
# which processes files to reach the target time).
# Lower values will consume more CPU
# Set to 0.0 to have no sleeps (which will result in a higher cpu load).
target_time_for_single_scan = 2.0
# Set the target value from the start of one scan to the start of another scan (adds a
# sleep after a full poll is done to reach the target time).
# Lower values will consume more CPU.
# Set to 0.0 to have a new scan start right away without any sleeps.
target_time_for_notification = 4.0
# Set to True to print the time for a single poll through all the paths.
print_poll_time = False
# This is the maximum recursion level.
max_recursion_level = 10
def __init__(self, accept_directory=None, accept_file=None):
'''
:param Callable[str, bool] accept_directory:
Callable that returns whether a directory should be watched.
Note: if passed it'll override the `ignored_dirs`
:param Callable[str, bool] accept_file:
Callable that returns whether a file should be watched.
Note: if passed it'll override the `accepted_file_extensions`.
'''
self._path_watchers = set()
self._disposed = threading.Event()
if accept_directory is None:
accept_directory = lambda dir_path: basename(dir_path) not in self.ignored_dirs
if accept_file is None:
accept_file = lambda path_name: \
not self.accepted_file_extensions or path_name.endswith(self.accepted_file_extensions)
self.accept_file = accept_file
self.accept_directory = accept_directory
self._single_visit_info = _SingleVisitInfo()
@property
def accept_directory(self):
return self._accept_directory
@accept_directory.setter
def accept_directory(self, accept_directory):
self._accept_directory = accept_directory
for path_watcher in self._path_watchers:
path_watcher.accept_directory = accept_directory
@property
def accept_file(self):
return self._accept_file
@accept_file.setter
def accept_file(self, accept_file):
self._accept_file = accept_file
for path_watcher in self._path_watchers:
path_watcher.accept_file = accept_file
def dispose(self):
self._disposed.set()
@property
def path_watchers(self):
return tuple(self._path_watchers)
def set_tracked_paths(self, paths):
"""
Note: always resets all path trackers to track the passed paths.
"""
if not isinstance(paths, (list, tuple, set)):
paths = (paths,)
# Sort by the path len so that the bigger paths come first (so,
# if there's any nesting we want the nested paths to be visited
# before the parent paths so that the max_recursion_level is correct).
        paths = sorted(set(paths), key=lambda path: -len(path))
path_watchers = set()
self._single_visit_info = _SingleVisitInfo()
initial_time = time.time()
for path in paths:
sleep_time = 0. # When collecting the first time, sleep_time should be 0!
path_watcher = _PathWatcher(
path,
self.accept_directory,
self.accept_file,
self._single_visit_info,
max_recursion_level=self.max_recursion_level,
sleep_time=sleep_time,
)
path_watchers.add(path_watcher)
actual_time = (time.time() - initial_time)
pydev_log.debug('Tracking the following paths for changes: %s', paths)
pydev_log.debug('Time to track: %.2fs', actual_time)
pydev_log.debug('Folders found: %s', len(self._single_visit_info.visited_dirs))
pydev_log.debug('Files found: %s', len(self._single_visit_info.file_to_mtime))
self._path_watchers = path_watchers
def iter_changes(self):
'''
Continuously provides changes (until dispose() is called).
Changes provided are tuples with the Change enum and filesystem path.
:rtype: Iterable[Tuple[Change, str]]
'''
while not self._disposed.is_set():
initial_time = time.time()
old_visit_info = self._single_visit_info
old_file_to_mtime = old_visit_info.file_to_mtime
changes = []
append_change = changes.append
self._single_visit_info = single_visit_info = _SingleVisitInfo()
for path_watcher in self._path_watchers:
path_watcher._check(single_visit_info, append_change, old_file_to_mtime)
# Note that we pop entries while visiting, so, what remained is what's deleted.
for entry in old_file_to_mtime:
append_change((Change.deleted, entry))
for change in changes:
yield change
actual_time = (time.time() - initial_time)
if self.print_poll_time:
print('--- Total poll time: %.3fs' % actual_time)
if actual_time > 0:
if self.target_time_for_single_scan <= 0.0:
for path_watcher in self._path_watchers:
path_watcher.sleep_time = 0.0
else:
perc = self.target_time_for_single_scan / actual_time
# Prevent from changing the values too much (go slowly into the right
# direction).
# (to prevent from cases where the user puts the machine on sleep and
# values become too skewed).
if perc > 2.:
perc = 2.
elif perc < 0.5:
perc = 0.5
for path_watcher in self._path_watchers:
if path_watcher.sleep_time <= 0.0:
path_watcher.sleep_time = 0.001
new_sleep_time = path_watcher.sleep_time * perc
# Prevent from changing the values too much (go slowly into the right
# direction).
# (to prevent from cases where the user puts the machine on sleep and
# values become too skewed).
diff_sleep_time = new_sleep_time - path_watcher.sleep_time
path_watcher.sleep_time += (diff_sleep_time / (3.0 * len(self._path_watchers)))
if actual_time > 0:
self._disposed.wait(actual_time)
if path_watcher.sleep_time < 0.001:
path_watcher.sleep_time = 0.001
# print('new sleep time: %s' % path_watcher.sleep_time)
diff = self.target_time_for_notification - actual_time
if diff > 0.:
self._disposed.wait(diff)
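# A worked example of the throttling above (illustrative numbers): with
# target_time_for_single_scan = 2.0 and a measured actual_time of 1.0s,
# perc = 2.0 / 1.0 = 2.0 (already at the clamp limit). For a single tracked
# path with sleep_time = 0.01, new_sleep_time = 0.02 and the watcher only
# moves a third of the way there: 0.01 + (0.02 - 0.01) / 3.0 ~= 0.0133, so
# repeated scans converge gradually on the target time instead of oscillating.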
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/pydevd_frame_eval_cython_wrapper.py
try:
try:
from _pydevd_frame_eval_ext import pydevd_frame_evaluator as mod
except ImportError:
from _pydevd_frame_eval import pydevd_frame_evaluator as mod
except ImportError:
try:
import sys
try:
is_64bits = sys.maxsize > 2 ** 32
except:
# In Jython this call fails, but this is Ok, we don't support Jython for speedups anyways.
raise ImportError
plat = '32'
if is_64bits:
plat = '64'
# We also accept things as:
#
# _pydevd_frame_eval.pydevd_frame_evaluator_win32_27_32
# _pydevd_frame_eval.pydevd_frame_evaluator_win32_34_64
#
# to have multiple pre-compiled pyds distributed along the IDE
# (generated by build_tools/build_binaries_windows.py).
mod_name = 'pydevd_frame_evaluator_%s_%s%s_%s' % (sys.platform, sys.version_info[0], sys.version_info[1], plat)
check_name = '_pydevd_frame_eval.%s' % (mod_name,)
mod = __import__(check_name)
mod = getattr(mod, mod_name)
except ImportError:
raise
frame_eval_func = mod.frame_eval_func
stop_frame_eval = mod.stop_frame_eval
dummy_trace_dispatch = mod.dummy_trace_dispatch
get_thread_info_py = mod.get_thread_info_py
clear_thread_local_info = mod.clear_thread_local_info
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/pydevd_frame_tracing.py
import sys
from _pydev_bundle import pydev_log
from _pydev_bundle._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_comm import get_global_debugger
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER
from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info
class DummyTracingHolder:
dummy_trace_func = None
def set_trace_func(self, trace_func):
self.dummy_trace_func = trace_func
dummy_tracing_holder = DummyTracingHolder()
def update_globals_dict(globals_dict):
new_globals = {'_pydev_stop_at_break': _pydev_stop_at_break}
globals_dict.update(new_globals)
def _get_line_for_frame(frame):
    # it's absolutely necessary to reset the frame's tracing function in order to get the real line number
tracing_func = frame.f_trace
frame.f_trace = None
line = frame.f_lineno
frame.f_trace = tracing_func
return line
def _pydev_stop_at_break(line):
frame = sys._getframe(1)
# print('pydevd SET TRACING at ', line, 'curr line', frame.f_lineno)
t = threading.current_thread()
try:
additional_info = t.additional_info
except:
additional_info = set_additional_thread_info(t)
if additional_info.is_tracing:
return
additional_info.is_tracing += 1
try:
py_db = get_global_debugger()
if py_db is None:
return
pydev_log.debug("Setting f_trace due to frame eval mode in file: %s on line %s", frame.f_code.co_filename, line)
additional_info.trace_suspend_type = 'frame_eval'
pydevd_frame_eval_cython_wrapper = sys.modules['_pydevd_frame_eval.pydevd_frame_eval_cython_wrapper']
thread_info = pydevd_frame_eval_cython_wrapper.get_thread_info_py()
if thread_info.thread_trace_func is not None:
frame.f_trace = thread_info.thread_trace_func
else:
frame.f_trace = py_db.get_thread_local_trace_func()
finally:
additional_info.is_tracing -= 1
def _pydev_needs_stop_at_break(line):
'''
    We separate the functionality into 2 functions so that we can generate
    bytecode which produces a spurious line change, allowing us to do:
if _pydev_needs_stop_at_break():
# Set line to line -1
_pydev_stop_at_break()
# then, proceed to go to the current line
# (which will then trigger a line event).
'''
t = threading.current_thread()
try:
additional_info = t.additional_info
except:
additional_info = set_additional_thread_info(t)
if additional_info.is_tracing:
return False
additional_info.is_tracing += 1
try:
frame = sys._getframe(1)
# print('pydev needs stop at break?', line, 'curr line', frame.f_lineno, 'curr trace', frame.f_trace)
if frame.f_trace is not None:
# i.e.: this frame is already being traced, thus, we don't need to use programmatic breakpoints.
return False
py_db = get_global_debugger()
if py_db is None:
return False
try:
abs_path_real_path_and_base = NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(frame)
canonical_normalized_filename = abs_path_real_path_and_base[1]
try:
python_breakpoint = py_db.breakpoints[canonical_normalized_filename][line]
except:
# print("Couldn't find breakpoint in the file %s on line %s" % (frame.f_code.co_filename, line))
# Could be KeyError if line is not there or TypeError if breakpoints_for_file is None.
# Note: using catch-all exception for performance reasons (if the user adds a breakpoint
# and then removes it after hitting it once, this method added for the programmatic
# breakpoint will keep on being called and one of those exceptions will always be raised
# here).
return False
if python_breakpoint:
# print('YES')
return True
finally:
additional_info.is_tracing -= 1
return False
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/pydevd_modify_bytecode.py
from collections import namedtuple
import dis
from functools import partial
import itertools
import os.path
import sys
from _pydevd_frame_eval.vendored import bytecode
from _pydevd_frame_eval.vendored.bytecode.instr import Instr, Label
from _pydev_bundle import pydev_log
from _pydevd_frame_eval.pydevd_frame_tracing import _pydev_stop_at_break, _pydev_needs_stop_at_break
DEBUG = False
class DebugHelper(object):
def __init__(self):
self._debug_dir = os.path.join(os.path.dirname(__file__), 'debug_info')
try:
os.makedirs(self._debug_dir)
except:
pass
self._next = partial(next, itertools.count(0))
def _get_filename(self, op_number=None, prefix=''):
if op_number is None:
op_number = self._next()
name = '%03d_before.txt' % op_number
else:
name = '%03d_change.txt' % op_number
filename = os.path.join(self._debug_dir, prefix + name)
return filename, op_number
def write_bytecode(self, b, op_number=None, prefix=''):
filename, op_number = self._get_filename(op_number, prefix)
with open(filename, 'w') as stream:
bytecode.dump_bytecode(b, stream=stream, lineno=True)
return op_number
def write_dis(self, code_to_modify, op_number=None, prefix=''):
filename, op_number = self._get_filename(op_number, prefix)
with open(filename, 'w') as stream:
stream.write('-------- ')
stream.write('-------- ')
stream.write('id(code_to_modify): %s' % id(code_to_modify))
stream.write('\n\n')
dis.dis(code_to_modify, file=stream)
return op_number
_CodeLineInfo = namedtuple('_CodeLineInfo', 'line_to_offset, first_line, last_line')
# Note: this method has a version in cython too (that one is usually used, this is just for tests).
def _get_code_line_info(code_obj):
line_to_offset = {}
first_line = None
last_line = None
for offset, line in dis.findlinestarts(code_obj):
line_to_offset[line] = offset
if line_to_offset:
first_line = min(line_to_offset)
last_line = max(line_to_offset)
return _CodeLineInfo(line_to_offset, first_line, last_line)
if DEBUG:
debug_helper = DebugHelper()
def get_instructions_to_add(
stop_at_line,
_pydev_stop_at_break=_pydev_stop_at_break,
_pydev_needs_stop_at_break=_pydev_needs_stop_at_break
):
'''
This is the bytecode for something as:
if _pydev_needs_stop_at_break():
_pydev_stop_at_break()
but with some special handling for lines.
'''
# Good reference to how things work regarding line numbers and jumps:
# https://github.com/python/cpython/blob/3.6/Objects/lnotab_notes.txt
# Usually use a stop line -1, but if that'd be 0, using line +1 is ok too.
spurious_line = stop_at_line - 1
if spurious_line <= 0:
spurious_line = stop_at_line + 1
label = Label()
return [
# -- if _pydev_needs_stop_at_break():
Instr("LOAD_CONST", _pydev_needs_stop_at_break, lineno=stop_at_line),
Instr("LOAD_CONST", stop_at_line, lineno=stop_at_line),
Instr("CALL_FUNCTION", 1, lineno=stop_at_line),
Instr("POP_JUMP_IF_FALSE", label, lineno=stop_at_line),
# -- _pydev_stop_at_break()
#
        # Note that these instructions carry the spurious line number
        # (stop_at_line - 1, or +1 when that would be 0) so that executing
        # the NOP just below (back at stop_at_line) yields a spurious line event.
Instr("LOAD_CONST", _pydev_stop_at_break, lineno=spurious_line),
Instr("LOAD_CONST", stop_at_line, lineno=spurious_line),
Instr("CALL_FUNCTION", 1, lineno=spurious_line),
Instr("POP_TOP", lineno=spurious_line),
# Reason for the NOP: Python will give us a 'line' trace event whenever we forward jump to
# the first instruction of a line, so, in the case where we haven't added a programmatic
# breakpoint (either because we didn't hit a breakpoint anymore or because it was already
# tracing), we don't want the spurious line event due to the line change, so, we make a jump
# to the instruction right after the NOP so that the spurious line event is NOT generated in
# this case (otherwise we'd have a line event even if the line didn't change).
Instr("NOP", lineno=stop_at_line),
label,
]
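# Roughly, the sequence above is the bytecode one would get for (keeping in
# mind the lineno trickery described in the comments):
#
#     if _pydev_needs_stop_at_break(stop_at_line):    # reported at stop_at_line
#         _pydev_stop_at_break(stop_at_line)          # reported at spurious_line
#
# where falling through to the NOP (back at stop_at_line) triggers the extra
# 'line' event and the conditional jump to the label skips it.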
class _Node(object):
def __init__(self, data):
self.prev = None
self.next = None
self.data = data
def append(self, data):
node = _Node(data)
curr_next = self.next
node.next = self.next
node.prev = self
self.next = node
if curr_next is not None:
curr_next.prev = node
return node
def prepend(self, data):
node = _Node(data)
curr_prev = self.prev
node.prev = self.prev
node.next = self
self.prev = node
if curr_prev is not None:
curr_prev.next = node
return node
class _HelperBytecodeList(object):
'''
    A helper doubly-linked list to make the manipulation a bit easier (so that we don't need
to keep track of indices that change) and performant (because adding multiple items to
the middle of a regular list isn't ideal).
'''
def __init__(self, lst=None):
self._head = None
self._tail = None
if lst:
node = self
for item in lst:
node = node.append(item)
def append(self, data):
if self._tail is None:
node = _Node(data)
self._head = self._tail = node
return node
else:
node = self._tail = self.tail.append(data)
return node
@property
def head(self):
node = self._head
# Manipulating the node directly may make it unsynchronized.
while node.prev:
self._head = node = node.prev
return node
@property
def tail(self):
node = self._tail
# Manipulating the node directly may make it unsynchronized.
while node.next:
self._tail = node = node.next
return node
def __iter__(self):
node = self.head
while node:
yield node.data
node = node.next
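# A minimal usage sketch (illustrative values, not actual instructions):
#
#     lst = _HelperBytecodeList(['a', 'b'])
#     node = lst.head        # node.data == 'a'
#     node.append('a2')      # O(1) insert right after 'a', no index tracking
#     node.prepend('a0')     # O(1) insert right before 'a'
#     assert list(lst) == ['a0', 'a', 'a2', 'b']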
_PREDICT_TABLE = {
'LIST_APPEND': ('JUMP_ABSOLUTE',),
'SET_ADD': ('JUMP_ABSOLUTE',),
'GET_ANEXT': ('LOAD_CONST',),
'GET_AWAITABLE': ('LOAD_CONST',),
'DICT_MERGE': ('CALL_FUNCTION_EX',),
'MAP_ADD': ('JUMP_ABSOLUTE',),
'COMPARE_OP': ('POP_JUMP_IF_FALSE', 'POP_JUMP_IF_TRUE',),
'IS_OP': ('POP_JUMP_IF_FALSE', 'POP_JUMP_IF_TRUE',),
'CONTAINS_OP': ('POP_JUMP_IF_FALSE', 'POP_JUMP_IF_TRUE',),
# Note: there are some others with PREDICT on ceval, but they have more logic
# and it needs more experimentation to know how it behaves in the static generated
# code (and it's only an issue for us if there's actually a line change between
# those, so, we don't have to really handle all the cases, only the one where
# the line number actually changes from one instruction to the predicted one).
}
# 3.10 optimizations include copying code branches multiple times (for instance
# if the body of a finally has a single assign statement it can copy the assign to the case
# where an exception happens and doesn't happen for optimization purposes) and as such
# we need to add the programmatic breakpoint multiple times.
TRACK_MULTIPLE_BRANCHES = sys.version_info[:2] >= (3, 10)
# When tracking multiple branches, we try to fix the bytecodes which would be PREDICTED in the
# Python eval loop so that we don't have spurious line events that wouldn't usually be issued
# in the tracing as they're ignored due to the eval prediction (even though they're in the bytecode).
FIX_PREDICT = sys.version_info[:2] >= (3, 10)
def insert_pydevd_breaks(
code_to_modify,
breakpoint_lines,
code_line_info=None,
_pydev_stop_at_break=_pydev_stop_at_break,
_pydev_needs_stop_at_break=_pydev_needs_stop_at_break,
):
"""
Inserts pydevd programmatic breaks into the code (at the given lines).
:param breakpoint_lines: set with the lines where we should add breakpoints.
:return: tuple(boolean flag whether insertion was successful, modified code).
"""
if code_line_info is None:
code_line_info = _get_code_line_info(code_to_modify)
if not code_line_info.line_to_offset:
return False, code_to_modify
# Create a copy (and make sure we're dealing with a set).
breakpoint_lines = set(breakpoint_lines)
# Note that we can even generate breakpoints on the first line of code
# now, since we generate a spurious line event -- it may be a bit pointless
# as we'll stop in the first line and we don't currently stop the tracing after the
# user resumes, but in the future, if we do that, this would be a nice
# improvement.
# if code_to_modify.co_firstlineno in breakpoint_lines:
# return False, code_to_modify
for line in breakpoint_lines:
if line <= 0:
# The first line is line 1, so, a break at line 0 is not valid.
pydev_log.info('Trying to add breakpoint in invalid line: %s', line)
return False, code_to_modify
try:
b = bytecode.Bytecode.from_code(code_to_modify)
if DEBUG:
op_number_bytecode = debug_helper.write_bytecode(b, prefix='bytecode.')
helper_list = _HelperBytecodeList(b)
modified_breakpoint_lines = breakpoint_lines.copy()
curr_node = helper_list.head
added_breaks_in_lines = set()
last_lineno = None
while curr_node is not None:
instruction = curr_node.data
instruction_lineno = getattr(instruction, 'lineno', None)
curr_name = getattr(instruction, 'name', None)
if FIX_PREDICT:
predict_targets = _PREDICT_TABLE.get(curr_name)
if predict_targets:
# Odd case: the next instruction may have a line number but it doesn't really
# appear in the tracing due to the PREDICT() in ceval, so, fix the bytecode so
# that it does things the way that ceval actually interprets it.
# See: https://mail.python.org/archives/list/[email protected]/thread/CP2PTFCMTK57KM3M3DLJNWGO66R5RVPB/
next_instruction = curr_node.next.data
next_name = getattr(next_instruction, 'name', None)
if next_name in predict_targets:
next_instruction_lineno = getattr(next_instruction, 'lineno', None)
if next_instruction_lineno:
next_instruction.lineno = None
if instruction_lineno is not None:
if TRACK_MULTIPLE_BRANCHES:
if last_lineno is None:
last_lineno = instruction_lineno
else:
if last_lineno == instruction_lineno:
# If the previous is a label, someone may jump into it, so, we need to add
# the break even if it's in the same line.
if curr_node.prev.data.__class__ != Label:
# Skip adding this as the line is still the same.
curr_node = curr_node.next
continue
last_lineno = instruction_lineno
else:
if instruction_lineno in added_breaks_in_lines:
curr_node = curr_node.next
continue
if instruction_lineno in modified_breakpoint_lines:
added_breaks_in_lines.add(instruction_lineno)
if curr_node.prev is not None and curr_node.prev.data.__class__ == Label \
and curr_name == 'POP_TOP':
# If we have a SETUP_FINALLY where the target is a POP_TOP, we can't change
# the target to be the breakpoint instruction (this can crash the interpreter).
for new_instruction in get_instructions_to_add(
instruction_lineno,
_pydev_stop_at_break=_pydev_stop_at_break,
_pydev_needs_stop_at_break=_pydev_needs_stop_at_break,
):
curr_node = curr_node.append(new_instruction)
else:
for new_instruction in get_instructions_to_add(
instruction_lineno,
_pydev_stop_at_break=_pydev_stop_at_break,
_pydev_needs_stop_at_break=_pydev_needs_stop_at_break,
):
curr_node.prepend(new_instruction)
curr_node = curr_node.next
b[:] = helper_list
if DEBUG:
debug_helper.write_bytecode(b, op_number_bytecode, prefix='bytecode.')
new_code = b.to_code()
except:
pydev_log.exception('Error inserting pydevd breaks.')
return False, code_to_modify
if DEBUG:
op_number = debug_helper.write_dis(code_to_modify)
debug_helper.write_dis(new_code, op_number)
return True, new_code
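# Example use (a sketch; `sample` and the chosen line are hypothetical):
#
#     def sample():
#         x = 1  # suppose a breakpoint is wanted on this line
#         return x
#
#     first_line = sample.__code__.co_firstlineno
#     ok, new_code = insert_pydevd_breaks(sample.__code__, {first_line + 1})
#     if ok:
#         sample.__code__ = new_code  # calls now hit the programmatic break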
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/pydevd_frame_eval_main.py
import os
from _pydev_bundle import pydev_log
from _pydevd_bundle.pydevd_trace_dispatch import USING_CYTHON
from _pydevd_bundle.pydevd_constants import USE_CYTHON_FLAG, ENV_FALSE_LOWER_VALUES, \
ENV_TRUE_LOWER_VALUES, IS_PY36_OR_GREATER, IS_PY38_OR_GREATER, SUPPORT_GEVENT, IS_PYTHON_STACKLESS, \
PYDEVD_USE_FRAME_EVAL, PYDEVD_IPYTHON_COMPATIBLE_DEBUGGING
frame_eval_func = None
stop_frame_eval = None
dummy_trace_dispatch = None
clear_thread_local_info = None
# "NO" means we should not use frame evaluation, 'YES' we should use it (and fail if not there) and unspecified uses if possible.
if (
PYDEVD_USE_FRAME_EVAL in ENV_FALSE_LOWER_VALUES or
USE_CYTHON_FLAG in ENV_FALSE_LOWER_VALUES or
not USING_CYTHON or
# Frame eval mode does not work with ipython compatible debugging (this happens because the
# way that frame eval works is run untraced and set tracing only for the frames with
# breakpoints, but ipython compatible debugging creates separate frames for what's logically
# the same frame).
PYDEVD_IPYTHON_COMPATIBLE_DEBUGGING
):
USING_FRAME_EVAL = False
elif SUPPORT_GEVENT or (IS_PYTHON_STACKLESS and not IS_PY38_OR_GREATER):
USING_FRAME_EVAL = False
    # i.e.: gevent and frame eval mode don't get along very well.
# https://github.com/microsoft/debugpy/issues/189
# Same problem with Stackless.
# https://github.com/stackless-dev/stackless/issues/240
elif PYDEVD_USE_FRAME_EVAL in ENV_TRUE_LOWER_VALUES:
# Fail if unable to use
from _pydevd_frame_eval.pydevd_frame_eval_cython_wrapper import frame_eval_func, stop_frame_eval, dummy_trace_dispatch, clear_thread_local_info
USING_FRAME_EVAL = True
else:
USING_FRAME_EVAL = False
# Try to use if possible
if IS_PY36_OR_GREATER:
try:
from _pydevd_frame_eval.pydevd_frame_eval_cython_wrapper import frame_eval_func, stop_frame_eval, dummy_trace_dispatch, clear_thread_local_info
USING_FRAME_EVAL = True
except ImportError:
pydev_log.show_compile_cython_command_line()
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/pydevd_fix_code.py
def _fix_contents(filename, contents):
import re
contents = re.sub(
r"from bytecode", r'from _pydevd_frame_eval.vendored.bytecode', contents, flags=re.MULTILINE
)
contents = re.sub(
r"import bytecode", r'from _pydevd_frame_eval.vendored import bytecode', contents, flags=re.MULTILINE
)
# This test will import the wrong setup (we're not interested in it).
contents = re.sub(
r"def test_version\(self\):", r'def skip_test_version(self):', contents, flags=re.MULTILINE
)
if filename.startswith('test_'):
if 'pytestmark' not in contents:
pytest_mark = '''
import pytest
from tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON
from tests_python.debug_constants import TEST_CYTHON
pytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason='Requires CPython >= 3.6')
'''
contents = pytest_mark + contents
return contents
def main():
import os
# traverse root directory, and list directories as dirs and files as files
for root, dirs, files in os.walk(os.path.dirname(__file__)):
path = root.split(os.sep)
for filename in files:
if filename.endswith('.py') and filename != 'pydevd_fix_code.py':
with open(os.path.join(root, filename), 'r') as stream:
contents = stream.read()
new_contents = _fix_contents(filename, contents)
if contents != new_contents:
print('fixed ', os.path.join(root, filename))
with open(os.path.join(root, filename), 'w') as stream:
stream.write(new_contents)
# print(len(path) * '---', filename)
if __name__ == '__main__':
    main()
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/flags.py
# alias to keep the 'bytecode' variable free
import sys
from enum import IntFlag
from _pydevd_frame_eval.vendored import bytecode as _bytecode
class CompilerFlags(IntFlag):
"""Possible values of the co_flags attribute of Code object.
Note: We do not rely on inspect values here as some of them are missing and
furthermore would be version dependent.
"""
OPTIMIZED = 0x00001 # noqa
NEWLOCALS = 0x00002 # noqa
VARARGS = 0x00004 # noqa
VARKEYWORDS = 0x00008 # noqa
NESTED = 0x00010 # noqa
GENERATOR = 0x00020 # noqa
NOFREE = 0x00040 # noqa
# New in Python 3.5
# Used for coroutines defined using async def ie native coroutine
COROUTINE = 0x00080 # noqa
# Used for coroutines defined as a generator and then decorated using
# types.coroutine
ITERABLE_COROUTINE = 0x00100 # noqa
# New in Python 3.6
# Generator defined in an async def function
ASYNC_GENERATOR = 0x00200 # noqa
# __future__ flags
# future flags changed in Python 3.9
if sys.version_info < (3, 9):
FUTURE_GENERATOR_STOP = 0x80000 # noqa
if sys.version_info > (3, 6):
FUTURE_ANNOTATIONS = 0x100000
else:
FUTURE_GENERATOR_STOP = 0x800000 # noqa
FUTURE_ANNOTATIONS = 0x1000000
def infer_flags(bytecode, is_async=None):
"""Infer the proper flags for a bytecode based on the instructions.
Because the bytecode does not have enough context to guess if a function
    is asynchronous, the algorithm tries to be conservative and will never turn
    previously async code into sync code.
Parameters
----------
bytecode : Bytecode | ConcreteBytecode | ControlFlowGraph
Bytecode for which to infer the proper flags
is_async : bool | None, optional
Force the code to be marked as asynchronous if True, prevent it from
being marked as asynchronous if False and simply infer the best
solution based on the opcode and the existing flag if None.
"""
flags = CompilerFlags(0)
if not isinstance(
bytecode,
(_bytecode.Bytecode, _bytecode.ConcreteBytecode, _bytecode.ControlFlowGraph),
):
msg = (
"Expected a Bytecode, ConcreteBytecode or ControlFlowGraph "
"instance not %s"
)
raise ValueError(msg % bytecode)
instructions = (
bytecode.get_instructions()
if isinstance(bytecode, _bytecode.ControlFlowGraph)
else bytecode
)
instr_names = {
i.name
for i in instructions
if not isinstance(i, (_bytecode.SetLineno, _bytecode.Label))
}
# Identify optimized code
if not (instr_names & {"STORE_NAME", "LOAD_NAME", "DELETE_NAME"}):
flags |= CompilerFlags.OPTIMIZED
# Check for free variables
if not (
instr_names
& {
"LOAD_CLOSURE",
"LOAD_DEREF",
"STORE_DEREF",
"DELETE_DEREF",
"LOAD_CLASSDEREF",
}
):
flags |= CompilerFlags.NOFREE
# Copy flags for which we cannot infer the right value
flags |= bytecode.flags & (
CompilerFlags.NEWLOCALS
| CompilerFlags.VARARGS
| CompilerFlags.VARKEYWORDS
| CompilerFlags.NESTED
)
sure_generator = instr_names & {"YIELD_VALUE"}
maybe_generator = instr_names & {"YIELD_VALUE", "YIELD_FROM"}
sure_async = instr_names & {
"GET_AWAITABLE",
"GET_AITER",
"GET_ANEXT",
"BEFORE_ASYNC_WITH",
"SETUP_ASYNC_WITH",
"END_ASYNC_FOR",
}
# If performing inference or forcing an async behavior, first inspect
# the flags since this is the only way to identify iterable coroutines
if is_async in (None, True):
if bytecode.flags & CompilerFlags.COROUTINE:
if sure_generator:
flags |= CompilerFlags.ASYNC_GENERATOR
else:
flags |= CompilerFlags.COROUTINE
elif bytecode.flags & CompilerFlags.ITERABLE_COROUTINE:
if sure_async:
msg = (
"The ITERABLE_COROUTINE flag is set but bytecode that"
"can only be used in async functions have been "
"detected. Please unset that flag before performing "
"inference."
)
raise ValueError(msg)
flags |= CompilerFlags.ITERABLE_COROUTINE
elif bytecode.flags & CompilerFlags.ASYNC_GENERATOR:
if not sure_generator:
flags |= CompilerFlags.COROUTINE
else:
flags |= CompilerFlags.ASYNC_GENERATOR
# If the code was not asynchronous before determine if it should now be
# asynchronous based on the opcode and the is_async argument.
else:
if sure_async:
# YIELD_FROM is not allowed in async generator
if sure_generator:
flags |= CompilerFlags.ASYNC_GENERATOR
else:
flags |= CompilerFlags.COROUTINE
elif maybe_generator:
if is_async:
if sure_generator:
flags |= CompilerFlags.ASYNC_GENERATOR
else:
flags |= CompilerFlags.COROUTINE
else:
flags |= CompilerFlags.GENERATOR
elif is_async:
flags |= CompilerFlags.COROUTINE
# If the code should not be asynchronous, check first it is possible and
# next set the GENERATOR flag if relevant
else:
if sure_async:
raise ValueError(
"The is_async argument is False but bytecodes "
"that can only be used in async functions have "
"been detected."
)
if maybe_generator:
flags |= CompilerFlags.GENERATOR
flags |= bytecode.flags & CompilerFlags.FUTURE_GENERATOR_STOP
return flags
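# Example (a sketch): inferring the flags of a simple generator.
#
#     def gen():
#         yield 1
#
#     bc = _bytecode.Bytecode.from_code(gen.__code__)
#     assert infer_flags(bc) & CompilerFlags.GENERATOR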
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/peephole_opt.py
"""
Peephole optimizer of CPython 3.6 reimplemented in pure Python using
the bytecode module.
"""
import opcode
import operator
import sys
from _pydevd_frame_eval.vendored.bytecode import Instr, Bytecode, ControlFlowGraph, BasicBlock, Compare
JUMPS_ON_TRUE = frozenset(
(
"POP_JUMP_IF_TRUE",
"JUMP_IF_TRUE_OR_POP",
)
)
NOT_COMPARE = {
Compare.IN: Compare.NOT_IN,
Compare.NOT_IN: Compare.IN,
Compare.IS: Compare.IS_NOT,
Compare.IS_NOT: Compare.IS,
}
MAX_SIZE = 20
class ExitUnchanged(Exception):
"""Exception used to skip the peephole optimizer"""
pass
class PeepholeOptimizer:
"""Python reimplementation of the peephole optimizer.
Copy of the C comment:
Perform basic peephole optimizations to components of a code object.
The consts object should still be in list form to allow new constants
to be appended.
To keep the optimizer simple, it bails out (does nothing) for code that
has a length over 32,700, and does not calculate extended arguments.
That allows us to avoid overflow and sign issues. Likewise, it bails when
the lineno table has complex encoding for gaps >= 255. EXTENDED_ARG can
appear before MAKE_FUNCTION; in this case both opcodes are skipped.
EXTENDED_ARG preceding any other opcode causes the optimizer to bail.
    Optimizations are restricted to simple transformations occurring within a
single basic block. All transformations keep the code size the same or
smaller. For those that reduce size, the gaps are initially filled with
NOPs. Later those NOPs are removed and the jump addresses retargeted in
a single pass. Code offset is adjusted accordingly.
"""
def __init__(self):
# bytecode.ControlFlowGraph instance
self.code = None
self.const_stack = None
self.block_index = None
self.block = None
# index of the current instruction in self.block instructions
self.index = None
# whether we are in a LOAD_CONST sequence
self.in_consts = False
def check_result(self, value):
try:
size = len(value)
except TypeError:
return True
return size <= MAX_SIZE
def replace_load_const(self, nconst, instr, result):
# FIXME: remove temporary computed constants?
# FIXME: or at least reuse existing constants?
self.in_consts = True
load_const = Instr("LOAD_CONST", result, lineno=instr.lineno)
start = self.index - nconst - 1
self.block[start : self.index] = (load_const,)
self.index -= nconst
if nconst:
del self.const_stack[-nconst:]
self.const_stack.append(result)
self.in_consts = True
def eval_LOAD_CONST(self, instr):
self.in_consts = True
value = instr.arg
self.const_stack.append(value)
self.in_consts = True
def unaryop(self, op, instr):
try:
value = self.const_stack[-1]
result = op(value)
except IndexError:
return
if not self.check_result(result):
return
self.replace_load_const(1, instr, result)
def eval_UNARY_POSITIVE(self, instr):
return self.unaryop(operator.pos, instr)
def eval_UNARY_NEGATIVE(self, instr):
return self.unaryop(operator.neg, instr)
def eval_UNARY_INVERT(self, instr):
return self.unaryop(operator.invert, instr)
def get_next_instr(self, name):
try:
next_instr = self.block[self.index]
except IndexError:
return None
if next_instr.name == name:
return next_instr
return None
def eval_UNARY_NOT(self, instr):
# Note: UNARY_NOT <const> is not optimized
next_instr = self.get_next_instr("POP_JUMP_IF_FALSE")
if next_instr is None:
return None
# Replace UNARY_NOT+POP_JUMP_IF_FALSE with POP_JUMP_IF_TRUE
instr.set("POP_JUMP_IF_TRUE", next_instr.arg)
del self.block[self.index]
def binop(self, op, instr):
try:
left = self.const_stack[-2]
right = self.const_stack[-1]
except IndexError:
return
try:
result = op(left, right)
except Exception:
return
if not self.check_result(result):
return
self.replace_load_const(2, instr, result)
def eval_BINARY_ADD(self, instr):
return self.binop(operator.add, instr)
def eval_BINARY_SUBTRACT(self, instr):
return self.binop(operator.sub, instr)
def eval_BINARY_MULTIPLY(self, instr):
return self.binop(operator.mul, instr)
def eval_BINARY_TRUE_DIVIDE(self, instr):
return self.binop(operator.truediv, instr)
def eval_BINARY_FLOOR_DIVIDE(self, instr):
return self.binop(operator.floordiv, instr)
def eval_BINARY_MODULO(self, instr):
return self.binop(operator.mod, instr)
def eval_BINARY_POWER(self, instr):
return self.binop(operator.pow, instr)
def eval_BINARY_LSHIFT(self, instr):
return self.binop(operator.lshift, instr)
def eval_BINARY_RSHIFT(self, instr):
return self.binop(operator.rshift, instr)
def eval_BINARY_AND(self, instr):
return self.binop(operator.and_, instr)
def eval_BINARY_OR(self, instr):
return self.binop(operator.or_, instr)
def eval_BINARY_XOR(self, instr):
return self.binop(operator.xor, instr)
def eval_BINARY_SUBSCR(self, instr):
return self.binop(operator.getitem, instr)
def replace_container_of_consts(self, instr, container_type):
items = self.const_stack[-instr.arg :]
value = container_type(items)
self.replace_load_const(instr.arg, instr, value)
def build_tuple_unpack_seq(self, instr):
next_instr = self.get_next_instr("UNPACK_SEQUENCE")
if next_instr is None or next_instr.arg != instr.arg:
return
if instr.arg < 1:
return
if self.const_stack and instr.arg <= len(self.const_stack):
nconst = instr.arg
start = self.index - 1
# Rewrite LOAD_CONST instructions in the reverse order
load_consts = self.block[start - nconst : start]
self.block[start - nconst : start] = reversed(load_consts)
# Remove BUILD_TUPLE+UNPACK_SEQUENCE
self.block[start : start + 2] = ()
self.index -= 2
self.const_stack.clear()
return
if instr.arg == 1:
# Replace BUILD_TUPLE 1 + UNPACK_SEQUENCE 1 with NOP
del self.block[self.index - 1 : self.index + 1]
elif instr.arg == 2:
# Replace BUILD_TUPLE 2 + UNPACK_SEQUENCE 2 with ROT_TWO
rot2 = Instr("ROT_TWO", lineno=instr.lineno)
self.block[self.index - 1 : self.index + 1] = (rot2,)
self.index -= 1
self.const_stack.clear()
elif instr.arg == 3:
# Replace BUILD_TUPLE 3 + UNPACK_SEQUENCE 3
# with ROT_THREE + ROT_TWO
rot3 = Instr("ROT_THREE", lineno=instr.lineno)
rot2 = Instr("ROT_TWO", lineno=instr.lineno)
self.block[self.index - 1 : self.index + 1] = (rot3, rot2)
self.index -= 1
self.const_stack.clear()
def build_tuple(self, instr, container_type):
if instr.arg > len(self.const_stack):
return
next_instr = self.get_next_instr("COMPARE_OP")
if next_instr is None or next_instr.arg not in (Compare.IN, Compare.NOT_IN):
return
self.replace_container_of_consts(instr, container_type)
return True
def eval_BUILD_TUPLE(self, instr):
if not instr.arg:
return
if instr.arg <= len(self.const_stack):
self.replace_container_of_consts(instr, tuple)
else:
self.build_tuple_unpack_seq(instr)
def eval_BUILD_LIST(self, instr):
if not instr.arg:
return
if not self.build_tuple(instr, tuple):
self.build_tuple_unpack_seq(instr)
def eval_BUILD_SET(self, instr):
if not instr.arg:
return
self.build_tuple(instr, frozenset)
# Note: BUILD_SLICE is not optimized
def eval_COMPARE_OP(self, instr):
# Note: COMPARE_OP: 2 < 3 is not optimized
try:
new_arg = NOT_COMPARE[instr.arg]
except KeyError:
return
if self.get_next_instr("UNARY_NOT") is None:
return
# not (a is b) --> a is not b
# not (a in b) --> a not in b
# not (a is not b) --> a is b
# not (a not in b) --> a in b
instr.arg = new_arg
self.block[self.index - 1 : self.index + 1] = (instr,)
def jump_if_or_pop(self, instr):
# Simplify conditional jump to conditional jump where the
# result of the first test implies the success of a similar
# test or the failure of the opposite test.
#
# Arises in code like:
# "if a and b:"
# "if a or b:"
# "a and b or c"
# "(a and b) and c"
#
# x:JUMP_IF_FALSE_OR_POP y y:JUMP_IF_FALSE_OR_POP z
# --> x:JUMP_IF_FALSE_OR_POP z
#
# x:JUMP_IF_FALSE_OR_POP y y:JUMP_IF_TRUE_OR_POP z
# --> x:POP_JUMP_IF_FALSE y+3
# where y+3 is the instruction following the second test.
target_block = instr.arg
try:
target_instr = target_block[0]
except IndexError:
return
if not target_instr.is_cond_jump():
self.optimize_jump_to_cond_jump(instr)
return
if (target_instr.name in JUMPS_ON_TRUE) == (instr.name in JUMPS_ON_TRUE):
# The second jump will be taken iff the first is.
target2 = target_instr.arg
# The current opcode inherits its target's stack behaviour
instr.name = target_instr.name
instr.arg = target2
self.block[self.index - 1] = instr
self.index -= 1
else:
# The second jump is not taken if the first is (so jump past it),
# and all conditional jumps pop their argument when they're not
# taken (so change the first jump to pop its argument when it's
# taken).
if instr.name in JUMPS_ON_TRUE:
name = "POP_JUMP_IF_TRUE"
else:
name = "POP_JUMP_IF_FALSE"
new_label = self.code.split_block(target_block, 1)
instr.name = name
instr.arg = new_label
self.block[self.index - 1] = instr
self.index -= 1
def eval_JUMP_IF_FALSE_OR_POP(self, instr):
self.jump_if_or_pop(instr)
def eval_JUMP_IF_TRUE_OR_POP(self, instr):
self.jump_if_or_pop(instr)
def eval_NOP(self, instr):
# Remove NOP
del self.block[self.index - 1]
self.index -= 1
def optimize_jump_to_cond_jump(self, instr):
# Replace jumps to unconditional jumps
jump_label = instr.arg
assert isinstance(jump_label, BasicBlock), jump_label
try:
target_instr = jump_label[0]
except IndexError:
return
if instr.is_uncond_jump() and target_instr.name == "RETURN_VALUE":
# Replace JUMP_ABSOLUTE => RETURN_VALUE with RETURN_VALUE
self.block[self.index - 1] = target_instr
elif target_instr.is_uncond_jump():
# Replace JUMP_FORWARD t1 jumping to JUMP_FORWARD t2
# with JUMP_ABSOLUTE t2
jump_target2 = target_instr.arg
name = instr.name
if instr.name == "JUMP_FORWARD":
name = "JUMP_ABSOLUTE"
else:
# FIXME: reimplement this check
# if jump_target2 < 0:
# # No backward relative jumps
# return
# FIXME: remove this workaround and implement comment code ^^
if instr.opcode in opcode.hasjrel:
return
instr.name = name
instr.arg = jump_target2
self.block[self.index - 1] = instr
def optimize_jump(self, instr):
if instr.is_uncond_jump() and self.index == len(self.block):
# JUMP_ABSOLUTE at the end of a block which points to the
# following block: remove the jump, link the current block
# to the following block
block_index = self.block_index
target_block = instr.arg
target_block_index = self.code.get_block_index(target_block)
if target_block_index == block_index:
del self.block[self.index - 1]
self.block.next_block = target_block
return
self.optimize_jump_to_cond_jump(instr)
def iterblock(self, block):
self.block = block
self.index = 0
while self.index < len(block):
instr = self.block[self.index]
self.index += 1
yield instr
def optimize_block(self, block):
self.const_stack.clear()
self.in_consts = False
for instr in self.iterblock(block):
if not self.in_consts:
self.const_stack.clear()
self.in_consts = False
meth_name = "eval_%s" % instr.name
meth = getattr(self, meth_name, None)
if meth is not None:
meth(instr)
elif instr.has_jump():
self.optimize_jump(instr)
# Note: Skipping over LOAD_CONST trueconst; POP_JUMP_IF_FALSE
# <target> is not implemented, since it looks like the optimization
        # is never triggered in practice. The compiler already optimizes if
# and while statements.
def remove_dead_blocks(self):
# FIXME: remove empty blocks?
used_blocks = {id(self.code[0])}
for block in self.code:
if block.next_block is not None:
used_blocks.add(id(block.next_block))
for instr in block:
if isinstance(instr, Instr) and isinstance(instr.arg, BasicBlock):
used_blocks.add(id(instr.arg))
block_index = 0
while block_index < len(self.code):
block = self.code[block_index]
if id(block) not in used_blocks:
del self.code[block_index]
else:
block_index += 1
# FIXME: merge following blocks if block1 does not contain any
# jump and block1.next_block is block2
def optimize_cfg(self, cfg):
self.code = cfg
self.const_stack = []
self.remove_dead_blocks()
self.block_index = 0
while self.block_index < len(self.code):
block = self.code[self.block_index]
self.block_index += 1
self.optimize_block(block)
def optimize(self, code_obj):
bytecode = Bytecode.from_code(code_obj)
cfg = ControlFlowGraph.from_bytecode(bytecode)
self.optimize_cfg(cfg)
bytecode = cfg.to_bytecode()
code = bytecode.to_code()
return code
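# A minimal usage sketch, not part of the vendored module: running the
# peephole pass over a function's code object. `_demo_peephole` and `sample`
# are hypothetical names used only for illustration.
def _demo_peephole():
    def sample(x):
        return x + 1
    optimized = PeepholeOptimizer().optimize(sample.__code__)
    sample.__code__ = optimized  # swap in the optimized bytecode
    assert sample(1) == 2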
# Code transformer for the PEP 511
class CodeTransformer:
name = "pyopt"
def code_transformer(self, code, context):
if sys.flags.verbose:
print(
"Optimize %s:%s: %s"
% (code.co_filename, code.co_firstlineno, code.co_name)
)
optimizer = PeepholeOptimizer()
return optimizer.optimize(code)
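# Registration sketch (hypothetical): PEP 511 was rejected, so
# sys.set_code_transformers() only exists in the PEP 511 proof-of-concept
# CPython fork; on mainline CPython this helper is a no-op.
def _demo_register_transformer():
    import sys
    if hasattr(sys, "set_code_transformers"):  # PEP 511 fork only
        sys.set_code_transformers([CodeTransformer()])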
| 15,740 | Python | 30.993902 | 103 | 0.581004 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/instr.py | import enum
import dis
import opcode as _opcode
import sys
from marshal import dumps as _dumps
from _pydevd_frame_eval.vendored import bytecode as _bytecode
@enum.unique
class Compare(enum.IntEnum):
LT = 0
LE = 1
EQ = 2
NE = 3
GT = 4
GE = 5
IN = 6
NOT_IN = 7
IS = 8
IS_NOT = 9
EXC_MATCH = 10
UNSET = object()
def const_key(obj):
try:
return _dumps(obj)
except ValueError:
        # For other types, we use the object identifier as a unique identifier
# to ensure that they are seen as unequal.
return (type(obj), id(obj))
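# A quick illustration (hypothetical helper, not used by the library):
# const_key() distinguishes constants that compare equal but must remain
# distinct entries in co_consts.
def _demo_const_key():
    assert const_key(1) != const_key(1.0)     # int and float marshal differently
    assert const_key(0.0) != const_key(-0.0)  # the sign of zero is preserved
    o = object()                              # not marshallable
    assert const_key(o) == (type(o), id(o))   # falls back to (type, id)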
def _pushes_back(opname):
if opname in ["CALL_FINALLY"]:
# CALL_FINALLY pushes the address of the "finally" block instead of a
        # value, hence we don't treat it as a pushing-back op
return False
return (
opname.startswith("UNARY_")
or opname.startswith("GET_")
# BUILD_XXX_UNPACK have been removed in 3.9
or opname.startswith("BINARY_")
or opname.startswith("INPLACE_")
or opname.startswith("BUILD_")
or opname.startswith("CALL_")
) or opname in (
"LIST_TO_TUPLE",
"LIST_EXTEND",
"SET_UPDATE",
"DICT_UPDATE",
"DICT_MERGE",
"IS_OP",
"CONTAINS_OP",
"FORMAT_VALUE",
"MAKE_FUNCTION",
"IMPORT_NAME",
# technically, these three do not push back, but leave the container
# object on TOS
"SET_ADD",
"LIST_APPEND",
"MAP_ADD",
"LOAD_ATTR",
)
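# A small illustration (hypothetical helper): which opcode names count as
# "pushing back" a value, as used by Instr.pre_and_post_stack_effect() below.
def _demo_pushes_back():
    assert _pushes_back("BINARY_ADD")        # leaves the result on the stack
    assert _pushes_back("LOAD_ATTR")         # leaves the attribute on the stack
    assert not _pushes_back("CALL_FINALLY")  # pushes a return address instead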
def _check_lineno(lineno):
if not isinstance(lineno, int):
raise TypeError("lineno must be an int")
if lineno < 1:
raise ValueError("invalid lineno")
class SetLineno:
__slots__ = ("_lineno",)
def __init__(self, lineno):
_check_lineno(lineno)
self._lineno = lineno
@property
def lineno(self):
return self._lineno
def __eq__(self, other):
if not isinstance(other, SetLineno):
return False
return self._lineno == other._lineno
class Label:
__slots__ = ()
class _Variable:
__slots__ = ("name",)
def __init__(self, name):
self.name = name
def __eq__(self, other):
if type(self) != type(other):
return False
return self.name == other.name
def __str__(self):
return self.name
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.name)
class CellVar(_Variable):
__slots__ = ()
class FreeVar(_Variable):
__slots__ = ()
def _check_arg_int(name, arg):
if not isinstance(arg, int):
raise TypeError(
"operation %s argument must be an int, "
"got %s" % (name, type(arg).__name__)
)
if not (0 <= arg <= 2147483647):
raise ValueError(
"operation %s argument must be in " "the range 0..2,147,483,647" % name
)
if sys.version_info < (3, 8):
_stack_effects = {
# NOTE: the entries are all 2-tuples. Entry[0/False] is non-taken jumps.
# Entry[1/True] is for taken jumps.
# opcodes not in dis.stack_effect
_opcode.opmap["EXTENDED_ARG"]: (0, 0),
_opcode.opmap["NOP"]: (0, 0),
# Jump taken/not-taken are different:
_opcode.opmap["JUMP_IF_TRUE_OR_POP"]: (-1, 0),
_opcode.opmap["JUMP_IF_FALSE_OR_POP"]: (-1, 0),
_opcode.opmap["FOR_ITER"]: (1, -1),
_opcode.opmap["SETUP_WITH"]: (1, 6),
_opcode.opmap["SETUP_ASYNC_WITH"]: (0, 5),
_opcode.opmap["SETUP_EXCEPT"]: (0, 6), # as of 3.7, below for <=3.6
_opcode.opmap["SETUP_FINALLY"]: (0, 6), # as of 3.7, below for <=3.6
}
# More stack effect values that are unique to the version of Python.
if sys.version_info < (3, 7):
_stack_effects.update(
{
_opcode.opmap["SETUP_WITH"]: (7, 7),
_opcode.opmap["SETUP_EXCEPT"]: (6, 9),
_opcode.opmap["SETUP_FINALLY"]: (6, 9),
}
)
class Instr:
"""Abstract instruction."""
__slots__ = ("_name", "_opcode", "_arg", "_lineno", "offset")
def __init__(self, name, arg=UNSET, *, lineno=None, offset=None):
self._set(name, arg, lineno)
self.offset = offset
def _check_arg(self, name, opcode, arg):
if name == "EXTENDED_ARG":
raise ValueError(
"only concrete instruction can contain EXTENDED_ARG, "
"highlevel instruction can represent arbitrary argument without it"
)
if opcode >= _opcode.HAVE_ARGUMENT:
if arg is UNSET:
raise ValueError("operation %s requires an argument" % name)
else:
if arg is not UNSET:
raise ValueError("operation %s has no argument" % name)
if self._has_jump(opcode):
if not isinstance(arg, (Label, _bytecode.BasicBlock)):
raise TypeError(
"operation %s argument type must be "
"Label or BasicBlock, got %s" % (name, type(arg).__name__)
)
elif opcode in _opcode.hasfree:
if not isinstance(arg, (CellVar, FreeVar)):
raise TypeError(
"operation %s argument must be CellVar "
"or FreeVar, got %s" % (name, type(arg).__name__)
)
elif opcode in _opcode.haslocal or opcode in _opcode.hasname:
if not isinstance(arg, str):
raise TypeError(
"operation %s argument must be a str, "
"got %s" % (name, type(arg).__name__)
)
elif opcode in _opcode.hasconst:
if isinstance(arg, Label):
raise ValueError(
"label argument cannot be used " "in %s operation" % name
)
if isinstance(arg, _bytecode.BasicBlock):
raise ValueError(
"block argument cannot be used " "in %s operation" % name
)
elif opcode in _opcode.hascompare:
if not isinstance(arg, Compare):
raise TypeError(
"operation %s argument type must be "
"Compare, got %s" % (name, type(arg).__name__)
)
elif opcode >= _opcode.HAVE_ARGUMENT:
_check_arg_int(name, arg)
def _set(self, name, arg, lineno):
if not isinstance(name, str):
raise TypeError("operation name must be a str")
try:
opcode = _opcode.opmap[name]
except KeyError:
raise ValueError("invalid operation name")
# check lineno
if lineno is not None:
_check_lineno(lineno)
self._check_arg(name, opcode, arg)
self._name = name
self._opcode = opcode
self._arg = arg
self._lineno = lineno
def set(self, name, arg=UNSET):
"""Modify the instruction in-place.
Replace name and arg attributes. Don't modify lineno.
"""
self._set(name, arg, self._lineno)
def require_arg(self):
"""Does the instruction require an argument?"""
return self._opcode >= _opcode.HAVE_ARGUMENT
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._set(name, self._arg, self._lineno)
@property
def opcode(self):
return self._opcode
@opcode.setter
def opcode(self, op):
if not isinstance(op, int):
raise TypeError("operator code must be an int")
if 0 <= op <= 255:
name = _opcode.opname[op]
valid = name != "<%r>" % op
else:
valid = False
if not valid:
raise ValueError("invalid operator code")
self._set(name, self._arg, self._lineno)
@property
def arg(self):
return self._arg
@arg.setter
def arg(self, arg):
self._set(self._name, arg, self._lineno)
@property
def lineno(self):
return self._lineno
@lineno.setter
def lineno(self, lineno):
self._set(self._name, self._arg, lineno)
def stack_effect(self, jump=None):
if self._opcode < _opcode.HAVE_ARGUMENT:
arg = None
elif not isinstance(self._arg, int) or self._opcode in _opcode.hasconst:
# Argument is either a non-integer or an integer constant,
# not oparg.
arg = 0
else:
arg = self._arg
if sys.version_info < (3, 8):
effect = _stack_effects.get(self._opcode, None)
if effect is not None:
return max(effect) if jump is None else effect[jump]
return dis.stack_effect(self._opcode, arg)
else:
return dis.stack_effect(self._opcode, arg, jump=jump)
def pre_and_post_stack_effect(self, jump=None):
_effect = self.stack_effect(jump=jump)
        # Compute the pre size and post size to avoid segfaults caused by
        # not having enough elements on the stack
_opname = _opcode.opname[self._opcode]
if _opname.startswith("DUP_TOP"):
return _effect * -1, _effect * 2
if _pushes_back(_opname):
# if the op pushes value back to the stack, then the stack effect given
# by dis.stack_effect actually equals pre + post effect, therefore we need
# -1 from the stack effect as a pre condition
return _effect - 1, 1
if _opname.startswith("UNPACK_"):
# Instr(UNPACK_* , n) pops 1 and pushes n
# _effect = n - 1
# hence we return -1, _effect + 1
return -1, _effect + 1
if _opname == "FOR_ITER" and not jump:
# Since FOR_ITER needs TOS to be an iterator, which basically means
# a prerequisite of 1 on the stack
return -1, 2
if _opname == "ROT_N":
return (-self._arg, self._arg)
return {"ROT_TWO": (-2, 2), "ROT_THREE": (-3, 3), "ROT_FOUR": (-4, 4)}.get(
_opname, (_effect, 0)
)
def copy(self):
return self.__class__(self._name, self._arg, lineno=self._lineno, offset=self.offset)
def __repr__(self):
if self._arg is not UNSET:
return "<%s arg=%r lineno=%s>" % (self._name, self._arg, self._lineno)
else:
return "<%s lineno=%s>" % (self._name, self._lineno)
def _cmp_key(self, labels=None):
arg = self._arg
if self._opcode in _opcode.hasconst:
arg = const_key(arg)
elif isinstance(arg, Label) and labels is not None:
arg = labels[arg]
return (self._lineno, self._name, arg)
def __eq__(self, other):
if type(self) != type(other):
return False
return self._cmp_key() == other._cmp_key()
@staticmethod
def _has_jump(opcode):
return opcode in _opcode.hasjrel or opcode in _opcode.hasjabs
def has_jump(self):
return self._has_jump(self._opcode)
def is_cond_jump(self):
"""Is a conditional jump?"""
# Ex: POP_JUMP_IF_TRUE, JUMP_IF_FALSE_OR_POP
return "JUMP_IF_" in self._name
def is_uncond_jump(self):
"""Is an unconditional jump?"""
return self.name in {"JUMP_FORWARD", "JUMP_ABSOLUTE"}
def is_final(self):
if self._name in {
"RETURN_VALUE",
"RAISE_VARARGS",
"RERAISE",
"BREAK_LOOP",
"CONTINUE_LOOP",
}:
return True
if self.is_uncond_jump():
return True
return False
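# A small usage sketch (hypothetical function, not part of the module):
# creating an abstract instruction, mutating it in place with set(), and
# querying its stack effect through CPython's dis module.
def _demo_instr():
    instr = Instr("LOAD_CONST", 42, lineno=1)
    assert instr.stack_effect() == 1   # LOAD_CONST pushes one value
    instr.set("POP_TOP")               # replaces name/arg, keeps lineno
    assert instr.stack_effect() == -1  # POP_TOP consumes one value
    assert instr.lineno == 1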
| 11,721 | Python | 28.526448 | 93 | 0.534255 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/concrete.py | import dis
import inspect
import opcode as _opcode
import struct
import sys
import types
# alias to keep the 'bytecode' variable free
from _pydevd_frame_eval.vendored import bytecode as _bytecode
from _pydevd_frame_eval.vendored.bytecode.instr import (
UNSET,
Instr,
Label,
SetLineno,
FreeVar,
CellVar,
Compare,
const_key,
_check_arg_int,
)
# Starting with Python 3.10, offsets are counted in instructions rather than
# bytes in some places:
# - jump targets use instruction offsets
# - lineno tables use byte offsets (dis.findlinestarts(code))
# - dis displays byte offsets
OFFSET_AS_INSTRUCTION = sys.version_info >= (3, 10)
def _set_docstring(code, consts):
if not consts:
return
first_const = consts[0]
if isinstance(first_const, str) or first_const is None:
code.docstring = first_const
class ConcreteInstr(Instr):
"""Concrete instruction.
arg must be an integer in the range 0..2147483647.
It has a read-only size attribute.
"""
__slots__ = ("_size", "_extended_args", "offset")
def __init__(self, name, arg=UNSET, *, lineno=None, extended_args=None, offset=None):
        # Allow remembering a potentially meaningless EXTENDED_ARG emitted by
# Python to properly compute the size and avoid messing up the jump
# targets
self._extended_args = extended_args
self._set(name, arg, lineno)
self.offset = offset
def _check_arg(self, name, opcode, arg):
if opcode >= _opcode.HAVE_ARGUMENT:
if arg is UNSET:
raise ValueError("operation %s requires an argument" % name)
_check_arg_int(name, arg)
else:
if arg is not UNSET:
raise ValueError("operation %s has no argument" % name)
def _set(self, name, arg, lineno):
super()._set(name, arg, lineno)
size = 2
if arg is not UNSET:
while arg > 0xFF:
size += 2
arg >>= 8
if self._extended_args is not None:
size = 2 + 2 * self._extended_args
self._size = size
@property
def size(self):
return self._size
def _cmp_key(self, labels=None):
return (self._lineno, self._name, self._arg)
def get_jump_target(self, instr_offset):
if self._opcode in _opcode.hasjrel:
s = (self._size // 2) if OFFSET_AS_INSTRUCTION else self._size
return instr_offset + s + self._arg
if self._opcode in _opcode.hasjabs:
return self._arg
return None
def assemble(self):
if self._arg is UNSET:
return bytes((self._opcode, 0))
arg = self._arg
b = [self._opcode, arg & 0xFF]
while arg > 0xFF:
arg >>= 8
b[:0] = [_opcode.EXTENDED_ARG, arg & 0xFF]
if self._extended_args:
while len(b) < self._size:
b[:0] = [_opcode.EXTENDED_ARG, 0x00]
return bytes(b)
@classmethod
def disassemble(cls, lineno, code, offset):
index = 2 * offset if OFFSET_AS_INSTRUCTION else offset
op = code[index]
if op >= _opcode.HAVE_ARGUMENT:
arg = code[index + 1]
else:
arg = UNSET
name = _opcode.opname[op]
# fabioz: added offset to ConcreteBytecode
# Need to keep an eye on https://github.com/MatthieuDartiailh/bytecode/issues/48 in
# case the library decides to add this in some other way.
return cls(name, arg, lineno=lineno, offset=index)
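# A small sketch (hypothetical helper): how EXTENDED_ARG affects assembly
# when the argument of a concrete instruction does not fit in one byte.
def _demo_concrete_instr():
    instr = ConcreteInstr("LOAD_CONST", 0x1234, lineno=1)
    raw = instr.assemble()              # EXTENDED_ARG 0x12, then LOAD_CONST 0x34
    assert instr.size == len(raw) == 4  # two 2-byte instruction words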
class ConcreteBytecode(_bytecode._BaseBytecodeList):
def __init__(self, instructions=(), *, consts=(), names=(), varnames=()):
super().__init__()
self.consts = list(consts)
self.names = list(names)
self.varnames = list(varnames)
for instr in instructions:
self._check_instr(instr)
self.extend(instructions)
def __iter__(self):
instructions = super().__iter__()
for instr in instructions:
self._check_instr(instr)
yield instr
def _check_instr(self, instr):
if not isinstance(instr, (ConcreteInstr, SetLineno)):
raise ValueError(
"ConcreteBytecode must only contain "
"ConcreteInstr and SetLineno objects, "
"but %s was found" % type(instr).__name__
)
def _copy_attr_from(self, bytecode):
super()._copy_attr_from(bytecode)
if isinstance(bytecode, ConcreteBytecode):
self.consts = bytecode.consts
self.names = bytecode.names
self.varnames = bytecode.varnames
def __repr__(self):
return "<ConcreteBytecode instr#=%s>" % len(self)
def __eq__(self, other):
if type(self) != type(other):
return False
const_keys1 = list(map(const_key, self.consts))
const_keys2 = list(map(const_key, other.consts))
if const_keys1 != const_keys2:
return False
if self.names != other.names:
return False
if self.varnames != other.varnames:
return False
return super().__eq__(other)
@staticmethod
def from_code(code, *, extended_arg=False):
line_starts = dict(dis.findlinestarts(code))
# find block starts
instructions = []
offset = 0
lineno = code.co_firstlineno
while offset < (len(code.co_code) // (2 if OFFSET_AS_INSTRUCTION else 1)):
lineno_off = (2 * offset) if OFFSET_AS_INSTRUCTION else offset
if lineno_off in line_starts:
lineno = line_starts[lineno_off]
instr = ConcreteInstr.disassemble(lineno, code.co_code, offset)
instructions.append(instr)
offset += (instr.size // 2) if OFFSET_AS_INSTRUCTION else instr.size
bytecode = ConcreteBytecode()
        # HINT: in some cases Python generates a useless EXTENDED_ARG opcode
        # with a value of zero. Such opcodes do not increase the size of the
        # following opcode the way a normal EXTENDED_ARG does. As a
        # consequence, they need to be tracked manually as otherwise the
        # offsets in jump targets can end up being wrong.
if not extended_arg:
# The list is modified in place
bytecode._remove_extended_args(instructions)
bytecode.name = code.co_name
bytecode.filename = code.co_filename
bytecode.flags = code.co_flags
bytecode.argcount = code.co_argcount
if sys.version_info >= (3, 8):
bytecode.posonlyargcount = code.co_posonlyargcount
bytecode.kwonlyargcount = code.co_kwonlyargcount
bytecode.first_lineno = code.co_firstlineno
bytecode.names = list(code.co_names)
bytecode.consts = list(code.co_consts)
bytecode.varnames = list(code.co_varnames)
bytecode.freevars = list(code.co_freevars)
bytecode.cellvars = list(code.co_cellvars)
_set_docstring(bytecode, code.co_consts)
bytecode[:] = instructions
return bytecode
@staticmethod
def _normalize_lineno(instructions, first_lineno):
lineno = first_lineno
for instr in instructions:
# if instr.lineno is not set, it's inherited from the previous
# instruction, or from self.first_lineno
if instr.lineno is not None:
lineno = instr.lineno
if isinstance(instr, ConcreteInstr):
yield (lineno, instr)
def _assemble_code(self):
offset = 0
code_str = []
linenos = []
for lineno, instr in self._normalize_lineno(self, self.first_lineno):
code_str.append(instr.assemble())
i_size = instr.size
linenos.append(
((offset * 2) if OFFSET_AS_INSTRUCTION else offset, i_size, lineno)
)
offset += (i_size // 2) if OFFSET_AS_INSTRUCTION else i_size
code_str = b"".join(code_str)
return (code_str, linenos)
@staticmethod
def _assemble_lnotab(first_lineno, linenos):
lnotab = []
old_offset = 0
old_lineno = first_lineno
for offset, _, lineno in linenos:
dlineno = lineno - old_lineno
if dlineno == 0:
continue
# FIXME: be kind, force monotonic line numbers? add an option?
if dlineno < 0 and sys.version_info < (3, 6):
raise ValueError(
"negative line number delta is not supported " "on Python < 3.6"
)
old_lineno = lineno
doff = offset - old_offset
old_offset = offset
while doff > 255:
lnotab.append(b"\xff\x00")
doff -= 255
while dlineno < -128:
lnotab.append(struct.pack("Bb", doff, -128))
doff = 0
dlineno -= -128
while dlineno > 127:
lnotab.append(struct.pack("Bb", doff, 127))
doff = 0
dlineno -= 127
assert 0 <= doff <= 255
assert -128 <= dlineno <= 127
lnotab.append(struct.pack("Bb", doff, dlineno))
return b"".join(lnotab)
@staticmethod
def _pack_linetable(doff, dlineno, linetable):
while dlineno < -127:
linetable.append(struct.pack("Bb", 0, -127))
dlineno -= -127
while dlineno > 127:
linetable.append(struct.pack("Bb", 0, 127))
dlineno -= 127
if doff > 254:
linetable.append(struct.pack("Bb", 254, dlineno))
doff -= 254
while doff > 254:
linetable.append(b"\xfe\x00")
doff -= 254
linetable.append(struct.pack("Bb", doff, 0))
else:
linetable.append(struct.pack("Bb", doff, dlineno))
assert 0 <= doff <= 254
assert -127 <= dlineno <= 127
def _assemble_linestable(self, first_lineno, linenos):
if not linenos:
return b""
linetable = []
old_offset = 0
iter_in = iter(linenos)
offset, i_size, old_lineno = next(iter_in)
old_dlineno = old_lineno - first_lineno
for offset, i_size, lineno in iter_in:
dlineno = lineno - old_lineno
if dlineno == 0:
continue
old_lineno = lineno
doff = offset - old_offset
old_offset = offset
self._pack_linetable(doff, old_dlineno, linetable)
old_dlineno = dlineno
# Pack the line of the last instruction.
doff = offset + i_size - old_offset
self._pack_linetable(doff, old_dlineno, linetable)
return b"".join(linetable)
@staticmethod
def _remove_extended_args(instructions):
        # HINT: in some cases Python generates a useless EXTENDED_ARG opcode
        # with a value of zero. Such opcodes do not increase the size of the
        # following opcode the way a normal EXTENDED_ARG does. As a
        # consequence, they need to be tracked manually as otherwise the
        # offsets in jump targets can end up being wrong.
nb_extended_args = 0
extended_arg = None
index = 0
while index < len(instructions):
instr = instructions[index]
# Skip SetLineno meta instruction
if isinstance(instr, SetLineno):
index += 1
continue
if instr.name == "EXTENDED_ARG":
nb_extended_args += 1
if extended_arg is not None:
extended_arg = (extended_arg << 8) + instr.arg
else:
extended_arg = instr.arg
del instructions[index]
continue
if extended_arg is not None:
arg = (extended_arg << 8) + instr.arg
extended_arg = None
instr = ConcreteInstr(
instr.name,
arg,
lineno=instr.lineno,
extended_args=nb_extended_args,
offset=instr.offset,
)
instructions[index] = instr
nb_extended_args = 0
index += 1
if extended_arg is not None:
raise ValueError("EXTENDED_ARG at the end of the code")
def compute_stacksize(self, *, check_pre_and_post=True):
bytecode = self.to_bytecode()
cfg = _bytecode.ControlFlowGraph.from_bytecode(bytecode)
return cfg.compute_stacksize(check_pre_and_post=check_pre_and_post)
def to_code(self, stacksize=None, *, check_pre_and_post=True):
code_str, linenos = self._assemble_code()
lnotab = (
self._assemble_linestable(self.first_lineno, linenos)
if sys.version_info >= (3, 10)
else self._assemble_lnotab(self.first_lineno, linenos)
)
nlocals = len(self.varnames)
if stacksize is None:
stacksize = self.compute_stacksize(check_pre_and_post=check_pre_and_post)
if sys.version_info < (3, 8):
return types.CodeType(
self.argcount,
self.kwonlyargcount,
nlocals,
stacksize,
int(self.flags),
code_str,
tuple(self.consts),
tuple(self.names),
tuple(self.varnames),
self.filename,
self.name,
self.first_lineno,
lnotab,
tuple(self.freevars),
tuple(self.cellvars),
)
else:
return types.CodeType(
self.argcount,
self.posonlyargcount,
self.kwonlyargcount,
nlocals,
stacksize,
int(self.flags),
code_str,
tuple(self.consts),
tuple(self.names),
tuple(self.varnames),
self.filename,
self.name,
self.first_lineno,
lnotab,
tuple(self.freevars),
tuple(self.cellvars),
)
def to_bytecode(self):
        # Copy instructions and remove extended args if any (in-place)
c_instructions = self[:]
self._remove_extended_args(c_instructions)
# find jump targets
jump_targets = set()
offset = 0
for instr in c_instructions:
if isinstance(instr, SetLineno):
continue
target = instr.get_jump_target(offset)
if target is not None:
jump_targets.add(target)
offset += (instr.size // 2) if OFFSET_AS_INSTRUCTION else instr.size
# create labels
jumps = []
instructions = []
labels = {}
offset = 0
ncells = len(self.cellvars)
for lineno, instr in self._normalize_lineno(c_instructions, self.first_lineno):
if offset in jump_targets:
label = Label()
labels[offset] = label
instructions.append(label)
jump_target = instr.get_jump_target(offset)
size = instr.size
arg = instr.arg
# FIXME: better error reporting
if instr.opcode in _opcode.hasconst:
arg = self.consts[arg]
elif instr.opcode in _opcode.haslocal:
arg = self.varnames[arg]
elif instr.opcode in _opcode.hasname:
arg = self.names[arg]
elif instr.opcode in _opcode.hasfree:
if arg < ncells:
name = self.cellvars[arg]
arg = CellVar(name)
else:
name = self.freevars[arg - ncells]
arg = FreeVar(name)
elif instr.opcode in _opcode.hascompare:
arg = Compare(arg)
if jump_target is None:
instr = Instr(instr.name, arg, lineno=lineno, offset=instr.offset)
else:
instr_index = len(instructions)
instructions.append(instr)
offset += (size // 2) if OFFSET_AS_INSTRUCTION else size
if jump_target is not None:
jumps.append((instr_index, jump_target))
# replace jump targets with labels
for index, jump_target in jumps:
instr = instructions[index]
# FIXME: better error reporting on missing label
label = labels[jump_target]
instructions[index] = Instr(instr.name, label, lineno=instr.lineno, offset=instr.offset)
bytecode = _bytecode.Bytecode()
bytecode._copy_attr_from(self)
nargs = bytecode.argcount + bytecode.kwonlyargcount
if sys.version_info > (3, 8):
nargs += bytecode.posonlyargcount
if bytecode.flags & inspect.CO_VARARGS:
nargs += 1
if bytecode.flags & inspect.CO_VARKEYWORDS:
nargs += 1
bytecode.argnames = self.varnames[:nargs]
_set_docstring(bytecode, self.consts)
bytecode.extend(instructions)
return bytecode
class _ConvertBytecodeToConcrete:
# Default number of passes of compute_jumps() before giving up. Refer to
# assemble_jump_offsets() in compile.c for background.
_compute_jumps_passes = 10
def __init__(self, code):
assert isinstance(code, _bytecode.Bytecode)
self.bytecode = code
# temporary variables
self.instructions = []
self.jumps = []
self.labels = {}
# used to build ConcreteBytecode() object
self.consts_indices = {}
self.consts_list = []
self.names = []
self.varnames = []
def add_const(self, value):
key = const_key(value)
if key in self.consts_indices:
return self.consts_indices[key]
index = len(self.consts_indices)
self.consts_indices[key] = index
self.consts_list.append(value)
return index
@staticmethod
def add(names, name):
try:
index = names.index(name)
except ValueError:
index = len(names)
names.append(name)
return index
def concrete_instructions(self):
ncells = len(self.bytecode.cellvars)
lineno = self.bytecode.first_lineno
for instr in self.bytecode:
if isinstance(instr, Label):
self.labels[instr] = len(self.instructions)
continue
if isinstance(instr, SetLineno):
lineno = instr.lineno
continue
if isinstance(instr, ConcreteInstr):
instr = instr.copy()
else:
assert isinstance(instr, Instr)
if instr.lineno is not None:
lineno = instr.lineno
arg = instr.arg
is_jump = isinstance(arg, Label)
if is_jump:
label = arg
# fake value, real value is set in compute_jumps()
arg = 0
elif instr.opcode in _opcode.hasconst:
arg = self.add_const(arg)
elif instr.opcode in _opcode.haslocal:
arg = self.add(self.varnames, arg)
elif instr.opcode in _opcode.hasname:
arg = self.add(self.names, arg)
elif instr.opcode in _opcode.hasfree:
if isinstance(arg, CellVar):
arg = self.bytecode.cellvars.index(arg.name)
else:
assert isinstance(arg, FreeVar)
arg = ncells + self.bytecode.freevars.index(arg.name)
elif instr.opcode in _opcode.hascompare:
if isinstance(arg, Compare):
arg = arg.value
instr = ConcreteInstr(instr.name, arg, lineno=lineno)
if is_jump:
self.jumps.append((len(self.instructions), label, instr))
self.instructions.append(instr)
def compute_jumps(self):
offsets = []
offset = 0
for index, instr in enumerate(self.instructions):
offsets.append(offset)
offset += instr.size // 2 if OFFSET_AS_INSTRUCTION else instr.size
# needed if a label is at the end
offsets.append(offset)
# fix argument of jump instructions: resolve labels
modified = False
for index, label, instr in self.jumps:
target_index = self.labels[label]
target_offset = offsets[target_index]
if instr.opcode in _opcode.hasjrel:
instr_offset = offsets[index]
target_offset -= instr_offset + (
instr.size // 2 if OFFSET_AS_INSTRUCTION else instr.size
)
old_size = instr.size
# FIXME: better error report if target_offset is negative
instr.arg = target_offset
if instr.size != old_size:
modified = True
return modified
def to_concrete_bytecode(self, compute_jumps_passes=None):
if compute_jumps_passes is None:
compute_jumps_passes = self._compute_jumps_passes
first_const = self.bytecode.docstring
if first_const is not UNSET:
self.add_const(first_const)
self.varnames.extend(self.bytecode.argnames)
self.concrete_instructions()
for pas in range(0, compute_jumps_passes):
modified = self.compute_jumps()
if not modified:
break
else:
raise RuntimeError(
"compute_jumps() failed to converge after" " %d passes" % (pas + 1)
)
concrete = ConcreteBytecode(
self.instructions,
consts=self.consts_list.copy(),
names=self.names,
varnames=self.varnames,
)
concrete._copy_attr_from(self.bytecode)
return concrete
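# A round-trip sketch (hypothetical function, not part of the module):
# decode a real code object, inspect it, and reassemble a working one.
def _demo_roundtrip():
    code = compile("x = 1", "<demo>", "exec")
    concrete = ConcreteBytecode.from_code(code)
    assert concrete.names == ["x"]
    ns = {}
    exec(concrete.to_code(), ns)  # the rebuilt code object still runs
    assert ns["x"] == 1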
| 22,299 | Python | 32.086053 | 100 | 0.546033 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/__init__.py | __version__ = "0.13.0.dev"
__all__ = [
"Label",
"Instr",
"SetLineno",
"Bytecode",
"ConcreteInstr",
"ConcreteBytecode",
"ControlFlowGraph",
"CompilerFlags",
"Compare",
]
from _pydevd_frame_eval.vendored.bytecode.flags import CompilerFlags
from _pydevd_frame_eval.vendored.bytecode.instr import (
UNSET,
Label,
SetLineno,
Instr,
CellVar,
FreeVar, # noqa
Compare,
)
from _pydevd_frame_eval.vendored.bytecode.bytecode import (
BaseBytecode,
_BaseBytecodeList,
_InstrList,
Bytecode,
) # noqa
from _pydevd_frame_eval.vendored.bytecode.concrete import (
ConcreteInstr,
ConcreteBytecode, # noqa
# import needed to use it in bytecode.py
_ConvertBytecodeToConcrete,
)
from _pydevd_frame_eval.vendored.bytecode.cfg import BasicBlock, ControlFlowGraph # noqa
import sys
def dump_bytecode(bytecode, *, lineno=False, stream=sys.stdout):
def format_line(index, line):
nonlocal cur_lineno, prev_lineno
if lineno:
if cur_lineno != prev_lineno:
line = "L.% 3s % 3s: %s" % (cur_lineno, index, line)
prev_lineno = cur_lineno
else:
line = " % 3s: %s" % (index, line)
return line
def format_instr(instr, labels=None):
text = instr.name
arg = instr._arg
if arg is not UNSET:
if isinstance(arg, Label):
try:
arg = "<%s>" % labels[arg]
except KeyError:
arg = "<error: unknown label>"
elif isinstance(arg, BasicBlock):
try:
arg = "<%s>" % labels[id(arg)]
except KeyError:
arg = "<error: unknown block>"
else:
arg = repr(arg)
text = "%s %s" % (text, arg)
return text
indent = " " * 4
cur_lineno = bytecode.first_lineno
prev_lineno = None
if isinstance(bytecode, ConcreteBytecode):
offset = 0
for instr in bytecode:
fields = []
if instr.lineno is not None:
cur_lineno = instr.lineno
if lineno:
fields.append(format_instr(instr))
line = "".join(fields)
line = format_line(offset, line)
else:
fields.append("% 3s %s" % (offset, format_instr(instr)))
line = "".join(fields)
print(line, file=stream)
offset += instr.size
elif isinstance(bytecode, Bytecode):
labels = {}
for index, instr in enumerate(bytecode):
if isinstance(instr, Label):
labels[instr] = "label_instr%s" % index
for index, instr in enumerate(bytecode):
if isinstance(instr, Label):
label = labels[instr]
line = "%s:" % label
if index != 0:
print(file=stream)
else:
if instr.lineno is not None:
cur_lineno = instr.lineno
line = format_instr(instr, labels)
line = indent + format_line(index, line)
print(line, file=stream)
print(file=stream)
elif isinstance(bytecode, ControlFlowGraph):
labels = {}
for block_index, block in enumerate(bytecode, 1):
labels[id(block)] = "block%s" % block_index
for block_index, block in enumerate(bytecode, 1):
print("%s:" % labels[id(block)], file=stream)
prev_lineno = None
for index, instr in enumerate(block):
if instr.lineno is not None:
cur_lineno = instr.lineno
line = format_instr(instr, labels)
line = indent + format_line(index, line)
print(line, file=stream)
if block.next_block is not None:
print(indent + "-> %s" % labels[id(block.next_block)], file=stream)
print(file=stream)
else:
raise TypeError("unknown bytecode class")
| 4,152 | Python | 30.70229 | 89 | 0.526734 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/bytecode.py | # alias to keep the 'bytecode' variable free
import sys
from _pydevd_frame_eval.vendored import bytecode as _bytecode
from _pydevd_frame_eval.vendored.bytecode.instr import UNSET, Label, SetLineno, Instr
from _pydevd_frame_eval.vendored.bytecode.flags import infer_flags
class BaseBytecode:
def __init__(self):
self.argcount = 0
if sys.version_info > (3, 8):
self.posonlyargcount = 0
self.kwonlyargcount = 0
self.first_lineno = 1
self.name = "<module>"
self.filename = "<string>"
self.docstring = UNSET
self.cellvars = []
# we cannot recreate freevars from instructions because of super()
# special-case
self.freevars = []
self._flags = _bytecode.CompilerFlags(0)
def _copy_attr_from(self, bytecode):
self.argcount = bytecode.argcount
if sys.version_info > (3, 8):
self.posonlyargcount = bytecode.posonlyargcount
self.kwonlyargcount = bytecode.kwonlyargcount
self.flags = bytecode.flags
self.first_lineno = bytecode.first_lineno
self.name = bytecode.name
self.filename = bytecode.filename
self.docstring = bytecode.docstring
self.cellvars = list(bytecode.cellvars)
self.freevars = list(bytecode.freevars)
def __eq__(self, other):
if type(self) != type(other):
return False
if self.argcount != other.argcount:
return False
if sys.version_info > (3, 8):
if self.posonlyargcount != other.posonlyargcount:
return False
if self.kwonlyargcount != other.kwonlyargcount:
return False
if self.flags != other.flags:
return False
if self.first_lineno != other.first_lineno:
return False
if self.filename != other.filename:
return False
if self.name != other.name:
return False
if self.docstring != other.docstring:
return False
if self.cellvars != other.cellvars:
return False
if self.freevars != other.freevars:
return False
if self.compute_stacksize() != other.compute_stacksize():
return False
return True
@property
def flags(self):
return self._flags
@flags.setter
def flags(self, value):
if not isinstance(value, _bytecode.CompilerFlags):
value = _bytecode.CompilerFlags(value)
self._flags = value
def update_flags(self, *, is_async=None):
self.flags = infer_flags(self, is_async)
class _BaseBytecodeList(BaseBytecode, list):
"""List subclass providing type stable slicing and copying."""
def __getitem__(self, index):
value = super().__getitem__(index)
if isinstance(index, slice):
value = type(self)(value)
value._copy_attr_from(self)
return value
def copy(self):
new = type(self)(super().copy())
new._copy_attr_from(self)
return new
def legalize(self):
"""Check that all the element of the list are valid and remove SetLineno."""
lineno_pos = []
set_lineno = None
current_lineno = self.first_lineno
for pos, instr in enumerate(self):
if isinstance(instr, SetLineno):
set_lineno = instr.lineno
lineno_pos.append(pos)
continue
# Filter out Labels
if not isinstance(instr, Instr):
continue
if set_lineno is not None:
instr.lineno = set_lineno
elif instr.lineno is None:
instr.lineno = current_lineno
else:
current_lineno = instr.lineno
for i in reversed(lineno_pos):
del self[i]
def __iter__(self):
instructions = super().__iter__()
for instr in instructions:
self._check_instr(instr)
yield instr
def _check_instr(self, instr):
raise NotImplementedError()
class _InstrList(list):
def _flat(self):
instructions = []
labels = {}
jumps = []
offset = 0
for index, instr in enumerate(self):
if isinstance(instr, Label):
instructions.append("label_instr%s" % index)
labels[instr] = offset
else:
if isinstance(instr, Instr) and isinstance(instr.arg, Label):
target_label = instr.arg
instr = _bytecode.ConcreteInstr(instr.name, 0, lineno=instr.lineno)
jumps.append((target_label, instr))
instructions.append(instr)
offset += 1
for target_label, instr in jumps:
instr.arg = labels[target_label]
return instructions
def __eq__(self, other):
if not isinstance(other, _InstrList):
other = _InstrList(other)
return self._flat() == other._flat()
class Bytecode(_InstrList, _BaseBytecodeList):
def __init__(self, instructions=()):
BaseBytecode.__init__(self)
self.argnames = []
for instr in instructions:
self._check_instr(instr)
self.extend(instructions)
def __iter__(self):
instructions = super().__iter__()
for instr in instructions:
self._check_instr(instr)
yield instr
def _check_instr(self, instr):
if not isinstance(instr, (Label, SetLineno, Instr)):
raise ValueError(
"Bytecode must only contain Label, "
"SetLineno, and Instr objects, "
"but %s was found" % type(instr).__name__
)
def _copy_attr_from(self, bytecode):
super()._copy_attr_from(bytecode)
if isinstance(bytecode, Bytecode):
self.argnames = bytecode.argnames
@staticmethod
def from_code(code):
concrete = _bytecode.ConcreteBytecode.from_code(code)
return concrete.to_bytecode()
def compute_stacksize(self, *, check_pre_and_post=True):
cfg = _bytecode.ControlFlowGraph.from_bytecode(self)
return cfg.compute_stacksize(check_pre_and_post=check_pre_and_post)
def to_code(
self, compute_jumps_passes=None, stacksize=None, *, check_pre_and_post=True
):
        # Compute the stack size here (when not provided) and pass it down so
        # that ConcreteBytecode.to_code does not reconvert the concrete
        # bytecode to Bytecode and a CFG just to do the calculation.
if stacksize is None:
stacksize = self.compute_stacksize(check_pre_and_post=check_pre_and_post)
bc = self.to_concrete_bytecode(compute_jumps_passes=compute_jumps_passes)
return bc.to_code(stacksize=stacksize)
def to_concrete_bytecode(self, compute_jumps_passes=None):
converter = _bytecode._ConvertBytecodeToConcrete(self)
return converter.to_concrete_bytecode(compute_jumps_passes=compute_jumps_passes)
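# An assembly sketch (hypothetical function, not part of the module): build
# a code object from abstract instructions; eval() of a code object returns
# whatever RETURN_VALUE leaves behind.
def _demo_build():
    bc = Bytecode([Instr("LOAD_CONST", 42), Instr("RETURN_VALUE")])
    assert eval(bc.to_code()) == 42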
| 6,983 | Python | 32.099526 | 88 | 0.587427 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/cfg.py | import sys
# alias to keep the 'bytecode' variable free
from _pydevd_frame_eval.vendored import bytecode as _bytecode
from _pydevd_frame_eval.vendored.bytecode.concrete import ConcreteInstr
from _pydevd_frame_eval.vendored.bytecode.flags import CompilerFlags
from _pydevd_frame_eval.vendored.bytecode.instr import Label, SetLineno, Instr
class BasicBlock(_bytecode._InstrList):
def __init__(self, instructions=None):
# a BasicBlock object, or None
self.next_block = None
if instructions:
super().__init__(instructions)
def __iter__(self):
index = 0
while index < len(self):
instr = self[index]
index += 1
if not isinstance(instr, (SetLineno, Instr)):
raise ValueError(
"BasicBlock must only contain SetLineno and Instr objects, "
"but %s was found" % instr.__class__.__name__
)
if isinstance(instr, Instr) and instr.has_jump():
if index < len(self):
raise ValueError(
"Only the last instruction of a basic " "block can be a jump"
)
if not isinstance(instr.arg, BasicBlock):
raise ValueError(
"Jump target must a BasicBlock, got %s",
type(instr.arg).__name__,
)
yield instr
def __getitem__(self, index):
value = super().__getitem__(index)
if isinstance(index, slice):
value = type(self)(value)
value.next_block = self.next_block
return value
def copy(self):
new = type(self)(super().copy())
new.next_block = self.next_block
return new
def legalize(self, first_lineno):
"""Check that all the element of the list are valid and remove SetLineno."""
lineno_pos = []
set_lineno = None
current_lineno = first_lineno
for pos, instr in enumerate(self):
if isinstance(instr, SetLineno):
set_lineno = current_lineno = instr.lineno
lineno_pos.append(pos)
continue
if set_lineno is not None:
instr.lineno = set_lineno
elif instr.lineno is None:
instr.lineno = current_lineno
else:
current_lineno = instr.lineno
for i in reversed(lineno_pos):
del self[i]
return current_lineno
def get_jump(self):
if not self:
return None
last_instr = self[-1]
if not (isinstance(last_instr, Instr) and last_instr.has_jump()):
return None
target_block = last_instr.arg
assert isinstance(target_block, BasicBlock)
return target_block
def _compute_stack_size(block, size, maxsize, *, check_pre_and_post=True):
"""Generator used to reduce the use of function stacks.
This allows to avoid nested recursion and allow to treat more cases.
HOW-TO:
Following the methods of Trampoline
(see https://en.wikipedia.org/wiki/Trampoline_(computing)),
We yield either:
- the arguments that would be used in the recursive calls, i.e,
'yield block, size, maxsize' instead of making a recursive call
'_compute_stack_size(block, size, maxsize)', if we encounter an
instruction jumping to another block or if the block is linked to
another one (ie `next_block` is set)
- the required stack from the stack if we went through all the instructions
or encountered an unconditional jump.
In the first case, the calling function is then responsible for creating a
new generator with those arguments, iterating over it till exhaustion to
determine the stacksize required by the block and resuming this function
with the determined stacksize.
"""
# If the block is currently being visited (seen = True) or if it was visited
# previously by using a larger starting size than the one in use, return the
# maxsize.
if block.seen or block.startsize >= size:
yield maxsize
def update_size(pre_delta, post_delta, size, maxsize):
size += pre_delta
if size < 0:
msg = "Failed to compute stacksize, got negative size"
raise RuntimeError(msg)
size += post_delta
maxsize = max(maxsize, size)
return size, maxsize
# Prevent recursive visit of block if two blocks are nested (jump from one
# to the other).
block.seen = True
block.startsize = size
for instr in block:
# Ignore SetLineno
if isinstance(instr, SetLineno):
continue
# For instructions with a jump first compute the stacksize required when the
# jump is taken.
if instr.has_jump():
effect = (
instr.pre_and_post_stack_effect(jump=True)
if check_pre_and_post
else (instr.stack_effect(jump=True), 0)
)
taken_size, maxsize = update_size(*effect, size, maxsize)
# Yield the parameters required to compute the stacksize required
            # by the block to which the jump points, and resume when we know
# the maxsize.
maxsize = yield instr.arg, taken_size, maxsize
            # For unconditional jumps, abort early since the following
            # instructions will never be seen.
if instr.is_uncond_jump():
block.seen = False
yield maxsize
# jump=False: non-taken path of jumps, or any non-jump
effect = (
instr.pre_and_post_stack_effect(jump=False)
if check_pre_and_post
else (instr.stack_effect(jump=False), 0)
)
size, maxsize = update_size(*effect, size, maxsize)
if block.next_block:
maxsize = yield block.next_block, size, maxsize
block.seen = False
yield maxsize
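# A minimal, self-contained illustration (hypothetical, not used by the
# library) of the trampoline pattern described in the docstring above:
# generators yield either a tuple of arguments standing in for a recursive
# call, or a plain result, and a driver loop replaces the call stack.
def _demo_trampoline():
    def countdown(n):
        if n > 0:
            result = yield (n - 1,)  # "recursive call" countdown(n - 1)
            yield result + 1         # return result + 1
        else:
            yield 0                  # base case: return 0
    stack, gen, value = [], countdown(5), None
    while True:
        out = gen.send(value)
        value = None
        if isinstance(out, tuple):   # arguments: start a nested "call"
            stack.append(gen)
            gen = countdown(*out)
        elif stack:                  # result: resume the caller with it
            gen, value = stack.pop(), out
        else:                        # result at the outermost level
            break
    assert out == 5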
class ControlFlowGraph(_bytecode.BaseBytecode):
def __init__(self):
super().__init__()
self._blocks = []
self._block_index = {}
self.argnames = []
self.add_block()
def legalize(self):
"""Legalize all blocks."""
current_lineno = self.first_lineno
for block in self._blocks:
current_lineno = block.legalize(current_lineno)
def get_block_index(self, block):
try:
return self._block_index[id(block)]
except KeyError:
raise ValueError("the block is not part of this bytecode")
def _add_block(self, block):
block_index = len(self._blocks)
self._blocks.append(block)
self._block_index[id(block)] = block_index
def add_block(self, instructions=None):
block = BasicBlock(instructions)
self._add_block(block)
return block
def compute_stacksize(self, *, check_pre_and_post=True):
"""Compute the stack size by iterating through the blocks
The implementation make use of a generator function to avoid issue with
deeply nested recursions.
"""
# In the absence of any block return 0
if not self:
return 0
        # Ensure that previous calculations do not impact this one.
for block in self:
block.seen = False
block.startsize = -32768 # INT_MIN
        # Starting with Python 3.10, generators and coroutines start with one
        # object on the stack (None; anything else is an error).
initial_stack_size = 0
if sys.version_info >= (3, 10) and self.flags & (
CompilerFlags.GENERATOR
| CompilerFlags.COROUTINE
| CompilerFlags.ASYNC_GENERATOR
):
initial_stack_size = 1
        # Create a generator/coroutine responsible for dealing with the first block
coro = _compute_stack_size(
self[0], initial_stack_size, 0, check_pre_and_post=check_pre_and_post
)
        # Create a list of generators that have not yet been exhausted
coroutines = []
push_coroutine = coroutines.append
pop_coroutine = coroutines.pop
args = None
try:
while True:
args = coro.send(None)
# Consume the stored generators as long as they return a simple
                # integer that is to be used to resume the last stored generator.
while isinstance(args, int):
coro = pop_coroutine()
args = coro.send(args)
                # Otherwise we enter a new block: store the generator in use
                # and create a new one to process the new block
push_coroutine(coro)
coro = _compute_stack_size(*args, check_pre_and_post=check_pre_and_post)
except IndexError:
# The exception occurs when all the generators have been exhausted
            # in which case the last yielded value is the stacksize.
assert args is not None
return args
def __repr__(self):
return "<ControlFlowGraph block#=%s>" % len(self._blocks)
def get_instructions(self):
instructions = []
jumps = []
for block in self:
target_block = block.get_jump()
if target_block is not None:
instr = block[-1]
instr = ConcreteInstr(instr.name, 0, lineno=instr.lineno)
jumps.append((target_block, instr))
instructions.extend(block[:-1])
instructions.append(instr)
else:
instructions.extend(block)
for target_block, instr in jumps:
instr.arg = self.get_block_index(target_block)
return instructions
def __eq__(self, other):
if type(self) != type(other):
return False
if self.argnames != other.argnames:
return False
instrs1 = self.get_instructions()
instrs2 = other.get_instructions()
if instrs1 != instrs2:
return False
# FIXME: compare block.next_block
return super().__eq__(other)
def __len__(self):
return len(self._blocks)
def __iter__(self):
return iter(self._blocks)
def __getitem__(self, index):
if isinstance(index, BasicBlock):
index = self.get_block_index(index)
return self._blocks[index]
def __delitem__(self, index):
if isinstance(index, BasicBlock):
index = self.get_block_index(index)
block = self._blocks[index]
del self._blocks[index]
del self._block_index[id(block)]
for index in range(index, len(self)):
block = self._blocks[index]
self._block_index[id(block)] -= 1
def split_block(self, block, index):
if not isinstance(block, BasicBlock):
raise TypeError("expected block")
block_index = self.get_block_index(block)
if index < 0:
raise ValueError("index must be positive")
block = self._blocks[block_index]
if index == 0:
return block
if index > len(block):
raise ValueError("index out of the block")
instructions = block[index:]
if not instructions:
if block_index + 1 < len(self):
return self[block_index + 1]
del block[index:]
block2 = BasicBlock(instructions)
block.next_block = block2
for block in self[block_index + 1 :]:
self._block_index[id(block)] += 1
self._blocks.insert(block_index + 1, block2)
self._block_index[id(block2)] = block_index + 1
return block2
@staticmethod
def from_bytecode(bytecode):
# label => instruction index
label_to_block_index = {}
jumps = []
block_starts = {}
for index, instr in enumerate(bytecode):
if isinstance(instr, Label):
label_to_block_index[instr] = index
else:
if isinstance(instr, Instr) and isinstance(instr.arg, Label):
jumps.append((index, instr.arg))
for target_index, target_label in jumps:
target_index = label_to_block_index[target_label]
block_starts[target_index] = target_label
bytecode_blocks = _bytecode.ControlFlowGraph()
bytecode_blocks._copy_attr_from(bytecode)
bytecode_blocks.argnames = list(bytecode.argnames)
# copy instructions, convert labels to block labels
block = bytecode_blocks[0]
labels = {}
jumps = []
for index, instr in enumerate(bytecode):
if index in block_starts:
old_label = block_starts[index]
if index != 0:
new_block = bytecode_blocks.add_block()
if not block[-1].is_final():
block.next_block = new_block
block = new_block
if old_label is not None:
labels[old_label] = block
elif block and isinstance(block[-1], Instr):
if block[-1].is_final():
block = bytecode_blocks.add_block()
elif block[-1].has_jump():
new_block = bytecode_blocks.add_block()
block.next_block = new_block
block = new_block
if isinstance(instr, Label):
continue
# don't copy SetLineno objects
if isinstance(instr, Instr):
instr = instr.copy()
if isinstance(instr.arg, Label):
jumps.append(instr)
block.append(instr)
for instr in jumps:
label = instr.arg
instr.arg = labels[label]
return bytecode_blocks
def to_bytecode(self):
"""Convert to Bytecode."""
used_blocks = set()
for block in self:
target_block = block.get_jump()
if target_block is not None:
used_blocks.add(id(target_block))
labels = {}
jumps = []
instructions = []
for block in self:
if id(block) in used_blocks:
new_label = Label()
labels[id(block)] = new_label
instructions.append(new_label)
for instr in block:
# don't copy SetLineno objects
if isinstance(instr, Instr):
instr = instr.copy()
if isinstance(instr.arg, BasicBlock):
jumps.append(instr)
instructions.append(instr)
# Map to new labels
for instr in jumps:
instr.arg = labels[id(instr.arg)]
bytecode = _bytecode.Bytecode()
bytecode._copy_attr_from(self)
bytecode.argnames = list(self.argnames)
bytecode[:] = instructions
return bytecode
def to_code(self, stacksize=None):
"""Convert to code."""
if stacksize is None:
stacksize = self.compute_stacksize()
bc = self.to_bytecode()
return bc.to_code(stacksize=stacksize)
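# A usage sketch (hypothetical function, not part of the module): a
# conditional jump splits a function into several basic blocks, and the
# stack size is computed over the resulting graph.
def _demo_cfg():
    def f(x):
        if x:
            return 1
        return 2
    bc = _bytecode.Bytecode.from_code(f.__code__)
    cfg = ControlFlowGraph.from_bytecode(bc)
    assert len(cfg) >= 2  # the branch creates extra blocks
    assert cfg.compute_stacksize() >= 1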
| 15,391 | Python | 32.172414 | 88 | 0.563316 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/tests/test_concrete.py |
import pytest
from tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON
from tests_python.debug_constants import TEST_CYTHON
pytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason='Requires CPython >= 3.6')
import opcode
import sys
import textwrap
import types
import unittest
from _pydevd_frame_eval.vendored.bytecode import (
UNSET,
Label,
Instr,
SetLineno,
Bytecode,
CellVar,
FreeVar,
CompilerFlags,
ConcreteInstr,
ConcreteBytecode,
)
from _pydevd_frame_eval.vendored.bytecode.concrete import OFFSET_AS_INSTRUCTION
from _pydevd_frame_eval.vendored.bytecode.tests import get_code, TestCase
class ConcreteInstrTests(TestCase):
def test_constructor(self):
with self.assertRaises(ValueError):
# need an argument
ConcreteInstr("LOAD_CONST")
with self.assertRaises(ValueError):
# must not have an argument
ConcreteInstr("ROT_TWO", 33)
# invalid argument
with self.assertRaises(TypeError):
ConcreteInstr("LOAD_CONST", 1.0)
with self.assertRaises(ValueError):
ConcreteInstr("LOAD_CONST", -1)
with self.assertRaises(TypeError):
ConcreteInstr("LOAD_CONST", 5, lineno=1.0)
with self.assertRaises(ValueError):
ConcreteInstr("LOAD_CONST", 5, lineno=-1)
# test maximum argument
with self.assertRaises(ValueError):
ConcreteInstr("LOAD_CONST", 2147483647 + 1)
instr = ConcreteInstr("LOAD_CONST", 2147483647)
self.assertEqual(instr.arg, 2147483647)
# test meaningless extended args
instr = ConcreteInstr("LOAD_FAST", 8, lineno=3, extended_args=1)
self.assertEqual(instr.name, "LOAD_FAST")
self.assertEqual(instr.arg, 8)
self.assertEqual(instr.lineno, 3)
self.assertEqual(instr.size, 4)
def test_attr(self):
instr = ConcreteInstr("LOAD_CONST", 5, lineno=12)
self.assertEqual(instr.name, "LOAD_CONST")
self.assertEqual(instr.opcode, 100)
self.assertEqual(instr.arg, 5)
self.assertEqual(instr.lineno, 12)
self.assertEqual(instr.size, 2)
def test_set(self):
instr = ConcreteInstr("LOAD_CONST", 5, lineno=3)
instr.set("NOP")
self.assertEqual(instr.name, "NOP")
self.assertIs(instr.arg, UNSET)
self.assertEqual(instr.lineno, 3)
instr.set("LOAD_FAST", 8)
self.assertEqual(instr.name, "LOAD_FAST")
self.assertEqual(instr.arg, 8)
self.assertEqual(instr.lineno, 3)
# invalid
with self.assertRaises(ValueError):
instr.set("LOAD_CONST")
with self.assertRaises(ValueError):
instr.set("NOP", 5)
def test_set_attr(self):
instr = ConcreteInstr("LOAD_CONST", 5, lineno=12)
# operator name
instr.name = "LOAD_FAST"
self.assertEqual(instr.name, "LOAD_FAST")
self.assertEqual(instr.opcode, 124)
self.assertRaises(TypeError, setattr, instr, "name", 3)
self.assertRaises(ValueError, setattr, instr, "name", "xxx")
# operator code
instr.opcode = 100
self.assertEqual(instr.name, "LOAD_CONST")
self.assertEqual(instr.opcode, 100)
self.assertRaises(ValueError, setattr, instr, "opcode", -12)
self.assertRaises(TypeError, setattr, instr, "opcode", "abc")
# extended argument
instr.arg = 0x1234ABCD
self.assertEqual(instr.arg, 0x1234ABCD)
self.assertEqual(instr.size, 8)
# small argument
instr.arg = 0
self.assertEqual(instr.arg, 0)
self.assertEqual(instr.size, 2)
# invalid argument
self.assertRaises(ValueError, setattr, instr, "arg", -1)
self.assertRaises(ValueError, setattr, instr, "arg", 2147483647 + 1)
# size attribute is read-only
self.assertRaises(AttributeError, setattr, instr, "size", 3)
# lineno
instr.lineno = 33
self.assertEqual(instr.lineno, 33)
self.assertRaises(TypeError, setattr, instr, "lineno", 1.0)
self.assertRaises(ValueError, setattr, instr, "lineno", -1)
def test_size(self):
self.assertEqual(ConcreteInstr("ROT_TWO").size, 2)
self.assertEqual(ConcreteInstr("LOAD_CONST", 3).size, 2)
self.assertEqual(ConcreteInstr("LOAD_CONST", 0x1234ABCD).size, 8)
def test_disassemble(self):
code = b"\t\x00d\x03"
instr = ConcreteInstr.disassemble(1, code, 0)
self.assertEqual(instr, ConcreteInstr("NOP", lineno=1))
instr = ConcreteInstr.disassemble(2, code, 1 if OFFSET_AS_INSTRUCTION else 2)
self.assertEqual(instr, ConcreteInstr("LOAD_CONST", 3, lineno=2))
code = b"\x90\x12\x904\x90\xabd\xcd"
instr = ConcreteInstr.disassemble(3, code, 0)
self.assertEqual(instr, ConcreteInstr("EXTENDED_ARG", 0x12, lineno=3))
def test_assemble(self):
instr = ConcreteInstr("NOP")
self.assertEqual(instr.assemble(), b"\t\x00")
instr = ConcreteInstr("LOAD_CONST", 3)
self.assertEqual(instr.assemble(), b"d\x03")
instr = ConcreteInstr("LOAD_CONST", 0x1234ABCD)
self.assertEqual(
instr.assemble(),
(b"\x90\x12\x904\x90\xabd\xcd"),
)
instr = ConcreteInstr("LOAD_CONST", 3, extended_args=1)
self.assertEqual(
instr.assemble(),
(b"\x90\x00d\x03"),
)
def test_get_jump_target(self):
jump_abs = ConcreteInstr("JUMP_ABSOLUTE", 3)
self.assertEqual(jump_abs.get_jump_target(100), 3)
jump_forward = ConcreteInstr("JUMP_FORWARD", 5)
self.assertEqual(
jump_forward.get_jump_target(10), 16 if OFFSET_AS_INSTRUCTION else 17
)
class ConcreteBytecodeTests(TestCase):
def test_repr(self):
r = repr(ConcreteBytecode())
self.assertIn("ConcreteBytecode", r)
self.assertIn("0", r)
def test_eq(self):
code = ConcreteBytecode()
self.assertFalse(code == 1)
for name, val in (
("names", ["a"]),
("varnames", ["a"]),
("consts", [1]),
("argcount", 1),
("kwonlyargcount", 2),
("flags", CompilerFlags(CompilerFlags.GENERATOR)),
("first_lineno", 10),
("filename", "xxxx.py"),
("name", "__x"),
("docstring", "x-x-x"),
("cellvars", [CellVar("x")]),
("freevars", [FreeVar("x")]),
):
c = ConcreteBytecode()
setattr(c, name, val)
            # For obscure reasons using assertNotEqual here fails
self.assertFalse(code == c)
if sys.version_info > (3, 8):
c = ConcreteBytecode()
c.posonlyargcount = 10
self.assertFalse(code == c)
c = ConcreteBytecode()
c.consts = [1]
code.consts = [1]
c.append(ConcreteInstr("LOAD_CONST", 0))
self.assertFalse(code == c)
def test_attr(self):
code_obj = get_code("x = 5")
code = ConcreteBytecode.from_code(code_obj)
self.assertEqual(code.consts, [5, None])
self.assertEqual(code.names, ["x"])
self.assertEqual(code.varnames, [])
self.assertEqual(code.freevars, [])
self.assertListEqual(
list(code),
[
ConcreteInstr("LOAD_CONST", 0, lineno=1),
ConcreteInstr("STORE_NAME", 0, lineno=1),
ConcreteInstr("LOAD_CONST", 1, lineno=1),
ConcreteInstr("RETURN_VALUE", lineno=1),
],
)
# FIXME: test other attributes
def test_invalid_types(self):
code = ConcreteBytecode()
code.append(Label())
with self.assertRaises(ValueError):
list(code)
with self.assertRaises(ValueError):
code.legalize()
with self.assertRaises(ValueError):
ConcreteBytecode([Label()])
def test_to_code_lnotab(self):
# We use an actual function for the simple case to
# ensure we get lnotab right
def f():
#
#
x = 7 # noqa
y = 8 # noqa
z = 9 # noqa
fl = f.__code__.co_firstlineno
concrete = ConcreteBytecode()
concrete.consts = [None, 7, 8, 9]
concrete.varnames = ["x", "y", "z"]
concrete.first_lineno = fl
concrete.extend(
[
SetLineno(fl + 3),
ConcreteInstr("LOAD_CONST", 1),
ConcreteInstr("STORE_FAST", 0),
SetLineno(fl + 4),
ConcreteInstr("LOAD_CONST", 2),
ConcreteInstr("STORE_FAST", 1),
SetLineno(fl + 5),
ConcreteInstr("LOAD_CONST", 3),
ConcreteInstr("STORE_FAST", 2),
ConcreteInstr("LOAD_CONST", 0),
ConcreteInstr("RETURN_VALUE"),
]
)
code = concrete.to_code()
self.assertEqual(code.co_code, f.__code__.co_code)
self.assertEqual(code.co_lnotab, f.__code__.co_lnotab)
if sys.version_info >= (3, 10):
self.assertEqual(code.co_linetable, f.__code__.co_linetable)
def test_negative_lnotab(self):
# x = 7
# y = 8
concrete = ConcreteBytecode(
[
ConcreteInstr("LOAD_CONST", 0),
ConcreteInstr("STORE_NAME", 0),
# line number goes backward!
SetLineno(2),
ConcreteInstr("LOAD_CONST", 1),
ConcreteInstr("STORE_NAME", 1),
]
)
concrete.consts = [7, 8]
concrete.names = ["x", "y"]
concrete.first_lineno = 5
code = concrete.to_code()
expected = b"d\x00Z\x00d\x01Z\x01"
self.assertEqual(code.co_code, expected)
self.assertEqual(code.co_firstlineno, 5)
self.assertEqual(code.co_lnotab, b"\x04\xfd")
def test_extended_lnotab(self):
# x = 7
# 200 blank lines
# y = 8
concrete = ConcreteBytecode(
[
ConcreteInstr("LOAD_CONST", 0),
SetLineno(1 + 128),
ConcreteInstr("STORE_NAME", 0),
# line number goes backward!
SetLineno(1 + 129),
ConcreteInstr("LOAD_CONST", 1),
SetLineno(1),
ConcreteInstr("STORE_NAME", 1),
]
)
concrete.consts = [7, 8]
concrete.names = ["x", "y"]
concrete.first_lineno = 1
code = concrete.to_code()
expected = b"d\x00Z\x00d\x01Z\x01"
self.assertEqual(code.co_code, expected)
self.assertEqual(code.co_firstlineno, 1)
self.assertEqual(code.co_lnotab, b"\x02\x7f\x00\x01\x02\x01\x02\x80\x00\xff")
def test_extended_lnotab2(self):
# x = 7
# 200 blank lines
# y = 8
base_code = compile("x = 7" + "\n" * 200 + "y = 8", "", "exec")
concrete = ConcreteBytecode(
[
ConcreteInstr("LOAD_CONST", 0),
ConcreteInstr("STORE_NAME", 0),
SetLineno(201),
ConcreteInstr("LOAD_CONST", 1),
ConcreteInstr("STORE_NAME", 1),
ConcreteInstr("LOAD_CONST", 2),
ConcreteInstr("RETURN_VALUE"),
]
)
concrete.consts = [None, 7, 8]
concrete.names = ["x", "y"]
concrete.first_lineno = 1
code = concrete.to_code()
self.assertEqual(code.co_code, base_code.co_code)
self.assertEqual(code.co_firstlineno, base_code.co_firstlineno)
self.assertEqual(code.co_lnotab, base_code.co_lnotab)
if sys.version_info >= (3, 10):
self.assertEqual(code.co_linetable, base_code.co_linetable)
def test_to_bytecode_consts(self):
# x = -0.0
# x = +0.0
#
# code optimized by the CPython 3.6 peephole optimizer which emits
# duplicated constants (0.0 is twice in consts).
code = ConcreteBytecode()
code.consts = [0.0, None, -0.0, 0.0]
code.names = ["x", "y"]
code.extend(
[
ConcreteInstr("LOAD_CONST", 2, lineno=1),
ConcreteInstr("STORE_NAME", 0, lineno=1),
ConcreteInstr("LOAD_CONST", 3, lineno=2),
ConcreteInstr("STORE_NAME", 1, lineno=2),
ConcreteInstr("LOAD_CONST", 1, lineno=2),
ConcreteInstr("RETURN_VALUE", lineno=2),
]
)
code = code.to_bytecode().to_concrete_bytecode()
# the conversion changes the constant order: the order comes from
# the order of LOAD_CONST instructions
self.assertEqual(code.consts, [-0.0, 0.0, None])
code.names = ["x", "y"]
self.assertListEqual(
list(code),
[
ConcreteInstr("LOAD_CONST", 0, lineno=1),
ConcreteInstr("STORE_NAME", 0, lineno=1),
ConcreteInstr("LOAD_CONST", 1, lineno=2),
ConcreteInstr("STORE_NAME", 1, lineno=2),
ConcreteInstr("LOAD_CONST", 2, lineno=2),
ConcreteInstr("RETURN_VALUE", lineno=2),
],
)
def test_cellvar(self):
concrete = ConcreteBytecode()
concrete.cellvars = ["x"]
concrete.append(ConcreteInstr("LOAD_DEREF", 0))
code = concrete.to_code()
concrete = ConcreteBytecode.from_code(code)
self.assertEqual(concrete.cellvars, ["x"])
self.assertEqual(concrete.freevars, [])
self.assertEqual(list(concrete), [ConcreteInstr("LOAD_DEREF", 0, lineno=1)])
bytecode = concrete.to_bytecode()
self.assertEqual(bytecode.cellvars, ["x"])
self.assertEqual(list(bytecode), [Instr("LOAD_DEREF", CellVar("x"), lineno=1)])
def test_freevar(self):
concrete = ConcreteBytecode()
concrete.freevars = ["x"]
concrete.append(ConcreteInstr("LOAD_DEREF", 0))
code = concrete.to_code()
concrete = ConcreteBytecode.from_code(code)
self.assertEqual(concrete.cellvars, [])
self.assertEqual(concrete.freevars, ["x"])
self.assertEqual(list(concrete), [ConcreteInstr("LOAD_DEREF", 0, lineno=1)])
bytecode = concrete.to_bytecode()
self.assertEqual(bytecode.cellvars, [])
self.assertEqual(list(bytecode), [Instr("LOAD_DEREF", FreeVar("x"), lineno=1)])
def test_cellvar_freevar(self):
concrete = ConcreteBytecode()
concrete.cellvars = ["cell"]
concrete.freevars = ["free"]
concrete.append(ConcreteInstr("LOAD_DEREF", 0))
concrete.append(ConcreteInstr("LOAD_DEREF", 1))
code = concrete.to_code()
concrete = ConcreteBytecode.from_code(code)
self.assertEqual(concrete.cellvars, ["cell"])
self.assertEqual(concrete.freevars, ["free"])
self.assertEqual(
list(concrete),
[
ConcreteInstr("LOAD_DEREF", 0, lineno=1),
ConcreteInstr("LOAD_DEREF", 1, lineno=1),
],
)
bytecode = concrete.to_bytecode()
self.assertEqual(bytecode.cellvars, ["cell"])
self.assertEqual(
list(bytecode),
[
Instr("LOAD_DEREF", CellVar("cell"), lineno=1),
Instr("LOAD_DEREF", FreeVar("free"), lineno=1),
],
)
def test_load_classderef(self):
concrete = ConcreteBytecode()
concrete.cellvars = ["__class__"]
concrete.freevars = ["__class__"]
concrete.extend(
[ConcreteInstr("LOAD_CLASSDEREF", 1), ConcreteInstr("STORE_DEREF", 1)]
)
bytecode = concrete.to_bytecode()
self.assertEqual(bytecode.freevars, ["__class__"])
self.assertEqual(bytecode.cellvars, ["__class__"])
self.assertEqual(
list(bytecode),
[
Instr("LOAD_CLASSDEREF", FreeVar("__class__"), lineno=1),
Instr("STORE_DEREF", FreeVar("__class__"), lineno=1),
],
)
concrete = bytecode.to_concrete_bytecode()
self.assertEqual(concrete.freevars, ["__class__"])
self.assertEqual(concrete.cellvars, ["__class__"])
self.assertEqual(
list(concrete),
[
ConcreteInstr("LOAD_CLASSDEREF", 1, lineno=1),
ConcreteInstr("STORE_DEREF", 1, lineno=1),
],
)
code = concrete.to_code()
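        # Raw bytes below: 0x94 is LOAD_CLASSDEREF and 0x89 is STORE_DEREF;
        # arg 1 indexes the combined cellvars + freevars list (cellvars
        # first), i.e. the freevar slot of "__class__".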
self.assertEqual(code.co_freevars, ("__class__",))
self.assertEqual(code.co_cellvars, ("__class__",))
self.assertEqual(
code.co_code,
b"\x94\x01\x89\x01",
)
def test_explicit_stacksize(self):
# Passing stacksize=... to ConcreteBytecode.to_code should result in a
# code object with the specified stacksize. We pass some silly values
# and assert that they are honored.
code_obj = get_code("print('%s' % (a,b,c))")
original_stacksize = code_obj.co_stacksize
concrete = ConcreteBytecode.from_code(code_obj)
# First with something bigger than necessary.
explicit_stacksize = original_stacksize + 42
new_code_obj = concrete.to_code(stacksize=explicit_stacksize)
self.assertEqual(new_code_obj.co_stacksize, explicit_stacksize)
# Then with something bogus. We probably don't want to advertise this
# in the documentation. If this fails then decide if it's for good
# reason, and remove if so.
explicit_stacksize = 0
new_code_obj = concrete.to_code(stacksize=explicit_stacksize)
self.assertEqual(new_code_obj.co_stacksize, explicit_stacksize)
def test_legalize(self):
concrete = ConcreteBytecode()
concrete.first_lineno = 3
concrete.consts = [7, 8, 9]
concrete.names = ["x", "y", "z"]
concrete.extend(
[
ConcreteInstr("LOAD_CONST", 0),
ConcreteInstr("STORE_NAME", 0),
ConcreteInstr("LOAD_CONST", 1, lineno=4),
ConcreteInstr("STORE_NAME", 1),
SetLineno(5),
ConcreteInstr("LOAD_CONST", 2, lineno=6),
ConcreteInstr("STORE_NAME", 2),
]
)
concrete.legalize()
self.assertListEqual(
list(concrete),
[
ConcreteInstr("LOAD_CONST", 0, lineno=3),
ConcreteInstr("STORE_NAME", 0, lineno=3),
ConcreteInstr("LOAD_CONST", 1, lineno=4),
ConcreteInstr("STORE_NAME", 1, lineno=4),
ConcreteInstr("LOAD_CONST", 2, lineno=5),
ConcreteInstr("STORE_NAME", 2, lineno=5),
],
)
def test_slice(self):
concrete = ConcreteBytecode()
concrete.first_lineno = 3
concrete.consts = [7, 8, 9]
concrete.names = ["x", "y", "z"]
concrete.extend(
[
ConcreteInstr("LOAD_CONST", 0),
ConcreteInstr("STORE_NAME", 0),
SetLineno(4),
ConcreteInstr("LOAD_CONST", 1),
ConcreteInstr("STORE_NAME", 1),
SetLineno(5),
ConcreteInstr("LOAD_CONST", 2),
ConcreteInstr("STORE_NAME", 2),
]
)
self.assertEqual(concrete, concrete[:])
def test_copy(self):
concrete = ConcreteBytecode()
concrete.first_lineno = 3
concrete.consts = [7, 8, 9]
concrete.names = ["x", "y", "z"]
concrete.extend(
[
ConcreteInstr("LOAD_CONST", 0),
ConcreteInstr("STORE_NAME", 0),
SetLineno(4),
ConcreteInstr("LOAD_CONST", 1),
ConcreteInstr("STORE_NAME", 1),
SetLineno(5),
ConcreteInstr("LOAD_CONST", 2),
ConcreteInstr("STORE_NAME", 2),
]
)
self.assertEqual(concrete, concrete.copy())
class ConcreteFromCodeTests(TestCase):
def test_extended_arg(self):
# Create a code object from arbitrary bytecode
co_code = b"\x90\x12\x904\x90\xabd\xcd"
code = get_code("x=1")
args = (
(code.co_argcount,)
if sys.version_info < (3, 8)
else (code.co_argcount, code.co_posonlyargcount)
)
args += (
code.co_kwonlyargcount,
code.co_nlocals,
code.co_stacksize,
code.co_flags,
co_code,
code.co_consts,
code.co_names,
code.co_varnames,
code.co_filename,
code.co_name,
code.co_firstlineno,
code.co_linetable if sys.version_info >= (3, 10) else code.co_lnotab,
code.co_freevars,
code.co_cellvars,
)
code = types.CodeType(*args)
# without EXTENDED_ARG opcode
bytecode = ConcreteBytecode.from_code(code)
self.assertListEqual(
list(bytecode), [ConcreteInstr("LOAD_CONST", 0x1234ABCD, lineno=1)]
)
# with EXTENDED_ARG opcode
bytecode = ConcreteBytecode.from_code(code, extended_arg=True)
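        # Each EXTENDED_ARG prefix contributes 8 more high bits, so
        # 0x1234ABCD is emitted as the prefixes 0x12, 0x34, 0xAB plus the
        # final LOAD_CONST byte 0xCD.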
expected = [
ConcreteInstr("EXTENDED_ARG", 0x12, lineno=1),
ConcreteInstr("EXTENDED_ARG", 0x34, lineno=1),
ConcreteInstr("EXTENDED_ARG", 0xAB, lineno=1),
ConcreteInstr("LOAD_CONST", 0xCD, lineno=1),
]
self.assertListEqual(list(bytecode), expected)
def test_extended_arg_make_function(self):
if (3, 9) <= sys.version_info < (3, 10):
from _pydevd_frame_eval.vendored.bytecode.tests.util_annotation import get_code as get_code_future
code_obj = get_code_future(
"""
def foo(x: int, y: int):
pass
"""
)
else:
code_obj = get_code(
"""
def foo(x: int, y: int):
pass
"""
)
# without EXTENDED_ARG
concrete = ConcreteBytecode.from_code(code_obj)
if sys.version_info >= (3, 10):
func_code = concrete.consts[2]
names = ["int", "foo"]
consts = ["x", "y", func_code, "foo", None]
const_offset = 1
name_offset = 1
first_instrs = [
ConcreteInstr("LOAD_CONST", 0, lineno=1),
ConcreteInstr("LOAD_NAME", 0, lineno=1),
ConcreteInstr("LOAD_CONST", 1, lineno=1),
ConcreteInstr("LOAD_NAME", 0, lineno=1),
ConcreteInstr("BUILD_TUPLE", 4, lineno=1),
]
elif (
sys.version_info >= (3, 7)
and concrete.flags & CompilerFlags.FUTURE_ANNOTATIONS
):
func_code = concrete.consts[2]
names = ["foo"]
consts = ["int", ("x", "y"), func_code, "foo", None]
const_offset = 1
name_offset = 0
first_instrs = [
ConcreteInstr("LOAD_CONST", 0, lineno=1),
ConcreteInstr("LOAD_CONST", 0, lineno=1),
ConcreteInstr("LOAD_CONST", 0 + const_offset, lineno=1),
ConcreteInstr("BUILD_CONST_KEY_MAP", 2, lineno=1),
]
else:
func_code = concrete.consts[1]
names = ["int", "foo"]
consts = [("x", "y"), func_code, "foo", None]
const_offset = 0
name_offset = 1
first_instrs = [
ConcreteInstr("LOAD_NAME", 0, lineno=1),
ConcreteInstr("LOAD_NAME", 0, lineno=1),
ConcreteInstr("LOAD_CONST", 0 + const_offset, lineno=1),
ConcreteInstr("BUILD_CONST_KEY_MAP", 2, lineno=1),
]
self.assertEqual(concrete.names, names)
self.assertEqual(concrete.consts, consts)
expected = first_instrs + [
ConcreteInstr("LOAD_CONST", 1 + const_offset, lineno=1),
ConcreteInstr("LOAD_CONST", 2 + const_offset, lineno=1),
ConcreteInstr("MAKE_FUNCTION", 4, lineno=1),
ConcreteInstr("STORE_NAME", name_offset, lineno=1),
ConcreteInstr("LOAD_CONST", 3 + const_offset, lineno=1),
ConcreteInstr("RETURN_VALUE", lineno=1),
]
self.assertListEqual(list(concrete), expected)
# with EXTENDED_ARG
concrete = ConcreteBytecode.from_code(code_obj, extended_arg=True)
        # With future annotations the int annotation is stringified and
        # stored as a constant; this is the default behavior under Python 3.10.
if sys.version_info >= (3, 10):
func_code = concrete.consts[2]
names = ["int", "foo"]
consts = ["x", "y", func_code, "foo", None]
elif concrete.flags & CompilerFlags.FUTURE_ANNOTATIONS:
func_code = concrete.consts[2]
names = ["foo"]
consts = ["int", ("x", "y"), func_code, "foo", None]
else:
func_code = concrete.consts[1]
names = ["int", "foo"]
consts = [("x", "y"), func_code, "foo", None]
self.assertEqual(concrete.names, names)
self.assertEqual(concrete.consts, consts)
self.assertListEqual(list(concrete), expected)
# The next three tests ensure we can round trip ConcreteBytecode generated
    # with extended_arg=True
def test_extended_arg_unpack_ex(self):
def test():
p = [1, 2, 3, 4, 5, 6]
q, r, *s, t = p
return q, r, s, t
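        # UNPACK_EX packs two counts into one arg: the low byte is the
        # number of targets before the starred name, the high byte the
        # number after it. Here that is (1 << 8) | 2 == 258, which does
        # not fit in a single byte and therefore needs EXTENDED_ARG.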
cpython_stacksize = test.__code__.co_stacksize
test.__code__ = ConcreteBytecode.from_code(
test.__code__, extended_arg=True
).to_code()
self.assertEqual(test.__code__.co_stacksize, cpython_stacksize)
self.assertEqual(test(), (1, 2, [3, 4, 5], 6))
def test_expected_arg_with_many_consts(self):
def test():
var = 0
var = 1
var = 2
var = 3
var = 4
var = 5
var = 6
var = 7
var = 8
var = 9
var = 10
var = 11
var = 12
var = 13
var = 14
var = 15
var = 16
var = 17
var = 18
var = 19
var = 20
var = 21
var = 22
var = 23
var = 24
var = 25
var = 26
var = 27
var = 28
var = 29
var = 30
var = 31
var = 32
var = 33
var = 34
var = 35
var = 36
var = 37
var = 38
var = 39
var = 40
var = 41
var = 42
var = 43
var = 44
var = 45
var = 46
var = 47
var = 48
var = 49
var = 50
var = 51
var = 52
var = 53
var = 54
var = 55
var = 56
var = 57
var = 58
var = 59
var = 60
var = 61
var = 62
var = 63
var = 64
var = 65
var = 66
var = 67
var = 68
var = 69
var = 70
var = 71
var = 72
var = 73
var = 74
var = 75
var = 76
var = 77
var = 78
var = 79
var = 80
var = 81
var = 82
var = 83
var = 84
var = 85
var = 86
var = 87
var = 88
var = 89
var = 90
var = 91
var = 92
var = 93
var = 94
var = 95
var = 96
var = 97
var = 98
var = 99
var = 100
var = 101
var = 102
var = 103
var = 104
var = 105
var = 106
var = 107
var = 108
var = 109
var = 110
var = 111
var = 112
var = 113
var = 114
var = 115
var = 116
var = 117
var = 118
var = 119
var = 120
var = 121
var = 122
var = 123
var = 124
var = 125
var = 126
var = 127
var = 128
var = 129
var = 130
var = 131
var = 132
var = 133
var = 134
var = 135
var = 136
var = 137
var = 138
var = 139
var = 140
var = 141
var = 142
var = 143
var = 144
var = 145
var = 146
var = 147
var = 148
var = 149
var = 150
var = 151
var = 152
var = 153
var = 154
var = 155
var = 156
var = 157
var = 158
var = 159
var = 160
var = 161
var = 162
var = 163
var = 164
var = 165
var = 166
var = 167
var = 168
var = 169
var = 170
var = 171
var = 172
var = 173
var = 174
var = 175
var = 176
var = 177
var = 178
var = 179
var = 180
var = 181
var = 182
var = 183
var = 184
var = 185
var = 186
var = 187
var = 188
var = 189
var = 190
var = 191
var = 192
var = 193
var = 194
var = 195
var = 196
var = 197
var = 198
var = 199
var = 200
var = 201
var = 202
var = 203
var = 204
var = 205
var = 206
var = 207
var = 208
var = 209
var = 210
var = 211
var = 212
var = 213
var = 214
var = 215
var = 216
var = 217
var = 218
var = 219
var = 220
var = 221
var = 222
var = 223
var = 224
var = 225
var = 226
var = 227
var = 228
var = 229
var = 230
var = 231
var = 232
var = 233
var = 234
var = 235
var = 236
var = 237
var = 238
var = 239
var = 240
var = 241
var = 242
var = 243
var = 244
var = 245
var = 246
var = 247
var = 248
var = 249
var = 250
var = 251
var = 252
var = 253
var = 254
var = 255
var = 256
var = 257
var = 258
var = 259
return var
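        # With 260 distinct int constants (plus None), LOAD_CONST args for
        # the later ones exceed 255 and require an EXTENDED_ARG prefix;
        # the round trip must keep the computed stack size at 1.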
test.__code__ = ConcreteBytecode.from_code(
test.__code__, extended_arg=True
).to_code()
self.assertEqual(test.__code__.co_stacksize, 1)
self.assertEqual(test(), 259)
if sys.version_info >= (3, 6):
def test_fail_extended_arg_jump(self):
def test():
var = None
for _ in range(0, 1):
var = 0
var = 1
var = 2
var = 3
var = 4
var = 5
var = 6
var = 7
var = 8
var = 9
var = 10
var = 11
var = 12
var = 13
var = 14
var = 15
var = 16
var = 17
var = 18
var = 19
var = 20
var = 21
var = 22
var = 23
var = 24
var = 25
var = 26
var = 27
var = 28
var = 29
var = 30
var = 31
var = 32
var = 33
var = 34
var = 35
var = 36
var = 37
var = 38
var = 39
var = 40
var = 41
var = 42
var = 43
var = 44
var = 45
var = 46
var = 47
var = 48
var = 49
var = 50
var = 51
var = 52
var = 53
var = 54
var = 55
var = 56
var = 57
var = 58
var = 59
var = 60
var = 61
var = 62
var = 63
var = 64
var = 65
var = 66
var = 67
var = 68
var = 69
var = 70
return var
# Generate the bytecode with extended arguments
bytecode = ConcreteBytecode.from_code(test.__code__, extended_arg=True)
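            # The loop spans enough bytecode that its backward jump target
            # exceeds one byte; to_code() must recompute offsets and emit
            # the required EXTENDED_ARG without corrupting the jump.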
bytecode.to_code()
class BytecodeToConcreteTests(TestCase):
def test_label(self):
code = Bytecode()
label = Label()
code.extend(
[
Instr("LOAD_CONST", "hello", lineno=1),
Instr("JUMP_FORWARD", label, lineno=1),
label,
Instr("POP_TOP", lineno=1),
]
)
code = code.to_concrete_bytecode()
expected = [
ConcreteInstr("LOAD_CONST", 0, lineno=1),
ConcreteInstr("JUMP_FORWARD", 0, lineno=1),
ConcreteInstr("POP_TOP", lineno=1),
]
self.assertListEqual(list(code), expected)
self.assertListEqual(code.consts, ["hello"])
def test_label2(self):
bytecode = Bytecode()
label = Label()
bytecode.extend(
[
Instr("LOAD_NAME", "test", lineno=1),
Instr("POP_JUMP_IF_FALSE", label),
Instr("LOAD_CONST", 5, lineno=2),
Instr("STORE_NAME", "x"),
Instr("JUMP_FORWARD", label),
Instr("LOAD_CONST", 7, lineno=4),
Instr("STORE_NAME", "x"),
label,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
concrete = bytecode.to_concrete_bytecode()
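        # OFFSET_AS_INSTRUCTION is true on interpreters where jump args
        # count 2-byte instruction units instead of raw bytes (Python
        # 3.10+), hence the paired expected values below.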
expected = [
ConcreteInstr("LOAD_NAME", 0, lineno=1),
ConcreteInstr(
"POP_JUMP_IF_FALSE", 7 if OFFSET_AS_INSTRUCTION else 14, lineno=1
),
ConcreteInstr("LOAD_CONST", 0, lineno=2),
ConcreteInstr("STORE_NAME", 1, lineno=2),
ConcreteInstr("JUMP_FORWARD", 2 if OFFSET_AS_INSTRUCTION else 4, lineno=2),
ConcreteInstr("LOAD_CONST", 1, lineno=4),
ConcreteInstr("STORE_NAME", 1, lineno=4),
ConcreteInstr("LOAD_CONST", 2, lineno=4),
ConcreteInstr("RETURN_VALUE", lineno=4),
]
self.assertListEqual(list(concrete), expected)
self.assertListEqual(concrete.consts, [5, 7, None])
self.assertListEqual(concrete.names, ["test", "x"])
self.assertListEqual(concrete.varnames, [])
def test_label3(self):
"""
CPython generates useless EXTENDED_ARG 0 in some cases. We need to
        properly track them as otherwise we can end up with broken offsets for
jumps.
"""
source = """
def func(x):
if x == 1:
return x + 0
elif x == 2:
return x + 1
elif x == 3:
return x + 2
elif x == 4:
return x + 3
elif x == 5:
return x + 4
elif x == 6:
return x + 5
elif x == 7:
return x + 6
elif x == 8:
return x + 7
elif x == 9:
return x + 8
elif x == 10:
return x + 9
elif x == 11:
return x + 10
elif x == 12:
return x + 11
elif x == 13:
return x + 12
elif x == 14:
return x + 13
elif x == 15:
return x + 14
elif x == 16:
return x + 15
elif x == 17:
return x + 16
return -1
"""
code = get_code(source, function=True)
bcode = Bytecode.from_code(code)
concrete = bcode.to_concrete_bytecode()
self.assertIsInstance(concrete, ConcreteBytecode)
# Ensure that we do not generate broken code
loc = {}
exec(textwrap.dedent(source), loc)
func = loc["func"]
func.__code__ = bcode.to_code()
for i, x in enumerate(range(1, 18)):
self.assertEqual(func(x), x + i)
self.assertEqual(func(18), -1)
# Ensure that we properly round trip in such cases
self.assertEqual(
ConcreteBytecode.from_code(code).to_code().co_code, code.co_code
)
def test_setlineno(self):
# x = 7
# y = 8
# z = 9
concrete = ConcreteBytecode()
concrete.consts = [7, 8, 9]
concrete.names = ["x", "y", "z"]
concrete.first_lineno = 3
concrete.extend(
[
ConcreteInstr("LOAD_CONST", 0),
ConcreteInstr("STORE_NAME", 0),
SetLineno(4),
ConcreteInstr("LOAD_CONST", 1),
ConcreteInstr("STORE_NAME", 1),
SetLineno(5),
ConcreteInstr("LOAD_CONST", 2),
ConcreteInstr("STORE_NAME", 2),
]
)
code = concrete.to_bytecode()
self.assertEqual(
code,
[
Instr("LOAD_CONST", 7, lineno=3),
Instr("STORE_NAME", "x", lineno=3),
Instr("LOAD_CONST", 8, lineno=4),
Instr("STORE_NAME", "y", lineno=4),
Instr("LOAD_CONST", 9, lineno=5),
Instr("STORE_NAME", "z", lineno=5),
],
)
def test_extended_jump(self):
NOP = bytes((opcode.opmap["NOP"],))
class BigInstr(ConcreteInstr):
def __init__(self, size):
super().__init__("NOP")
self._size = size
def copy(self):
return self
def assemble(self):
return NOP * self._size
# (invalid) code using jumps > 0xffff to test extended arg
label = Label()
nb_nop = 2 ** 16
code = Bytecode(
[
Instr("JUMP_ABSOLUTE", label),
BigInstr(nb_nop),
label,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
code_obj = code.to_code()
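        # Expected raw bytes: 0x90 is EXTENDED_ARG, b"q" (0x71) is
        # JUMP_ABSOLUTE, b"d" (0x64) LOAD_CONST and b"S" (0x53)
        # RETURN_VALUE; a target past the one-byte range forces one or two
        # EXTENDED_ARG prefixes depending on the offset unit.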
if OFFSET_AS_INSTRUCTION:
expected = b"\x90\x80q\x02" + NOP * nb_nop + b"d\x00S\x00"
else:
expected = b"\x90\x01\x90\x00q\x06" + NOP * nb_nop + b"d\x00S\x00"
self.assertEqual(code_obj.co_code, expected)
def test_jumps(self):
# if test:
# x = 12
# else:
# x = 37
code = Bytecode()
label_else = Label()
label_return = Label()
code.extend(
[
Instr("LOAD_NAME", "test", lineno=1),
Instr("POP_JUMP_IF_FALSE", label_else),
Instr("LOAD_CONST", 12, lineno=2),
Instr("STORE_NAME", "x"),
Instr("JUMP_FORWARD", label_return),
label_else,
Instr("LOAD_CONST", 37, lineno=4),
Instr("STORE_NAME", "x"),
label_return,
Instr("LOAD_CONST", None, lineno=4),
Instr("RETURN_VALUE"),
]
)
code = code.to_concrete_bytecode()
expected = [
ConcreteInstr("LOAD_NAME", 0, lineno=1),
ConcreteInstr(
"POP_JUMP_IF_FALSE", 5 if OFFSET_AS_INSTRUCTION else 10, lineno=1
),
ConcreteInstr("LOAD_CONST", 0, lineno=2),
ConcreteInstr("STORE_NAME", 1, lineno=2),
ConcreteInstr("JUMP_FORWARD", 2 if OFFSET_AS_INSTRUCTION else 4, lineno=2),
ConcreteInstr("LOAD_CONST", 1, lineno=4),
ConcreteInstr("STORE_NAME", 1, lineno=4),
ConcreteInstr("LOAD_CONST", 2, lineno=4),
ConcreteInstr("RETURN_VALUE", lineno=4),
]
self.assertListEqual(list(code), expected)
self.assertListEqual(code.consts, [12, 37, None])
self.assertListEqual(code.names, ["test", "x"])
self.assertListEqual(code.varnames, [])
def test_dont_merge_constants(self):
# test two constants which are equal but have a different type
code = Bytecode()
code.extend(
[
Instr("LOAD_CONST", 5, lineno=1),
Instr("LOAD_CONST", 5.0, lineno=1),
Instr("LOAD_CONST", -0.0, lineno=1),
Instr("LOAD_CONST", +0.0, lineno=1),
]
)
code = code.to_concrete_bytecode()
expected = [
ConcreteInstr("LOAD_CONST", 0, lineno=1),
ConcreteInstr("LOAD_CONST", 1, lineno=1),
ConcreteInstr("LOAD_CONST", 2, lineno=1),
ConcreteInstr("LOAD_CONST", 3, lineno=1),
]
self.assertListEqual(list(code), expected)
self.assertListEqual(code.consts, [5, 5.0, -0.0, +0.0])
def test_cellvars(self):
code = Bytecode()
code.cellvars = ["x"]
code.freevars = ["y"]
code.extend(
[
Instr("LOAD_DEREF", CellVar("x"), lineno=1),
Instr("LOAD_DEREF", FreeVar("y"), lineno=1),
]
)
concrete = code.to_concrete_bytecode()
self.assertEqual(concrete.cellvars, ["x"])
self.assertEqual(concrete.freevars, ["y"])
code.extend(
[
ConcreteInstr("LOAD_DEREF", 0, lineno=1),
ConcreteInstr("LOAD_DEREF", 1, lineno=1),
]
)
def test_compute_jumps_convergence(self):
# Consider the following sequence of instructions:
#
# JUMP_ABSOLUTE Label1
# JUMP_ABSOLUTE Label2
# ...126 instructions...
# Label1: Offset 254 on first pass, 256 second pass
# NOP
# ... many more instructions ...
# Label2: Offset > 256 on first pass
#
# On first pass of compute_jumps(), Label2 will be at address 254, so
# that value encodes into the single byte arg of JUMP_ABSOLUTE.
#
# On second pass compute_jumps() the instr at Label1 will have offset
# of 256 so will also be given an EXTENDED_ARG.
#
        # Thus we need to make an additional pass. This test only verifies the
        # case where two passes are insufficient but three are enough.
#
        # On Python >= 3.10 we need to double the number since the offset is
        # now in terms of instructions and not bytes.
# Create code from comment above.
code = Bytecode()
label1 = Label()
label2 = Label()
nop = "NOP"
code.append(Instr("JUMP_ABSOLUTE", label1))
code.append(Instr("JUMP_ABSOLUTE", label2))
# Need 254 * 2 + 2 since the arg will change by 1 instruction rather than 2
# bytes.
for x in range(4, 510 if OFFSET_AS_INSTRUCTION else 254, 2):
code.append(Instr(nop))
code.append(label1)
code.append(Instr(nop))
for x in range(
514 if OFFSET_AS_INSTRUCTION else 256,
600 if OFFSET_AS_INSTRUCTION else 300,
2,
):
code.append(Instr(nop))
code.append(label2)
code.append(Instr(nop))
# This should pass by default.
code.to_code()
# Try with max of two passes: it should raise
with self.assertRaises(RuntimeError):
code.to_code(compute_jumps_passes=2)
def test_extreme_compute_jumps_convergence(self):
"""Test of compute_jumps() requiring absurd number of passes.
NOTE: This test also serves to demonstrate that there is no worst
case: the number of passes can be unlimited (or, actually, limited by
the size of the provided code).
This is an extension of test_compute_jumps_convergence. Instead of
two jumps, where the earlier gets extended after the latter, we
instead generate a series of many jumps. Each pass of compute_jumps()
extends one more instruction, which in turn causes the one behind it
to be extended on the next pass.
"""
# N: the number of unextended instructions that can be squeezed into a
        # set of bytes addressable by the arg of an unextended instruction.
# The answer is "128", but here's how we arrive at it.
max_unextended_offset = 1 << 8
unextended_branch_instr_size = 2
N = max_unextended_offset // unextended_branch_instr_size
        # When using instructions rather than bytes in the offset, multiply by 2
if OFFSET_AS_INSTRUCTION:
N *= 2
nop = "UNARY_POSITIVE" # don't use NOP, dis.stack_effect will raise
# The number of jumps will be equal to the number of labels. The
# number of passes of compute_jumps() required will be one greater
# than this.
labels = [Label() for x in range(0, 3 * N)]
code = Bytecode()
code.extend(
Instr("JUMP_FORWARD", labels[len(labels) - x - 1])
for x in range(0, len(labels))
)
end_of_jumps = len(code)
code.extend(Instr(nop) for x in range(0, N))
# Now insert the labels. The first is N instructions (i.e. 256
# bytes) after the last jump. Then they proceed to earlier positions
# 4 bytes at a time. While the targets are in the range of the nop
# instructions, 4 bytes is two instructions. When the targets are in
# the range of JUMP_FORWARD instructions we have to allow for the fact
# that the instructions will have been extended to four bytes each, so
# working backwards 4 bytes per label means just one instruction per
# label.
offset = end_of_jumps + N
for index in range(0, len(labels)):
code.insert(offset, labels[index])
if offset <= end_of_jumps:
offset -= 1
else:
offset -= 2
code.insert(0, Instr("LOAD_CONST", 0))
del end_of_jumps
code.append(Instr("RETURN_VALUE"))
code.to_code(compute_jumps_passes=(len(labels) + 1))
def test_general_constants(self):
"""Test if general object could be linked as constants."""
class CustomObject:
pass
class UnHashableCustomObject:
__hash__ = None
obj1 = [1, 2, 3]
obj2 = {1, 2, 3}
obj3 = CustomObject()
obj4 = UnHashableCustomObject()
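        # obj1/obj2 are unhashable containers and obj4 even disables
        # __hash__ entirely; the point is that arbitrary objects can still
        # be emitted as code constants.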
code = Bytecode(
[
Instr("LOAD_CONST", obj1, lineno=1),
Instr("LOAD_CONST", obj2, lineno=1),
Instr("LOAD_CONST", obj3, lineno=1),
Instr("LOAD_CONST", obj4, lineno=1),
Instr("BUILD_TUPLE", 4, lineno=1),
Instr("RETURN_VALUE", lineno=1),
]
)
self.assertEqual(code.to_code().co_consts, (obj1, obj2, obj3, obj4))
def f():
return # pragma: no cover
f.__code__ = code.to_code()
self.assertEqual(f(), (obj1, obj2, obj3, obj4))
if __name__ == "__main__":
unittest.main() # pragma: no cover
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/tests/test_instr.py
import pytest
from tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON
from tests_python.debug_constants import TEST_CYTHON
pytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason='Requires CPython >= 3.6')
#!/usr/bin/env python3
import opcode
import unittest
from _pydevd_frame_eval.vendored.bytecode import (
UNSET,
Label,
Instr,
CellVar,
FreeVar,
BasicBlock,
SetLineno,
Compare,
)
from _pydevd_frame_eval.vendored.bytecode.tests import TestCase
class SetLinenoTests(TestCase):
def test_lineno(self):
lineno = SetLineno(1)
self.assertEqual(lineno.lineno, 1)
def test_equality(self):
lineno = SetLineno(1)
self.assertNotEqual(lineno, 1)
self.assertEqual(lineno, SetLineno(1))
self.assertNotEqual(lineno, SetLineno(2))
class VariableTests(TestCase):
def test_str(self):
for cls in (CellVar, FreeVar):
var = cls("a")
self.assertEqual(str(var), "a")
def test_repr(self):
for cls in (CellVar, FreeVar):
var = cls("_a_x_a_")
r = repr(var)
self.assertIn("_a_x_a_", r)
self.assertIn(cls.__name__, r)
def test_eq(self):
f1 = FreeVar("a")
f2 = FreeVar("b")
c1 = CellVar("a")
c2 = CellVar("b")
for v1, v2, eq in (
(f1, f1, True),
(f1, f2, False),
(f1, c1, False),
(c1, c1, True),
(c1, c2, False),
):
if eq:
self.assertEqual(v1, v2)
else:
self.assertNotEqual(v1, v2)
class InstrTests(TestCase):
def test_constructor(self):
# invalid line number
with self.assertRaises(TypeError):
Instr("NOP", lineno="x")
with self.assertRaises(ValueError):
Instr("NOP", lineno=0)
# invalid name
with self.assertRaises(TypeError):
Instr(1)
with self.assertRaises(ValueError):
Instr("xxx")
def test_repr(self):
# No arg
r = repr(Instr("NOP", lineno=10))
self.assertIn("NOP", r)
self.assertIn("10", r)
self.assertIn("lineno", r)
# Arg
r = repr(Instr("LOAD_FAST", "_x_", lineno=10))
self.assertIn("LOAD_FAST", r)
self.assertIn("lineno", r)
self.assertIn("10", r)
self.assertIn("arg", r)
self.assertIn("_x_", r)
def test_invalid_arg(self):
label = Label()
block = BasicBlock()
# EXTENDED_ARG
self.assertRaises(ValueError, Instr, "EXTENDED_ARG", 0)
# has_jump()
self.assertRaises(TypeError, Instr, "JUMP_ABSOLUTE", 1)
self.assertRaises(TypeError, Instr, "JUMP_ABSOLUTE", 1.0)
Instr("JUMP_ABSOLUTE", label)
Instr("JUMP_ABSOLUTE", block)
# hasfree
self.assertRaises(TypeError, Instr, "LOAD_DEREF", "x")
Instr("LOAD_DEREF", CellVar("x"))
Instr("LOAD_DEREF", FreeVar("x"))
# haslocal
self.assertRaises(TypeError, Instr, "LOAD_FAST", 1)
Instr("LOAD_FAST", "x")
# hasname
self.assertRaises(TypeError, Instr, "LOAD_NAME", 1)
Instr("LOAD_NAME", "x")
# hasconst
self.assertRaises(ValueError, Instr, "LOAD_CONST") # UNSET
self.assertRaises(ValueError, Instr, "LOAD_CONST", label)
self.assertRaises(ValueError, Instr, "LOAD_CONST", block)
Instr("LOAD_CONST", 1.0)
Instr("LOAD_CONST", object())
# hascompare
self.assertRaises(TypeError, Instr, "COMPARE_OP", 1)
Instr("COMPARE_OP", Compare.EQ)
# HAVE_ARGUMENT
self.assertRaises(ValueError, Instr, "CALL_FUNCTION", -1)
self.assertRaises(TypeError, Instr, "CALL_FUNCTION", 3.0)
Instr("CALL_FUNCTION", 3)
# test maximum argument
self.assertRaises(ValueError, Instr, "CALL_FUNCTION", 2147483647 + 1)
instr = Instr("CALL_FUNCTION", 2147483647)
self.assertEqual(instr.arg, 2147483647)
# not HAVE_ARGUMENT
self.assertRaises(ValueError, Instr, "NOP", 0)
Instr("NOP")
def test_require_arg(self):
i = Instr("CALL_FUNCTION", 3)
self.assertTrue(i.require_arg())
i = Instr("NOP")
self.assertFalse(i.require_arg())
def test_attr(self):
instr = Instr("LOAD_CONST", 3, lineno=5)
self.assertEqual(instr.name, "LOAD_CONST")
self.assertEqual(instr.opcode, 100)
self.assertEqual(instr.arg, 3)
self.assertEqual(instr.lineno, 5)
# invalid values/types
self.assertRaises(ValueError, setattr, instr, "lineno", 0)
self.assertRaises(TypeError, setattr, instr, "lineno", 1.0)
self.assertRaises(TypeError, setattr, instr, "name", 5)
self.assertRaises(TypeError, setattr, instr, "opcode", 1.0)
self.assertRaises(ValueError, setattr, instr, "opcode", -1)
self.assertRaises(ValueError, setattr, instr, "opcode", 255)
# arg can take any attribute but cannot be deleted
instr.arg = -8
instr.arg = object()
self.assertRaises(AttributeError, delattr, instr, "arg")
# no argument
instr = Instr("ROT_TWO")
self.assertIs(instr.arg, UNSET)
def test_modify_op(self):
instr = Instr("LOAD_NAME", "x")
load_fast = opcode.opmap["LOAD_FAST"]
instr.opcode = load_fast
self.assertEqual(instr.name, "LOAD_FAST")
self.assertEqual(instr.opcode, load_fast)
def test_extended_arg(self):
instr = Instr("LOAD_CONST", 0x1234ABCD)
self.assertEqual(instr.arg, 0x1234ABCD)
def test_slots(self):
instr = Instr("NOP")
with self.assertRaises(AttributeError):
instr.myattr = 1
def test_compare(self):
instr = Instr("LOAD_CONST", 3, lineno=7)
self.assertEqual(instr, Instr("LOAD_CONST", 3, lineno=7))
self.assertNotEqual(instr, 1)
# different lineno
self.assertNotEqual(instr, Instr("LOAD_CONST", 3))
self.assertNotEqual(instr, Instr("LOAD_CONST", 3, lineno=6))
# different op
self.assertNotEqual(instr, Instr("LOAD_FAST", "x", lineno=7))
# different arg
self.assertNotEqual(instr, Instr("LOAD_CONST", 4, lineno=7))
def test_has_jump(self):
label = Label()
jump = Instr("JUMP_ABSOLUTE", label)
self.assertTrue(jump.has_jump())
instr = Instr("LOAD_FAST", "x")
self.assertFalse(instr.has_jump())
def test_is_cond_jump(self):
label = Label()
jump = Instr("POP_JUMP_IF_TRUE", label)
self.assertTrue(jump.is_cond_jump())
instr = Instr("LOAD_FAST", "x")
self.assertFalse(instr.is_cond_jump())
def test_is_uncond_jump(self):
label = Label()
jump = Instr("JUMP_ABSOLUTE", label)
self.assertTrue(jump.is_uncond_jump())
instr = Instr("POP_JUMP_IF_TRUE", label)
self.assertFalse(instr.is_uncond_jump())
def test_const_key_not_equal(self):
def check(value):
self.assertEqual(Instr("LOAD_CONST", value), Instr("LOAD_CONST", value))
def func():
pass
check(None)
check(0)
check(0.0)
check(b"bytes")
check("text")
check(Ellipsis)
check((1, 2, 3))
check(frozenset({1, 2, 3}))
check(func.__code__)
check(object())
def test_const_key_equal(self):
neg_zero = -0.0
pos_zero = +0.0
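        # These values compare equal with ==, but constants are keyed by
        # type and by the sign of zero (mirroring CPython's compiler), so
        # the instructions must compare unequal.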
# int and float: 0 == 0.0
self.assertNotEqual(Instr("LOAD_CONST", 0), Instr("LOAD_CONST", 0.0))
# float: -0.0 == +0.0
self.assertNotEqual(
Instr("LOAD_CONST", neg_zero), Instr("LOAD_CONST", pos_zero)
)
# complex
self.assertNotEqual(
Instr("LOAD_CONST", complex(neg_zero, 1.0)),
Instr("LOAD_CONST", complex(pos_zero, 1.0)),
)
self.assertNotEqual(
Instr("LOAD_CONST", complex(1.0, neg_zero)),
Instr("LOAD_CONST", complex(1.0, pos_zero)),
)
# tuple
self.assertNotEqual(Instr("LOAD_CONST", (0,)), Instr("LOAD_CONST", (0.0,)))
nested_tuple1 = (0,)
nested_tuple1 = (nested_tuple1,)
nested_tuple2 = (0.0,)
nested_tuple2 = (nested_tuple2,)
self.assertNotEqual(
Instr("LOAD_CONST", nested_tuple1), Instr("LOAD_CONST", nested_tuple2)
)
# frozenset
self.assertNotEqual(
Instr("LOAD_CONST", frozenset({0})), Instr("LOAD_CONST", frozenset({0.0}))
)
def test_stack_effects(self):
# Verify all opcodes are handled and that "jump=None" really returns
# the max of the other cases.
from _pydevd_frame_eval.vendored.bytecode.concrete import ConcreteInstr
def check(instr):
jump = instr.stack_effect(jump=True)
no_jump = instr.stack_effect(jump=False)
max_effect = instr.stack_effect(jump=None)
self.assertEqual(instr.stack_effect(), max_effect)
self.assertEqual(max_effect, max(jump, no_jump))
if not instr.has_jump():
self.assertEqual(jump, no_jump)
for name, op in opcode.opmap.items():
with self.subTest(name):
# Use ConcreteInstr instead of Instr because it doesn't care
# what kind of argument it is constructed with.
if op < opcode.HAVE_ARGUMENT:
check(ConcreteInstr(name))
else:
for arg in range(256):
check(ConcreteInstr(name, arg))
# LOAD_CONST uses a concrete python object as its oparg, however, in
# dis.stack_effect(opcode.opmap['LOAD_CONST'], oparg),
# oparg should be the index of that python object in the constants.
#
# Fortunately, for an instruction whose oparg isn't equivalent to its
        # form in binary files (pyc format), the stack effect is a
# constant which does not depend on its oparg.
#
# The second argument of dis.stack_effect cannot be
# more than 2**31 - 1. If stack effect of an instruction is
# independent of its oparg, we pass 0 as the second argument
# of dis.stack_effect.
# (As a result we can calculate stack_effect for
# any LOAD_CONST instructions, even for large integers)
for arg in 2 ** 31, 2 ** 32, 2 ** 63, 2 ** 64, -1:
self.assertEqual(Instr("LOAD_CONST", arg).stack_effect(), 1)
def test_code_object_containing_mutable_data(self):
from _pydevd_frame_eval.vendored.bytecode import Bytecode, Instr
from types import CodeType
def f():
def g():
return "value"
return g
f_code = Bytecode.from_code(f.__code__)
instr_load_code = None
mutable_datum = [4, 2]
for each in f_code:
if (
isinstance(each, Instr)
and each.name == "LOAD_CONST"
and isinstance(each.arg, CodeType)
):
instr_load_code = each
break
self.assertIsNotNone(instr_load_code)
g_code = Bytecode.from_code(instr_load_code.arg)
g_code[0].arg = mutable_datum
instr_load_code.arg = g_code.to_code()
f.__code__ = f_code.to_code()
self.assertIs(f()(), mutable_datum)
if __name__ == "__main__":
unittest.main() # pragma: no cover
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/tests/test_peephole_opt.py
import pytest
from tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON
from tests_python.debug_constants import TEST_CYTHON
pytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason='Requires CPython >= 3.6')
import sys
import unittest
from _pydevd_frame_eval.vendored.bytecode import Label, Instr, Compare, Bytecode, ControlFlowGraph
from _pydevd_frame_eval.vendored.bytecode import peephole_opt
from _pydevd_frame_eval.vendored.bytecode.tests import TestCase, dump_bytecode
from unittest import mock
class Tests(TestCase):
maxDiff = 80 * 100
def optimize_blocks(self, code):
if isinstance(code, Bytecode):
code = ControlFlowGraph.from_bytecode(code)
optimizer = peephole_opt.PeepholeOptimizer()
optimizer.optimize_cfg(code)
return code
def check(self, code, *expected):
if isinstance(code, Bytecode):
code = ControlFlowGraph.from_bytecode(code)
optimizer = peephole_opt.PeepholeOptimizer()
optimizer.optimize_cfg(code)
code = code.to_bytecode()
try:
self.assertEqual(code, expected)
except AssertionError:
print("Optimized code:")
dump_bytecode(code)
print("Expected code:")
for instr in expected:
print(instr)
raise
def check_dont_optimize(self, code):
code = ControlFlowGraph.from_bytecode(code)
noopt = code.to_bytecode()
optim = self.optimize_blocks(code)
optim = optim.to_bytecode()
self.assertEqual(optim, noopt)
def test_unary_op(self):
def check_unary_op(op, value, result):
code = Bytecode(
[Instr("LOAD_CONST", value), Instr(op), Instr("STORE_NAME", "x")]
)
self.check(code, Instr("LOAD_CONST", result), Instr("STORE_NAME", "x"))
check_unary_op("UNARY_POSITIVE", 2, 2)
check_unary_op("UNARY_NEGATIVE", 3, -3)
check_unary_op("UNARY_INVERT", 5, -6)
def test_binary_op(self):
def check_bin_op(left, op, right, result):
code = Bytecode(
[
Instr("LOAD_CONST", left),
Instr("LOAD_CONST", right),
Instr(op),
Instr("STORE_NAME", "x"),
]
)
self.check(code, Instr("LOAD_CONST", result), Instr("STORE_NAME", "x"))
check_bin_op(10, "BINARY_ADD", 20, 30)
check_bin_op(5, "BINARY_SUBTRACT", 1, 4)
check_bin_op(5, "BINARY_MULTIPLY", 3, 15)
check_bin_op(10, "BINARY_TRUE_DIVIDE", 3, 10 / 3)
check_bin_op(10, "BINARY_FLOOR_DIVIDE", 3, 3)
check_bin_op(10, "BINARY_MODULO", 3, 1)
check_bin_op(2, "BINARY_POWER", 8, 256)
check_bin_op(1, "BINARY_LSHIFT", 3, 8)
check_bin_op(16, "BINARY_RSHIFT", 3, 2)
check_bin_op(10, "BINARY_AND", 3, 2)
check_bin_op(2, "BINARY_OR", 3, 3)
check_bin_op(2, "BINARY_XOR", 3, 1)
def test_combined_unary_bin_ops(self):
# x = 1 + 3 + 7
code = Bytecode(
[
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 3),
Instr("BINARY_ADD"),
Instr("LOAD_CONST", 7),
Instr("BINARY_ADD"),
Instr("STORE_NAME", "x"),
]
)
self.check(code, Instr("LOAD_CONST", 11), Instr("STORE_NAME", "x"))
# x = ~(~(5))
code = Bytecode(
[
Instr("LOAD_CONST", 5),
Instr("UNARY_INVERT"),
Instr("UNARY_INVERT"),
Instr("STORE_NAME", "x"),
]
)
self.check(code, Instr("LOAD_CONST", 5), Instr("STORE_NAME", "x"))
# "events = [(0, 'call'), (1, 'line'), (-(3), 'call')]"
code = Bytecode(
[
Instr("LOAD_CONST", 0),
Instr("LOAD_CONST", "call"),
Instr("BUILD_TUPLE", 2),
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", "line"),
Instr("BUILD_TUPLE", 2),
Instr("LOAD_CONST", 3),
Instr("UNARY_NEGATIVE"),
Instr("LOAD_CONST", "call"),
Instr("BUILD_TUPLE", 2),
Instr("BUILD_LIST", 3),
Instr("STORE_NAME", "events"),
]
)
self.check(
code,
Instr("LOAD_CONST", (0, "call")),
Instr("LOAD_CONST", (1, "line")),
Instr("LOAD_CONST", (-3, "call")),
Instr("BUILD_LIST", 3),
Instr("STORE_NAME", "events"),
)
# 'x = (1,) + (0,) * 8'
code = Bytecode(
[
Instr("LOAD_CONST", 1),
Instr("BUILD_TUPLE", 1),
Instr("LOAD_CONST", 0),
Instr("BUILD_TUPLE", 1),
Instr("LOAD_CONST", 8),
Instr("BINARY_MULTIPLY"),
Instr("BINARY_ADD"),
Instr("STORE_NAME", "x"),
]
)
zeros = (0,) * 8
result = (1,) + zeros
self.check(code, Instr("LOAD_CONST", result), Instr("STORE_NAME", "x"))
def test_max_size(self):
max_size = 3
with mock.patch.object(peephole_opt, "MAX_SIZE", max_size):
# optimized binary operation: size <= maximum size
#
# (9,) * size
size = max_size
result = (9,) * size
code = Bytecode(
[
Instr("LOAD_CONST", 9),
Instr("BUILD_TUPLE", 1),
Instr("LOAD_CONST", size),
Instr("BINARY_MULTIPLY"),
Instr("STORE_NAME", "x"),
]
)
self.check(code, Instr("LOAD_CONST", result), Instr("STORE_NAME", "x"))
# don't optimize binary operation: size > maximum size
#
# x = (9,) * size
size = max_size + 1
code = Bytecode(
[
Instr("LOAD_CONST", 9),
Instr("BUILD_TUPLE", 1),
Instr("LOAD_CONST", size),
Instr("BINARY_MULTIPLY"),
Instr("STORE_NAME", "x"),
]
)
self.check(
code,
Instr("LOAD_CONST", (9,)),
Instr("LOAD_CONST", size),
Instr("BINARY_MULTIPLY"),
Instr("STORE_NAME", "x"),
)
def test_bin_op_dont_optimize(self):
# 1 / 0
code = Bytecode(
[
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 0),
Instr("BINARY_TRUE_DIVIDE"),
Instr("POP_TOP"),
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
self.check_dont_optimize(code)
# 1 // 0
code = Bytecode(
[
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 0),
Instr("BINARY_FLOOR_DIVIDE"),
Instr("POP_TOP"),
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
self.check_dont_optimize(code)
# 1 % 0
code = Bytecode(
[
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 0),
Instr("BINARY_MODULO"),
Instr("POP_TOP"),
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
self.check_dont_optimize(code)
# 1 % 1j
code = Bytecode(
[
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 1j),
Instr("BINARY_MODULO"),
Instr("POP_TOP"),
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
self.check_dont_optimize(code)
def test_build_tuple(self):
# x = (1, 2, 3)
code = Bytecode(
[
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 2),
Instr("LOAD_CONST", 3),
Instr("BUILD_TUPLE", 3),
Instr("STORE_NAME", "x"),
]
)
self.check(code, Instr("LOAD_CONST", (1, 2, 3)), Instr("STORE_NAME", "x"))
def test_build_list(self):
# test = x in [1, 2, 3]
code = Bytecode(
[
Instr("LOAD_NAME", "x"),
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 2),
Instr("LOAD_CONST", 3),
Instr("BUILD_LIST", 3),
Instr("COMPARE_OP", Compare.IN),
Instr("STORE_NAME", "test"),
]
)
self.check(
code,
Instr("LOAD_NAME", "x"),
Instr("LOAD_CONST", (1, 2, 3)),
Instr("COMPARE_OP", Compare.IN),
Instr("STORE_NAME", "test"),
)
def test_build_list_unpack_seq(self):
for build_list in ("BUILD_TUPLE", "BUILD_LIST"):
# x, = [a]
code = Bytecode(
[
Instr("LOAD_NAME", "a"),
Instr(build_list, 1),
Instr("UNPACK_SEQUENCE", 1),
Instr("STORE_NAME", "x"),
]
)
self.check(code, Instr("LOAD_NAME", "a"), Instr("STORE_NAME", "x"))
# x, y = [a, b]
code = Bytecode(
[
Instr("LOAD_NAME", "a"),
Instr("LOAD_NAME", "b"),
Instr(build_list, 2),
Instr("UNPACK_SEQUENCE", 2),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
]
)
self.check(
code,
Instr("LOAD_NAME", "a"),
Instr("LOAD_NAME", "b"),
Instr("ROT_TWO"),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
)
# x, y, z = [a, b, c]
code = Bytecode(
[
Instr("LOAD_NAME", "a"),
Instr("LOAD_NAME", "b"),
Instr("LOAD_NAME", "c"),
Instr(build_list, 3),
Instr("UNPACK_SEQUENCE", 3),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
Instr("STORE_NAME", "z"),
]
)
self.check(
code,
Instr("LOAD_NAME", "a"),
Instr("LOAD_NAME", "b"),
Instr("LOAD_NAME", "c"),
Instr("ROT_THREE"),
Instr("ROT_TWO"),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
Instr("STORE_NAME", "z"),
)
def test_build_tuple_unpack_seq_const(self):
# x, y = (3, 4)
code = Bytecode(
[
Instr("LOAD_CONST", 3),
Instr("LOAD_CONST", 4),
Instr("BUILD_TUPLE", 2),
Instr("UNPACK_SEQUENCE", 2),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
]
)
self.check(
code,
Instr("LOAD_CONST", (3, 4)),
Instr("UNPACK_SEQUENCE", 2),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
)
def test_build_list_unpack_seq_const(self):
# x, y, z = [3, 4, 5]
code = Bytecode(
[
Instr("LOAD_CONST", 3),
Instr("LOAD_CONST", 4),
Instr("LOAD_CONST", 5),
Instr("BUILD_LIST", 3),
Instr("UNPACK_SEQUENCE", 3),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
Instr("STORE_NAME", "z"),
]
)
self.check(
code,
Instr("LOAD_CONST", 5),
Instr("LOAD_CONST", 4),
Instr("LOAD_CONST", 3),
Instr("STORE_NAME", "x"),
Instr("STORE_NAME", "y"),
Instr("STORE_NAME", "z"),
)
def test_build_set(self):
# test = x in {1, 2, 3}
code = Bytecode(
[
Instr("LOAD_NAME", "x"),
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", 2),
Instr("LOAD_CONST", 3),
Instr("BUILD_SET", 3),
Instr("COMPARE_OP", Compare.IN),
Instr("STORE_NAME", "test"),
]
)
self.check(
code,
Instr("LOAD_NAME", "x"),
Instr("LOAD_CONST", frozenset((1, 2, 3))),
Instr("COMPARE_OP", Compare.IN),
Instr("STORE_NAME", "test"),
)
def test_compare_op_unary_not(self):
for op, not_op in (
(Compare.IN, Compare.NOT_IN), # in => not in
(Compare.NOT_IN, Compare.IN), # not in => in
(Compare.IS, Compare.IS_NOT), # is => is not
(Compare.IS_NOT, Compare.IS), # is not => is
):
code = Bytecode(
[
Instr("LOAD_NAME", "a"),
Instr("LOAD_NAME", "b"),
Instr("COMPARE_OP", op),
Instr("UNARY_NOT"),
Instr("STORE_NAME", "x"),
]
)
self.check(
code,
Instr("LOAD_NAME", "a"),
Instr("LOAD_NAME", "b"),
Instr("COMPARE_OP", not_op),
Instr("STORE_NAME", "x"),
)
# don't optimize:
# x = not (a and b is True)
label_instr5 = Label()
code = Bytecode(
[
Instr("LOAD_NAME", "a"),
Instr("JUMP_IF_FALSE_OR_POP", label_instr5),
Instr("LOAD_NAME", "b"),
Instr("LOAD_CONST", True),
Instr("COMPARE_OP", Compare.IS),
label_instr5,
Instr("UNARY_NOT"),
Instr("STORE_NAME", "x"),
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
self.check_dont_optimize(code)
def test_dont_optimize(self):
# x = 3 < 5
code = Bytecode(
[
Instr("LOAD_CONST", 3),
Instr("LOAD_CONST", 5),
Instr("COMPARE_OP", Compare.LT),
Instr("STORE_NAME", "x"),
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
self.check_dont_optimize(code)
# x = (10, 20, 30)[1:]
code = Bytecode(
[
Instr("LOAD_CONST", (10, 20, 30)),
Instr("LOAD_CONST", 1),
Instr("LOAD_CONST", None),
Instr("BUILD_SLICE", 2),
Instr("BINARY_SUBSCR"),
Instr("STORE_NAME", "x"),
]
)
self.check_dont_optimize(code)
def test_optimize_code_obj(self):
# Test optimize() method with a code object
#
# x = 3 + 5 => x = 8
noopt = Bytecode(
[
Instr("LOAD_CONST", 3),
Instr("LOAD_CONST", 5),
Instr("BINARY_ADD"),
Instr("STORE_NAME", "x"),
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
noopt = noopt.to_code()
optimizer = peephole_opt.PeepholeOptimizer()
optim = optimizer.optimize(noopt)
code = Bytecode.from_code(optim)
self.assertEqual(
code,
[
Instr("LOAD_CONST", 8, lineno=1),
Instr("STORE_NAME", "x", lineno=1),
Instr("LOAD_CONST", None, lineno=1),
Instr("RETURN_VALUE", lineno=1),
],
)
def test_return_value(self):
# return+return: remove second return
#
# def func():
# return 4
# return 5
code = Bytecode(
[
Instr("LOAD_CONST", 4, lineno=2),
Instr("RETURN_VALUE", lineno=2),
Instr("LOAD_CONST", 5, lineno=3),
Instr("RETURN_VALUE", lineno=3),
]
)
code = ControlFlowGraph.from_bytecode(code)
self.check(
code, Instr("LOAD_CONST", 4, lineno=2), Instr("RETURN_VALUE", lineno=2)
)
# return+return + return+return: remove second and fourth return
#
# def func():
# return 4
# return 5
# return 6
# return 7
code = Bytecode(
[
Instr("LOAD_CONST", 4, lineno=2),
Instr("RETURN_VALUE", lineno=2),
Instr("LOAD_CONST", 5, lineno=3),
Instr("RETURN_VALUE", lineno=3),
Instr("LOAD_CONST", 6, lineno=4),
Instr("RETURN_VALUE", lineno=4),
Instr("LOAD_CONST", 7, lineno=5),
Instr("RETURN_VALUE", lineno=5),
]
)
code = ControlFlowGraph.from_bytecode(code)
self.check(
code, Instr("LOAD_CONST", 4, lineno=2), Instr("RETURN_VALUE", lineno=2)
)
# return + JUMP_ABSOLUTE: remove JUMP_ABSOLUTE
# while 1:
# return 7
if sys.version_info < (3, 8):
setup_loop = Label()
return_label = Label()
code = Bytecode(
[
setup_loop,
Instr("SETUP_LOOP", return_label, lineno=2),
Instr("LOAD_CONST", 7, lineno=3),
Instr("RETURN_VALUE", lineno=3),
Instr("JUMP_ABSOLUTE", setup_loop, lineno=3),
Instr("POP_BLOCK", lineno=3),
return_label,
Instr("LOAD_CONST", None, lineno=3),
Instr("RETURN_VALUE", lineno=3),
]
)
code = ControlFlowGraph.from_bytecode(code)
end_loop = Label()
self.check(
code,
Instr("SETUP_LOOP", end_loop, lineno=2),
Instr("LOAD_CONST", 7, lineno=3),
Instr("RETURN_VALUE", lineno=3),
end_loop,
Instr("LOAD_CONST", None, lineno=3),
Instr("RETURN_VALUE", lineno=3),
)
else:
setup_loop = Label()
return_label = Label()
code = Bytecode(
[
setup_loop,
Instr("LOAD_CONST", 7, lineno=3),
Instr("RETURN_VALUE", lineno=3),
Instr("JUMP_ABSOLUTE", setup_loop, lineno=3),
Instr("LOAD_CONST", None, lineno=3),
Instr("RETURN_VALUE", lineno=3),
]
)
code = ControlFlowGraph.from_bytecode(code)
self.check(
code, Instr("LOAD_CONST", 7, lineno=3), Instr("RETURN_VALUE", lineno=3)
)
def test_not_jump_if_false(self):
# Replace UNARY_NOT+POP_JUMP_IF_FALSE with POP_JUMP_IF_TRUE
#
# if not x:
# y = 9
label = Label()
code = Bytecode(
[
Instr("LOAD_NAME", "x"),
Instr("UNARY_NOT"),
Instr("POP_JUMP_IF_FALSE", label),
Instr("LOAD_CONST", 9),
Instr("STORE_NAME", "y"),
label,
]
)
code = self.optimize_blocks(code)
label = Label()
self.check(
code,
Instr("LOAD_NAME", "x"),
Instr("POP_JUMP_IF_TRUE", label),
Instr("LOAD_CONST", 9),
Instr("STORE_NAME", "y"),
label,
)
def test_unconditional_jump_to_return(self):
# def func():
# if test:
# if test2:
# x = 10
# else:
# x = 20
# else:
# x = 30
label_instr11 = Label()
label_instr14 = Label()
label_instr7 = Label()
code = Bytecode(
[
Instr("LOAD_GLOBAL", "test", lineno=2),
Instr("POP_JUMP_IF_FALSE", label_instr11, lineno=2),
Instr("LOAD_GLOBAL", "test2", lineno=3),
Instr("POP_JUMP_IF_FALSE", label_instr7, lineno=3),
Instr("LOAD_CONST", 10, lineno=4),
Instr("STORE_FAST", "x", lineno=4),
Instr("JUMP_ABSOLUTE", label_instr14, lineno=4),
label_instr7,
Instr("LOAD_CONST", 20, lineno=6),
Instr("STORE_FAST", "x", lineno=6),
Instr("JUMP_FORWARD", label_instr14, lineno=6),
label_instr11,
Instr("LOAD_CONST", 30, lineno=8),
Instr("STORE_FAST", "x", lineno=8),
label_instr14,
Instr("LOAD_CONST", None, lineno=8),
Instr("RETURN_VALUE", lineno=8),
]
)
label1 = Label()
label3 = Label()
label4 = Label()
self.check(
code,
Instr("LOAD_GLOBAL", "test", lineno=2),
Instr("POP_JUMP_IF_FALSE", label3, lineno=2),
Instr("LOAD_GLOBAL", "test2", lineno=3),
Instr("POP_JUMP_IF_FALSE", label1, lineno=3),
Instr("LOAD_CONST", 10, lineno=4),
Instr("STORE_FAST", "x", lineno=4),
Instr("JUMP_ABSOLUTE", label4, lineno=4),
label1,
Instr("LOAD_CONST", 20, lineno=6),
Instr("STORE_FAST", "x", lineno=6),
Instr("JUMP_FORWARD", label4, lineno=6),
label3,
Instr("LOAD_CONST", 30, lineno=8),
Instr("STORE_FAST", "x", lineno=8),
label4,
Instr("LOAD_CONST", None, lineno=8),
Instr("RETURN_VALUE", lineno=8),
)
def test_unconditional_jumps(self):
# def func():
# if x:
# if y:
# func()
label_instr7 = Label()
code = Bytecode(
[
Instr("LOAD_GLOBAL", "x", lineno=2),
Instr("POP_JUMP_IF_FALSE", label_instr7, lineno=2),
Instr("LOAD_GLOBAL", "y", lineno=3),
Instr("POP_JUMP_IF_FALSE", label_instr7, lineno=3),
Instr("LOAD_GLOBAL", "func", lineno=4),
Instr("CALL_FUNCTION", 0, lineno=4),
Instr("POP_TOP", lineno=4),
label_instr7,
Instr("LOAD_CONST", None, lineno=4),
Instr("RETURN_VALUE", lineno=4),
]
)
label_return = Label()
self.check(
code,
Instr("LOAD_GLOBAL", "x", lineno=2),
Instr("POP_JUMP_IF_FALSE", label_return, lineno=2),
Instr("LOAD_GLOBAL", "y", lineno=3),
Instr("POP_JUMP_IF_FALSE", label_return, lineno=3),
Instr("LOAD_GLOBAL", "func", lineno=4),
Instr("CALL_FUNCTION", 0, lineno=4),
Instr("POP_TOP", lineno=4),
label_return,
Instr("LOAD_CONST", None, lineno=4),
Instr("RETURN_VALUE", lineno=4),
)
def test_jump_to_return(self):
# def func(condition):
# return 'yes' if condition else 'no'
label_instr4 = Label()
label_instr6 = Label()
code = Bytecode(
[
Instr("LOAD_FAST", "condition"),
Instr("POP_JUMP_IF_FALSE", label_instr4),
Instr("LOAD_CONST", "yes"),
Instr("JUMP_FORWARD", label_instr6),
label_instr4,
Instr("LOAD_CONST", "no"),
label_instr6,
Instr("RETURN_VALUE"),
]
)
label = Label()
self.check(
code,
Instr("LOAD_FAST", "condition"),
Instr("POP_JUMP_IF_FALSE", label),
Instr("LOAD_CONST", "yes"),
Instr("RETURN_VALUE"),
label,
Instr("LOAD_CONST", "no"),
Instr("RETURN_VALUE"),
)
def test_jump_if_true_to_jump_if_false(self):
# Replace JUMP_IF_TRUE_OR_POP jumping to POP_JUMP_IF_FALSE <target>
# with POP_JUMP_IF_TRUE <offset after the second POP_JUMP_IF_FALSE>
#
# if x or y:
# z = 1
label_instr3 = Label()
label_instr7 = Label()
code = Bytecode(
[
Instr("LOAD_NAME", "x"),
Instr("JUMP_IF_TRUE_OR_POP", label_instr3),
Instr("LOAD_NAME", "y"),
label_instr3,
Instr("POP_JUMP_IF_FALSE", label_instr7),
Instr("LOAD_CONST", 1),
Instr("STORE_NAME", "z"),
label_instr7,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
label_instr4 = Label()
label_instr7 = Label()
self.check(
code,
Instr("LOAD_NAME", "x"),
Instr("POP_JUMP_IF_TRUE", label_instr4),
Instr("LOAD_NAME", "y"),
Instr("POP_JUMP_IF_FALSE", label_instr7),
label_instr4,
Instr("LOAD_CONST", 1),
Instr("STORE_NAME", "z"),
label_instr7,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
)
def test_jump_if_false_to_jump_if_false(self):
# Replace JUMP_IF_FALSE_OR_POP jumping to POP_JUMP_IF_FALSE <label>
# with POP_JUMP_IF_FALSE <label>
#
# while n > 0 and start > 3:
# func()
if sys.version_info < (3, 8):
label_instr1 = Label()
label_instr15 = Label()
label_instr17 = Label()
label_instr9 = Label()
code = Bytecode(
[
Instr("SETUP_LOOP", label_instr17),
label_instr1,
Instr("LOAD_NAME", "n"),
Instr("LOAD_CONST", 0),
Instr("COMPARE_OP", Compare.GT),
# JUMP_IF_FALSE_OR_POP jumps to POP_JUMP_IF_FALSE
# which jumps to label_instr15
Instr("JUMP_IF_FALSE_OR_POP", label_instr9),
Instr("LOAD_NAME", "start"),
Instr("LOAD_CONST", 3),
Instr("COMPARE_OP", Compare.GT),
label_instr9,
Instr("POP_JUMP_IF_FALSE", label_instr15),
Instr("LOAD_NAME", "func"),
Instr("CALL_FUNCTION", 0),
Instr("POP_TOP"),
Instr("JUMP_ABSOLUTE", label_instr1),
label_instr15,
Instr("POP_BLOCK"),
label_instr17,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
label_instr1 = Label()
label_instr14 = Label()
label_instr16 = Label()
self.check(
code,
Instr("SETUP_LOOP", label_instr16),
label_instr1,
Instr("LOAD_NAME", "n"),
Instr("LOAD_CONST", 0),
Instr("COMPARE_OP", Compare.GT),
Instr("POP_JUMP_IF_FALSE", label_instr14),
Instr("LOAD_NAME", "start"),
Instr("LOAD_CONST", 3),
Instr("COMPARE_OP", Compare.GT),
Instr("POP_JUMP_IF_FALSE", label_instr14),
Instr("LOAD_NAME", "func"),
Instr("CALL_FUNCTION", 0),
Instr("POP_TOP"),
Instr("JUMP_ABSOLUTE", label_instr1),
label_instr14,
Instr("POP_BLOCK"),
label_instr16,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
)
else:
label_instr1 = Label()
label_instr15 = Label()
label_instr9 = Label()
code = Bytecode(
[
label_instr1,
Instr("LOAD_NAME", "n"),
Instr("LOAD_CONST", 0),
Instr("COMPARE_OP", Compare.GT),
# JUMP_IF_FALSE_OR_POP jumps to POP_JUMP_IF_FALSE
# which jumps to label_instr15
Instr("JUMP_IF_FALSE_OR_POP", label_instr9),
Instr("LOAD_NAME", "start"),
Instr("LOAD_CONST", 3),
Instr("COMPARE_OP", Compare.GT),
label_instr9,
Instr("POP_JUMP_IF_FALSE", label_instr15),
Instr("LOAD_NAME", "func"),
Instr("CALL_FUNCTION", 0),
Instr("POP_TOP"),
Instr("JUMP_ABSOLUTE", label_instr1),
label_instr15,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
label_instr1 = Label()
label_instr14 = Label()
self.check(
code,
label_instr1,
Instr("LOAD_NAME", "n"),
Instr("LOAD_CONST", 0),
Instr("COMPARE_OP", Compare.GT),
Instr("POP_JUMP_IF_FALSE", label_instr14),
Instr("LOAD_NAME", "start"),
Instr("LOAD_CONST", 3),
Instr("COMPARE_OP", Compare.GT),
Instr("POP_JUMP_IF_FALSE", label_instr14),
Instr("LOAD_NAME", "func"),
Instr("CALL_FUNCTION", 0),
Instr("POP_TOP"),
Instr("JUMP_ABSOLUTE", label_instr1),
label_instr14,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
)
def test_nop(self):
code = Bytecode(
[Instr("LOAD_NAME", "x"), Instr("NOP"), Instr("STORE_NAME", "test")]
)
self.check(code, Instr("LOAD_NAME", "x"), Instr("STORE_NAME", "test"))
def test_dead_code_jump(self):
label = Label()
code = Bytecode(
[
Instr("LOAD_NAME", "x"),
Instr("JUMP_ABSOLUTE", label),
# dead code
Instr("LOAD_NAME", "y"),
Instr("STORE_NAME", "test"),
label,
Instr("STORE_NAME", "test"),
]
)
self.check(code, Instr("LOAD_NAME", "x"), Instr("STORE_NAME", "test"))
def test_uncond_jump_to_uncond_jump(self):
# Replace JUMP_FORWARD t1 jumping to JUMP_FORWARD t2
# with JUMP_ABSOLUTE t2
label = Label()
label2 = Label()
label3 = Label()
label4 = Label()
code = Bytecode(
[
Instr("LOAD_NAME", "test"),
Instr("POP_JUMP_IF_TRUE", label),
# redundant jump
Instr("JUMP_FORWARD", label2),
label,
Instr("LOAD_CONST", 1),
Instr("STORE_NAME", "x"),
Instr("LOAD_NAME", "test"),
Instr("POP_JUMP_IF_TRUE", label3),
label2,
Instr("JUMP_FORWARD", label4),
label3,
Instr("LOAD_CONST", 1),
Instr("STORE_NAME", "x"),
label4,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
]
)
label = Label()
label3 = Label()
label4 = Label()
self.check(
code,
Instr("LOAD_NAME", "test"),
Instr("POP_JUMP_IF_TRUE", label),
# JUMP_FORWARD label2 was replaced with JUMP_ABSOLUTE label4
Instr("JUMP_ABSOLUTE", label4),
label,
Instr("LOAD_CONST", 1),
Instr("STORE_NAME", "x"),
Instr("LOAD_NAME", "test"),
Instr("POP_JUMP_IF_TRUE", label3),
Instr("JUMP_FORWARD", label4),
label3,
Instr("LOAD_CONST", 1),
Instr("STORE_NAME", "x"),
label4,
Instr("LOAD_CONST", None),
Instr("RETURN_VALUE"),
)
if __name__ == "__main__":
unittest.main() # pragma: no cover
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/tests/__init__.py
import sys
import textwrap
import types
import unittest
from _pydevd_frame_eval.vendored.bytecode import (
UNSET,
Label,
Instr,
ConcreteInstr,
BasicBlock, # noqa
Bytecode,
ControlFlowGraph,
ConcreteBytecode,
)
def _format_instr_list(block, labels, lineno):
instr_list = []
for instr in block:
if not isinstance(instr, Label):
if isinstance(instr, ConcreteInstr):
cls_name = "ConcreteInstr"
else:
cls_name = "Instr"
arg = instr.arg
if arg is not UNSET:
if isinstance(arg, Label):
arg = labels[arg]
elif isinstance(arg, BasicBlock):
arg = labels[id(arg)]
else:
arg = repr(arg)
if lineno:
text = "%s(%r, %s, lineno=%s)" % (
cls_name,
instr.name,
arg,
instr.lineno,
)
else:
text = "%s(%r, %s)" % (cls_name, instr.name, arg)
else:
if lineno:
text = "%s(%r, lineno=%s)" % (cls_name, instr.name, instr.lineno)
else:
text = "%s(%r)" % (cls_name, instr.name)
else:
text = labels[instr]
instr_list.append(text)
return "[%s]" % ",\n ".join(instr_list)
def dump_bytecode(code, lineno=False):
"""
Use this function to write unit tests: copy/paste its output to
write a self.assertBlocksEqual() check.
"""
print()
if isinstance(code, (Bytecode, ConcreteBytecode)):
is_concrete = isinstance(code, ConcreteBytecode)
if is_concrete:
block = list(code)
else:
block = code
indent = " " * 8
labels = {}
for index, instr in enumerate(block):
if isinstance(instr, Label):
name = "label_instr%s" % index
labels[instr] = name
if is_concrete:
name = "ConcreteBytecode"
print(indent + "code = %s()" % name)
if code.argcount:
print(indent + "code.argcount = %s" % code.argcount)
if sys.version_info > (3, 8):
if code.posonlyargcount:
print(indent + "code.posonlyargcount = %s" % code.posonlyargcount)
if code.kwonlyargcount:
print(indent + "code.kwargonlycount = %s" % code.kwonlyargcount)
print(indent + "code.flags = %#x" % code.flags)
if code.consts:
print(indent + "code.consts = %r" % code.consts)
if code.names:
print(indent + "code.names = %r" % code.names)
if code.varnames:
print(indent + "code.varnames = %r" % code.varnames)
for name in sorted(labels.values()):
print(indent + "%s = Label()" % name)
if is_concrete:
text = indent + "code.extend("
indent = " " * len(text)
else:
text = indent + "code = Bytecode("
indent = " " * len(text)
lines = _format_instr_list(code, labels, lineno).splitlines()
last_line = len(lines) - 1
for index, line in enumerate(lines):
if index == 0:
print(text + lines[0])
elif index == last_line:
print(indent + line + ")")
else:
print(indent + line)
print()
else:
assert isinstance(code, ControlFlowGraph)
labels = {}
for block_index, block in enumerate(code):
labels[id(block)] = "code[%s]" % block_index
for block_index, block in enumerate(code):
text = _format_instr_list(block, labels, lineno)
if block_index != len(code) - 1:
text += ","
print(text)
print()
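# Hedged usage sketch (added for illustration; not part of the vendored
# file). A typical workflow when writing a new test, assuming an interactive
# session, would be:
#
#     code = disassemble("x = 1")        # helper defined below
#     dump_bytecode(code)                # prints an Instr(...) list
#     dump_bytecode(code, lineno=True)   # same, with lineno=... keywords
#
# The printed list is then pasted into the test body as the expected value
# for an assertBlocksEqual()/assertListEqual check.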
def get_code(source, *, filename="<string>", function=False):
source = textwrap.dedent(source).strip()
code = compile(source, filename, "exec")
if function:
sub_code = [
const for const in code.co_consts if isinstance(const, types.CodeType)
]
if len(sub_code) != 1:
raise ValueError("unable to find function code")
code = sub_code[0]
return code
def disassemble(source, *, filename="<string>", function=False):
code = get_code(source, filename=filename, function=function)
return Bytecode.from_code(code)
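# Illustrative note (added; not in the vendored file): get_code compiles a
# dedented source string and, with function=True, returns the single nested
# function code object, e.g.:
#
#     co = get_code("def f():\n    return 1", function=True)
#     co.co_name                   # -> 'f'
#     disassemble("x = 1")         # -> Bytecode starting with LOAD_CONST 1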
class TestCase(unittest.TestCase):
def assertBlocksEqual(self, code, *expected_blocks):
self.assertEqual(len(code), len(expected_blocks))
for block1, block2 in zip(code, expected_blocks):
block_index = code.get_block_index(block1)
self.assertListEqual(
list(block1), block2, "Block #%s is different" % block_index
)
| 4,996 | Python | 31.238709 | 86 | 0.509207 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/tests/util_annotation.py | from __future__ import annotations
import textwrap
import types
def get_code(source, *, filename="<string>", function=False):
source = textwrap.dedent(source).strip()
code = compile(source, filename, "exec")
if function:
sub_code = [
const for const in code.co_consts if isinstance(const, types.CodeType)
]
if len(sub_code) != 1:
raise ValueError("unable to find function code")
code = sub_code[0]
return code
| 485 | Python | 25.999999 | 82 | 0.624742 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/tests/test_flags.py |
import pytest
from tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON
from tests_python.debug_constants import TEST_CYTHON
pytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason='Requires CPython >= 3.6')
#!/usr/bin/env python3
import unittest
from _pydevd_frame_eval.vendored.bytecode import (
CompilerFlags,
ConcreteBytecode,
ConcreteInstr,
Bytecode,
ControlFlowGraph,
)
from _pydevd_frame_eval.vendored.bytecode.flags import infer_flags
class FlagsTests(unittest.TestCase):
def test_type_validation_on_inference(self):
with self.assertRaises(ValueError):
infer_flags(1)
def test_flag_inference(self):
        # Check no loss of non-inferred flags
code = ControlFlowGraph()
code.flags |= (
CompilerFlags.NEWLOCALS
| CompilerFlags.VARARGS
| CompilerFlags.VARKEYWORDS
| CompilerFlags.NESTED
| CompilerFlags.FUTURE_GENERATOR_STOP
)
code.update_flags()
for f in (
CompilerFlags.NEWLOCALS,
CompilerFlags.VARARGS,
CompilerFlags.VARKEYWORDS,
CompilerFlags.NESTED,
CompilerFlags.NOFREE,
CompilerFlags.OPTIMIZED,
CompilerFlags.FUTURE_GENERATOR_STOP,
):
self.assertTrue(bool(code.flags & f))
# Infer optimized and nofree
code = Bytecode()
flags = infer_flags(code)
self.assertTrue(bool(flags & CompilerFlags.OPTIMIZED))
self.assertTrue(bool(flags & CompilerFlags.NOFREE))
code.append(ConcreteInstr("STORE_NAME", 1))
flags = infer_flags(code)
self.assertFalse(bool(flags & CompilerFlags.OPTIMIZED))
self.assertTrue(bool(flags & CompilerFlags.NOFREE))
code.append(ConcreteInstr("STORE_DEREF", 2))
code.update_flags()
self.assertFalse(bool(code.flags & CompilerFlags.OPTIMIZED))
self.assertFalse(bool(code.flags & CompilerFlags.NOFREE))
def test_async_gen_no_flag_is_async_None(self):
# Test inference in the absence of any flag set on the bytecode
# Infer generator
code = ConcreteBytecode()
code.append(ConcreteInstr("YIELD_VALUE"))
code.update_flags()
self.assertTrue(bool(code.flags & CompilerFlags.GENERATOR))
# Infer coroutine
code = ConcreteBytecode()
code.append(ConcreteInstr("GET_AWAITABLE"))
code.update_flags()
self.assertTrue(bool(code.flags & CompilerFlags.COROUTINE))
# Infer coroutine or async generator
for i, expected in (
("YIELD_VALUE", CompilerFlags.ASYNC_GENERATOR),
("YIELD_FROM", CompilerFlags.COROUTINE),
):
code = ConcreteBytecode()
code.append(ConcreteInstr("GET_AWAITABLE"))
code.append(ConcreteInstr(i))
code.update_flags()
self.assertTrue(bool(code.flags & expected))
def test_async_gen_no_flag_is_async_True(self):
# Test inference when we request an async function
# Force coroutine
code = ConcreteBytecode()
code.update_flags(is_async=True)
self.assertTrue(bool(code.flags & CompilerFlags.COROUTINE))
# Infer coroutine or async generator
for i, expected in (
("YIELD_VALUE", CompilerFlags.ASYNC_GENERATOR),
("YIELD_FROM", CompilerFlags.COROUTINE),
):
code = ConcreteBytecode()
code.append(ConcreteInstr(i))
code.update_flags(is_async=True)
self.assertTrue(bool(code.flags & expected))
def test_async_gen_no_flag_is_async_False(self):
# Test inference when we request a non-async function
# Infer generator
code = ConcreteBytecode()
code.append(ConcreteInstr("YIELD_VALUE"))
code.flags = CompilerFlags(CompilerFlags.COROUTINE)
code.update_flags(is_async=False)
self.assertTrue(bool(code.flags & CompilerFlags.GENERATOR))
# Abort on coroutine
code = ConcreteBytecode()
code.append(ConcreteInstr("GET_AWAITABLE"))
code.flags = CompilerFlags(CompilerFlags.COROUTINE)
with self.assertRaises(ValueError):
code.update_flags(is_async=False)
def test_async_gen_flags(self):
# Test inference in the presence of pre-existing flags
for is_async in (None, True):
# Infer generator
code = ConcreteBytecode()
code.append(ConcreteInstr("YIELD_VALUE"))
for f, expected in (
(CompilerFlags.COROUTINE, CompilerFlags.ASYNC_GENERATOR),
(CompilerFlags.ASYNC_GENERATOR, CompilerFlags.ASYNC_GENERATOR),
(CompilerFlags.ITERABLE_COROUTINE, CompilerFlags.ITERABLE_COROUTINE),
):
code.flags = CompilerFlags(f)
code.update_flags(is_async=is_async)
self.assertTrue(bool(code.flags & expected))
# Infer coroutine
code = ConcreteBytecode()
code.append(ConcreteInstr("YIELD_FROM"))
for f, expected in (
(CompilerFlags.COROUTINE, CompilerFlags.COROUTINE),
(CompilerFlags.ASYNC_GENERATOR, CompilerFlags.COROUTINE),
(CompilerFlags.ITERABLE_COROUTINE, CompilerFlags.ITERABLE_COROUTINE),
):
code.flags = CompilerFlags(f)
code.update_flags(is_async=is_async)
self.assertTrue(bool(code.flags & expected))
# Crash on ITERABLE_COROUTINE with async bytecode
code = ConcreteBytecode()
code.append(ConcreteInstr("GET_AWAITABLE"))
code.flags = CompilerFlags(CompilerFlags.ITERABLE_COROUTINE)
with self.assertRaises(ValueError):
code.update_flags(is_async=is_async)
if __name__ == "__main__":
unittest.main() # pragma: no cover
| 6,009 | Python | 36.5625 | 126 | 0.625894 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/tests/test_misc.py |
import pytest
from tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON
from tests_python.debug_constants import TEST_CYTHON
pytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason='Requires CPython >= 3.6')
#!/usr/bin/env python3
import contextlib
import io
import sys
import textwrap
import unittest
from _pydevd_frame_eval.vendored import bytecode
from _pydevd_frame_eval.vendored.bytecode import Label, Instr, Bytecode, BasicBlock, ControlFlowGraph
from _pydevd_frame_eval.vendored.bytecode.concrete import OFFSET_AS_INSTRUCTION
from _pydevd_frame_eval.vendored.bytecode.tests import disassemble
class DumpCodeTests(unittest.TestCase):
maxDiff = 80 * 100
def check_dump_bytecode(self, code, expected, lineno=None):
        with contextlib.redirect_stdout(io.StringIO()) as stdout:
if lineno is not None:
bytecode.dump_bytecode(code, lineno=True)
else:
bytecode.dump_bytecode(code)
        output = stdout.getvalue()
self.assertEqual(output, expected)
def test_bytecode(self):
source = """
def func(test):
if test == 1:
return 1
elif test == 2:
return 2
return 3
"""
code = disassemble(source, function=True)
# without line numbers
enum_repr = "<Compare.EQ: 2>"
expected = f"""
LOAD_FAST 'test'
LOAD_CONST 1
COMPARE_OP {enum_repr}
POP_JUMP_IF_FALSE <label_instr6>
LOAD_CONST 1
RETURN_VALUE
label_instr6:
LOAD_FAST 'test'
LOAD_CONST 2
COMPARE_OP {enum_repr}
POP_JUMP_IF_FALSE <label_instr13>
LOAD_CONST 2
RETURN_VALUE
label_instr13:
LOAD_CONST 3
RETURN_VALUE
"""[
1:
].rstrip(
" "
)
self.check_dump_bytecode(code, expected)
# with line numbers
expected = f"""
L. 2 0: LOAD_FAST 'test'
1: LOAD_CONST 1
2: COMPARE_OP {enum_repr}
3: POP_JUMP_IF_FALSE <label_instr6>
L. 3 4: LOAD_CONST 1
5: RETURN_VALUE
label_instr6:
L. 4 7: LOAD_FAST 'test'
8: LOAD_CONST 2
9: COMPARE_OP {enum_repr}
10: POP_JUMP_IF_FALSE <label_instr13>
L. 5 11: LOAD_CONST 2
12: RETURN_VALUE
label_instr13:
L. 6 14: LOAD_CONST 3
15: RETURN_VALUE
"""[
1:
].rstrip(
" "
)
self.check_dump_bytecode(code, expected, lineno=True)
def test_bytecode_broken_label(self):
label = Label()
code = Bytecode([Instr("JUMP_ABSOLUTE", label)])
expected = " JUMP_ABSOLUTE <error: unknown label>\n\n"
self.check_dump_bytecode(code, expected)
def test_blocks_broken_jump(self):
block = BasicBlock()
code = ControlFlowGraph()
code[0].append(Instr("JUMP_ABSOLUTE", block))
expected = textwrap.dedent(
"""
block1:
JUMP_ABSOLUTE <error: unknown block>
"""
).lstrip("\n")
self.check_dump_bytecode(code, expected)
def test_bytecode_blocks(self):
source = """
def func(test):
if test == 1:
return 1
elif test == 2:
return 2
return 3
"""
code = disassemble(source, function=True)
code = ControlFlowGraph.from_bytecode(code)
# without line numbers
enum_repr = "<Compare.EQ: 2>"
expected = textwrap.dedent(
f"""
block1:
LOAD_FAST 'test'
LOAD_CONST 1
COMPARE_OP {enum_repr}
POP_JUMP_IF_FALSE <block3>
-> block2
block2:
LOAD_CONST 1
RETURN_VALUE
block3:
LOAD_FAST 'test'
LOAD_CONST 2
COMPARE_OP {enum_repr}
POP_JUMP_IF_FALSE <block5>
-> block4
block4:
LOAD_CONST 2
RETURN_VALUE
block5:
LOAD_CONST 3
RETURN_VALUE
"""
).lstrip()
self.check_dump_bytecode(code, expected)
# with line numbers
expected = textwrap.dedent(
f"""
block1:
L. 2 0: LOAD_FAST 'test'
1: LOAD_CONST 1
2: COMPARE_OP {enum_repr}
3: POP_JUMP_IF_FALSE <block3>
-> block2
block2:
L. 3 0: LOAD_CONST 1
1: RETURN_VALUE
block3:
L. 4 0: LOAD_FAST 'test'
1: LOAD_CONST 2
2: COMPARE_OP {enum_repr}
3: POP_JUMP_IF_FALSE <block5>
-> block4
block4:
L. 5 0: LOAD_CONST 2
1: RETURN_VALUE
block5:
L. 6 0: LOAD_CONST 3
1: RETURN_VALUE
"""
).lstrip()
self.check_dump_bytecode(code, expected, lineno=True)
def test_concrete_bytecode(self):
source = """
def func(test):
if test == 1:
return 1
elif test == 2:
return 2
return 3
"""
code = disassemble(source, function=True)
code = code.to_concrete_bytecode()
# without line numbers
expected = f"""
0 LOAD_FAST 0
2 LOAD_CONST 1
4 COMPARE_OP 2
6 POP_JUMP_IF_FALSE {6 if OFFSET_AS_INSTRUCTION else 12}
8 LOAD_CONST 1
10 RETURN_VALUE
12 LOAD_FAST 0
14 LOAD_CONST 2
16 COMPARE_OP 2
18 POP_JUMP_IF_FALSE {12 if OFFSET_AS_INSTRUCTION else 24}
20 LOAD_CONST 2
22 RETURN_VALUE
24 LOAD_CONST 3
26 RETURN_VALUE
""".lstrip(
"\n"
)
self.check_dump_bytecode(code, expected)
# with line numbers
expected = f"""
L. 2 0: LOAD_FAST 0
2: LOAD_CONST 1
4: COMPARE_OP 2
6: POP_JUMP_IF_FALSE {6 if OFFSET_AS_INSTRUCTION else 12}
L. 3 8: LOAD_CONST 1
10: RETURN_VALUE
L. 4 12: LOAD_FAST 0
14: LOAD_CONST 2
16: COMPARE_OP 2
18: POP_JUMP_IF_FALSE {12 if OFFSET_AS_INSTRUCTION else 24}
L. 5 20: LOAD_CONST 2
22: RETURN_VALUE
L. 6 24: LOAD_CONST 3
26: RETURN_VALUE
""".lstrip(
"\n"
)
self.check_dump_bytecode(code, expected, lineno=True)
def test_type_validation(self):
class T:
first_lineno = 1
with self.assertRaises(TypeError):
bytecode.dump_bytecode(T())
class MiscTests(unittest.TestCase):
def skip_test_version(self):
import setup
self.assertEqual(bytecode.__version__, setup.VERSION)
if __name__ == "__main__":
unittest.main() # pragma: no cover
| 7,149 | Python | 25.383764 | 126 | 0.51126 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/tests/test_cfg.py |
import pytest
from tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON
from tests_python.debug_constants import TEST_CYTHON
pytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason='Requires CPython >= 3.6')
#!/usr/bin/env python3
import io
import sys
import unittest
import contextlib
from _pydevd_frame_eval.vendored.bytecode import (
Label,
Compare,
SetLineno,
Instr,
Bytecode,
BasicBlock,
ControlFlowGraph,
)
from _pydevd_frame_eval.vendored.bytecode.concrete import OFFSET_AS_INSTRUCTION
from _pydevd_frame_eval.vendored.bytecode.tests import disassemble as _disassemble, TestCase
def disassemble(
source, *, filename="<string>", function=False, remove_last_return_none=False
):
code = _disassemble(source, filename=filename, function=function)
blocks = ControlFlowGraph.from_bytecode(code)
if remove_last_return_none:
# drop LOAD_CONST+RETURN_VALUE to only keep 2 instructions,
# to make unit tests shorter
block = blocks[-1]
test = (
block[-2].name == "LOAD_CONST"
and block[-2].arg is None
and block[-1].name == "RETURN_VALUE"
)
if not test:
raise ValueError(
"unable to find implicit RETURN_VALUE <None>: %s" % block[-2:]
)
del block[-2:]
return blocks
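# Hedged sketch (added; not in the vendored file): with
# remove_last_return_none=True the implicit LOAD_CONST None / RETURN_VALUE
# epilogue is dropped, so
#
#     disassemble("x = 1", remove_last_return_none=True)
#
# yields a CFG whose single block is [LOAD_CONST 1, STORE_NAME 'x'] -- the
# exact form asserted by sample_code() in BytecodeBlocksFunctionalTests below.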
class BlockTests(unittest.TestCase):
def test_iter_invalid_types(self):
# Labels are not allowed in basic blocks
block = BasicBlock()
block.append(Label())
with self.assertRaises(ValueError):
list(block)
with self.assertRaises(ValueError):
block.legalize(1)
# Only one jump allowed and only at the end
block = BasicBlock()
block2 = BasicBlock()
block.extend([Instr("JUMP_ABSOLUTE", block2), Instr("NOP")])
with self.assertRaises(ValueError):
list(block)
with self.assertRaises(ValueError):
block.legalize(1)
# jump target must be a BasicBlock
block = BasicBlock()
label = Label()
block.extend([Instr("JUMP_ABSOLUTE", label)])
with self.assertRaises(ValueError):
list(block)
with self.assertRaises(ValueError):
block.legalize(1)
def test_slice(self):
block = BasicBlock([Instr("NOP")])
next_block = BasicBlock()
block.next_block = next_block
self.assertEqual(block, block[:])
self.assertIs(next_block, block[:].next_block)
def test_copy(self):
block = BasicBlock([Instr("NOP")])
next_block = BasicBlock()
block.next_block = next_block
self.assertEqual(block, block.copy())
self.assertIs(next_block, block.copy().next_block)
class BytecodeBlocksTests(TestCase):
maxDiff = 80 * 100
def test_constructor(self):
code = ControlFlowGraph()
self.assertEqual(code.name, "<module>")
self.assertEqual(code.filename, "<string>")
self.assertEqual(code.flags, 0)
self.assertBlocksEqual(code, [])
def test_attr(self):
source = """
first_line = 1
def func(arg1, arg2, *, arg3):
x = 1
y = 2
return arg1
"""
code = disassemble(source, filename="hello.py", function=True)
self.assertEqual(code.argcount, 2)
self.assertEqual(code.filename, "hello.py")
self.assertEqual(code.first_lineno, 3)
if sys.version_info > (3, 8):
self.assertEqual(code.posonlyargcount, 0)
self.assertEqual(code.kwonlyargcount, 1)
self.assertEqual(code.name, "func")
self.assertEqual(code.cellvars, [])
code.name = "name"
code.filename = "filename"
code.flags = 123
self.assertEqual(code.name, "name")
self.assertEqual(code.filename, "filename")
self.assertEqual(code.flags, 123)
# FIXME: test non-empty cellvars
def test_add_del_block(self):
code = ControlFlowGraph()
code[0].append(Instr("LOAD_CONST", 0))
block = code.add_block()
self.assertEqual(len(code), 2)
self.assertIs(block, code[1])
code[1].append(Instr("LOAD_CONST", 2))
self.assertBlocksEqual(code, [Instr("LOAD_CONST", 0)], [Instr("LOAD_CONST", 2)])
del code[0]
self.assertBlocksEqual(code, [Instr("LOAD_CONST", 2)])
del code[0]
self.assertEqual(len(code), 0)
def test_setlineno(self):
# x = 7
# y = 8
# z = 9
code = Bytecode()
code.first_lineno = 3
code.extend(
[
Instr("LOAD_CONST", 7),
Instr("STORE_NAME", "x"),
SetLineno(4),
Instr("LOAD_CONST", 8),
Instr("STORE_NAME", "y"),
SetLineno(5),
Instr("LOAD_CONST", 9),
Instr("STORE_NAME", "z"),
]
)
blocks = ControlFlowGraph.from_bytecode(code)
self.assertBlocksEqual(
blocks,
[
Instr("LOAD_CONST", 7),
Instr("STORE_NAME", "x"),
SetLineno(4),
Instr("LOAD_CONST", 8),
Instr("STORE_NAME", "y"),
SetLineno(5),
Instr("LOAD_CONST", 9),
Instr("STORE_NAME", "z"),
],
)
def test_legalize(self):
code = Bytecode()
code.first_lineno = 3
code.extend(
[
Instr("LOAD_CONST", 7),
Instr("STORE_NAME", "x"),
Instr("LOAD_CONST", 8, lineno=4),
Instr("STORE_NAME", "y"),
SetLineno(5),
Instr("LOAD_CONST", 9, lineno=6),
Instr("STORE_NAME", "z"),
]
)
blocks = ControlFlowGraph.from_bytecode(code)
blocks.legalize()
self.assertBlocksEqual(
blocks,
[
Instr("LOAD_CONST", 7, lineno=3),
Instr("STORE_NAME", "x", lineno=3),
Instr("LOAD_CONST", 8, lineno=4),
Instr("STORE_NAME", "y", lineno=4),
Instr("LOAD_CONST", 9, lineno=5),
Instr("STORE_NAME", "z", lineno=5),
],
)
def test_repr(self):
r = repr(ControlFlowGraph())
self.assertIn("ControlFlowGraph", r)
self.assertIn("1", r)
def test_to_bytecode(self):
# if test:
# x = 2
# x = 5
blocks = ControlFlowGraph()
blocks.add_block()
blocks.add_block()
blocks[0].extend(
[
Instr("LOAD_NAME", "test", lineno=1),
Instr("POP_JUMP_IF_FALSE", blocks[2], lineno=1),
]
)
blocks[1].extend(
[
Instr("LOAD_CONST", 5, lineno=2),
Instr("STORE_NAME", "x", lineno=2),
Instr("JUMP_FORWARD", blocks[2], lineno=2),
]
)
blocks[2].extend(
[
Instr("LOAD_CONST", 7, lineno=3),
Instr("STORE_NAME", "x", lineno=3),
Instr("LOAD_CONST", None, lineno=3),
Instr("RETURN_VALUE", lineno=3),
]
)
bytecode = blocks.to_bytecode()
label = Label()
self.assertEqual(
bytecode,
[
Instr("LOAD_NAME", "test", lineno=1),
Instr("POP_JUMP_IF_FALSE", label, lineno=1),
Instr("LOAD_CONST", 5, lineno=2),
Instr("STORE_NAME", "x", lineno=2),
Instr("JUMP_FORWARD", label, lineno=2),
label,
Instr("LOAD_CONST", 7, lineno=3),
Instr("STORE_NAME", "x", lineno=3),
Instr("LOAD_CONST", None, lineno=3),
Instr("RETURN_VALUE", lineno=3),
],
)
# FIXME: test other attributes
def test_label_at_the_end(self):
label = Label()
code = Bytecode(
[
Instr("LOAD_NAME", "x"),
Instr("UNARY_NOT"),
Instr("POP_JUMP_IF_FALSE", label),
Instr("LOAD_CONST", 9),
Instr("STORE_NAME", "y"),
label,
]
)
cfg = ControlFlowGraph.from_bytecode(code)
self.assertBlocksEqual(
cfg,
[
Instr("LOAD_NAME", "x"),
Instr("UNARY_NOT"),
Instr("POP_JUMP_IF_FALSE", cfg[2]),
],
[Instr("LOAD_CONST", 9), Instr("STORE_NAME", "y")],
[],
)
def test_from_bytecode(self):
bytecode = Bytecode()
label = Label()
bytecode.extend(
[
Instr("LOAD_NAME", "test", lineno=1),
Instr("POP_JUMP_IF_FALSE", label, lineno=1),
Instr("LOAD_CONST", 5, lineno=2),
Instr("STORE_NAME", "x", lineno=2),
Instr("JUMP_FORWARD", label, lineno=2),
# dead code!
Instr("LOAD_CONST", 7, lineno=4),
Instr("STORE_NAME", "x", lineno=4),
Label(), # unused label
label,
Label(), # unused label
Instr("LOAD_CONST", None, lineno=4),
Instr("RETURN_VALUE", lineno=4),
]
)
blocks = ControlFlowGraph.from_bytecode(bytecode)
label2 = blocks[3]
self.assertBlocksEqual(
blocks,
[
Instr("LOAD_NAME", "test", lineno=1),
Instr("POP_JUMP_IF_FALSE", label2, lineno=1),
],
[
Instr("LOAD_CONST", 5, lineno=2),
Instr("STORE_NAME", "x", lineno=2),
Instr("JUMP_FORWARD", label2, lineno=2),
],
[Instr("LOAD_CONST", 7, lineno=4), Instr("STORE_NAME", "x", lineno=4)],
[Instr("LOAD_CONST", None, lineno=4), Instr("RETURN_VALUE", lineno=4)],
)
# FIXME: test other attributes
def test_from_bytecode_loop(self):
# for x in (1, 2, 3):
# if x == 2:
# break
# continue
if sys.version_info < (3, 8):
label_loop_start = Label()
label_loop_exit = Label()
label_loop_end = Label()
code = Bytecode()
code.extend(
(
Instr("SETUP_LOOP", label_loop_end, lineno=1),
Instr("LOAD_CONST", (1, 2, 3), lineno=1),
Instr("GET_ITER", lineno=1),
label_loop_start,
Instr("FOR_ITER", label_loop_exit, lineno=1),
Instr("STORE_NAME", "x", lineno=1),
Instr("LOAD_NAME", "x", lineno=2),
Instr("LOAD_CONST", 2, lineno=2),
Instr("COMPARE_OP", Compare.EQ, lineno=2),
Instr("POP_JUMP_IF_FALSE", label_loop_start, lineno=2),
Instr("BREAK_LOOP", lineno=3),
Instr("JUMP_ABSOLUTE", label_loop_start, lineno=4),
Instr("JUMP_ABSOLUTE", label_loop_start, lineno=4),
label_loop_exit,
Instr("POP_BLOCK", lineno=4),
label_loop_end,
Instr("LOAD_CONST", None, lineno=4),
Instr("RETURN_VALUE", lineno=4),
)
)
blocks = ControlFlowGraph.from_bytecode(code)
expected = [
[Instr("SETUP_LOOP", blocks[8], lineno=1)],
[Instr("LOAD_CONST", (1, 2, 3), lineno=1), Instr("GET_ITER", lineno=1)],
[Instr("FOR_ITER", blocks[7], lineno=1)],
[
Instr("STORE_NAME", "x", lineno=1),
Instr("LOAD_NAME", "x", lineno=2),
Instr("LOAD_CONST", 2, lineno=2),
Instr("COMPARE_OP", Compare.EQ, lineno=2),
Instr("POP_JUMP_IF_FALSE", blocks[2], lineno=2),
],
[Instr("BREAK_LOOP", lineno=3)],
[Instr("JUMP_ABSOLUTE", blocks[2], lineno=4)],
[Instr("JUMP_ABSOLUTE", blocks[2], lineno=4)],
[Instr("POP_BLOCK", lineno=4)],
[Instr("LOAD_CONST", None, lineno=4), Instr("RETURN_VALUE", lineno=4)],
]
self.assertBlocksEqual(blocks, *expected)
else:
label_loop_start = Label()
label_loop_exit = Label()
code = Bytecode()
code.extend(
(
Instr("LOAD_CONST", (1, 2, 3), lineno=1),
Instr("GET_ITER", lineno=1),
label_loop_start,
Instr("FOR_ITER", label_loop_exit, lineno=1),
Instr("STORE_NAME", "x", lineno=1),
Instr("LOAD_NAME", "x", lineno=2),
Instr("LOAD_CONST", 2, lineno=2),
Instr("COMPARE_OP", Compare.EQ, lineno=2),
Instr("POP_JUMP_IF_FALSE", label_loop_start, lineno=2),
Instr("JUMP_ABSOLUTE", label_loop_exit, lineno=3),
Instr("JUMP_ABSOLUTE", label_loop_start, lineno=4),
Instr("JUMP_ABSOLUTE", label_loop_start, lineno=4),
label_loop_exit,
Instr("LOAD_CONST", None, lineno=4),
Instr("RETURN_VALUE", lineno=4),
)
)
blocks = ControlFlowGraph.from_bytecode(code)
expected = [
[Instr("LOAD_CONST", (1, 2, 3), lineno=1), Instr("GET_ITER", lineno=1)],
[Instr("FOR_ITER", blocks[6], lineno=1)],
[
Instr("STORE_NAME", "x", lineno=1),
Instr("LOAD_NAME", "x", lineno=2),
Instr("LOAD_CONST", 2, lineno=2),
Instr("COMPARE_OP", Compare.EQ, lineno=2),
Instr("POP_JUMP_IF_FALSE", blocks[1], lineno=2),
],
[Instr("JUMP_ABSOLUTE", blocks[6], lineno=3)],
[Instr("JUMP_ABSOLUTE", blocks[1], lineno=4)],
[Instr("JUMP_ABSOLUTE", blocks[1], lineno=4)],
[Instr("LOAD_CONST", None, lineno=4), Instr("RETURN_VALUE", lineno=4)],
]
self.assertBlocksEqual(blocks, *expected)
class BytecodeBlocksFunctionalTests(TestCase):
def test_eq(self):
# compare codes with multiple blocks and labels,
# Code.__eq__() renumbers labels to get equal labels
source = "x = 1 if test else 2"
code1 = disassemble(source)
code2 = disassemble(source)
self.assertEqual(code1, code2)
# Type mismatch
self.assertFalse(code1 == 1)
# argnames mismatch
cfg = ControlFlowGraph()
cfg.argnames = 10
self.assertFalse(code1 == cfg)
# instr mismatch
cfg = ControlFlowGraph()
cfg.argnames = code1.argnames
self.assertFalse(code1 == cfg)
def check_getitem(self, code):
# check internal Code block indexes (index by index, index by label)
for block_index, block in enumerate(code):
self.assertIs(code[block_index], block)
self.assertIs(code[block], block)
self.assertEqual(code.get_block_index(block), block_index)
def test_delitem(self):
cfg = ControlFlowGraph()
b = cfg.add_block()
del cfg[b]
self.assertEqual(len(cfg.get_instructions()), 0)
def sample_code(self):
code = disassemble("x = 1", remove_last_return_none=True)
self.assertBlocksEqual(
code, [Instr("LOAD_CONST", 1, lineno=1), Instr("STORE_NAME", "x", lineno=1)]
)
return code
def test_split_block(self):
code = self.sample_code()
code[0].append(Instr("NOP", lineno=1))
label = code.split_block(code[0], 2)
self.assertIs(label, code[1])
self.assertBlocksEqual(
code,
[Instr("LOAD_CONST", 1, lineno=1), Instr("STORE_NAME", "x", lineno=1)],
[Instr("NOP", lineno=1)],
)
self.check_getitem(code)
label2 = code.split_block(code[0], 1)
self.assertIs(label2, code[1])
self.assertBlocksEqual(
code,
[Instr("LOAD_CONST", 1, lineno=1)],
[Instr("STORE_NAME", "x", lineno=1)],
[Instr("NOP", lineno=1)],
)
self.check_getitem(code)
with self.assertRaises(TypeError):
code.split_block(1, 1)
with self.assertRaises(ValueError) as e:
code.split_block(code[0], -2)
self.assertIn("positive", e.exception.args[0])
def test_split_block_end(self):
code = self.sample_code()
# split at the end of the last block requires to add a new empty block
label = code.split_block(code[0], 2)
self.assertIs(label, code[1])
self.assertBlocksEqual(
code,
[Instr("LOAD_CONST", 1, lineno=1), Instr("STORE_NAME", "x", lineno=1)],
[],
)
self.check_getitem(code)
# split at the end of a block which is not the end doesn't require to
# add a new block
label = code.split_block(code[0], 2)
self.assertIs(label, code[1])
self.assertBlocksEqual(
code,
[Instr("LOAD_CONST", 1, lineno=1), Instr("STORE_NAME", "x", lineno=1)],
[],
)
def test_split_block_dont_split(self):
code = self.sample_code()
# FIXME: is it really useful to support that?
block = code.split_block(code[0], 0)
self.assertIs(block, code[0])
self.assertBlocksEqual(
code, [Instr("LOAD_CONST", 1, lineno=1), Instr("STORE_NAME", "x", lineno=1)]
)
def test_split_block_error(self):
code = self.sample_code()
with self.assertRaises(ValueError):
# invalid index
code.split_block(code[0], 3)
def test_to_code(self):
# test resolution of jump labels
bytecode = ControlFlowGraph()
bytecode.first_lineno = 3
bytecode.argcount = 3
if sys.version_info > (3, 8):
bytecode.posonlyargcount = 0
bytecode.kwonlyargcount = 2
bytecode.name = "func"
bytecode.filename = "hello.py"
bytecode.flags = 0x43
bytecode.argnames = ("arg", "arg2", "arg3", "kwonly", "kwonly2")
bytecode.docstring = None
block0 = bytecode[0]
block1 = bytecode.add_block()
block2 = bytecode.add_block()
block0.extend(
[
Instr("LOAD_FAST", "x", lineno=4),
Instr("POP_JUMP_IF_FALSE", block2, lineno=4),
]
)
block1.extend(
[Instr("LOAD_FAST", "arg", lineno=5), Instr("STORE_FAST", "x", lineno=5)]
)
block2.extend(
[
Instr("LOAD_CONST", 3, lineno=6),
Instr("STORE_FAST", "x", lineno=6),
Instr("LOAD_FAST", "x", lineno=7),
Instr("RETURN_VALUE", lineno=7),
]
)
if OFFSET_AS_INSTRUCTION:
# The argument of the jump is divided by 2
expected = (
b"|\x05" b"r\x04" b"|\x00" b"}\x05" b"d\x01" b"}\x05" b"|\x05" b"S\x00"
)
else:
expected = (
b"|\x05" b"r\x08" b"|\x00" b"}\x05" b"d\x01" b"}\x05" b"|\x05" b"S\x00"
)
code = bytecode.to_code()
self.assertEqual(code.co_consts, (None, 3))
self.assertEqual(code.co_argcount, 3)
if sys.version_info > (3, 8):
self.assertEqual(code.co_posonlyargcount, 0)
self.assertEqual(code.co_kwonlyargcount, 2)
self.assertEqual(code.co_nlocals, 6)
self.assertEqual(code.co_stacksize, 1)
# FIXME: don't use hardcoded constants
self.assertEqual(code.co_flags, 0x43)
self.assertEqual(code.co_code, expected)
self.assertEqual(code.co_names, ())
self.assertEqual(
code.co_varnames, ("arg", "arg2", "arg3", "kwonly", "kwonly2", "x")
)
self.assertEqual(code.co_filename, "hello.py")
self.assertEqual(code.co_name, "func")
self.assertEqual(code.co_firstlineno, 3)
# verify stacksize argument is honored
explicit_stacksize = code.co_stacksize + 42
code = bytecode.to_code(stacksize=explicit_stacksize)
self.assertEqual(code.co_stacksize, explicit_stacksize)
def test_get_block_index(self):
blocks = ControlFlowGraph()
block0 = blocks[0]
block1 = blocks.add_block()
block2 = blocks.add_block()
self.assertEqual(blocks.get_block_index(block0), 0)
self.assertEqual(blocks.get_block_index(block1), 1)
self.assertEqual(blocks.get_block_index(block2), 2)
other_block = BasicBlock()
self.assertRaises(ValueError, blocks.get_block_index, other_block)
class CFGStacksizeComputationTests(TestCase):
def check_stack_size(self, func):
code = func.__code__
bytecode = Bytecode.from_code(code)
cfg = ControlFlowGraph.from_bytecode(bytecode)
self.assertEqual(code.co_stacksize, cfg.compute_stacksize())
def test_empty_code(self):
cfg = ControlFlowGraph()
del cfg[0]
self.assertEqual(cfg.compute_stacksize(), 0)
def test_handling_of_set_lineno(self):
code = Bytecode()
code.first_lineno = 3
code.extend(
[
Instr("LOAD_CONST", 7),
Instr("STORE_NAME", "x"),
SetLineno(4),
Instr("LOAD_CONST", 8),
Instr("STORE_NAME", "y"),
SetLineno(5),
Instr("LOAD_CONST", 9),
Instr("STORE_NAME", "z"),
]
)
self.assertEqual(code.compute_stacksize(), 1)
def test_invalid_stacksize(self):
code = Bytecode()
code.extend([Instr("STORE_NAME", "x")])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_stack_size_computation_and(self):
def test(arg1, *args, **kwargs): # pragma: no cover
return arg1 and args # Test JUMP_IF_FALSE_OR_POP
self.check_stack_size(test)
def test_stack_size_computation_or(self):
def test(arg1, *args, **kwargs): # pragma: no cover
return arg1 or args # Test JUMP_IF_TRUE_OR_POP
self.check_stack_size(test)
def test_stack_size_computation_if_else(self):
def test(arg1, *args, **kwargs): # pragma: no cover
if args:
return 0
elif kwargs:
return 1
else:
return 2
self.check_stack_size(test)
def test_stack_size_computation_for_loop_continue(self):
def test(arg1, *args, **kwargs): # pragma: no cover
for k in kwargs:
if k in args:
continue
else:
return 1
self.check_stack_size(test)
def test_stack_size_computation_while_loop_break(self):
def test(arg1, *args, **kwargs): # pragma: no cover
while True:
if arg1:
break
self.check_stack_size(test)
def test_stack_size_computation_with(self):
def test(arg1, *args, **kwargs): # pragma: no cover
with open(arg1) as f:
return f.read()
self.check_stack_size(test)
def test_stack_size_computation_try_except(self):
def test(arg1, *args, **kwargs): # pragma: no cover
try:
return args[0]
except Exception:
return 2
self.check_stack_size(test)
def test_stack_size_computation_try_finally(self):
def test(arg1, *args, **kwargs): # pragma: no cover
try:
return args[0]
finally:
return 2
self.check_stack_size(test)
def test_stack_size_computation_try_except_finally(self):
def test(arg1, *args, **kwargs): # pragma: no cover
try:
return args[0]
except Exception:
return 2
finally:
print("Interrupt")
self.check_stack_size(test)
def test_stack_size_computation_try_except_else_finally(self):
def test(arg1, *args, **kwargs): # pragma: no cover
try:
return args[0]
except Exception:
return 2
else:
return arg1
finally:
print("Interrupt")
self.check_stack_size(test)
def test_stack_size_computation_nested_try_except_finally(self):
def test(arg1, *args, **kwargs): # pragma: no cover
k = 1
try:
getattr(arg1, k)
except AttributeError:
pass
except Exception:
try:
assert False
except Exception:
return 2
finally:
print("unexpected")
finally:
print("attempted to get {}".format(k))
self.check_stack_size(test)
def test_stack_size_computation_nested_try_except_else_finally(self):
def test(*args, **kwargs):
try:
v = args[1]
except IndexError:
try:
w = kwargs["value"]
except KeyError:
return -1
else:
return w
finally:
print("second finally")
else:
return v
finally:
print("first finally")
# A direct comparison of the stack depth fails because CPython
        # generates dead code that is used in the stack computation.
cpython_stacksize = test.__code__.co_stacksize
test.__code__ = Bytecode.from_code(test.__code__).to_code()
self.assertLessEqual(test.__code__.co_stacksize, cpython_stacksize)
with contextlib.redirect_stdout(io.StringIO()) as stdout:
self.assertEqual(test(1, 4), 4)
self.assertEqual(stdout.getvalue(), "first finally\n")
with contextlib.redirect_stdout(io.StringIO()) as stdout:
self.assertEqual(test([], value=3), 3)
self.assertEqual(stdout.getvalue(), "second finally\nfirst finally\n")
with contextlib.redirect_stdout(io.StringIO()) as stdout:
self.assertEqual(test([], name=None), -1)
self.assertEqual(stdout.getvalue(), "second finally\nfirst finally\n")
def test_stack_size_with_dead_code(self):
        # Demonstrate the previously mentioned issue more directly.
def test(*args): # pragma: no cover
return 0
try:
a = args[0]
except IndexError:
return -1
else:
return a
test.__code__ = Bytecode.from_code(test.__code__).to_code()
self.assertEqual(test.__code__.co_stacksize, 1)
self.assertEqual(test(1), 0)
def test_huge_code_with_numerous_blocks(self):
def base_func(x):
pass
def mk_if_then_else(depth):
instructions = []
for i in range(depth):
label_else = Label()
instructions.extend(
[
Instr("LOAD_FAST", "x"),
Instr("POP_JUMP_IF_FALSE", label_else),
Instr("LOAD_GLOBAL", "f{}".format(i)),
Instr("RETURN_VALUE"),
label_else,
]
)
instructions.extend([Instr("LOAD_CONST", None), Instr("RETURN_VALUE")])
return instructions
bytecode = Bytecode(mk_if_then_else(5000))
bytecode.compute_stacksize()
if __name__ == "__main__":
unittest.main() # pragma: no cover
| 28,547 | Python | 33.107527 | 126 | 0.507374 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/tests/test_code.py |
import pytest
from tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON
from tests_python.debug_constants import TEST_CYTHON
pytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason='Requires CPython >= 3.6')
import unittest
from _pydevd_frame_eval.vendored.bytecode import ConcreteBytecode, Bytecode, ControlFlowGraph
from _pydevd_frame_eval.vendored.bytecode.tests import get_code
class CodeTests(unittest.TestCase):
"""Check that bytecode.from_code(code).to_code() returns code."""
def check(self, source, function=False):
ref_code = get_code(source, function=function)
code = ConcreteBytecode.from_code(ref_code).to_code()
self.assertEqual(code, ref_code)
code = Bytecode.from_code(ref_code).to_code()
self.assertEqual(code, ref_code)
bytecode = Bytecode.from_code(ref_code)
blocks = ControlFlowGraph.from_bytecode(bytecode)
code = blocks.to_bytecode().to_code()
self.assertEqual(code, ref_code)
def test_loop(self):
self.check(
"""
for x in range(1, 10):
x += 1
if x == 3:
continue
x -= 1
if x > 7:
break
x = 0
print(x)
"""
)
def test_varargs(self):
self.check(
"""
def func(a, b, *varargs):
pass
""",
function=True,
)
def test_kwargs(self):
self.check(
"""
def func(a, b, **kwargs):
pass
""",
function=True,
)
def test_kwonlyargs(self):
self.check(
"""
def func(*, arg, arg2):
pass
""",
function=True,
)
    # Added because Python 3.10 introduced some special behavior with respect
    # to generators in terms of stack size
def test_generator_func(self):
self.check(
"""
def func(arg, arg2):
yield
""",
function=True,
)
def test_async_func(self):
self.check(
"""
async def func(arg, arg2):
pass
""",
function=True,
)
if __name__ == "__main__":
unittest.main() # pragma: no cover
| 2,425 | Python | 24.80851 | 126 | 0.516289 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_frame_eval/vendored/bytecode/tests/test_bytecode.py |
import pytest
from tests_python.debugger_unittest import IS_PY36_OR_GREATER, IS_CPYTHON
from tests_python.debug_constants import TEST_CYTHON
pytestmark = pytest.mark.skipif(not IS_PY36_OR_GREATER or not IS_CPYTHON or not TEST_CYTHON, reason='Requires CPython >= 3.6')
#!/usr/bin/env python3
import sys
import textwrap
import unittest
from _pydevd_frame_eval.vendored.bytecode import Label, Instr, FreeVar, Bytecode, SetLineno, ConcreteInstr
from _pydevd_frame_eval.vendored.bytecode.tests import TestCase, get_code
class BytecodeTests(TestCase):
maxDiff = 80 * 100
def test_constructor(self):
code = Bytecode()
self.assertEqual(code.name, "<module>")
self.assertEqual(code.filename, "<string>")
self.assertEqual(code.flags, 0)
self.assertEqual(code, [])
def test_invalid_types(self):
code = Bytecode()
code.append(123)
with self.assertRaises(ValueError):
list(code)
with self.assertRaises(ValueError):
code.legalize()
with self.assertRaises(ValueError):
Bytecode([123])
def test_legalize(self):
code = Bytecode()
code.first_lineno = 3
code.extend(
[
Instr("LOAD_CONST", 7),
Instr("STORE_NAME", "x"),
Instr("LOAD_CONST", 8, lineno=4),
Instr("STORE_NAME", "y"),
Label(),
SetLineno(5),
Instr("LOAD_CONST", 9, lineno=6),
Instr("STORE_NAME", "z"),
]
)
code.legalize()
self.assertListEqual(
code,
[
Instr("LOAD_CONST", 7, lineno=3),
Instr("STORE_NAME", "x", lineno=3),
Instr("LOAD_CONST", 8, lineno=4),
Instr("STORE_NAME", "y", lineno=4),
Label(),
Instr("LOAD_CONST", 9, lineno=5),
Instr("STORE_NAME", "z", lineno=5),
],
)
def test_slice(self):
code = Bytecode()
code.first_lineno = 3
code.extend(
[
Instr("LOAD_CONST", 7),
Instr("STORE_NAME", "x"),
SetLineno(4),
Instr("LOAD_CONST", 8),
Instr("STORE_NAME", "y"),
SetLineno(5),
Instr("LOAD_CONST", 9),
Instr("STORE_NAME", "z"),
]
)
sliced_code = code[:]
self.assertEqual(code, sliced_code)
for name in (
"argcount",
"posonlyargcount",
"kwonlyargcount",
"first_lineno",
"name",
"filename",
"docstring",
"cellvars",
"freevars",
"argnames",
):
self.assertEqual(
getattr(code, name, None), getattr(sliced_code, name, None)
)
def test_copy(self):
code = Bytecode()
code.first_lineno = 3
code.extend(
[
Instr("LOAD_CONST", 7),
Instr("STORE_NAME", "x"),
SetLineno(4),
Instr("LOAD_CONST", 8),
Instr("STORE_NAME", "y"),
SetLineno(5),
Instr("LOAD_CONST", 9),
Instr("STORE_NAME", "z"),
]
)
copy_code = code.copy()
self.assertEqual(code, copy_code)
for name in (
"argcount",
"posonlyargcount",
"kwonlyargcount",
"first_lineno",
"name",
"filename",
"docstring",
"cellvars",
"freevars",
"argnames",
):
self.assertEqual(getattr(code, name, None), getattr(copy_code, name, None))
def test_from_code(self):
code = get_code(
"""
if test:
x = 1
else:
x = 2
"""
)
bytecode = Bytecode.from_code(code)
label_else = Label()
label_exit = Label()
if sys.version_info < (3, 10):
self.assertEqual(
bytecode,
[
Instr("LOAD_NAME", "test", lineno=1),
Instr("POP_JUMP_IF_FALSE", label_else, lineno=1),
Instr("LOAD_CONST", 1, lineno=2),
Instr("STORE_NAME", "x", lineno=2),
Instr("JUMP_FORWARD", label_exit, lineno=2),
label_else,
Instr("LOAD_CONST", 2, lineno=4),
Instr("STORE_NAME", "x", lineno=4),
label_exit,
Instr("LOAD_CONST", None, lineno=4),
Instr("RETURN_VALUE", lineno=4),
],
)
# Control flow handling appears to have changed under Python 3.10
else:
self.assertEqual(
bytecode,
[
Instr("LOAD_NAME", "test", lineno=1),
Instr("POP_JUMP_IF_FALSE", label_else, lineno=1),
Instr("LOAD_CONST", 1, lineno=2),
Instr("STORE_NAME", "x", lineno=2),
Instr("LOAD_CONST", None, lineno=2),
Instr("RETURN_VALUE", lineno=2),
label_else,
Instr("LOAD_CONST", 2, lineno=4),
Instr("STORE_NAME", "x", lineno=4),
Instr("LOAD_CONST", None, lineno=4),
Instr("RETURN_VALUE", lineno=4),
],
)
def test_from_code_freevars(self):
ns = {}
exec(
textwrap.dedent(
"""
def create_func():
x = 1
def func():
return x
return func
func = create_func()
"""
),
ns,
ns,
)
code = ns["func"].__code__
bytecode = Bytecode.from_code(code)
self.assertEqual(
bytecode,
[
Instr("LOAD_DEREF", FreeVar("x"), lineno=5),
Instr("RETURN_VALUE", lineno=5),
],
)
def test_from_code_load_fast(self):
code = get_code(
"""
def func():
x = 33
y = x
""",
function=True,
)
code = Bytecode.from_code(code)
self.assertEqual(
code,
[
Instr("LOAD_CONST", 33, lineno=2),
Instr("STORE_FAST", "x", lineno=2),
Instr("LOAD_FAST", "x", lineno=3),
Instr("STORE_FAST", "y", lineno=3),
Instr("LOAD_CONST", None, lineno=3),
Instr("RETURN_VALUE", lineno=3),
],
)
def test_setlineno(self):
# x = 7
# y = 8
# z = 9
code = Bytecode()
code.first_lineno = 3
code.extend(
[
Instr("LOAD_CONST", 7),
Instr("STORE_NAME", "x"),
SetLineno(4),
Instr("LOAD_CONST", 8),
Instr("STORE_NAME", "y"),
SetLineno(5),
Instr("LOAD_CONST", 9),
Instr("STORE_NAME", "z"),
]
)
concrete = code.to_concrete_bytecode()
self.assertEqual(concrete.consts, [7, 8, 9])
self.assertEqual(concrete.names, ["x", "y", "z"])
self.assertListEqual(
list(concrete),
[
ConcreteInstr("LOAD_CONST", 0, lineno=3),
ConcreteInstr("STORE_NAME", 0, lineno=3),
ConcreteInstr("LOAD_CONST", 1, lineno=4),
ConcreteInstr("STORE_NAME", 1, lineno=4),
ConcreteInstr("LOAD_CONST", 2, lineno=5),
ConcreteInstr("STORE_NAME", 2, lineno=5),
],
)
def test_to_code(self):
code = Bytecode()
code.first_lineno = 50
code.extend(
[
Instr("LOAD_NAME", "print"),
Instr("LOAD_CONST", "%s"),
Instr("LOAD_GLOBAL", "a"),
Instr("BINARY_MODULO"),
Instr("CALL_FUNCTION", 1),
Instr("RETURN_VALUE"),
]
)
co = code.to_code()
# hopefully this is obvious from inspection? :-)
self.assertEqual(co.co_stacksize, 3)
co = code.to_code(stacksize=42)
self.assertEqual(co.co_stacksize, 42)
def test_negative_size_unary(self):
opnames = (
"UNARY_POSITIVE",
"UNARY_NEGATIVE",
"UNARY_NOT",
"UNARY_INVERT",
)
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr(opname)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_negative_size_unary_with_disable_check_of_pre_and_post(self):
opnames = (
"UNARY_POSITIVE",
"UNARY_NEGATIVE",
"UNARY_NOT",
"UNARY_INVERT",
)
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr(opname)])
co = code.to_code(check_pre_and_post=False)
self.assertEqual(co.co_stacksize, 0)
def test_negative_size_binary(self):
opnames = (
"BINARY_POWER",
"BINARY_MULTIPLY",
"BINARY_MATRIX_MULTIPLY",
"BINARY_FLOOR_DIVIDE",
"BINARY_TRUE_DIVIDE",
"BINARY_MODULO",
"BINARY_ADD",
"BINARY_SUBTRACT",
"BINARY_SUBSCR",
"BINARY_LSHIFT",
"BINARY_RSHIFT",
"BINARY_AND",
"BINARY_XOR",
"BINARY_OR",
)
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", 1), Instr(opname)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_negative_size_binary_with_disable_check_of_pre_and_post(self):
opnames = (
"BINARY_POWER",
"BINARY_MULTIPLY",
"BINARY_MATRIX_MULTIPLY",
"BINARY_FLOOR_DIVIDE",
"BINARY_TRUE_DIVIDE",
"BINARY_MODULO",
"BINARY_ADD",
"BINARY_SUBTRACT",
"BINARY_SUBSCR",
"BINARY_LSHIFT",
"BINARY_RSHIFT",
"BINARY_AND",
"BINARY_XOR",
"BINARY_OR",
)
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", 1), Instr(opname)])
co = code.to_code(check_pre_and_post=False)
self.assertEqual(co.co_stacksize, 1)
def test_negative_size_call(self):
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("CALL_FUNCTION", 0)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_negative_size_unpack(self):
opnames = (
"UNPACK_SEQUENCE",
"UNPACK_EX",
)
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr(opname, 1)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_negative_size_build(self):
opnames = (
"BUILD_TUPLE",
"BUILD_LIST",
"BUILD_SET",
)
if sys.version_info >= (3, 6):
opnames = (*opnames, "BUILD_STRING")
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr(opname, 1)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_negative_size_build_map(self):
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", 1), Instr("BUILD_MAP", 1)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_negative_size_build_map_with_disable_check_of_pre_and_post(self):
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", 1), Instr("BUILD_MAP", 1)])
co = code.to_code(check_pre_and_post=False)
self.assertEqual(co.co_stacksize, 1)
@unittest.skipIf(sys.version_info < (3, 6), "Inexistent opcode")
def test_negative_size_build_const_map(self):
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", ("a",)), Instr("BUILD_CONST_KEY_MAP", 1)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
@unittest.skipIf(sys.version_info < (3, 6), "Inexistent opcode")
def test_negative_size_build_const_map_with_disable_check_of_pre_and_post(self):
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", ("a",)), Instr("BUILD_CONST_KEY_MAP", 1)])
co = code.to_code(check_pre_and_post=False)
self.assertEqual(co.co_stacksize, 1)
def test_empty_dup(self):
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("DUP_TOP")])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_not_enough_dup(self):
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", 1), Instr("DUP_TOP_TWO")])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_not_enough_rot(self):
opnames = ["ROT_TWO", "ROT_THREE"]
if sys.version_info >= (3, 8):
opnames.append("ROT_FOUR")
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", 1), Instr(opname)])
with self.assertRaises(RuntimeError):
code.compute_stacksize()
def test_not_enough_rot_with_disable_check_of_pre_and_post(self):
opnames = ["ROT_TWO", "ROT_THREE"]
if sys.version_info >= (3, 8):
opnames.append("ROT_FOUR")
for opname in opnames:
with self.subTest():
code = Bytecode()
code.first_lineno = 1
code.extend([Instr("LOAD_CONST", 1), Instr(opname)])
co = code.to_code(check_pre_and_post=False)
self.assertEqual(co.co_stacksize, 1)
def test_for_iter_stack_effect_computation(self):
with self.subTest():
code = Bytecode()
code.first_lineno = 1
lab1 = Label()
lab2 = Label()
code.extend(
[
lab1,
Instr("FOR_ITER", lab2),
Instr("STORE_FAST", "i"),
Instr("JUMP_ABSOLUTE", lab1),
lab2,
]
)
with self.assertRaises(RuntimeError):
# Use compute_stacksize since the code is so broken that conversion
            # to/from concrete bytecode is actually broken
code.compute_stacksize(check_pre_and_post=False)
if __name__ == "__main__":
unittest.main() # pragma: no cover
| 15,909 | Python | 31.535787 | 126 | 0.471117 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_resolver.py | from _pydev_bundle import pydev_log
from _pydevd_bundle.pydevd_utils import hasattr_checked, DAPGrouper, Timer
from io import StringIO
import traceback
import inspect
from os.path import basename
from functools import partial
from _pydevd_bundle.pydevd_constants import IS_PY36_OR_GREATER, \
MethodWrapperType, RETURN_VALUES_DICT, DebugInfoHolder, IS_PYPY, GENERATED_LEN_ATTR_NAME
from _pydevd_bundle.pydevd_safe_repr import SafeRepr
# Note: 300 is already a lot to see in the outline (after that the user should really use the shell to get things)
# and this also means we'll pass less information to the client side (which makes debugging faster).
MAX_ITEMS_TO_HANDLE = 300
TOO_LARGE_MSG = 'Too large to show contents. Max items to show: ' + str(MAX_ITEMS_TO_HANDLE)
TOO_LARGE_ATTR = 'Unable to handle:'
#=======================================================================================================================
# UnableToResolveVariableException
#=======================================================================================================================
class UnableToResolveVariableException(Exception):
pass
try:
from collections import OrderedDict
except:
OrderedDict = dict
try:
import java.lang # @UnresolvedImport
except:
pass
#=======================================================================================================================
# See: pydevd_extension_api module for resolver interface
#=======================================================================================================================
def sorted_attributes_key(attr_name):
if attr_name.startswith('__'):
if attr_name.endswith('__'):
# __ double under before and after __
return (3, attr_name)
else:
# __ double under before
return (2, attr_name)
elif attr_name.startswith('_'):
# _ single under
return (1, attr_name)
else:
# Regular (Before anything)
return (0, attr_name)
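# Illustrative note (added; not in the original module): the tuples above make
# plain names sort first, then "_private", then "__mangled", then "__dunder__"
# names, e.g.:
#
#     sorted(['__len__', '_cache', '__slots', 'value'], key=sorted_attributes_key)
#     # -> ['value', '_cache', '__slots', '__len__']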
#=======================================================================================================================
# DefaultResolver
#=======================================================================================================================
class DefaultResolver:
'''
DefaultResolver is the class that'll actually resolve how to show some variable.
'''
def resolve(self, var, attribute):
return getattr(var, attribute)
def get_contents_debug_adapter_protocol(self, obj, fmt=None):
if MethodWrapperType:
dct, used___dict__ = self._get_py_dictionary(obj)
else:
dct = self._get_jy_dictionary(obj)[0]
lst = sorted(dct.items(), key=lambda tup: sorted_attributes_key(tup[0]))
if used___dict__:
eval_name = '.__dict__[%s]'
else:
eval_name = '.%s'
ret = []
for attr_name, attr_value in lst:
entry = (attr_name, attr_value, eval_name % attr_name)
ret.append(entry)
return ret
def get_dictionary(self, var, names=None, used___dict__=False):
if MethodWrapperType:
return self._get_py_dictionary(var, names, used___dict__=used___dict__)[0]
else:
return self._get_jy_dictionary(var)[0]
def _get_jy_dictionary(self, obj):
ret = {}
found = java.util.HashMap()
original = obj
if hasattr_checked(obj, '__class__') and obj.__class__ == java.lang.Class:
# get info about superclasses
classes = []
classes.append(obj)
c = obj.getSuperclass()
            while c is not None:
classes.append(c)
c = c.getSuperclass()
# get info about interfaces
interfs = []
for obj in classes:
interfs.extend(obj.getInterfaces())
classes.extend(interfs)
# now is the time when we actually get info on the declared methods and fields
for obj in classes:
declaredMethods = obj.getDeclaredMethods()
declaredFields = obj.getDeclaredFields()
for i in range(len(declaredMethods)):
name = declaredMethods[i].getName()
ret[name] = declaredMethods[i].toString()
found.put(name, 1)
for i in range(len(declaredFields)):
name = declaredFields[i].getName()
found.put(name, 1)
# if declaredFields[i].isAccessible():
declaredFields[i].setAccessible(True)
# ret[name] = declaredFields[i].get( declaredFields[i] )
try:
ret[name] = declaredFields[i].get(original)
except:
ret[name] = declaredFields[i].toString()
# this simple dir does not always get all the info, that's why we have the part before
# (e.g.: if we do a dir on String, some methods that are from other interfaces such as
# charAt don't appear)
try:
d = dir(original)
for name in d:
if found.get(name) != 1:
ret[name] = getattr(original, name)
except:
# sometimes we're unable to do a dir
pass
return ret
def get_names(self, var):
used___dict__ = False
try:
names = dir(var)
except Exception:
names = []
if not names:
if hasattr_checked(var, '__dict__'):
names = list(var.__dict__)
used___dict__ = True
return names, used___dict__
def _get_py_dictionary(self, var, names=None, used___dict__=False):
'''
:return tuple(names, used___dict__), where used___dict__ means we have to access
using obj.__dict__[name] instead of getattr(obj, name)
'''
# On PyPy we never show functions. This is because of a corner case where PyPy becomes
# absurdly slow -- it takes almost half a second to introspect a single numpy function (so,
# the related test, "test_case_16_resolve_numpy_array", times out... this probably isn't
# specific to numpy, but to any library where the CPython bridge is used, but as we
# can't be sure in the debugger, we play it safe and don't show it at all).
filter_function = IS_PYPY
if not names:
names, used___dict__ = self.get_names(var)
d = {}
# Be aware that the order in which the filters are applied attempts to
# optimize the operation by removing as many items as possible in the
# first filters, leaving fewer items for later filters
timer = Timer()
cls = type(var)
for name in names:
try:
name_as_str = name
if name_as_str.__class__ != str:
name_as_str = '%r' % (name_as_str,)
if not used___dict__:
attr = getattr(var, name)
else:
attr = var.__dict__[name]
# filter functions?
if filter_function:
if inspect.isroutine(attr) or isinstance(attr, MethodWrapperType):
continue
except:
                # if some error occurs getting it, let's show it to the user.
strIO = StringIO()
traceback.print_exc(file=strIO)
attr = strIO.getvalue()
finally:
timer.report_if_getting_attr_slow(cls, name_as_str)
d[name_as_str] = attr
return d, used___dict__
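# Hedged usage sketch (added; Point is a hypothetical class, not from the
# original sources): for a plain instance the default resolver maps attribute
# names to values, e.g.:
#
#     class Point:
#         def __init__(self):
#             self.x, self.y = 1, 2
#
#     d = DefaultResolver().get_dictionary(Point())
#     # d['x'] == 1 and d['y'] == 2 (inherited dunder attributes included)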
class DAPGrouperResolver:
def get_contents_debug_adapter_protocol(self, obj, fmt=None):
return obj.get_contents_debug_adapter_protocol()
_basic_immutable_types = (int, float, complex, str, bytes, type(None), bool, frozenset)
def _does_obj_repr_evaluate_to_obj(obj):
'''
If obj is an object where evaluating its representation leads to
the same object, return True, otherwise, return False.
'''
try:
if isinstance(obj, tuple):
for o in obj:
if not _does_obj_repr_evaluate_to_obj(o):
return False
return True
else:
return isinstance(obj, _basic_immutable_types)
except:
return False
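# Illustrative examples (added; not in the original module):
#
#     _does_obj_repr_evaluate_to_obj(('a', 1))   # True: repr round-trips
#     _does_obj_repr_evaluate_to_obj([1, 2])     # False: lists are mutable
#     _does_obj_repr_evaluate_to_obj(object())   # False: repr is an address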
#=======================================================================================================================
# DictResolver
#=======================================================================================================================
class DictResolver:
sort_keys = not IS_PY36_OR_GREATER
def resolve(self, dct, key):
if key in (GENERATED_LEN_ATTR_NAME, TOO_LARGE_ATTR):
return None
if '(' not in key:
            # we have to handle this case because the dict resolver is also used to directly resolve the global and local
            # scopes (which already have the items directly)
try:
return dct[key]
except:
return getattr(dct, key)
# ok, we have to iterate over the items to find the one that matches the id, because that's the only way
# to actually find the reference from the string we have before.
expected_id = int(key.split('(')[-1][:-1])
for key, val in dct.items():
if id(key) == expected_id:
return val
raise UnableToResolveVariableException()
def key_to_str(self, key, fmt=None):
if fmt is not None:
if fmt.get('hex', False):
safe_repr = SafeRepr()
safe_repr.convert_to_hex = True
return safe_repr(key)
return '%r' % (key,)
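    # Illustrative note (added; the exact hex rendering is delegated to
    # SafeRepr): without a format the plain repr is used, while
    # fmt={'hex': True} switches int keys to hex form, e.g.:
    #
    #     DictResolver().key_to_str(10)                      # -> '10'
    #     DictResolver().key_to_str(10, fmt={'hex': True})   # -> e.g. '0xa'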
def init_dict(self):
return {}
def get_contents_debug_adapter_protocol(self, dct, fmt=None):
'''
        This method is to be used in the case where the variables are all saved by their ids
        (and as such don't need to have the `resolve` method called later on, so keys don't
        need to embed the reference in the key).
Note that the return should be ordered.
:return list(tuple(name:str, value:object, evaluateName:str))
'''
ret = []
i = 0
found_representations = set()
for key, val in dct.items():
i += 1
key_as_str = self.key_to_str(key, fmt)
if key_as_str not in found_representations:
found_representations.add(key_as_str)
else:
# If the key would be a duplicate, add the key id (otherwise
# VSCode won't show all keys correctly).
# See: https://github.com/microsoft/debugpy/issues/148
key_as_str = '%s (id: %s)' % (key_as_str, id(key))
found_representations.add(key_as_str)
if _does_obj_repr_evaluate_to_obj(key):
s = self.key_to_str(key) # do not format the key
eval_key_str = '[%s]' % (s,)
else:
eval_key_str = None
ret.append((key_as_str, val, eval_key_str))
if i > MAX_ITEMS_TO_HANDLE:
ret.append((TOO_LARGE_ATTR, TOO_LARGE_MSG, None))
break
        # in case the class extends a built-in type and has some additional fields
from_default_resolver = defaultResolver.get_contents_debug_adapter_protocol(dct, fmt)
if from_default_resolver:
ret = from_default_resolver + ret
if self.sort_keys:
ret = sorted(ret, key=lambda tup: sorted_attributes_key(tup[0]))
ret.append((GENERATED_LEN_ATTR_NAME, len(dct), partial(_apply_evaluate_name, evaluate_name='len(%s)')))
return ret
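    # Hedged sketch of the returned shape (added; not in the original module):
    # for dct = {'a': 1} the dict items contribute triples such as
    #
    #     ("'a'", 1, "['a']")
    #
    # followed by a (GENERATED_LEN_ATTR_NAME, 1, <len(%s) partial>) entry; the
    # evaluate-name slot is None whenever the key's repr would not evaluate
    # back to the key itself.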
def get_dictionary(self, dct):
ret = self.init_dict()
i = 0
for key, val in dct.items():
i += 1
# we need to add the id because otherwise we cannot find the real object to get its contents later on.
key = '%s (%s)' % (self.key_to_str(key), id(key))
ret[key] = val
if i > MAX_ITEMS_TO_HANDLE:
ret[TOO_LARGE_ATTR] = TOO_LARGE_MSG
break
        # in case the class extends a built-in type and has some additional fields
additional_fields = defaultResolver.get_dictionary(dct)
ret.update(additional_fields)
ret[GENERATED_LEN_ATTR_NAME] = len(dct)
return ret
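# Illustrative note on the key scheme above (added; the id value below is
# hypothetical): get_dictionary() embeds each key's id() so that resolve()
# can later find the original key object again, e.g. {'a': 1} yields an
# entry like
#
#     "'a' (140230018234896)" -> 1
#
# plus a GENERATED_LEN_ATTR_NAME entry; resolve() recovers the id via
# int(key.split('(')[-1][:-1]).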
def _apply_evaluate_name(parent_name, evaluate_name):
return evaluate_name % (parent_name,)
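# Illustrative note (added; not in the original module): the partials built
# with this helper are applied later with the parent's evaluate name, e.g.:
#
#     _apply_evaluate_name('my_dict', 'len(%s)')   # -> 'len(my_dict)'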
#=======================================================================================================================
# TupleResolver
#=======================================================================================================================
class TupleResolver: # to enumerate tuples and lists
def resolve(self, var, attribute):
'''
@param var: that's the original attribute
@param attribute: that's the key passed in the dict (as a string)
'''
if attribute in (GENERATED_LEN_ATTR_NAME, TOO_LARGE_ATTR):
return None
try:
return var[int(attribute)]
except:
return getattr(var, attribute)
def get_contents_debug_adapter_protocol(self, lst, fmt=None):
'''
        This method is to be used in the case where the variables are all saved by their ids
        (and as such don't need to have the `resolve` method called later on, so keys don't
        need to embed the reference in the key).
Note that the return should be ordered.
:return list(tuple(name:str, value:object, evaluateName:str))
'''
l = len(lst)
ret = []
format_str = '%0' + str(int(len(str(l - 1)))) + 'd'
if fmt is not None and fmt.get('hex', False):
format_str = '0x%0' + str(int(len(hex(l).lstrip('0x')))) + 'x'
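        # Added clarifying note (not in the original module): the width is the
        # digit count of the last index, so every index is zero-padded to the
        # same length and string-sorts in numeric order -- e.g. for a
        # 100-element list the keys render as '00' .. '99' (or '0x00'-style
        # keys when fmt requests hex).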
for i, item in enumerate(lst):
ret.append((format_str % i, item, '[%s]' % i))
if i > MAX_ITEMS_TO_HANDLE:
ret.append((TOO_LARGE_ATTR, TOO_LARGE_MSG, None))
break
# Needed in case the class extends the built-in type and has some additional fields.
from_default_resolver = defaultResolver.get_contents_debug_adapter_protocol(lst, fmt=fmt)
if from_default_resolver:
ret = from_default_resolver + ret
ret.append((GENERATED_LEN_ATTR_NAME, len(lst), partial(_apply_evaluate_name, evaluate_name='len(%s)')))
return ret
    def get_dictionary(self, var, fmt=None):
l = len(var)
d = {}
format_str = '%0' + str(int(len(str(l - 1)))) + 'd'
if fmt is not None and fmt.get('hex', False):
format_str = '0x%0' + str(int(len(hex(l).lstrip('0x')))) + 'x'
for i, item in enumerate(var):
d[format_str % i] = item
if i > MAX_ITEMS_TO_HANDLE:
d[TOO_LARGE_ATTR] = TOO_LARGE_MSG
break
        # In case the class extends a built-in type and has some additional fields.
additional_fields = defaultResolver.get_dictionary(var)
d.update(additional_fields)
d[GENERATED_LEN_ATTR_NAME] = len(var)
return d
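# Illustrative sketch (not part of pydevd): index keys are zero-padded to the
# width of the largest index so that they sort correctly as strings; with
# fmt={'hex': True} the same indexes are rendered in hexadecimal.
def _example_tuple_resolver_keys():
    resolver = TupleResolver()
    d = resolver.get_dictionary(list(range(12)))
    # Keys include '00'..'11' (width 2, since len(str(12 - 1)) == 2) plus 'len()'.
    return sorted(d)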
#=======================================================================================================================
# SetResolver
#=======================================================================================================================
class SetResolver:
'''
Resolves a set as dict id(object)->object
'''
def get_contents_debug_adapter_protocol(self, obj, fmt=None):
ret = []
for i, item in enumerate(obj):
ret.append((str(id(item)), item, None))
if i > MAX_ITEMS_TO_HANDLE:
ret.append((TOO_LARGE_ATTR, TOO_LARGE_MSG, None))
break
# Needed in case the class extends the built-in type and has some additional fields.
from_default_resolver = defaultResolver.get_contents_debug_adapter_protocol(obj, fmt=fmt)
if from_default_resolver:
ret = from_default_resolver + ret
ret.append((GENERATED_LEN_ATTR_NAME, len(obj), partial(_apply_evaluate_name, evaluate_name='len(%s)')))
return ret
def resolve(self, var, attribute):
if attribute in (GENERATED_LEN_ATTR_NAME, TOO_LARGE_ATTR):
return None
try:
attribute = int(attribute)
except:
return getattr(var, attribute)
for v in var:
if id(v) == attribute:
return v
raise UnableToResolveVariableException('Unable to resolve %s in %s' % (attribute, var))
def get_dictionary(self, var):
d = {}
for i, item in enumerate(var):
d[str(id(item))] = item
if i > MAX_ITEMS_TO_HANDLE:
d[TOO_LARGE_ATTR] = TOO_LARGE_MSG
break
        # In case the class extends a built-in type and has some additional fields.
additional_fields = defaultResolver.get_dictionary(var)
d.update(additional_fields)
d[GENERATED_LEN_ATTR_NAME] = len(var)
return d
def change_var_from_name(self, container, name, new_value):
# The name given in this case must be the id(item), so, we can actually
# iterate in the set and see which item matches the given id.
try:
# Check that the new value can actually be added to a set (i.e.: it's hashable/comparable).
set().add(new_value)
except:
return None
for item in container:
if str(id(item)) == name:
container.remove(item)
container.add(new_value)
return str(id(new_value))
return None
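# Illustrative sketch (not part of pydevd): set items have no stable name, so
# each one is keyed by str(id(item)); resolve() scans the set again for the
# object with that id, and change_var_from_name() first verifies that the new
# value is hashable by adding it to a throwaway set.
def _example_set_resolver_roundtrip():
    resolver = SetResolver()
    s = {'x', 'y'}
    d = resolver.get_dictionary(s)
    item_id = next(k for k in d if k.isdigit())  # str(id(item)) keys are all digits.
    return resolver.resolve(s, item_id)  # The original object, found by id.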
#=======================================================================================================================
# InstanceResolver
#=======================================================================================================================
class InstanceResolver:
def resolve(self, var, attribute):
field = var.__class__.getDeclaredField(attribute)
field.setAccessible(True)
return field.get(var)
def get_dictionary(self, obj):
ret = {}
declaredFields = obj.__class__.getDeclaredFields()
for i in range(len(declaredFields)):
name = declaredFields[i].getName()
try:
declaredFields[i].setAccessible(True)
ret[name] = declaredFields[i].get(obj)
except:
pydev_log.exception()
return ret
#=======================================================================================================================
# JyArrayResolver
#=======================================================================================================================
class JyArrayResolver:
    '''
    This resolves a regular Object[] array from Java.
    '''
def resolve(self, var, attribute):
if attribute == GENERATED_LEN_ATTR_NAME:
return None
return var[int(attribute)]
def get_dictionary(self, obj):
ret = {}
for i in range(len(obj)):
            ret[i] = obj[i]
ret[GENERATED_LEN_ATTR_NAME] = len(obj)
return ret
#=======================================================================================================================
# MultiValueDictResolver
#=======================================================================================================================
class MultiValueDictResolver(DictResolver):
def resolve(self, dct, key):
if key in (GENERATED_LEN_ATTR_NAME, TOO_LARGE_ATTR):
return None
# ok, we have to iterate over the items to find the one that matches the id, because that's the only way
# to actually find the reference from the string we have before.
expected_id = int(key.split('(')[-1][:-1])
for key in list(dct.keys()):
val = dct.getlist(key)
if id(key) == expected_id:
return val
raise UnableToResolveVariableException()
#=======================================================================================================================
# DjangoFormResolver
#=======================================================================================================================
class DjangoFormResolver(DefaultResolver):
def get_dictionary(self, var, names=None):
# Do not call self.errors because it is a property and has side effects.
names, used___dict__ = self.get_names(var)
has_errors_attr = False
if "errors" in names:
has_errors_attr = True
names.remove("errors")
d = defaultResolver.get_dictionary(var, names=names, used___dict__=used___dict__)
if has_errors_attr:
try:
errors_attr = getattr(var, "_errors")
except:
errors_attr = None
d["errors"] = errors_attr
return d
#=======================================================================================================================
# DequeResolver
#=======================================================================================================================
class DequeResolver(TupleResolver):
def get_dictionary(self, var):
d = TupleResolver.get_dictionary(self, var)
d['maxlen'] = getattr(var, 'maxlen', None)
return d
#=======================================================================================================================
# OrderedDictResolver
#=======================================================================================================================
class OrderedDictResolver(DictResolver):
sort_keys = False
def init_dict(self):
return OrderedDict()
#=======================================================================================================================
# FrameResolver
#=======================================================================================================================
class FrameResolver:
'''
This resolves a frame.
'''
def resolve(self, obj, attribute):
if attribute == '__internals__':
return defaultResolver.get_dictionary(obj)
if attribute == 'stack':
return self.get_frame_stack(obj)
if attribute == 'f_locals':
return obj.f_locals
return None
def get_dictionary(self, obj):
ret = {}
ret['__internals__'] = defaultResolver.get_dictionary(obj)
ret['stack'] = self.get_frame_stack(obj)
ret['f_locals'] = obj.f_locals
return ret
def get_frame_stack(self, frame):
ret = []
if frame is not None:
ret.append(self.get_frame_name(frame))
while frame.f_back:
frame = frame.f_back
ret.append(self.get_frame_name(frame))
return ret
def get_frame_name(self, frame):
if frame is None:
return 'None'
try:
name = basename(frame.f_code.co_filename)
return 'frame: %s [%s:%s] id:%s' % (frame.f_code.co_name, name, frame.f_lineno, id(frame))
except:
return 'frame object'
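# Illustrative sketch (not part of pydevd): get_frame_stack() walks f_back from
# the given frame down to the bottom of the stack, labeling each frame as
# 'frame: <func> [<file>:<line>] id:<id>'.
def _example_frame_stack():
    import sys
    return FrameResolver().get_frame_stack(sys._getframe())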
defaultResolver = DefaultResolver()
dictResolver = DictResolver()
tupleResolver = TupleResolver()
instanceResolver = InstanceResolver()
jyArrayResolver = JyArrayResolver()
setResolver = SetResolver()
multiValueDictResolver = MultiValueDictResolver()
djangoFormResolver = DjangoFormResolver()
dequeResolver = DequeResolver()
orderedDictResolver = OrderedDictResolver()
frameResolver = FrameResolver()
dapGrouperResolver = DAPGrouperResolver()
class InspectStub:
def isbuiltin(self, _args):
return False
def isroutine(self, object):
return False
try:
import inspect
except:
inspect = InspectStub()
def get_var_scope(attr_name, attr_value, evaluate_name, handle_return_values):
if attr_name.startswith("'"):
if attr_name.endswith("'"):
attr_name = attr_name[1:-1]
else:
i = attr_name.find("__' (")
if i >= 0:
# Handle attr_name such as: >>'__name__' (1732494379184)<<
attr_name = attr_name[1: i + 2]
if handle_return_values and attr_name == RETURN_VALUES_DICT:
return ''
elif attr_name == GENERATED_LEN_ATTR_NAME:
return ''
if attr_name.startswith('__') and attr_name.endswith('__'):
return DAPGrouper.SCOPE_SPECIAL_VARS
if attr_name.startswith('_') or attr_name.endswith('__'):
return DAPGrouper.SCOPE_PROTECTED_VARS
try:
if inspect.isroutine(attr_value) or isinstance(attr_value, MethodWrapperType):
return DAPGrouper.SCOPE_FUNCTION_VARS
elif inspect.isclass(attr_value):
return DAPGrouper.SCOPE_CLASS_VARS
except:
# It's possible that isinstance throws an exception when dealing with user-code.
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 0:
pydev_log.exception()
return ''
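# Illustrative sketch (not part of pydevd): how attribute names and values map
# to the DAP grouping scopes used in the variables view.
def _example_var_scopes():
    assert get_var_scope('__doc__', None, '', False) == DAPGrouper.SCOPE_SPECIAL_VARS
    assert get_var_scope('_cache', None, '', False) == DAPGrouper.SCOPE_PROTECTED_VARS
    assert get_var_scope('name', None, '', False) == ''  # Plain variable: no grouping.
    assert get_var_scope('f', len, '', False) == DAPGrouper.SCOPE_FUNCTION_VARS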
| 25,500 | Python | 34.222376 | 120 | 0.504549 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_console.py | '''A helper file for the pydev debugger (REPL) console
'''
import sys
import traceback
from _pydevd_bundle.pydevconsole_code import InteractiveConsole, _EvalAwaitInNewEventLoop
from _pydev_bundle import _pydev_completer
from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface, BaseStdIn
from _pydev_bundle.pydev_imports import Exec
from _pydev_bundle.pydev_override import overrides
from _pydevd_bundle import pydevd_save_locals
from _pydevd_bundle.pydevd_io import IOBuf
from pydevd_tracing import get_exception_traceback_str
from _pydevd_bundle.pydevd_xml import make_valid_xml_value
import inspect
from _pydevd_bundle.pydevd_save_locals import update_globals_and_locals
CONSOLE_OUTPUT = "output"
CONSOLE_ERROR = "error"
#=======================================================================================================================
# ConsoleMessage
#=======================================================================================================================
class ConsoleMessage:
"""Console Messages
"""
def __init__(self):
self.more = False
# List of tuple [('error', 'error_message'), ('message_list', 'output_message')]
self.console_messages = []
def add_console_message(self, message_type, message):
"""add messages in the console_messages list
"""
for m in message.split("\n"):
if m.strip():
self.console_messages.append((message_type, m))
def update_more(self, more):
"""more is set to true if further input is required from the user
else more is set to false
"""
self.more = more
def to_xml(self):
"""Create an XML for console message_list, error and more (true/false)
<xml>
<message_list>console message_list</message_list>
<error>console error</error>
<more>true/false</more>
</xml>
"""
makeValid = make_valid_xml_value
xml = '<xml><more>%s</more>' % (self.more)
for message_type, message in self.console_messages:
xml += '<%s message="%s"></%s>' % (message_type, makeValid(message), message_type)
xml += '</xml>'
return xml
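# Illustrative sketch (not part of pydevd): multi-line messages are split into
# one tag per line, so 'hi\nthere' yields two <output> entries, e.g.:
# <xml><more>False</more><output message="hi"></output><output message="there"></output></xml>
def _example_console_message_xml():
    msg = ConsoleMessage()
    msg.add_console_message(CONSOLE_OUTPUT, 'hi\nthere')
    return msg.to_xml()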
#=======================================================================================================================
# _DebugConsoleStdIn
#=======================================================================================================================
class _DebugConsoleStdIn(BaseStdIn):
@overrides(BaseStdIn.readline)
def readline(self, *args, **kwargs):
sys.stderr.write('Warning: Reading from stdin is still not supported in this console.\n')
return '\n'
#=======================================================================================================================
# DebugConsole
#=======================================================================================================================
class DebugConsole(InteractiveConsole, BaseInterpreterInterface):
"""Wrapper around code.InteractiveConsole, in order to send
errors and outputs to the debug console
"""
@overrides(BaseInterpreterInterface.create_std_in)
def create_std_in(self, *args, **kwargs):
try:
if not self.__buffer_output:
return sys.stdin
except:
pass
return _DebugConsoleStdIn() # If buffered, raw_input is not supported in this console.
@overrides(InteractiveConsole.push)
def push(self, line, frame, buffer_output=True):
"""Change built-in stdout and stderr methods by the
new custom StdMessage.
execute the InteractiveConsole.push.
Change the stdout and stderr back be the original built-ins
:param buffer_output: if False won't redirect the output.
Return boolean (True if more input is required else False),
output_messages and input_messages
"""
self.__buffer_output = buffer_output
more = False
if buffer_output:
original_stdout = sys.stdout
original_stderr = sys.stderr
try:
try:
self.frame = frame
if buffer_output:
out = sys.stdout = IOBuf()
err = sys.stderr = IOBuf()
more = self.add_exec(line)
except Exception:
exc = get_exception_traceback_str()
if buffer_output:
err.buflist.append("Internal Error: %s" % (exc,))
else:
sys.stderr.write("Internal Error: %s\n" % (exc,))
finally:
# Remove frame references.
self.frame = None
frame = None
if buffer_output:
sys.stdout = original_stdout
sys.stderr = original_stderr
if buffer_output:
return more, out.buflist, err.buflist
else:
return more, [], []
@overrides(BaseInterpreterInterface.do_add_exec)
def do_add_exec(self, line):
return InteractiveConsole.push(self, line)
@overrides(InteractiveConsole.runcode)
def runcode(self, code):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to
display a traceback. All exceptions are caught except
SystemExit, which is reraised.
A note about KeyboardInterrupt: this exception may occur
elsewhere in this code, and may not always be caught. The
caller should be prepared to deal with it.
"""
try:
updated_globals = self.get_namespace()
initial_globals = updated_globals.copy()
updated_locals = None
is_async = False
if hasattr(inspect, 'CO_COROUTINE'):
is_async = inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE
if is_async:
t = _EvalAwaitInNewEventLoop(code, updated_globals, updated_locals)
t.start()
t.join()
update_globals_and_locals(updated_globals, initial_globals, self.frame)
if t.exc:
raise t.exc[1].with_traceback(t.exc[2])
else:
try:
exec(code, updated_globals, updated_locals)
finally:
update_globals_and_locals(updated_globals, initial_globals, self.frame)
except SystemExit:
raise
except:
# In case sys.excepthook called, use original excepthook #PyDev-877: Debug console freezes with Python 3.5+
# (showtraceback does it on python 3.5 onwards)
sys.excepthook = sys.__excepthook__
try:
self.showtraceback()
finally:
sys.__excepthook__ = sys.excepthook
def get_namespace(self):
dbg_namespace = {}
dbg_namespace.update(self.frame.f_globals)
dbg_namespace.update(self.frame.f_locals) # locals later because it has precedence over the actual globals
return dbg_namespace
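# Illustrative sketch (not part of pydevd): push() buffers output with the
# classic swap-and-restore pattern around IOBuf, roughly:
def _example_buffered_exec(code_line):
    original_stdout, original_stderr = sys.stdout, sys.stderr
    out, err = IOBuf(), IOBuf()
    sys.stdout, sys.stderr = out, err
    try:
        exec(code_line, {})
    finally:
        sys.stdout, sys.stderr = original_stdout, original_stderr
    return out.buflist, err.buflist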
#=======================================================================================================================
# InteractiveConsoleCache
#=======================================================================================================================
class InteractiveConsoleCache:
thread_id = None
frame_id = None
interactive_console_instance = None
# Note: On Jython 2.1 we can't use classmethod or staticmethod, so, just make the functions below free-functions.
def get_interactive_console(thread_id, frame_id, frame, console_message):
"""returns the global interactive console.
interactive console should have been initialized by this time
:rtype: DebugConsole
"""
if InteractiveConsoleCache.thread_id == thread_id and InteractiveConsoleCache.frame_id == frame_id:
return InteractiveConsoleCache.interactive_console_instance
InteractiveConsoleCache.interactive_console_instance = DebugConsole()
InteractiveConsoleCache.thread_id = thread_id
InteractiveConsoleCache.frame_id = frame_id
console_stacktrace = traceback.extract_stack(frame, limit=1)
if console_stacktrace:
current_context = console_stacktrace[0] # top entry from stacktrace
context_message = 'File "%s", line %s, in %s' % (current_context[0], current_context[1], current_context[2])
console_message.add_console_message(CONSOLE_OUTPUT, "[Current context]: %s" % (context_message,))
return InteractiveConsoleCache.interactive_console_instance
def clear_interactive_console():
InteractiveConsoleCache.thread_id = None
InteractiveConsoleCache.frame_id = None
InteractiveConsoleCache.interactive_console_instance = None
def execute_console_command(frame, thread_id, frame_id, line, buffer_output=True):
"""fetch an interactive console instance from the cache and
push the received command to the console.
create and return an instance of console_message
"""
console_message = ConsoleMessage()
interpreter = get_interactive_console(thread_id, frame_id, frame, console_message)
more, output_messages, error_messages = interpreter.push(line, frame, buffer_output)
console_message.update_more(more)
for message in output_messages:
console_message.add_console_message(CONSOLE_OUTPUT, message)
for message in error_messages:
console_message.add_console_message(CONSOLE_ERROR, message)
return console_message
def get_description(frame, thread_id, frame_id, expression):
console_message = ConsoleMessage()
interpreter = get_interactive_console(thread_id, frame_id, frame, console_message)
try:
interpreter.frame = frame
return interpreter.getDescription(expression)
finally:
interpreter.frame = None
def get_completions(frame, act_tok):
""" fetch all completions, create xml for the same
return the completions xml
"""
return _pydev_completer.generate_completions_as_xml(frame, act_tok)
| 10,179 | Python | 36.564576 | 120 | 0.580411 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_constants.py | '''
This module holds the constants used for specifying the states of the debugger.
'''
from __future__ import nested_scopes
import platform
import weakref
import struct
import warnings
import functools
from contextlib import contextmanager
STATE_RUN = 1
STATE_SUSPEND = 2
PYTHON_SUSPEND = 1
DJANGO_SUSPEND = 2
JINJA2_SUSPEND = 3
int_types = (int,)
# The types module does not include MethodWrapperType.
try:
MethodWrapperType = type([].__str__)
except:
MethodWrapperType = None
import sys # Note: the sys import must be here anyways (others depend on it)
# Preload codecs to avoid imports to them later on which can potentially halt the debugger.
import codecs as _codecs
for _codec in ["ascii", "utf8", "utf-8", "latin1", "latin-1", "idna"]:
_codecs.lookup(_codec)
class DebugInfoHolder:
# we have to put it here because it can be set through the command line (so, the
# already imported references would not have it).
# General information
DEBUG_TRACE_LEVEL = 0 # 0 = critical, 1 = info, 2 = debug, 3 = verbose
# Flags to debug specific points of the code.
DEBUG_RECORD_SOCKET_READS = False
DEBUG_TRACE_BREAKPOINTS = -1
PYDEVD_DEBUG_FILE = None
# Any filename that starts with these strings is not traced nor shown to the user.
# In Python 3.7 "<frozen ..." appears multiple times during import and should be ignored for the user.
# In PyPy "<builtin> ..." can appear and should be ignored for the user.
# <attrs is used internally by attrs
# <__array_function__ is used by numpy
IGNORE_BASENAMES_STARTING_WITH = ('<frozen ', '<builtin', '<attrs', '<__array_function__')
# Note: <string> has special heuristics to know whether it should be traced or not (it's part of
# user code when it's the <string> used in python -c and part of the library otherwise).
# Any filename that starts with these strings is considered user (project) code. Note
# that files for which we have a source mapping are also considered as a part of the project.
USER_CODE_BASENAMES_STARTING_WITH = ('<ipython',)
# Any filename that starts with these strings is considered library code (note: checked after USER_CODE_BASENAMES_STARTING_WITH).
LIBRARY_CODE_BASENAMES_STARTING_WITH = ('<',)
IS_CPYTHON = platform.python_implementation() == 'CPython'
# Hold a reference to the original _getframe (because psyco will change that as soon as it's imported)
IS_IRONPYTHON = sys.platform == 'cli'
try:
get_frame = sys._getframe
if IS_IRONPYTHON:
def get_frame():
try:
return sys._getframe()
except ValueError:
pass
except AttributeError:
def get_frame():
raise AssertionError('sys._getframe not available (possible causes: enable -X:Frames on IronPython?)')
# Used to determine the maximum size of each variable passed to eclipse -- having a big value here may make
# the communication slower -- as the variables are being gathered lazily in the latest version of eclipse,
# this value was raised from 200 to 1000.
MAXIMUM_VARIABLE_REPRESENTATION_SIZE = 1000
# Prefix for saving functions return values in locals
RETURN_VALUES_DICT = '__pydevd_ret_val_dict'
GENERATED_LEN_ATTR_NAME = 'len()'
import os
from _pydevd_bundle import pydevd_vm_type
# Constant detects when running on Jython/windows properly later on.
IS_WINDOWS = sys.platform == 'win32'
IS_LINUX = sys.platform in ('linux', 'linux2')
IS_MAC = sys.platform == 'darwin'
IS_64BIT_PROCESS = sys.maxsize > (2 ** 32)
IS_JYTHON = pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON
IS_PYPY = platform.python_implementation() == 'PyPy'
if IS_JYTHON:
import java.lang.System # @UnresolvedImport
IS_WINDOWS = java.lang.System.getProperty("os.name").lower().startswith("windows")
USE_CUSTOM_SYS_CURRENT_FRAMES = not hasattr(sys, '_current_frames') or IS_PYPY
USE_CUSTOM_SYS_CURRENT_FRAMES_MAP = USE_CUSTOM_SYS_CURRENT_FRAMES and (IS_PYPY or IS_IRONPYTHON)
if USE_CUSTOM_SYS_CURRENT_FRAMES:
# Some versions of Jython don't have it (but we can provide a replacement)
if IS_JYTHON:
from java.lang import NoSuchFieldException
from org.python.core import ThreadStateMapping
try:
cachedThreadState = ThreadStateMapping.getDeclaredField('globalThreadStates') # Dev version
except NoSuchFieldException:
cachedThreadState = ThreadStateMapping.getDeclaredField('cachedThreadState') # Release Jython 2.7.0
cachedThreadState.accessible = True
thread_states = cachedThreadState.get(ThreadStateMapping)
def _current_frames():
as_array = thread_states.entrySet().toArray()
ret = {}
for thread_to_state in as_array:
thread = thread_to_state.getKey()
if thread is None:
continue
thread_state = thread_to_state.getValue()
if thread_state is None:
continue
frame = thread_state.frame
if frame is None:
continue
ret[thread.getId()] = frame
return ret
elif USE_CUSTOM_SYS_CURRENT_FRAMES_MAP:
constructed_tid_to_last_frame = {}
# IronPython doesn't have it. Let's use our workaround...
def _current_frames():
return constructed_tid_to_last_frame
else:
raise RuntimeError('Unable to proceed (sys._current_frames not available in this Python implementation).')
else:
_current_frames = sys._current_frames
IS_PYTHON_STACKLESS = "stackless" in sys.version.lower()
CYTHON_SUPPORTED = False
python_implementation = platform.python_implementation()
if python_implementation == 'CPython':
# Only available for CPython!
CYTHON_SUPPORTED = True
#=======================================================================================================================
# Python 3?
#=======================================================================================================================
IS_PY36_OR_GREATER = sys.version_info >= (3, 6)
IS_PY37_OR_GREATER = sys.version_info >= (3, 7)
IS_PY38_OR_GREATER = sys.version_info >= (3, 8)
IS_PY39_OR_GREATER = sys.version_info >= (3, 9)
IS_PY310_OR_GREATER = sys.version_info >= (3, 10)
IS_PY311_OR_GREATER = sys.version_info >= (3, 11)
def version_str(v):
return '.'.join((str(x) for x in v[:3])) + ''.join((str(x) for x in v[3:]))
PY_VERSION_STR = version_str(sys.version_info)
try:
PY_IMPL_VERSION_STR = version_str(sys.implementation.version)
except AttributeError:
PY_IMPL_VERSION_STR = ''
try:
PY_IMPL_NAME = sys.implementation.name
except AttributeError:
PY_IMPL_NAME = ''
ENV_TRUE_LOWER_VALUES = ('yes', 'true', '1')
ENV_FALSE_LOWER_VALUES = ('no', 'false', '0')
def is_true_in_env(env_key):
if isinstance(env_key, tuple):
# If a tuple, return True if any of those ends up being true.
for v in env_key:
if is_true_in_env(v):
return True
return False
else:
return os.getenv(env_key, '').lower() in ENV_TRUE_LOWER_VALUES
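# Illustrative sketch (not part of pydevd): a tuple env key acts as an OR over
# several variables, each matched case-insensitively against ENV_TRUE_LOWER_VALUES.
def _example_is_true_in_env():
    os.environ['PYDEVD_EXAMPLE_FLAG'] = 'True'  # Hypothetical variable, for illustration.
    return is_true_in_env(('PYDEVD_MISSING_FLAG', 'PYDEVD_EXAMPLE_FLAG'))  # True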
def as_float_in_env(env_key, default):
value = os.getenv(env_key)
if value is None:
return default
try:
return float(value)
except Exception:
raise RuntimeError(
'Error: expected the env variable: %s to be set to a float value. Found: %s' % (
env_key, value))
def as_int_in_env(env_key, default):
value = os.getenv(env_key)
if value is None:
return default
try:
return int(value)
except Exception:
raise RuntimeError(
            'Error: expected the env variable: %s to be set to an int value. Found: %s' % (
env_key, value))
# If true in env, use gevent mode.
SUPPORT_GEVENT = is_true_in_env('GEVENT_SUPPORT')
# Opt-in support to show gevent paused greenlets. False by default because if too many greenlets are
# paused the UI can slow-down (i.e.: if 1000 greenlets are paused, each one would be shown separate
# as a different thread, but if the UI isn't optimized for that the experience is lacking...).
GEVENT_SHOW_PAUSED_GREENLETS = is_true_in_env('GEVENT_SHOW_PAUSED_GREENLETS')
DISABLE_FILE_VALIDATION = is_true_in_env('PYDEVD_DISABLE_FILE_VALIDATION')
GEVENT_SUPPORT_NOT_SET_MSG = os.getenv(
'GEVENT_SUPPORT_NOT_SET_MSG',
'It seems that the gevent monkey-patching is being used.\n'
'Please set an environment variable with:\n'
'GEVENT_SUPPORT=True\n'
'to enable gevent support in the debugger.'
)
USE_LIB_COPY = SUPPORT_GEVENT
INTERACTIVE_MODE_AVAILABLE = sys.platform in ('darwin', 'win32') or os.getenv('DISPLAY') is not None
# If true in env, forces cython to be used (raises error if not available).
# If false in env, disables it.
# If not specified, uses default heuristic to determine if it should be loaded.
USE_CYTHON_FLAG = os.getenv('PYDEVD_USE_CYTHON')
if USE_CYTHON_FLAG is not None:
USE_CYTHON_FLAG = USE_CYTHON_FLAG.lower()
if USE_CYTHON_FLAG not in ENV_TRUE_LOWER_VALUES and USE_CYTHON_FLAG not in ENV_FALSE_LOWER_VALUES:
raise RuntimeError('Unexpected value for PYDEVD_USE_CYTHON: %s (enable with one of: %s, disable with one of: %s)' % (
USE_CYTHON_FLAG, ENV_TRUE_LOWER_VALUES, ENV_FALSE_LOWER_VALUES))
else:
if not CYTHON_SUPPORTED:
USE_CYTHON_FLAG = 'no'
# If true in env, forces frame eval to be used (raises error if not available).
# If false in env, disables it.
# If not specified, uses default heuristic to determine if it should be loaded.
PYDEVD_USE_FRAME_EVAL = os.getenv('PYDEVD_USE_FRAME_EVAL', '').lower()
PYDEVD_IPYTHON_COMPATIBLE_DEBUGGING = is_true_in_env('PYDEVD_IPYTHON_COMPATIBLE_DEBUGGING')
# If specified in PYDEVD_IPYTHON_CONTEXT it must be a string with the basename
# and then the name of 2 methods in which the evaluate is done.
PYDEVD_IPYTHON_CONTEXT = ('interactiveshell.py', 'run_code', 'run_ast_nodes')
_ipython_ctx = os.getenv('PYDEVD_IPYTHON_CONTEXT')
if _ipython_ctx:
PYDEVD_IPYTHON_CONTEXT = tuple(x.strip() for x in _ipython_ctx.split(','))
assert len(PYDEVD_IPYTHON_CONTEXT) == 3, 'Invalid PYDEVD_IPYTHON_CONTEXT: %s' % (_ipython_ctx,)
# Use to disable loading the lib to set tracing to all threads (default is using heuristics based on where we're running).
LOAD_NATIVE_LIB_FLAG = os.getenv('PYDEVD_LOAD_NATIVE_LIB', '').lower()
LOG_TIME = os.getenv('PYDEVD_LOG_TIME', 'true').lower() in ENV_TRUE_LOWER_VALUES
SHOW_COMPILE_CYTHON_COMMAND_LINE = is_true_in_env('PYDEVD_SHOW_COMPILE_CYTHON_COMMAND_LINE')
LOAD_VALUES_ASYNC = is_true_in_env('PYDEVD_LOAD_VALUES_ASYNC')
DEFAULT_VALUE = "__pydevd_value_async"
ASYNC_EVAL_TIMEOUT_SEC = 60
NEXT_VALUE_SEPARATOR = "__pydev_val__"
BUILTINS_MODULE_NAME = 'builtins'
SHOW_DEBUG_INFO_ENV = is_true_in_env(('PYCHARM_DEBUG', 'PYDEV_DEBUG', 'PYDEVD_DEBUG'))
# Pandas customization.
PANDAS_MAX_ROWS = as_int_in_env('PYDEVD_PANDAS_MAX_ROWS', 60)
PANDAS_MAX_COLS = as_int_in_env('PYDEVD_PANDAS_MAX_COLS', 10)
PANDAS_MAX_COLWIDTH = as_int_in_env('PYDEVD_PANDAS_MAX_COLWIDTH', 50)
# If getting an attribute or computing some value is too slow, let the user know if the given timeout elapses.
PYDEVD_WARN_SLOW_RESOLVE_TIMEOUT = as_float_in_env('PYDEVD_WARN_SLOW_RESOLVE_TIMEOUT', 0.15)
# This timeout is used to track the time to send a message saying that the evaluation
# is taking too long and possible mitigations.
PYDEVD_WARN_EVALUATION_TIMEOUT = as_float_in_env('PYDEVD_WARN_EVALUATION_TIMEOUT', 3.)
# If True in env shows a thread dump when the evaluation times out.
PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT = is_true_in_env('PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT')
# This timeout is used only when the mode that all threads are stopped/resumed at once is used
# (i.e.: multi_threads_single_notification)
#
# In this mode, if some evaluation doesn't finish until this timeout, we notify the user
# and then resume all threads until the evaluation finishes.
#
# A negative value will disable the timeout and a value of 0 will automatically run all threads
# (without any notification) when the evaluation is started and pause all threads when the
# evaluation is finished. A positive value will run all threads after the timeout
# elapses.
PYDEVD_UNBLOCK_THREADS_TIMEOUT = as_float_in_env('PYDEVD_UNBLOCK_THREADS_TIMEOUT', -1.)
# Timeout to interrupt a thread (so, if some evaluation doesn't finish until this
# timeout, the thread doing the evaluation is interrupted).
# A value <= 0 means this is disabled.
# See: _pydevd_bundle.pydevd_timeout.create_interrupt_this_thread_callback for details
# on how the thread interruption works (there are some caveats related to it).
PYDEVD_INTERRUPT_THREAD_TIMEOUT = as_float_in_env('PYDEVD_INTERRUPT_THREAD_TIMEOUT', -1)
# If PYDEVD_APPLY_PATCHING_TO_HIDE_PYDEVD_THREADS is set to False, the patching to hide pydevd threads won't be applied.
PYDEVD_APPLY_PATCHING_TO_HIDE_PYDEVD_THREADS = os.getenv('PYDEVD_APPLY_PATCHING_TO_HIDE_PYDEVD_THREADS', 'true').lower() in ENV_TRUE_LOWER_VALUES
EXCEPTION_TYPE_UNHANDLED = 'UNHANDLED'
EXCEPTION_TYPE_USER_UNHANDLED = 'USER_UNHANDLED'
EXCEPTION_TYPE_HANDLED = 'HANDLED'
if SHOW_DEBUG_INFO_ENV:
# show debug info before the debugger start
DebugInfoHolder.DEBUG_RECORD_SOCKET_READS = True
DebugInfoHolder.DEBUG_TRACE_LEVEL = 3
DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS = 1
DebugInfoHolder.PYDEVD_DEBUG_FILE = os.getenv('PYDEVD_DEBUG_FILE')
def protect_libraries_from_patching():
"""
In this function we delete some modules from `sys.modules` dictionary and import them again inside
`_pydev_saved_modules` in order to save their original copies there. After that we can use these
saved modules within the debugger to protect them from patching by external libraries (e.g. gevent).
"""
patched = ['threading', 'thread', '_thread', 'time', 'socket', 'queue', 'select',
'xmlrpclib', 'SimpleXMLRPCServer', 'BaseHTTPServer', 'SocketServer',
'xmlrpc.client', 'xmlrpc.server', 'http.server', 'socketserver']
for name in patched:
try:
__import__(name)
except:
pass
patched_modules = dict([(k, v) for k, v in sys.modules.items()
if k in patched])
for name in patched_modules:
del sys.modules[name]
# import for side effects
import _pydev_bundle._pydev_saved_modules
for name in patched_modules:
sys.modules[name] = patched_modules[name]
if USE_LIB_COPY:
protect_libraries_from_patching()
from _pydev_bundle._pydev_saved_modules import thread, threading
_fork_safe_locks = []
if IS_JYTHON:
def ForkSafeLock(rlock=False):
if rlock:
return threading.RLock()
else:
return threading.Lock()
else:
class ForkSafeLock(object):
'''
A lock which is fork-safe (when a fork is done, `pydevd_constants.after_fork()`
should be called to reset the locks in the new process to avoid deadlocks
from a lock which was locked during the fork).
Note:
Unlike `threading.Lock` this class is not completely atomic, so, doing:
lock = ForkSafeLock()
with lock:
...
is different than using `threading.Lock` directly because the tracing may
find an additional function call on `__enter__` and on `__exit__`, so, it's
not recommended to use this in all places, only where the forking may be important
(so, for instance, the locks on PyDB should not be changed to this lock because
of that -- and those should all be collected in the new process because PyDB itself
should be completely cleared anyways).
It's possible to overcome this limitation by using `ForkSafeLock.acquire` and
`ForkSafeLock.release` instead of the context manager (as acquire/release are
bound to the original implementation, whereas __enter__/__exit__ is not due to Python
limitations).
'''
def __init__(self, rlock=False):
self._rlock = rlock
self._init()
_fork_safe_locks.append(weakref.ref(self))
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
return self._lock.__exit__(exc_type, exc_val, exc_tb)
def _init(self):
if self._rlock:
self._lock = threading.RLock()
else:
self._lock = thread.allocate_lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
_fork_safe_locks.append(weakref.ref(self))
def after_fork():
'''
Must be called after a fork operation (will reset the ForkSafeLock).
'''
global _fork_safe_locks
locks = _fork_safe_locks[:]
_fork_safe_locks = []
for lock in locks:
lock = lock()
if lock is not None:
lock._init()
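# Illustrative sketch (not part of pydevd): as per the docstring above, hot
# paths should prefer the bound acquire/release over the context manager, and
# a forked child must call after_fork() to re-create locks that may have been
# held during the fork.
def _example_fork_safe_lock():
    lock = ForkSafeLock(rlock=True)
    lock.acquire()
    try:
        pass  # Critical section.
    finally:
        lock.release()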
_thread_id_lock = ForkSafeLock()
thread_get_ident = thread.get_ident
def as_str(s):
assert isinstance(s, str)
return s
@contextmanager
def filter_all_warnings():
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
yield
def silence_warnings_decorator(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
with filter_all_warnings():
return func(*args, **kwargs)
return new_func
def sorted_dict_repr(d):
s = sorted(d.items(), key=lambda x:str(x[0]))
return '{' + ', '.join(('%r: %r' % x) for x in s) + '}'
def iter_chars(b):
# In Python 2, we can iterate bytes or str with individual characters, but Python 3 onwards
# changed that behavior so that when iterating bytes we actually get ints!
if isinstance(b, bytes):
# i.e.: do something as struct.unpack('3c', b)
return iter(struct.unpack(str(len(b)) + 'c', b))
return iter(b)
if IS_JYTHON:
def NO_FTRACE(frame, event, arg):
return None
else:
_curr_trace = sys.gettrace()
# Set a temporary trace which does nothing for us to test (otherwise setting frame.f_trace has no
# effect).
def _temp_trace(frame, event, arg):
return None
sys.settrace(_temp_trace)
def _check_ftrace_set_none():
'''
Will throw an error when executing a line event
'''
sys._getframe().f_trace = None
_line_event = 1
_line_event = 2
try:
_check_ftrace_set_none()
def NO_FTRACE(frame, event, arg):
frame.f_trace = None
return None
except TypeError:
def NO_FTRACE(frame, event, arg):
# In Python <= 2.6 and <= 3.4, if we're tracing a method, frame.f_trace may not be set
# to None, it must always be set to a tracing function.
# See: tests_python.test_tracing_gotchas.test_tracing_gotchas
#
# Note: Python 2.7 sometimes works and sometimes it doesn't depending on the minor
# version because of https://bugs.python.org/issue20041 (although bug reports didn't
# include the minor version, so, mark for any Python 2.7 as I'm not completely sure
# the fix in later 2.7 versions is the same one we're dealing with).
return None
sys.settrace(_curr_trace)
#=======================================================================================================================
# get_pid
#=======================================================================================================================
def get_pid():
try:
return os.getpid()
except AttributeError:
try:
# Jython does not have it!
import java.lang.management.ManagementFactory # @UnresolvedImport -- just for jython
pid = java.lang.management.ManagementFactory.getRuntimeMXBean().getName()
return pid.replace('@', '_')
except:
# ok, no pid available (will be unable to debug multiple processes)
return '000001'
def clear_cached_thread_id(thread):
with _thread_id_lock:
try:
if thread.__pydevd_id__ != 'console_main':
# The console_main is a special thread id used in the console and its id should never be reset
# (otherwise we may no longer be able to get its variables -- see: https://www.brainwy.com/tracker/PyDev/776).
del thread.__pydevd_id__
except AttributeError:
pass
# Don't let threads be collected (so that id(thread) is guaranteed to be unique).
_thread_id_to_thread_found = {}
def _get_or_compute_thread_id_with_lock(thread, is_current_thread):
with _thread_id_lock:
# We do a new check with the lock in place just to be sure that nothing changed
tid = getattr(thread, '__pydevd_id__', None)
if tid is not None:
return tid
_thread_id_to_thread_found[id(thread)] = thread
# Note: don't use thread.ident because a new thread may have the
# same id from an old thread.
pid = get_pid()
tid = 'pid_%s_id_%s' % (pid, id(thread))
thread.__pydevd_id__ = tid
return tid
def get_current_thread_id(thread):
'''
    Note: the difference between get_current_thread_id and get_thread_id is that
    for the current thread we can get the thread id while thread.ident
    is still not set in the Thread instance.
'''
try:
# Fast path without getting lock.
tid = thread.__pydevd_id__
if tid is None:
# Fix for https://www.brainwy.com/tracker/PyDev/645
            # if __pydevd_id__ is None, recalculate it... also, use a heuristic
            # that always gives us the same id for the thread (using thread.ident or id(thread)).
raise AttributeError()
except AttributeError:
tid = _get_or_compute_thread_id_with_lock(thread, is_current_thread=True)
return tid
def get_thread_id(thread):
try:
# Fast path without getting lock.
tid = thread.__pydevd_id__
if tid is None:
# Fix for https://www.brainwy.com/tracker/PyDev/645
            # if __pydevd_id__ is None, recalculate it... also, use a heuristic
            # that always gives us the same id for the thread (using thread.ident or id(thread)).
raise AttributeError()
except AttributeError:
tid = _get_or_compute_thread_id_with_lock(thread, is_current_thread=False)
return tid
def set_thread_id(thread, thread_id):
with _thread_id_lock:
thread.__pydevd_id__ = thread_id
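# Illustrative sketch (not part of pydevd): thread ids are process-qualified
# strings derived from id(thread) rather than thread.ident (idents can be
# reused after a thread dies), typically 'pid_<os pid>_id_<id(thread)>'.
def _example_thread_id():
    t = threading.current_thread()
    return get_current_thread_id(t)  # e.g. 'pid_1234_id_140034754329872'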
#=======================================================================================================================
# Null
#=======================================================================================================================
class Null:
"""
Gotten from: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205
"""
def __init__(self, *args, **kwargs):
return None
def __call__(self, *args, **kwargs):
return self
def __enter__(self, *args, **kwargs):
return self
def __exit__(self, *args, **kwargs):
return self
def __getattr__(self, mname):
if len(mname) > 4 and mname[:2] == '__' and mname[-2:] == '__':
# Don't pretend to implement special method names.
raise AttributeError(mname)
return self
def __setattr__(self, name, value):
return self
def __delattr__(self, name):
return self
def __repr__(self):
return "<Null>"
def __str__(self):
return "Null"
def __len__(self):
return 0
def __getitem__(self):
return self
def __setitem__(self, *args, **kwargs):
pass
def write(self, *args, **kwargs):
pass
def __nonzero__(self):
return 0
def __iter__(self):
return iter(())
# Default instance
NULL = Null()
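# Illustrative sketch (not part of pydevd): NULL absorbs any interaction, so
# optional collaborators can be called unconditionally without None checks.
def _example_null_object():
    dbg = NULL  # Stand-in for "no debugger attached".
    dbg.writer.add_command('anything')  # A no-op chain; never raises.
    return bool(dbg)  # False, since Null.__len__ returns 0.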
class KeyifyList(object):
def __init__(self, inner, key):
self.inner = inner
self.key = key
def __len__(self):
return len(self.inner)
def __getitem__(self, k):
return self.key(self.inner[k])
def call_only_once(func):
'''
To be used as a decorator
@call_only_once
def func():
        print('Calling func only this time')
Actually, in PyDev it must be called as:
func = call_only_once(func) to support older versions of Python.
'''
def new_func(*args, **kwargs):
if not new_func._called:
new_func._called = True
return func(*args, **kwargs)
new_func._called = False
return new_func
# Protocol where each line is a new message (text is quoted to prevent new lines).
# payload is xml
QUOTED_LINE_PROTOCOL = 'quoted-line'
ARGUMENT_QUOTED_LINE_PROTOCOL = 'protocol-quoted-line'
# Uses http protocol to provide a new message.
# i.e.: Content-Length:xxx\r\n\r\npayload
# payload is xml
HTTP_PROTOCOL = 'http'
ARGUMENT_HTTP_PROTOCOL = 'protocol-http'
# Message is sent without any header.
# payload is json
JSON_PROTOCOL = 'json'
ARGUMENT_JSON_PROTOCOL = 'json-dap'
# Same header as the HTTP_PROTOCOL
# payload is json
HTTP_JSON_PROTOCOL = 'http_json'
ARGUMENT_HTTP_JSON_PROTOCOL = 'json-dap-http'
ARGUMENT_PPID = 'ppid'
class _GlobalSettings:
protocol = QUOTED_LINE_PROTOCOL
def set_protocol(protocol):
expected = (HTTP_PROTOCOL, QUOTED_LINE_PROTOCOL, JSON_PROTOCOL, HTTP_JSON_PROTOCOL)
assert protocol in expected, 'Protocol (%s) should be one of: %s' % (
protocol, expected)
_GlobalSettings.protocol = protocol
def get_protocol():
return _GlobalSettings.protocol
def is_json_protocol():
return _GlobalSettings.protocol in (JSON_PROTOCOL, HTTP_JSON_PROTOCOL)
class GlobalDebuggerHolder:
'''
Holder for the global debugger.
'''
global_dbg = None # Note: don't rename (the name is used in our attach to process)
def get_global_debugger():
return GlobalDebuggerHolder.global_dbg
GetGlobalDebugger = get_global_debugger # Backward-compatibility
def set_global_debugger(dbg):
GlobalDebuggerHolder.global_dbg = dbg
if __name__ == '__main__':
if Null():
sys.stdout.write('here\n')
| 26,450 | Python | 32.355612 | 145 | 0.637883 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_extension_api.py | import abc
# borrowed from from six
def _with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
# =======================================================================================================================
# AbstractResolver
# =======================================================================================================================
class _AbstractResolver(_with_metaclass(abc.ABCMeta)):
"""
This class exists only for documentation purposes to explain how to create a resolver.
Some examples on how to resolve things:
- list: get_dictionary could return a dict with index->item and use the index to resolve it later
- set: get_dictionary could return a dict with id(object)->object and reiterate in that array to resolve it later
- arbitrary instance: get_dictionary could return dict with attr_name->attr and use getattr to resolve it later
"""
@abc.abstractmethod
def resolve(self, var, attribute):
"""
In this method, we'll resolve some child item given the string representation of the item in the key
representing the previously asked dictionary.
@param var: this is the actual variable to be resolved.
@param attribute: this is the string representation of a key previously returned in get_dictionary.
"""
raise NotImplementedError
@abc.abstractmethod
def get_dictionary(self, var):
"""
@param var: this is the variable that should have its children gotten.
@return: a dictionary where each pair key, value should be shown to the user as children items
in the variables view for the given var.
"""
raise NotImplementedError
class _AbstractProvider(_with_metaclass(abc.ABCMeta)):
@abc.abstractmethod
def can_provide(self, type_object, type_name):
raise NotImplementedError
# =======================================================================================================================
# API CLASSES:
# =======================================================================================================================
class TypeResolveProvider(_AbstractResolver, _AbstractProvider):
"""
Implement this in an extension to provide a custom resolver, see _AbstractResolver
"""
class StrPresentationProvider(_AbstractProvider):
"""
Implement this in an extension to provide a str presentation for a type
"""
@abc.abstractmethod
def get_str(self, val):
raise NotImplementedError
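# Illustrative sketch (not part of pydevd): a minimal provider for a
# hypothetical `Vector` user type. Concrete extensions are normally shipped in
# the pydevd_plugins.extensions namespace package so the debugger can find them.
class _ExampleVectorStrProvider(StrPresentationProvider):
    def can_provide(self, type_object, type_name):
        return type_name == 'Vector'  # Hypothetical type name.
    def get_str(self, val):
        return 'Vector(%s elements)' % (len(val),)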
class DebuggerEventHandler(_with_metaclass(abc.ABCMeta)):
"""
Implement this to receive lifecycle events from the debugger
"""
def on_debugger_modules_loaded(self, **kwargs):
"""
        This method is invoked after all debugger modules are loaded. It is useful for importing and/or
        patching debugger modules at a safe time.
:param kwargs: This is intended to be flexible dict passed from the debugger.
Currently passes the debugger version
"""
| 3,288 | Python | 36.375 | 121 | 0.575122 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_suspended_frames.py | from contextlib import contextmanager
import sys
from _pydevd_bundle.pydevd_constants import get_frame, RETURN_VALUES_DICT, \
ForkSafeLock, GENERATED_LEN_ATTR_NAME, silence_warnings_decorator
from _pydevd_bundle.pydevd_xml import get_variable_details, get_type
from _pydev_bundle.pydev_override import overrides
from _pydevd_bundle.pydevd_resolver import sorted_attributes_key, TOO_LARGE_ATTR, get_var_scope
from _pydevd_bundle.pydevd_safe_repr import SafeRepr
from _pydev_bundle import pydev_log
from _pydevd_bundle import pydevd_vars
from _pydev_bundle.pydev_imports import Exec
from _pydevd_bundle.pydevd_frame_utils import FramesList
from _pydevd_bundle.pydevd_utils import ScopeRequest, DAPGrouper, Timer
class _AbstractVariable(object):
# Default attributes in class, set in instance.
name = None
value = None
evaluate_name = None
def __init__(self, py_db):
assert py_db is not None
self.py_db = py_db
def get_name(self):
return self.name
def get_value(self):
return self.value
def get_variable_reference(self):
return id(self.value)
def get_var_data(self, fmt=None, **safe_repr_custom_attrs):
'''
:param dict fmt:
Format expected by the DAP (keys: 'hex': bool, 'rawString': bool)
'''
timer = Timer()
safe_repr = SafeRepr()
if fmt is not None:
safe_repr.convert_to_hex = fmt.get('hex', False)
safe_repr.raw_value = fmt.get('rawString', False)
for key, val in safe_repr_custom_attrs.items():
setattr(safe_repr, key, val)
type_name, _type_qualifier, _is_exception_on_eval, resolver, value = get_variable_details(
self.value, to_string=safe_repr)
is_raw_string = type_name in ('str', 'bytes', 'bytearray')
attributes = []
if is_raw_string:
attributes.append('rawString')
name = self.name
if self._is_return_value:
attributes.append('readOnly')
name = '(return) %s' % (name,)
elif name in (TOO_LARGE_ATTR, GENERATED_LEN_ATTR_NAME):
attributes.append('readOnly')
try:
if self.value.__class__ == DAPGrouper:
type_name = ''
except:
pass # Ignore errors accessing __class__.
var_data = {
'name': name,
'value': value,
'type': type_name,
}
if self.evaluate_name is not None:
var_data['evaluateName'] = self.evaluate_name
if resolver is not None: # I.e.: it's a container
var_data['variablesReference'] = self.get_variable_reference()
else:
var_data['variablesReference'] = 0 # It's mandatory (although if == 0 it doesn't have children).
if len(attributes) > 0:
var_data['presentationHint'] = {'attributes': attributes}
timer.report_if_compute_repr_attr_slow('', name, type_name)
return var_data
def get_children_variables(self, fmt=None, scope=None):
raise NotImplementedError()
def get_child_variable_named(self, name, fmt=None, scope=None):
for child_var in self.get_children_variables(fmt=fmt, scope=scope):
if child_var.get_name() == name:
return child_var
return None
def _group_entries(self, lst, handle_return_values):
scope_to_grouper = {}
group_entries = []
if isinstance(self.value, DAPGrouper):
new_lst = lst
else:
new_lst = []
get_presentation = self.py_db.variable_presentation.get_presentation
# Now that we have the contents, group items.
for attr_name, attr_value, evaluate_name in lst:
scope = get_var_scope(attr_name, attr_value, evaluate_name, handle_return_values)
entry = (attr_name, attr_value, evaluate_name)
if scope:
presentation = get_presentation(scope)
if presentation == 'hide':
continue
elif presentation == 'inline':
new_lst.append(entry)
else: # group
if scope not in scope_to_grouper:
grouper = DAPGrouper(scope)
scope_to_grouper[scope] = grouper
else:
grouper = scope_to_grouper[scope]
grouper.contents_debug_adapter_protocol.append(entry)
else:
new_lst.append(entry)
for scope in DAPGrouper.SCOPES_SORTED:
grouper = scope_to_grouper.get(scope)
if grouper is not None:
group_entries.append((scope, grouper, None))
return new_lst, group_entries
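# Illustrative sketch (not part of pydevd): the dict built by get_var_data()
# follows the DAP Variable shape; hypothetical output for 'x' bound to a list:
def _example_var_data_shape():
    return {
        'name': 'x',
        'value': '[1, 2, 3]',
        'type': 'list',
        'evaluateName': 'x',
        'variablesReference': 42,  # Non-zero only for containers.
    }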
class _ObjectVariable(_AbstractVariable):
def __init__(self, py_db, name, value, register_variable, is_return_value=False, evaluate_name=None, frame=None):
_AbstractVariable.__init__(self, py_db)
self.frame = frame
self.name = name
self.value = value
self._register_variable = register_variable
self._register_variable(self)
self._is_return_value = is_return_value
self.evaluate_name = evaluate_name
@silence_warnings_decorator
@overrides(_AbstractVariable.get_children_variables)
def get_children_variables(self, fmt=None, scope=None):
_type, _type_name, resolver = get_type(self.value)
children_variables = []
if resolver is not None: # i.e.: it's a container.
if hasattr(resolver, 'get_contents_debug_adapter_protocol'):
# The get_contents_debug_adapter_protocol needs to return sorted.
lst = resolver.get_contents_debug_adapter_protocol(self.value, fmt=fmt)
else:
# If there's no special implementation, the default is sorting the keys.
dct = resolver.get_dictionary(self.value)
lst = sorted(dct.items(), key=lambda tup: sorted_attributes_key(tup[0]))
# No evaluate name in this case.
lst = [(key, value, None) for (key, value) in lst]
lst, group_entries = self._group_entries(lst, handle_return_values=False)
if group_entries:
lst = group_entries + lst
parent_evaluate_name = self.evaluate_name
if parent_evaluate_name:
for key, val, evaluate_name in lst:
if evaluate_name is not None:
if callable(evaluate_name):
evaluate_name = evaluate_name(parent_evaluate_name)
else:
evaluate_name = parent_evaluate_name + evaluate_name
variable = _ObjectVariable(
self.py_db, key, val, self._register_variable, evaluate_name=evaluate_name, frame=self.frame)
children_variables.append(variable)
else:
for key, val, evaluate_name in lst:
# No evaluate name
variable = _ObjectVariable(self.py_db, key, val, self._register_variable, frame=self.frame)
children_variables.append(variable)
return children_variables
def change_variable(self, name, value, py_db, fmt=None):
children_variable = self.get_child_variable_named(name)
if children_variable is None:
return None
var_data = children_variable.get_var_data()
evaluate_name = var_data.get('evaluateName')
if not evaluate_name:
# Note: right now we only pass control to the resolver in the cases where
# there's no evaluate name (the idea being that if we can evaluate it,
# we can use that evaluation to set the value too -- if in the future
# a case where this isn't true is found this logic may need to be changed).
_type, _type_name, container_resolver = get_type(self.value)
if hasattr(container_resolver, 'change_var_from_name'):
try:
new_value = eval(value)
except:
return None
new_key = container_resolver.change_var_from_name(self.value, name, new_value)
if new_key is not None:
return _ObjectVariable(
self.py_db, new_key, new_value, self._register_variable, evaluate_name=None, frame=self.frame)
return None
else:
return None
frame = self.frame
if frame is None:
return None
try:
# This handles the simple cases (such as dict, list, object)
Exec('%s=%s' % (evaluate_name, value), frame.f_globals, frame.f_locals)
except:
return None
return self.get_child_variable_named(name, fmt=fmt)
def sorted_variables_key(obj):
return sorted_attributes_key(obj.name)
class _FrameVariable(_AbstractVariable):
def __init__(self, py_db, frame, register_variable):
_AbstractVariable.__init__(self, py_db)
self.frame = frame
self.name = self.frame.f_code.co_name
self.value = frame
self._register_variable = register_variable
self._register_variable(self)
def change_variable(self, name, value, py_db, fmt=None):
frame = self.frame
pydevd_vars.change_attr_expression(frame, name, value, py_db)
return self.get_child_variable_named(name, fmt=fmt)
@silence_warnings_decorator
@overrides(_AbstractVariable.get_children_variables)
def get_children_variables(self, fmt=None, scope=None):
children_variables = []
if scope is not None:
assert isinstance(scope, ScopeRequest)
scope = scope.scope
if scope in ('locals', None):
dct = self.frame.f_locals
elif scope == 'globals':
dct = self.frame.f_globals
else:
raise AssertionError('Unexpected scope: %s' % (scope,))
lst, group_entries = self._group_entries([(x[0], x[1], None) for x in list(dct.items()) if x[0] != '_pydev_stop_at_break'], handle_return_values=True)
group_variables = []
for key, val, _ in group_entries:
# Make sure that the contents in the group are also sorted.
val.contents_debug_adapter_protocol.sort(key=lambda v:sorted_attributes_key(v[0]))
variable = _ObjectVariable(self.py_db, key, val, self._register_variable, False, key, frame=self.frame)
group_variables.append(variable)
for key, val, _ in lst:
is_return_value = key == RETURN_VALUES_DICT
if is_return_value:
for return_key, return_value in val.items():
variable = _ObjectVariable(
self.py_db, return_key, return_value, self._register_variable, is_return_value, '%s[%r]' % (key, return_key), frame=self.frame)
children_variables.append(variable)
else:
variable = _ObjectVariable(self.py_db, key, val, self._register_variable, is_return_value, key, frame=self.frame)
children_variables.append(variable)
# Frame variables always sorted.
children_variables.sort(key=sorted_variables_key)
if group_variables:
# Groups have priority over other variables.
children_variables = group_variables + children_variables
return children_variables
class _FramesTracker(object):
'''
This is a helper class to be used to track frames when a thread becomes suspended.
'''
def __init__(self, suspended_frames_manager, py_db):
self._suspended_frames_manager = suspended_frames_manager
self.py_db = py_db
self._frame_id_to_frame = {}
# Note that a given frame may appear in multiple threads when we have custom
# frames added, but as those are coroutines, this map will point to the actual
# main thread (which is the one that needs to be suspended for us to get the
# variables).
self._frame_id_to_main_thread_id = {}
# A map of the suspended thread id -> list(frames ids) -- note that
# frame ids are kept in order (the first one is the suspended frame).
self._thread_id_to_frame_ids = {}
self._thread_id_to_frames_list = {}
# The main suspended thread (if this is a coroutine this isn't the id of the
# coroutine thread, it's the id of the actual suspended thread).
self._main_thread_id = None
# Helper to know if it was already untracked.
self._untracked = False
# We need to be thread-safe!
self._lock = ForkSafeLock()
self._variable_reference_to_variable = {}
def _register_variable(self, variable):
variable_reference = variable.get_variable_reference()
self._variable_reference_to_variable[variable_reference] = variable
def obtain_as_variable(self, name, value, evaluate_name=None, frame=None):
if evaluate_name is None:
evaluate_name = name
variable_reference = id(value)
variable = self._variable_reference_to_variable.get(variable_reference)
if variable is not None:
return variable
# Still not created, let's do it now.
return _ObjectVariable(
self.py_db, name, value, self._register_variable, is_return_value=False, evaluate_name=evaluate_name, frame=frame)
def get_main_thread_id(self):
return self._main_thread_id
def get_variable(self, variable_reference):
return self._variable_reference_to_variable[variable_reference]
def track(self, thread_id, frames_list, frame_custom_thread_id=None):
'''
:param thread_id:
The thread id to be used for this frame.
:param FramesList frames_list:
A list of frames to be tracked (the first is the topmost frame which is suspended at the given thread).
:param frame_custom_thread_id:
            If not None, this is the thread id to use for the custom frame (i.e.: coroutine).
'''
assert frames_list.__class__ == FramesList
with self._lock:
coroutine_or_main_thread_id = frame_custom_thread_id or thread_id
if coroutine_or_main_thread_id in self._suspended_frames_manager._thread_id_to_tracker:
sys.stderr.write('pydevd: Something is wrong. Tracker being added twice to the same thread id.\n')
self._suspended_frames_manager._thread_id_to_tracker[coroutine_or_main_thread_id] = self
self._main_thread_id = thread_id
frame_ids_from_thread = self._thread_id_to_frame_ids.setdefault(
coroutine_or_main_thread_id, [])
self._thread_id_to_frames_list[coroutine_or_main_thread_id] = frames_list
for frame in frames_list:
frame_id = id(frame)
self._frame_id_to_frame[frame_id] = frame
_FrameVariable(self.py_db, frame, self._register_variable) # Instancing is enough to register.
self._suspended_frames_manager._variable_reference_to_frames_tracker[frame_id] = self
frame_ids_from_thread.append(frame_id)
self._frame_id_to_main_thread_id[frame_id] = thread_id
frame = None
def untrack_all(self):
with self._lock:
if self._untracked:
# Being called multiple times is expected (e.g.: for the set next statement).
return
self._untracked = True
for thread_id in self._thread_id_to_frame_ids:
self._suspended_frames_manager._thread_id_to_tracker.pop(thread_id, None)
for frame_id in self._frame_id_to_frame:
del self._suspended_frames_manager._variable_reference_to_frames_tracker[frame_id]
self._frame_id_to_frame.clear()
self._frame_id_to_main_thread_id.clear()
self._thread_id_to_frame_ids.clear()
self._thread_id_to_frames_list.clear()
self._main_thread_id = None
self._suspended_frames_manager = None
self._variable_reference_to_variable.clear()
def get_frames_list(self, thread_id):
with self._lock:
return self._thread_id_to_frames_list.get(thread_id)
def find_frame(self, thread_id, frame_id):
with self._lock:
return self._frame_id_to_frame.get(frame_id)
def create_thread_suspend_command(self, thread_id, stop_reason, message, suspend_type):
with self._lock:
# First one is topmost frame suspended.
frames_list = self._thread_id_to_frames_list[thread_id]
cmd = self.py_db.cmd_factory.make_thread_suspend_message(
self.py_db, thread_id, frames_list, stop_reason, message, suspend_type)
frames_list = None
return cmd
class SuspendedFramesManager(object):
def __init__(self):
self._thread_id_to_fake_frames = {}
self._thread_id_to_tracker = {}
# Mappings
self._variable_reference_to_frames_tracker = {}
def _get_tracker_for_variable_reference(self, variable_reference):
tracker = self._variable_reference_to_frames_tracker.get(variable_reference)
if tracker is not None:
return tracker
for _thread_id, tracker in self._thread_id_to_tracker.items():
try:
tracker.get_variable(variable_reference)
except KeyError:
pass
else:
return tracker
return None
def get_thread_id_for_variable_reference(self, variable_reference):
'''
We can't evaluate variable references values on any thread, only in the suspended
thread (the main reason for this is that in UI frameworks inspecting a UI object
from a different thread can potentially crash the application).
:param int variable_reference:
The variable reference (can be either a frame id or a reference to a previously obtained variable).
:return str:
The thread id for the thread to be used to inspect the given variable reference or
None if the thread was already resumed.
'''
frames_tracker = self._get_tracker_for_variable_reference(variable_reference)
if frames_tracker is not None:
return frames_tracker.get_main_thread_id()
return None
def get_frame_tracker(self, thread_id):
return self._thread_id_to_tracker.get(thread_id)
def get_variable(self, variable_reference):
'''
:raises KeyError
'''
frames_tracker = self._get_tracker_for_variable_reference(variable_reference)
if frames_tracker is None:
raise KeyError()
return frames_tracker.get_variable(variable_reference)
def get_frames_list(self, thread_id):
tracker = self._thread_id_to_tracker.get(thread_id)
if tracker is None:
return None
return tracker.get_frames_list(thread_id)
@contextmanager
def track_frames(self, py_db):
tracker = _FramesTracker(self, py_db)
try:
yield tracker
finally:
tracker.untrack_all()
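# A minimal usage sketch (hypothetical py_db, thread_id, frames_list and frame_id
# values, not pydevd's actual suspension flow):
#
#     manager = SuspendedFramesManager()
#     with manager.track_frames(py_db) as tracker:
#         tracker.track(thread_id, frames_list)
#         frame = manager.find_frame(thread_id, frame_id)
#     # On exiting the context the tracker is untracked automatically.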
def add_fake_frame(self, thread_id, frame_id, frame):
self._thread_id_to_fake_frames.setdefault(thread_id, {})[int(frame_id)] = frame
def find_frame(self, thread_id, frame_id):
try:
if frame_id == "*":
return get_frame() # any frame is specified with "*"
frame_id = int(frame_id)
fake_frames = self._thread_id_to_fake_frames.get(thread_id)
if fake_frames is not None:
frame = fake_frames.get(frame_id)
if frame is not None:
return frame
frames_tracker = self._thread_id_to_tracker.get(thread_id)
if frames_tracker is not None:
frame = frames_tracker.find_frame(thread_id, frame_id)
if frame is not None:
return frame
return None
except:
pydev_log.exception()
return None
| 20,559 | Python | 37.501873 | 158 | 0.594776 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_cython_wrapper.py | import sys
try:
try:
from _pydevd_bundle_ext import pydevd_cython as mod
except ImportError:
from _pydevd_bundle import pydevd_cython as mod
except ImportError:
import struct
try:
is_python_64bit = (struct.calcsize('P') == 8)
except:
# In Jython this call fails, but this is Ok, we don't support Jython for speedups anyways.
raise ImportError
plat = '32'
if is_python_64bit:
plat = '64'
# We also accept things as:
#
# _pydevd_bundle.pydevd_cython_win32_27_32
# _pydevd_bundle.pydevd_cython_win32_34_64
#
# to have multiple pre-compiled pyds distributed along the IDE
# (generated by build_tools/build_binaries_windows.py).
mod_name = 'pydevd_cython_%s_%s%s_%s' % (sys.platform, sys.version_info[0], sys.version_info[1], plat)
check_name = '_pydevd_bundle.%s' % (mod_name,)
mod = getattr(__import__(check_name), mod_name)
# Regardless of how it was found, make sure it's later available as the
# initial name so that the expected types from cython in frame eval
# are valid.
sys.modules['_pydevd_bundle.pydevd_cython'] = mod
trace_dispatch = mod.trace_dispatch
PyDBAdditionalThreadInfo = mod.PyDBAdditionalThreadInfo
set_additional_thread_info = mod.set_additional_thread_info
global_cache_skips = mod.global_cache_skips
global_cache_frame_skips = mod.global_cache_frame_skips
_set_additional_thread_info_lock = mod._set_additional_thread_info_lock
fix_top_level_trace_and_get_trace_func = mod.fix_top_level_trace_and_get_trace_func
version = getattr(mod, 'version', 0)
| 1,600 | Python | 29.207547 | 106 | 0.694375 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_stackless.py | from __future__ import nested_scopes
import weakref
import sys
from _pydevd_bundle.pydevd_comm import get_global_debugger
from _pydevd_bundle.pydevd_constants import call_only_once
from _pydev_bundle._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_custom_frames import update_custom_frame, remove_custom_frame, add_custom_frame
import stackless # @UnresolvedImport
from _pydev_bundle import pydev_log
# Used so that we don't lose the id (because we'd remove the entry when the tasklet is not alive and would generate a
# new id for the same tasklet).
class TaskletToLastId:
'''
So, why not a WeakKeyDictionary?
The problem is that removals from the WeakKeyDictionary will create a new tasklet (as it adds a callback to
remove the key when it's garbage-collected), so, we can get into a recursion.
'''
def __init__(self):
self.tasklet_ref_to_last_id = {}
self._i = 0
def get(self, tasklet):
return self.tasklet_ref_to_last_id.get(weakref.ref(tasklet))
def __setitem__(self, tasklet, last_id):
self.tasklet_ref_to_last_id[weakref.ref(tasklet)] = last_id
self._i += 1
if self._i % 100 == 0: # Collect once every 100 additions to the dict (no need to rush).
for tasklet_ref in list(self.tasklet_ref_to_last_id.keys()):
if tasklet_ref() is None:
del self.tasklet_ref_to_last_id[tasklet_ref]
_tasklet_to_last_id = TaskletToLastId()
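# A minimal usage sketch (assuming `tasklet` is a live stackless.tasklet):
#
#     last_id = _tasklet_to_last_id.get(tasklet)  # None the first time it's seen
#     if last_id is None:
#         _tasklet_to_last_id[tasklet] = 42  # stored under weakref.ref(tasklet)
#         assert _tasklet_to_last_id.get(tasklet) == 42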
#=======================================================================================================================
# _TaskletInfo
#=======================================================================================================================
class _TaskletInfo:
_last_id = 0
def __init__(self, tasklet_weakref, tasklet):
self.frame_id = None
self.tasklet_weakref = tasklet_weakref
last_id = _tasklet_to_last_id.get(tasklet)
if last_id is None:
_TaskletInfo._last_id += 1
last_id = _TaskletInfo._last_id
_tasklet_to_last_id[tasklet] = last_id
self._tasklet_id = last_id
self.update_name()
def update_name(self):
tasklet = self.tasklet_weakref()
if tasklet:
if tasklet.blocked:
state = 'blocked'
elif tasklet.paused:
state = 'paused'
elif tasklet.scheduled:
state = 'scheduled'
else:
state = '<UNEXPECTED>'
try:
name = tasklet.name
except AttributeError:
if tasklet.is_main:
name = 'MainTasklet'
else:
name = 'Tasklet-%s' % (self._tasklet_id,)
thread_id = tasklet.thread_id
if thread_id != -1:
for thread in threading.enumerate():
if thread.ident == thread_id:
if thread.name:
thread_name = "of %s" % (thread.name,)
else:
thread_name = "of Thread-%s" % (thread.name or str(thread_id),)
break
else:
# should not happen.
thread_name = "of Thread-%s" % (str(thread_id),)
thread = None
else:
# tasklet is no longer bound to a thread, because its thread ended
thread_name = "without thread"
tid = id(tasklet)
tasklet = None
else:
state = 'dead'
name = 'Tasklet-%s' % (self._tasklet_id,)
thread_name = ""
tid = '-'
self.tasklet_name = '%s %s %s (%s)' % (state, name, thread_name, tid)
if not hasattr(stackless.tasklet, "trace_function"):
# Bug https://bitbucket.org/stackless-dev/stackless/issue/42
# is not fixed (Stackless releases before 2014).
def update_name(self):
tasklet = self.tasklet_weakref()
if tasklet:
try:
name = tasklet.name
except AttributeError:
if tasklet.is_main:
name = 'MainTasklet'
else:
name = 'Tasklet-%s' % (self._tasklet_id,)
thread_id = tasklet.thread_id
for thread in threading.enumerate():
if thread.ident == thread_id:
if thread.name:
thread_name = "of %s" % (thread.name,)
else:
thread_name = "of Thread-%s" % (thread.name or str(thread_id),)
break
else:
# should not happen.
thread_name = "of Thread-%s" % (str(thread_id),)
thread = None
tid = id(tasklet)
tasklet = None
else:
name = 'Tasklet-%s' % (self._tasklet_id,)
thread_name = ""
tid = '-'
self.tasklet_name = '%s %s (%s)' % (name, thread_name, tid)
_weak_tasklet_registered_to_info = {}
#=======================================================================================================================
# get_tasklet_info
#=======================================================================================================================
def get_tasklet_info(tasklet):
return register_tasklet_info(tasklet)
#=======================================================================================================================
# register_tasklet_info
#=======================================================================================================================
def register_tasklet_info(tasklet):
r = weakref.ref(tasklet)
info = _weak_tasklet_registered_to_info.get(r)
if info is None:
info = _weak_tasklet_registered_to_info[r] = _TaskletInfo(r, tasklet)
return info
_application_set_schedule_callback = None
#=======================================================================================================================
# _schedule_callback
#=======================================================================================================================
def _schedule_callback(prev, next):
'''
Called when a context is stopped or a new context is made runnable.
'''
try:
if not prev and not next:
return
current_frame = sys._getframe()
if next:
register_tasklet_info(next)
# Ok, making next runnable: set the tracing facility in it.
debugger = get_global_debugger()
if debugger is not None:
next.trace_function = debugger.get_thread_local_trace_func()
frame = next.frame
if frame is current_frame:
frame = frame.f_back
if hasattr(frame, 'f_trace'): # Note: can be None (but hasattr should cover for that too).
frame.f_trace = debugger.get_thread_local_trace_func()
debugger = None
if prev:
register_tasklet_info(prev)
try:
for tasklet_ref, tasklet_info in list(_weak_tasklet_registered_to_info.items()): # Make sure it's a copy!
tasklet = tasklet_ref()
if tasklet is None or not tasklet.alive:
# Garbage-collected already!
try:
del _weak_tasklet_registered_to_info[tasklet_ref]
except KeyError:
pass
if tasklet_info.frame_id is not None:
remove_custom_frame(tasklet_info.frame_id)
else:
is_running = stackless.get_thread_info(tasklet.thread_id)[1] is tasklet
if tasklet is prev or (tasklet is not next and not is_running):
# the tasklet won't run after this scheduler action:
# - the tasklet is the previous tasklet
# - it is not the next tasklet and it is not an already running tasklet
frame = tasklet.frame
if frame is current_frame:
frame = frame.f_back
if frame is not None:
# print >>sys.stderr, "SchedCB: %r, %d, '%s', '%s'" % (tasklet, frame.f_lineno, _filename, base)
debugger = get_global_debugger()
if debugger is not None and debugger.get_file_type(frame) is None:
tasklet_info.update_name()
if tasklet_info.frame_id is None:
tasklet_info.frame_id = add_custom_frame(frame, tasklet_info.tasklet_name, tasklet.thread_id)
else:
update_custom_frame(tasklet_info.frame_id, frame, tasklet.thread_id, name=tasklet_info.tasklet_name)
debugger = None
elif tasklet is next or is_running:
if tasklet_info.frame_id is not None:
# Remove info about stackless suspended when it starts to run.
remove_custom_frame(tasklet_info.frame_id)
tasklet_info.frame_id = None
finally:
tasklet = None
tasklet_info = None
frame = None
except:
pydev_log.exception()
if _application_set_schedule_callback is not None:
return _application_set_schedule_callback(prev, next)
if not hasattr(stackless.tasklet, "trace_function"):
# Older versions of Stackless, released before 2014
# This code does not work reliably! It is affected by several
# stackless bugs: Stackless issues #44, #42, #40
def _schedule_callback(prev, next):
'''
Called when a context is stopped or a new context is made runnable.
'''
try:
if not prev and not next:
return
if next:
register_tasklet_info(next)
# Ok, making next runnable: set the tracing facility in it.
debugger = get_global_debugger()
if debugger is not None and next.frame:
if hasattr(next.frame, 'f_trace'):
next.frame.f_trace = debugger.get_thread_local_trace_func()
debugger = None
if prev:
register_tasklet_info(prev)
try:
for tasklet_ref, tasklet_info in list(_weak_tasklet_registered_to_info.items()): # Make sure it's a copy!
tasklet = tasklet_ref()
if tasklet is None or not tasklet.alive:
# Garbage-collected already!
try:
del _weak_tasklet_registered_to_info[tasklet_ref]
except KeyError:
pass
if tasklet_info.frame_id is not None:
remove_custom_frame(tasklet_info.frame_id)
else:
if tasklet.paused or tasklet.blocked or tasklet.scheduled:
if tasklet.frame and tasklet.frame.f_back:
f_back = tasklet.frame.f_back
debugger = get_global_debugger()
if debugger is not None and debugger.get_file_type(f_back) is None:
if tasklet_info.frame_id is None:
tasklet_info.frame_id = add_custom_frame(f_back, tasklet_info.tasklet_name, tasklet.thread_id)
else:
update_custom_frame(tasklet_info.frame_id, f_back, tasklet.thread_id)
debugger = None
elif tasklet.is_current:
if tasklet_info.frame_id is not None:
# Remove info about stackless suspended when it starts to run.
remove_custom_frame(tasklet_info.frame_id)
tasklet_info.frame_id = None
finally:
tasklet = None
tasklet_info = None
f_back = None
except:
pydev_log.exception()
if _application_set_schedule_callback is not None:
return _application_set_schedule_callback(prev, next)
_original_setup = stackless.tasklet.setup
#=======================================================================================================================
# setup
#=======================================================================================================================
def setup(self, *args, **kwargs):
'''
Called to run a new tasklet: rebind the creation so that we can trace it.
'''
f = self.tempval
def new_f(old_f, args, kwargs):
debugger = get_global_debugger()
if debugger is not None:
debugger.enable_tracing()
debugger = None
# Remove our own traces :)
self.tempval = old_f
register_tasklet_info(self)
# Hover over old_f to see the tasklet callable being created and over *args and **kwargs to see its parameters.
return old_f(*args, **kwargs)
# This is the way to tell stackless that the function it should execute is our function, not the original one. Note:
# setting tempval is the same as calling bind(new_f), but it seems that there's no other way to get the currently
# bound function, so, keeping on using tempval instead of calling bind (which is actually the same thing in a better
# API).
self.tempval = new_f
return _original_setup(self, f, args, kwargs)
#=======================================================================================================================
# __call__
#=======================================================================================================================
def __call__(self, *args, **kwargs):
'''
Called to run a new tasklet: rebind the creation so that we can trace it.
'''
return setup(self, *args, **kwargs)
_original_run = stackless.run
#=======================================================================================================================
# run
#=======================================================================================================================
def run(*args, **kwargs):
debugger = get_global_debugger()
if debugger is not None:
debugger.enable_tracing()
debugger = None
return _original_run(*args, **kwargs)
#=======================================================================================================================
# patch_stackless
#=======================================================================================================================
def patch_stackless():
'''
This function should be called to patch the stackless module so that new tasklets are properly tracked in the
debugger.
'''
global _application_set_schedule_callback
_application_set_schedule_callback = stackless.set_schedule_callback(_schedule_callback)
def set_schedule_callback(callable):
global _application_set_schedule_callback
old = _application_set_schedule_callback
_application_set_schedule_callback = callable
return old
def get_schedule_callback():
global _application_set_schedule_callback
return _application_set_schedule_callback
set_schedule_callback.__doc__ = stackless.set_schedule_callback.__doc__
if hasattr(stackless, "get_schedule_callback"):
get_schedule_callback.__doc__ = stackless.get_schedule_callback.__doc__
stackless.set_schedule_callback = set_schedule_callback
stackless.get_schedule_callback = get_schedule_callback
if not hasattr(stackless.tasklet, "trace_function"):
# Older versions of Stackless, released before 2014
__call__.__doc__ = stackless.tasklet.__call__.__doc__
stackless.tasklet.__call__ = __call__
setup.__doc__ = stackless.tasklet.setup.__doc__
stackless.tasklet.setup = setup
run.__doc__ = stackless.run.__doc__
stackless.run = run
patch_stackless = call_only_once(patch_stackless)
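# A minimal usage sketch (only meaningful where the stackless module is importable):
#
#     from _pydevd_bundle import pydevd_stackless
#     pydevd_stackless.patch_stackless()  # call_only_once makes repeated calls no-ops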
| 16,909 | Python | 39.551559 | 136 | 0.472411 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_trace_dispatch_regular.py | from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_bundle.pydev_log import exception as pydev_log_exception
from _pydev_bundle._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_constants import (get_current_thread_id, NO_FTRACE,
USE_CUSTOM_SYS_CURRENT_FRAMES_MAP, ForkSafeLock)
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER
# IFDEF CYTHON
# from cpython.object cimport PyObject
# from cpython.ref cimport Py_INCREF, Py_XDECREF
# ELSE
from _pydevd_bundle.pydevd_frame import PyDBFrame, is_unhandled_exception
# ENDIF
# IFDEF CYTHON
# cdef dict _global_notify_skipped_step_in
# cython_inline_constant: CMD_STEP_INTO = 107
# cython_inline_constant: CMD_STEP_INTO_MY_CODE = 144
# cython_inline_constant: CMD_STEP_RETURN = 109
# cython_inline_constant: CMD_STEP_RETURN_MY_CODE = 160
# ELSE
# Note: those are now inlined on cython.
CMD_STEP_INTO = 107
CMD_STEP_INTO_MY_CODE = 144
CMD_STEP_RETURN = 109
CMD_STEP_RETURN_MY_CODE = 160
# ENDIF
# Cache where we should keep that we completely skipped entering some context.
# It needs to be invalidated when:
# - Breakpoints are changed
# It can be used when running regularly (without step over/step in/step return)
global_cache_skips = {}
global_cache_frame_skips = {}
_global_notify_skipped_step_in = False
_global_notify_skipped_step_in_lock = ForkSafeLock()
def notify_skipped_step_in_because_of_filters(py_db, frame):
global _global_notify_skipped_step_in
with _global_notify_skipped_step_in_lock:
if _global_notify_skipped_step_in:
# Check with lock in place (callers should actually have checked
# before without the lock in place due to performance).
return
_global_notify_skipped_step_in = True
py_db.notify_skipped_step_in_because_of_filters(frame)
# IFDEF CYTHON
# cdef class SafeCallWrapper:
# cdef method_object
# def __init__(self, method_object):
# self.method_object = method_object
# def __call__(self, *args):
# #Cannot use 'self' once inside the delegate call since we are borrowing the self reference f_trace field
# #in the frame, and that reference might get destroyed by set trace on frame and parents
# cdef PyObject* method_obj = <PyObject*> self.method_object
# Py_INCREF(<object>method_obj)
# ret = (<object>method_obj)(*args)
# Py_XDECREF (method_obj)
# return SafeCallWrapper(ret) if ret is not None else None
# def get_method_object(self):
# return self.method_object
# ELSE
# ENDIF
def fix_top_level_trace_and_get_trace_func(py_db, frame):
# IFDEF CYTHON
# cdef str filename;
# cdef str name;
# cdef tuple args;
# ENDIF
# Note: this is always the first entry-point in the tracing for any thread.
# After entering here we'll set a new tracing function for this thread
# where more information is cached (and will also setup the tracing for
# frames where we should deal with unhandled exceptions).
thread = None
# Cache the frame which should be traced to deal with unhandled exceptions.
# (i.e.: thread entry-points).
f_unhandled = frame
# print('called at', f_unhandled.f_code.co_name, f_unhandled.f_code.co_filename, f_unhandled.f_code.co_firstlineno)
force_only_unhandled_tracer = False
while f_unhandled is not None:
# name = splitext(basename(f_unhandled.f_code.co_filename))[0]
name = f_unhandled.f_code.co_filename
# basename
i = name.rfind('/')
j = name.rfind('\\')
if j > i:
i = j
if i >= 0:
name = name[i + 1:]
# remove ext
i = name.rfind('.')
if i >= 0:
name = name[:i]
if name == 'threading':
if f_unhandled.f_code.co_name in ('__bootstrap', '_bootstrap'):
# We need __bootstrap_inner, not __bootstrap.
return None, False
elif f_unhandled.f_code.co_name in ('__bootstrap_inner', '_bootstrap_inner'):
# Note: be careful not to use threading.currentThread to avoid creating a dummy thread.
t = f_unhandled.f_locals.get('self')
force_only_unhandled_tracer = True
if t is not None and isinstance(t, threading.Thread):
thread = t
break
elif name == 'pydev_monkey':
if f_unhandled.f_code.co_name == '__call__':
force_only_unhandled_tracer = True
break
elif name == 'pydevd':
if f_unhandled.f_code.co_name in ('run', 'main'):
# We need to get to _exec
return None, False
if f_unhandled.f_code.co_name == '_exec':
force_only_unhandled_tracer = True
break
elif name == 'pydevd_tracing':
return None, False
elif f_unhandled.f_back is None:
break
f_unhandled = f_unhandled.f_back
if thread is None:
# Important: don't call threading.current_thread() if we're in the threading module
# to avoid creating dummy threads.
if py_db.threading_get_ident is not None:
thread = py_db.threading_active.get(py_db.threading_get_ident())
if thread is None:
return None, False
else:
# Jython does not have threading.get_ident().
thread = py_db.threading_current_thread()
if getattr(thread, 'pydev_do_not_trace', None):
py_db.disable_tracing()
return None, False
try:
additional_info = thread.additional_info
if additional_info is None:
raise AttributeError()
except:
additional_info = py_db.set_additional_thread_info(thread)
# print('enter thread tracer', thread, get_current_thread_id(thread))
args = (py_db, thread, additional_info, global_cache_skips, global_cache_frame_skips)
if f_unhandled is not None:
if f_unhandled.f_back is None and not force_only_unhandled_tracer:
# Happens when we attach to a running program (cannot reuse instance because it's mutable).
top_level_thread_tracer = TopLevelThreadTracerNoBackFrame(ThreadTracer(args), args)
additional_info.top_level_thread_tracer_no_back_frames.append(top_level_thread_tracer) # Hack for cython to keep it alive while the thread is alive (just the method in the SetTrace is not enough).
else:
top_level_thread_tracer = additional_info.top_level_thread_tracer_unhandled
if top_level_thread_tracer is None:
# Stop in some internal place to report about unhandled exceptions
top_level_thread_tracer = TopLevelThreadTracerOnlyUnhandledExceptions(args)
additional_info.top_level_thread_tracer_unhandled = top_level_thread_tracer # Hack for cython to keep it alive while the thread is alive (just the method in the SetTrace is not enough).
# print(' --> found to trace unhandled', f_unhandled.f_code.co_name, f_unhandled.f_code.co_filename, f_unhandled.f_code.co_firstlineno)
f_trace = top_level_thread_tracer.get_trace_dispatch_func()
# IFDEF CYTHON
# f_trace = SafeCallWrapper(f_trace)
# ENDIF
f_unhandled.f_trace = f_trace
if frame is f_unhandled:
return f_trace, False
thread_tracer = additional_info.thread_tracer
if thread_tracer is None or thread_tracer._args[0] is not py_db:
thread_tracer = ThreadTracer(args)
additional_info.thread_tracer = thread_tracer
# IFDEF CYTHON
# return SafeCallWrapper(thread_tracer), True
# ELSE
return thread_tracer, True
# ENDIF
def trace_dispatch(py_db, frame, event, arg):
thread_trace_func, apply_to_settrace = py_db.fix_top_level_trace_and_get_trace_func(py_db, frame)
if thread_trace_func is None:
return None if event == 'call' else NO_FTRACE
if apply_to_settrace:
py_db.enable_tracing(thread_trace_func)
return thread_trace_func(frame, event, arg)
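# A rough sketch of how a dispatch function like this gets installed (pydevd's real
# bootstrap goes through pydevd_tracing.SetTrace; `py_db` is hypothetical here):
#
#     import sys
#     sys.settrace(lambda frame, event, arg: trace_dispatch(py_db, frame, event, arg))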
# IFDEF CYTHON
# cdef class TopLevelThreadTracerOnlyUnhandledExceptions:
# cdef public tuple _args;
# def __init__(self, tuple args):
# self._args = args
# ELSE
class TopLevelThreadTracerOnlyUnhandledExceptions(object):
def __init__(self, args):
self._args = args
# ENDIF
def trace_unhandled_exceptions(self, frame, event, arg):
# Note that we ignore the frame as this tracing method should only be put in topmost frames already.
# print('trace_unhandled_exceptions', event, frame.f_code.co_name, frame.f_code.co_filename, frame.f_code.co_firstlineno)
if event == 'exception' and arg is not None:
py_db, t, additional_info = self._args[0:3]
if not additional_info.suspended_at_unhandled:
additional_info.suspended_at_unhandled = True
py_db.stop_on_unhandled_exception(py_db, t, additional_info, arg)
# No need to reset frame.f_trace to keep the same trace function.
return self.trace_unhandled_exceptions
def get_trace_dispatch_func(self):
return self.trace_unhandled_exceptions
# IFDEF CYTHON
# cdef class TopLevelThreadTracerNoBackFrame:
#
# cdef public object _frame_trace_dispatch;
# cdef public tuple _args;
# cdef public object try_except_infos;
# cdef public object _last_exc_arg;
# cdef public set _raise_lines;
# cdef public int _last_raise_line;
#
# def __init__(self, frame_trace_dispatch, tuple args):
# self._frame_trace_dispatch = frame_trace_dispatch
# self._args = args
# self.try_except_infos = None
# self._last_exc_arg = None
# self._raise_lines = set()
# self._last_raise_line = -1
# ELSE
class TopLevelThreadTracerNoBackFrame(object):
'''
This tracer is pretty special in that it's dealing with a frame without f_back (i.e.: top frame
on remote attach or QThread).
This means that we have to carefully inspect exceptions to discover whether the exception will
be unhandled or not: if we're dealing with an unhandled exception we need to stop as unhandled,
otherwise we need to use the regular tracer. Unfortunately the debugger has little info to
work with in the tracing (see: https://bugs.python.org/issue34099), so, we inspect bytecode to
determine if some exception will be traced or not... note that if this is not available -- such
as on Jython -- we consider any top-level exception to be unhandled).
'''
def __init__(self, frame_trace_dispatch, args):
self._frame_trace_dispatch = frame_trace_dispatch
self._args = args
self.try_except_infos = None
self._last_exc_arg = None
self._raise_lines = set()
self._last_raise_line = -1
# ENDIF
def trace_dispatch_and_unhandled_exceptions(self, frame, event, arg):
# DEBUG = 'code_to_debug' in frame.f_code.co_filename
# if DEBUG: print('trace_dispatch_and_unhandled_exceptions: %s %s %s %s %s %s' % (event, frame.f_code.co_name, frame.f_code.co_filename, frame.f_code.co_firstlineno, self._frame_trace_dispatch, frame.f_lineno))
frame_trace_dispatch = self._frame_trace_dispatch
if frame_trace_dispatch is not None:
self._frame_trace_dispatch = frame_trace_dispatch(frame, event, arg)
if event == 'exception':
self._last_exc_arg = arg
self._raise_lines.add(frame.f_lineno)
self._last_raise_line = frame.f_lineno
elif event == 'return' and self._last_exc_arg is not None:
# For unhandled exceptions we actually track the return when at the topmost level.
try:
py_db, t, additional_info = self._args[0:3]
if not additional_info.suspended_at_unhandled: # Note: only check it here, don't set.
if is_unhandled_exception(self, py_db, frame, self._last_raise_line, self._raise_lines):
py_db.stop_on_unhandled_exception(py_db, t, additional_info, self._last_exc_arg)
finally:
# Remove reference to exception after handling it.
self._last_exc_arg = None
ret = self.trace_dispatch_and_unhandled_exceptions
# Need to reset (the call to _frame_trace_dispatch may have changed it).
# IFDEF CYTHON
# frame.f_trace = SafeCallWrapper(ret)
# ELSE
frame.f_trace = ret
# ENDIF
return ret
def get_trace_dispatch_func(self):
return self.trace_dispatch_and_unhandled_exceptions
# IFDEF CYTHON
# cdef class ThreadTracer:
# cdef public tuple _args;
# def __init__(self, tuple args):
# self._args = args
# ELSE
class ThreadTracer(object):
def __init__(self, args):
self._args = args
# ENDIF
def __call__(self, frame, event, arg):
''' This is the callback used when we enter some context in the debugger.
We also decorate the thread we are in with info about the debugging.
The attributes added are:
pydev_state
pydev_step_stop
pydev_step_cmd
pydev_notify_kill
:param PyDB py_db:
This is the global debugger (this method should actually be added as a method to it).
'''
# IFDEF CYTHON
# cdef str filename;
# cdef str base;
# cdef int pydev_step_cmd;
# cdef object frame_cache_key;
# cdef dict cache_skips;
# cdef bint is_stepping;
# cdef tuple abs_path_canonical_path_and_base;
# cdef PyDBAdditionalThreadInfo additional_info;
# ENDIF
# DEBUG = 'code_to_debug' in frame.f_code.co_filename
# if DEBUG: print('ENTER: trace_dispatch: %s %s %s %s' % (frame.f_code.co_filename, frame.f_lineno, event, frame.f_code.co_name))
py_db, t, additional_info, cache_skips, frame_skips_cache = self._args
if additional_info.is_tracing:
return None if event == 'call' else NO_FTRACE # we don't want to trace code invoked from pydevd_frame.trace_dispatch
additional_info.is_tracing += 1
try:
pydev_step_cmd = additional_info.pydev_step_cmd
is_stepping = pydev_step_cmd != -1
if py_db.pydb_disposed:
return None if event == 'call' else NO_FTRACE
# if thread is not alive, cancel trace_dispatch processing
if not is_thread_alive(t):
py_db.notify_thread_not_alive(get_current_thread_id(t))
return None if event == 'call' else NO_FTRACE
# Note: it's important that the context name is also given because we may hit something once
# in the global context and another in the local context.
frame_cache_key = frame.f_code
if frame_cache_key in cache_skips:
if not is_stepping:
# if DEBUG: print('skipped: trace_dispatch (cache hit)', frame_cache_key, frame.f_lineno, event, frame.f_code.co_name)
return None if event == 'call' else NO_FTRACE
else:
# When stepping we can't take into account caching based on the breakpoints (only global filtering).
if cache_skips.get(frame_cache_key) == 1:
if additional_info.pydev_original_step_cmd in (CMD_STEP_INTO, CMD_STEP_INTO_MY_CODE) and not _global_notify_skipped_step_in:
notify_skipped_step_in_because_of_filters(py_db, frame)
back_frame = frame.f_back
if back_frame is not None and pydev_step_cmd in (CMD_STEP_INTO, CMD_STEP_INTO_MY_CODE, CMD_STEP_RETURN, CMD_STEP_RETURN_MY_CODE):
back_frame_cache_key = back_frame.f_code
if cache_skips.get(back_frame_cache_key) == 1:
# if DEBUG: print('skipped: trace_dispatch (cache hit: 1)', frame_cache_key, frame.f_lineno, event, frame.f_code.co_name)
return None if event == 'call' else NO_FTRACE
else:
# if DEBUG: print('skipped: trace_dispatch (cache hit: 2)', frame_cache_key, frame.f_lineno, event, frame.f_code.co_name)
return None if event == 'call' else NO_FTRACE
try:
# Make fast path faster!
abs_path_canonical_path_and_base = NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
abs_path_canonical_path_and_base = get_abs_path_real_path_and_base_from_frame(frame)
file_type = py_db.get_file_type(frame, abs_path_canonical_path_and_base) # we don't want to debug threading or anything related to pydevd
if file_type is not None:
if file_type == 1: # inlining LIB_FILE = 1
if not py_db.in_project_scope(frame, abs_path_canonical_path_and_base[0]):
# if DEBUG: print('skipped: trace_dispatch (not in scope)', abs_path_canonical_path_and_base[2], frame.f_lineno, event, frame.f_code.co_name, file_type)
cache_skips[frame_cache_key] = 1
return None if event == 'call' else NO_FTRACE
else:
# if DEBUG: print('skipped: trace_dispatch', abs_path_canonical_path_and_base[2], frame.f_lineno, event, frame.f_code.co_name, file_type)
cache_skips[frame_cache_key] = 1
return None if event == 'call' else NO_FTRACE
if py_db.is_files_filter_enabled:
if py_db.apply_files_filter(frame, abs_path_canonical_path_and_base[0], False):
cache_skips[frame_cache_key] = 1
if is_stepping and additional_info.pydev_original_step_cmd in (CMD_STEP_INTO, CMD_STEP_INTO_MY_CODE) and not _global_notify_skipped_step_in:
notify_skipped_step_in_because_of_filters(py_db, frame)
# A little gotcha, sometimes when we're stepping in we have to stop in a
# return event showing the back frame as the current frame, so, we need
# to check not only the current frame but the back frame too.
back_frame = frame.f_back
if back_frame is not None and pydev_step_cmd in (CMD_STEP_INTO, CMD_STEP_INTO_MY_CODE, CMD_STEP_RETURN, CMD_STEP_RETURN_MY_CODE):
if py_db.apply_files_filter(back_frame, back_frame.f_code.co_filename, False):
back_frame_cache_key = back_frame.f_code
cache_skips[back_frame_cache_key] = 1
# if DEBUG: print('skipped: trace_dispatch (filtered out: 1)', frame_cache_key, frame.f_lineno, event, frame.f_code.co_name)
return None if event == 'call' else NO_FTRACE
else:
# if DEBUG: print('skipped: trace_dispatch (filtered out: 2)', frame_cache_key, frame.f_lineno, event, frame.f_code.co_name)
return None if event == 'call' else NO_FTRACE
# if DEBUG: print('trace_dispatch', filename, frame.f_lineno, event, frame.f_code.co_name, file_type)
# Just create PyDBFrame directly (removed support for Python versions < 2.5, which required keeping a weak
# reference to the frame).
ret = PyDBFrame(
(
py_db, abs_path_canonical_path_and_base, additional_info, t, frame_skips_cache, frame_cache_key,
)
).trace_dispatch(frame, event, arg)
if ret is None:
# 1 means skipped because of filters.
# 2 means skipped because no breakpoints were hit.
cache_skips[frame_cache_key] = 2
return None if event == 'call' else NO_FTRACE
# IFDEF CYTHON
# frame.f_trace = SafeCallWrapper(ret) # Make sure we keep the returned tracer.
# ELSE
frame.f_trace = ret # Make sure we keep the returned tracer.
# ENDIF
return ret
except SystemExit:
return None if event == 'call' else NO_FTRACE
except Exception:
if py_db.pydb_disposed:
return None if event == 'call' else NO_FTRACE # Don't log errors when we're shutting down.
# Log it
try:
if pydev_log_exception is not None:
# This can actually happen during the interpreter shutdown in Python 2.7
pydev_log_exception()
except:
# Error logging? We're really in the interpreter shutdown...
# (https://github.com/fabioz/PyDev.Debugger/issues/8)
pass
return None if event == 'call' else NO_FTRACE
finally:
additional_info.is_tracing -= 1
if USE_CUSTOM_SYS_CURRENT_FRAMES_MAP:
# This is far from ideal, as we'll leak frames (we'll always have the last created frame, not really
# the last topmost frame saved -- this should be Ok for our usage, but it may leak frames and things
# may live longer... as IronPython is garbage-collected, things should live longer anyways, so, it
# shouldn't be as big an issue as it is in CPython -- it may still be annoying, but this should
# be a reasonable workaround until IronPython itself is able to provide that functionality).
#
# See: https://github.com/IronLanguages/main/issues/1630
from _pydevd_bundle.pydevd_constants import constructed_tid_to_last_frame
_original_call = ThreadTracer.__call__
def __call__(self, frame, event, arg):
constructed_tid_to_last_frame[self._args[1].ident] = frame
return _original_call(self, frame, event, arg)
ThreadTracer.__call__ = __call__
| 22,202 | Python | 44.219959 | 218 | 0.616836 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_utils.py | from __future__ import nested_scopes
import traceback
import warnings
from _pydev_bundle import pydev_log
from _pydev_bundle._pydev_saved_modules import thread, threading
from _pydev_bundle import _pydev_saved_modules
import signal
import os
import ctypes
from importlib import import_module
from urllib.parse import quote # @UnresolvedImport
import time
import inspect
import sys
from _pydevd_bundle.pydevd_constants import USE_CUSTOM_SYS_CURRENT_FRAMES, IS_PYPY, SUPPORT_GEVENT, \
GEVENT_SUPPORT_NOT_SET_MSG, GENERATED_LEN_ATTR_NAME, PYDEVD_WARN_SLOW_RESOLVE_TIMEOUT, \
get_global_debugger
def save_main_module(file, module_name):
# patch provided by: Scott Schlesier - when script is run, it does not
# use globals from pydevd:
# This will prevent the pydevd script from contaminating the namespace for the script to be debugged
# pretend pydevd is not the main module, and
# convince the file to be debugged that it was loaded as main
sys.modules[module_name] = sys.modules['__main__']
sys.modules[module_name].__name__ = module_name
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
warnings.simplefilter("ignore", category=PendingDeprecationWarning)
from imp import new_module
m = new_module('__main__')
sys.modules['__main__'] = m
if hasattr(sys.modules[module_name], '__loader__'):
m.__loader__ = getattr(sys.modules[module_name], '__loader__')
m.__file__ = file
return m
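# A minimal usage sketch (hypothetical script path; roughly how a runner could use it):
#
#     m = save_main_module('/path/to/user_script.py', 'pydevd')
#     exec(compile(open(m.__file__).read(), m.__file__, 'exec'), m.__dict__)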
def is_current_thread_main_thread():
if hasattr(threading, 'main_thread'):
return threading.current_thread() is threading.main_thread()
else:
return isinstance(threading.current_thread(), threading._MainThread)
def get_main_thread():
if hasattr(threading, 'main_thread'):
return threading.main_thread()
else:
for t in threading.enumerate():
if isinstance(t, threading._MainThread):
return t
return None
def to_number(x):
if is_string(x):
try:
n = float(x)
return n
except ValueError:
pass
l = x.find('(')
if l != -1:
y = x[0:l - 1]
# print y
try:
n = float(y)
return n
except ValueError:
pass
return None
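# Illustrative behavior (not exhaustive):
#
#     to_number('1.5')       # -> 1.5
#     to_number('10 (0xa)')  # -> 10.0 (numeric prefix before the ' (')
#     to_number('abc')       # -> None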
def compare_object_attrs_key(x):
if GENERATED_LEN_ATTR_NAME == x:
as_number = to_number(x)
if as_number is None:
as_number = 99999999
# len() should appear after other attributes in a list.
return (1, as_number)
else:
return (-1, to_string(x))
def is_string(x):
return isinstance(x, str)
def to_string(x):
if isinstance(x, str):
return x
else:
return str(x)
def print_exc():
if traceback:
traceback.print_exc()
def quote_smart(s, safe='/'):
return quote(s, safe)
def get_clsname_for_code(code, frame):
clsname = None
if len(code.co_varnames) > 0:
# We are checking the first argument of the function
# (`self` or `cls` for methods).
first_arg_name = code.co_varnames[0]
if first_arg_name in frame.f_locals:
first_arg_obj = frame.f_locals[first_arg_name]
if inspect.isclass(first_arg_obj): # class method
first_arg_class = first_arg_obj
else: # instance method
if hasattr(first_arg_obj, "__class__"):
first_arg_class = first_arg_obj.__class__
else: # old style class, fall back on type
first_arg_class = type(first_arg_obj)
func_name = code.co_name
if hasattr(first_arg_class, func_name):
method = getattr(first_arg_class, func_name)
func_code = None
if hasattr(method, 'func_code'): # Python2
func_code = method.func_code
elif hasattr(method, '__code__'): # Python3
func_code = method.__code__
if func_code and func_code == code:
clsname = first_arg_class.__name__
return clsname
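# Illustrative example (hypothetical):
#
#     class Foo(object):
#         def bar(self):
#             frame = sys._getframe()
#             return get_clsname_for_code(frame.f_code, frame)
#
#     Foo().bar()  # -> 'Foo'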
def get_non_pydevd_threads():
threads = threading.enumerate()
return [t for t in threads if t and not getattr(t, 'is_pydev_daemon_thread', False)]
if USE_CUSTOM_SYS_CURRENT_FRAMES and IS_PYPY:
# On PyPy we can use its fake_frames to get the traceback
# (instead of the actual real frames that need the tracing to be correct).
_tid_to_frame_for_dump_threads = sys._current_frames
else:
from _pydevd_bundle.pydevd_constants import _current_frames as _tid_to_frame_for_dump_threads
def dump_threads(stream=None, show_pydevd_threads=True):
'''
Helper to dump thread info.
'''
if stream is None:
stream = sys.stderr
thread_id_to_name_and_is_pydevd_thread = {}
try:
threading_enumerate = _pydev_saved_modules.pydevd_saved_threading_enumerate
if threading_enumerate is None:
threading_enumerate = threading.enumerate
for t in threading_enumerate():
is_pydevd_thread = getattr(t, 'is_pydev_daemon_thread', False)
thread_id_to_name_and_is_pydevd_thread[t.ident] = (
'%s (daemon: %s, pydevd thread: %s)' % (t.name, t.daemon, is_pydevd_thread),
is_pydevd_thread
)
except:
pass
stream.write('===============================================================================\n')
stream.write('Threads running\n')
stream.write('================================= Thread Dump =================================\n')
stream.flush()
for thread_id, frame in _tid_to_frame_for_dump_threads().items():
name, is_pydevd_thread = thread_id_to_name_and_is_pydevd_thread.get(thread_id, (thread_id, False))
if not show_pydevd_threads and is_pydevd_thread:
continue
stream.write('\n-------------------------------------------------------------------------------\n')
stream.write(" Thread %s" % (name,))
stream.write('\n\n')
for i, (filename, lineno, name, line) in enumerate(traceback.extract_stack(frame)):
stream.write(' File "%s", line %d, in %s\n' % (filename, lineno, name))
if line:
stream.write(" %s\n" % (line.strip()))
if i == 0 and 'self' in frame.f_locals:
stream.write(' self: ')
try:
stream.write(str(frame.f_locals['self']))
except:
stream.write('Unable to get str of: %s' % (type(frame.f_locals['self']),))
stream.write('\n')
stream.flush()
stream.write('\n=============================== END Thread Dump ===============================')
stream.flush()
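# A minimal usage sketch:
#
#     import io
#     buf = io.StringIO()
#     dump_threads(stream=buf, show_pydevd_threads=False)
#     print(buf.getvalue())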
def _extract_variable_nested_braces(char_iter):
expression = []
level = 0
for c in char_iter:
if c == '{':
level += 1
if c == '}':
level -= 1
if level == -1:
return ''.join(expression).strip()
expression.append(c)
raise SyntaxError('Unbalanced braces in expression.')
def _extract_expression_list(log_message):
# Note: not using re because of nested braces.
expression = []
expression_vars = []
char_iter = iter(log_message)
for c in char_iter:
if c == '{':
expression_var = _extract_variable_nested_braces(char_iter)
if expression_var:
expression.append('%s')
expression_vars.append(expression_var)
else:
expression.append(c)
expression = ''.join(expression)
return expression, expression_vars
def convert_dap_log_message_to_expression(log_message):
try:
expression, expression_vars = _extract_expression_list(log_message)
except SyntaxError:
return repr('Unbalanced braces in: %s' % (log_message))
if not expression_vars:
return repr(expression)
# Note: use '%' to be compatible with Python 2.6.
return repr(expression) + ' % (' + ', '.join(str(x) for x in expression_vars) + ',)'
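# Illustrative behavior:
#
#     convert_dap_log_message_to_expression('hit {x} at {y + 1}')
#     # -> "'hit %s at %s' % (x, y + 1,)"
#     convert_dap_log_message_to_expression('no braces')
#     # -> "'no braces'"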
def notify_about_gevent_if_needed(stream=None):
'''
When debugging with gevent, check that the gevent support flag is set if the user uses
gevent monkey-patching.
:return bool:
Returns True if a message had to be shown to the user and False otherwise.
'''
stream = stream if stream is not None else sys.stderr
if not SUPPORT_GEVENT:
gevent_monkey = sys.modules.get('gevent.monkey')
if gevent_monkey is not None:
try:
saved = gevent_monkey.saved
except AttributeError:
pydev_log.exception_once('Error checking for gevent monkey-patching.')
return False
if saved:
# Note: print to stderr as it may deadlock the debugger.
sys.stderr.write('%s\n' % (GEVENT_SUPPORT_NOT_SET_MSG,))
return True
return False
def hasattr_checked(obj, name):
try:
getattr(obj, name)
except:
# i.e.: Handle any exception, not only AttributeError.
return False
else:
return True
def getattr_checked(obj, name):
try:
return getattr(obj, name)
except:
# i.e.: Handle any exception, not only AttributeError.
return None
def dir_checked(obj):
try:
return dir(obj)
except:
return []
def isinstance_checked(obj, cls):
try:
return isinstance(obj, cls)
except:
return False
class ScopeRequest(object):
__slots__ = ['variable_reference', 'scope']
def __init__(self, variable_reference, scope):
assert scope in ('globals', 'locals')
self.variable_reference = variable_reference
self.scope = scope
def __eq__(self, o):
if isinstance(o, ScopeRequest):
return self.variable_reference == o.variable_reference and self.scope == o.scope
return False
def __ne__(self, o):
return not self == o
def __hash__(self):
return hash((self.variable_reference, self.scope))
class DAPGrouper(object):
'''
Note: this is a helper class to group variables on the debug adapter protocol (DAP). For
the xml protocol the type is just added to each variable and the UI can group/hide it as needed.
'''
SCOPE_SPECIAL_VARS = 'special variables'
SCOPE_PROTECTED_VARS = 'protected variables'
SCOPE_FUNCTION_VARS = 'function variables'
SCOPE_CLASS_VARS = 'class variables'
SCOPES_SORTED = [
SCOPE_SPECIAL_VARS,
SCOPE_PROTECTED_VARS,
SCOPE_FUNCTION_VARS,
SCOPE_CLASS_VARS,
]
__slots__ = ['variable_reference', 'scope', 'contents_debug_adapter_protocol']
def __init__(self, scope):
self.variable_reference = id(self)
self.scope = scope
self.contents_debug_adapter_protocol = []
def get_contents_debug_adapter_protocol(self):
return self.contents_debug_adapter_protocol[:]
def __eq__(self, o):
if isinstance(o, ScopeRequest):
return self.variable_reference == o.variable_reference and self.scope == o.scope
return False
def __ne__(self, o):
return not self == o
def __hash__(self):
return hash((self.variable_reference, self.scope))
def __repr__(self):
return ''
def __str__(self):
return ''
def interrupt_main_thread(main_thread):
'''
Generates a KeyboardInterrupt in the main thread by sending a Ctrl+C
or by calling thread.interrupt_main().
:param main_thread:
Needed because Jython needs main_thread._thread.interrupt() to be called.
Note: if unable to send a Ctrl+C, the KeyboardInterrupt will only be raised
when the next Python instruction is about to be executed (so, it won't interrupt
a sleep(1000)).
'''
pydev_log.debug('Interrupt main thread.')
called = False
try:
if os.name == 'posix':
# On Linux we can't interrupt 0 as in Windows because it's
# actually owned by a process -- on the good side, signals
# work much better on Linux!
os.kill(os.getpid(), signal.SIGINT)
called = True
elif os.name == 'nt':
# This generates a Ctrl+C only for the current process and not
# to the process group!
# Note: there doesn't seem to be any public documentation for this
# function (although it seems to be present from Windows Server 2003 SP1 onwards
# according to: https://www.geoffchappell.com/studies/windows/win32/kernel32/api/index.htm)
ctypes.windll.kernel32.CtrlRoutine(0)
# The code below is deprecated because it actually sends a Ctrl+C
# to the process group, so, if this was a process created without
# passing `CREATE_NEW_PROCESS_GROUP` the signal may be sent to the
# parent process and to sub-processes too (which is not ideal --
# for instance, when using pytest-xdist, it'll actually stop the
# testing, even when called in the subprocess).
# if hasattr_checked(signal, 'CTRL_C_EVENT'):
# os.kill(0, signal.CTRL_C_EVENT)
# else:
# # Python 2.6
# ctypes.windll.kernel32.GenerateConsoleCtrlEvent(0, 0)
called = True
except:
# If something went wrong, fallback to interrupting when the next
# Python instruction is being called.
pydev_log.exception('Error interrupting main thread (using fallback).')
if not called:
try:
# In this case, we don't really interrupt a sleep() nor IO operations
# (this makes the KeyboardInterrupt be sent only when the next Python
# instruction is about to be executed).
if hasattr(thread, 'interrupt_main'):
thread.interrupt_main()
else:
main_thread._thread.interrupt() # Jython
except:
pydev_log.exception('Error on interrupt main thread fallback.')
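# A minimal usage sketch:
#
#     interrupt_main_thread(get_main_thread())  # raises KeyboardInterrupt in the main thread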
class Timer(object):
def __init__(self, min_diff=PYDEVD_WARN_SLOW_RESOLVE_TIMEOUT):
self.min_diff = min_diff
self._curr_time = time.time()
def print_time(self, msg='Elapsed:'):
old = self._curr_time
new = self._curr_time = time.time()
diff = new - old
if diff >= self.min_diff:
print('%s: %.2fs' % (msg, diff))
def _report_slow(self, compute_msg, *args):
old = self._curr_time
new = self._curr_time = time.time()
diff = new - old
if diff >= self.min_diff:
py_db = get_global_debugger()
if py_db is not None:
msg = compute_msg(diff, *args)
py_db.writer.add_command(py_db.cmd_factory.make_warning_message(msg))
def report_if_compute_repr_attr_slow(self, attrs_tab_separated, attr_name, attr_type):
self._report_slow(self._compute_repr_slow, attrs_tab_separated, attr_name, attr_type)
def _compute_repr_slow(self, diff, attrs_tab_separated, attr_name, attr_type):
try:
attr_type = attr_type.__name__
except:
pass
if attrs_tab_separated:
return 'pydevd warning: Computing repr of %s.%s (%s) was slow (took %.2fs)\n' % (
attrs_tab_separated.replace('\t', '.'), attr_name, attr_type, diff)
else:
return 'pydevd warning: Computing repr of %s (%s) was slow (took %.2fs)\n' % (
attr_name, attr_type, diff)
def report_if_getting_attr_slow(self, cls, attr_name):
self._report_slow(self._compute_get_attr_slow, cls, attr_name)
def _compute_get_attr_slow(self, diff, cls, attr_name):
try:
cls = cls.__name__
except:
pass
return 'pydevd warning: Getting attribute %s.%s was slow (took %.2fs)\n' % (cls, attr_name, diff)
def import_attr_from_module(import_with_attr_access):
if '.' not in import_with_attr_access:
# We need at least one '.' (we don't support just the module import, we need the attribute access too).
raise ImportError('Unable to import module with attr access: %s' % (import_with_attr_access,))
module_name, attr_name = import_with_attr_access.rsplit('.', 1)
while True:
try:
mod = import_module(module_name)
except ImportError:
if '.' not in module_name:
raise ImportError('Unable to import module with attr access: %s' % (import_with_attr_access,))
module_name, new_attr_part = module_name.rsplit('.', 1)
attr_name = new_attr_part + '.' + attr_name
else:
# Ok, we got the base module, now, get the attribute we need.
try:
for attr in attr_name.split('.'):
mod = getattr(mod, attr)
return mod
except:
raise ImportError('Unable to import module with attr access: %s' % (import_with_attr_access,))
| 17,289 | Python | 32.769531 | 111 | 0.583955 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_vars.py | """ pydevd_vars deals with variables:
resolution/conversion to XML.
"""
import pickle
from _pydevd_bundle.pydevd_constants import get_frame, get_current_thread_id, \
iter_chars, silence_warnings_decorator, get_global_debugger
from _pydevd_bundle.pydevd_xml import ExceptionOnEvaluate, get_type, var_to_xml
from _pydev_bundle import pydev_log
import functools
from _pydevd_bundle.pydevd_thread_lifecycle import resume_threads, mark_thread_suspended, suspend_all_threads
from _pydevd_bundle.pydevd_comm_constants import CMD_SET_BREAK
import sys # @Reimport
from _pydev_bundle._pydev_saved_modules import threading
from _pydevd_bundle import pydevd_save_locals, pydevd_timeout, pydevd_constants
from _pydev_bundle.pydev_imports import Exec, execfile
from _pydevd_bundle.pydevd_utils import to_string
import inspect
from _pydevd_bundle.pydevd_daemon_thread import PyDBDaemonThread
from _pydevd_bundle.pydevd_save_locals import update_globals_and_locals
from functools import lru_cache
SENTINEL_VALUE = []
class VariableError(RuntimeError):
pass
def iter_frames(frame):
while frame is not None:
yield frame
frame = frame.f_back
frame = None
def dump_frames(thread_id):
sys.stdout.write('dumping frames\n')
if thread_id != get_current_thread_id(threading.current_thread()):
raise VariableError("find_frame: must execute on same thread")
frame = get_frame()
for frame in iter_frames(frame):
sys.stdout.write('%s\n' % pickle.dumps(frame))
@silence_warnings_decorator
def getVariable(dbg, thread_id, frame_id, scope, attrs):
"""
returns the value of a variable
:scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
BY_ID means we'll traverse the list of all objects alive to get the object.
:attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:note: when BY_ID is used, the frame_id is considered the id of the object to find and
not the frame (as we don't care about the frame in this case).
"""
if scope == 'BY_ID':
if thread_id != get_current_thread_id(threading.current_thread()):
raise VariableError("getVariable: must execute on same thread")
try:
import gc
objects = gc.get_objects()
except:
pass # Not all python variants have it.
else:
frame_id = int(frame_id)
for var in objects:
if id(var) == frame_id:
if attrs is not None:
attrList = attrs.split('\t')
for k in attrList:
_type, _type_name, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
# If it didn't return previously, we couldn't find it by id (i.e.: already garbage collected).
sys.stderr.write('Unable to find object with id: %s\n' % (frame_id,))
return None
frame = dbg.find_frame(thread_id, frame_id)
if frame is None:
return {}
if attrs is not None:
attrList = attrs.split('\t')
else:
attrList = []
# Note: str.replace returns a new string, so the result must be assigned back (otherwise it's a no-op).
attrList = [attr.replace("@_@TAB_CHAR@_@", '\t') for attr in attrList]
if scope == 'EXPRESSION':
for count in range(len(attrList)):
if count == 0:
# An Expression can be in any scope (globals/locals), therefore it needs to be evaluated as an expression
var = evaluate_expression(dbg, frame, attrList[count], False)
else:
_type, _type_name, resolver = get_type(var)
var = resolver.resolve(var, attrList[count])
else:
if scope == "GLOBAL":
var = frame.f_globals
del attrList[0] # globals are special, and they get a single dummy unused attribute
else:
# in a frame access both locals and globals as Python does
var = {}
var.update(frame.f_globals)
var.update(frame.f_locals)
for k in attrList:
_type, _type_name, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
def resolve_compound_variable_fields(dbg, thread_id, frame_id, scope, attrs):
"""
Resolve compound variable in debugger scopes by its name and attributes
:param thread_id: id of the variable's thread
:param frame_id: id of the variable's frame
:param scope: can be BY_ID, EXPRESSION, GLOBAL, LOCAL, FRAME
:param attrs: after reaching the proper scope, we have to get the attributes until we find
the proper location (i.e.: obj\tattr1\tattr2)
:return: a dictionary of the variable's fields
"""
var = getVariable(dbg, thread_id, frame_id, scope, attrs)
try:
_type, type_name, resolver = get_type(var)
return type_name, resolver.get_dictionary(var)
except:
pydev_log.exception('Error evaluating: thread_id: %s\nframe_id: %s\nscope: %s\nattrs: %s.',
thread_id, frame_id, scope, attrs)
def resolve_var_object(var, attrs):
"""
Resolve variable's attribute
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: a value of resolved variable's attribute
"""
if attrs is not None:
attr_list = attrs.split('\t')
else:
attr_list = []
for k in attr_list:
type, _type_name, resolver = get_type(var)
var = resolver.resolve(var, k)
return var
def resolve_compound_var_object_fields(var, attrs):
"""
Resolve compound variable by its object and attributes
:param var: an object of variable
:param attrs: a sequence of variable's attributes separated by \t (i.e.: obj\tattr1\tattr2)
:return: a dictionary of the variable's fields
"""
attr_list = attrs.split('\t')
for k in attr_list:
type, _type_name, resolver = get_type(var)
var = resolver.resolve(var, k)
try:
type, _type_name, resolver = get_type(var)
return resolver.get_dictionary(var)
except:
pydev_log.exception()
def custom_operation(dbg, thread_id, frame_id, scope, attrs, style, code_or_file, operation_fn_name):
"""
    We'll execute the code_or_file and then look up operation_fn_name in the resulting namespace, executing it with the given var.
    code_or_file: either some code (i.e.: from pprint import pprint) or a file to be executed.
    operation_fn_name: the name of the operation to execute after the exec (i.e.: pprint)
"""
expressionValue = getVariable(dbg, thread_id, frame_id, scope, attrs)
try:
namespace = {'__name__': '<custom_operation>'}
if style == "EXECFILE":
namespace['__file__'] = code_or_file
execfile(code_or_file, namespace, namespace)
else: # style == EXEC
namespace['__file__'] = '<customOperationCode>'
Exec(code_or_file, namespace, namespace)
return str(namespace[operation_fn_name](expressionValue))
except:
pydev_log.exception()
@lru_cache(3)
def _expression_to_evaluate(expression):
keepends = True
lines = expression.splitlines(keepends)
# find first non-empty line
chars_to_strip = 0
for line in lines:
if line.strip(): # i.e.: check first non-empty line
for c in iter_chars(line):
if c.isspace():
chars_to_strip += 1
else:
break
break
if chars_to_strip:
# I.e.: check that the chars we'll remove are really only whitespaces.
proceed = True
new_lines = []
for line in lines:
if not proceed:
break
for c in iter_chars(line[:chars_to_strip]):
if not c.isspace():
proceed = False
break
new_lines.append(line[chars_to_strip:])
if proceed:
if isinstance(expression, bytes):
expression = b''.join(new_lines)
else:
expression = u''.join(new_lines)
return expression
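# Illustrative example (added for clarity; not part of the original module):
# the common leading whitespace found on the first non-empty line is stripped,
# so an expression captured from an indented editor selection still compiles:
#
#     _expression_to_evaluate('    if x:\n        y = 1\n')
#     # -> 'if x:\n    y = 1\n'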
def eval_in_context(expression, global_vars, local_vars, py_db=None):
result = None
try:
compiled = compile_as_eval(expression)
is_async = inspect.CO_COROUTINE & compiled.co_flags == inspect.CO_COROUTINE
if is_async:
if py_db is None:
py_db = get_global_debugger()
if py_db is None:
raise RuntimeError('Cannot evaluate async without py_db.')
t = _EvalAwaitInNewEventLoop(py_db, compiled, global_vars, local_vars)
t.start()
t.join()
if t.exc:
raise t.exc[1].with_traceback(t.exc[2])
else:
result = t.evaluated_value
else:
result = eval(compiled, global_vars, local_vars)
except (Exception, KeyboardInterrupt):
etype, result, tb = sys.exc_info()
result = ExceptionOnEvaluate(result, etype, tb)
# Ok, we have the initial error message, but let's see if we're dealing with a name mangling error...
try:
if '.__' in expression:
# Try to handle '__' name mangling (for simple cases such as self.__variable.__another_var).
split = expression.split('.')
entry = split[0]
if local_vars is None:
local_vars = global_vars
curr = local_vars[entry] # Note: we want the KeyError if it's not there.
for entry in split[1:]:
if entry.startswith('__') and not hasattr(curr, entry):
entry = '_%s%s' % (curr.__class__.__name__, entry)
curr = getattr(curr, entry)
result = curr
except:
pass
return result
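# Illustrative example (added for clarity; not part of the original module):
# a watch such as 'self.__x' raises AttributeError when evaluated outside the
# class (no name mangling is applied by eval), so eval_in_context retries with
# the mangled '_ClassName__x' form.
def _example_eval_in_context_name_mangling():

    class C(object):

        def __init__(self):
            self.__x = 10

    return eval_in_context('self.__x', {}, {'self': C()})  # returns 10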
def _run_with_interrupt_thread(original_func, py_db, curr_thread, frame, expression, is_exec):
on_interrupt_threads = None
timeout_tracker = py_db.timeout_tracker # : :type timeout_tracker: TimeoutTracker
interrupt_thread_timeout = pydevd_constants.PYDEVD_INTERRUPT_THREAD_TIMEOUT
if interrupt_thread_timeout > 0:
on_interrupt_threads = pydevd_timeout.create_interrupt_this_thread_callback()
pydev_log.info('Doing evaluate with interrupt threads timeout: %s.', interrupt_thread_timeout)
if on_interrupt_threads is None:
return original_func(py_db, frame, expression, is_exec)
else:
with timeout_tracker.call_on_timeout(interrupt_thread_timeout, on_interrupt_threads):
return original_func(py_db, frame, expression, is_exec)
def _run_with_unblock_threads(original_func, py_db, curr_thread, frame, expression, is_exec):
on_timeout_unblock_threads = None
timeout_tracker = py_db.timeout_tracker # : :type timeout_tracker: TimeoutTracker
if py_db.multi_threads_single_notification:
unblock_threads_timeout = pydevd_constants.PYDEVD_UNBLOCK_THREADS_TIMEOUT
else:
unblock_threads_timeout = -1 # Don't use this if threads are managed individually.
if unblock_threads_timeout >= 0:
pydev_log.info('Doing evaluate with unblock threads timeout: %s.', unblock_threads_timeout)
tid = get_current_thread_id(curr_thread)
def on_timeout_unblock_threads():
on_timeout_unblock_threads.called = True
pydev_log.info('Resuming threads after evaluate timeout.')
resume_threads('*', except_thread=curr_thread)
py_db.threads_suspended_single_notification.on_thread_resume(tid)
on_timeout_unblock_threads.called = False
try:
if on_timeout_unblock_threads is None:
return _run_with_interrupt_thread(original_func, py_db, curr_thread, frame, expression, is_exec)
else:
with timeout_tracker.call_on_timeout(unblock_threads_timeout, on_timeout_unblock_threads):
return _run_with_interrupt_thread(original_func, py_db, curr_thread, frame, expression, is_exec)
finally:
if on_timeout_unblock_threads is not None and on_timeout_unblock_threads.called:
mark_thread_suspended(curr_thread, CMD_SET_BREAK)
py_db.threads_suspended_single_notification.increment_suspend_time()
suspend_all_threads(py_db, except_thread=curr_thread)
py_db.threads_suspended_single_notification.on_thread_suspend(tid, CMD_SET_BREAK)
def _evaluate_with_timeouts(original_func):
'''
Provides a decorator that wraps the original evaluate to deal with slow evaluates.
If some evaluation is too slow, we may show a message, resume threads or interrupt them
as needed (based on the related configurations).
'''
@functools.wraps(original_func)
def new_func(py_db, frame, expression, is_exec):
if py_db is None:
# Only for testing...
pydev_log.critical('_evaluate_with_timeouts called without py_db!')
return original_func(py_db, frame, expression, is_exec)
warn_evaluation_timeout = pydevd_constants.PYDEVD_WARN_EVALUATION_TIMEOUT
curr_thread = threading.current_thread()
def on_warn_evaluation_timeout():
py_db.writer.add_command(py_db.cmd_factory.make_evaluation_timeout_msg(
py_db, expression, curr_thread))
timeout_tracker = py_db.timeout_tracker # : :type timeout_tracker: TimeoutTracker
with timeout_tracker.call_on_timeout(warn_evaluation_timeout, on_warn_evaluation_timeout):
return _run_with_unblock_threads(original_func, py_db, curr_thread, frame, expression, is_exec)
return new_func
_ASYNC_COMPILE_FLAGS = None
try:
from ast import PyCF_ALLOW_TOP_LEVEL_AWAIT
_ASYNC_COMPILE_FLAGS = PyCF_ALLOW_TOP_LEVEL_AWAIT
except:
pass
def compile_as_eval(expression):
'''
:param expression:
The expression to be _compiled.
:return: code object
:raises Exception if the expression cannot be evaluated.
'''
expression_to_evaluate = _expression_to_evaluate(expression)
if _ASYNC_COMPILE_FLAGS is not None:
return compile(expression_to_evaluate, '<string>', 'eval', _ASYNC_COMPILE_FLAGS)
else:
return compile(expression_to_evaluate, '<string>', 'eval')
def _compile_as_exec(expression):
'''
:param expression:
The expression to be _compiled.
:return: code object
:raises Exception if the expression cannot be evaluated.
'''
expression_to_evaluate = _expression_to_evaluate(expression)
if _ASYNC_COMPILE_FLAGS is not None:
return compile(expression_to_evaluate, '<string>', 'exec', _ASYNC_COMPILE_FLAGS)
else:
return compile(expression_to_evaluate, '<string>', 'exec')
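# Illustrative note (added for clarity; not part of the original module): with
# PyCF_ALLOW_TOP_LEVEL_AWAIT available (Python 3.8+), an expression such as
# 'await asyncio.sleep(0)' compiles to a code object with inspect.CO_COROUTINE
# set in co_flags, which is exactly what the callers below check:
#
#     compiled = compile_as_eval('await asyncio.sleep(0)')
#     bool(compiled.co_flags & inspect.CO_COROUTINE)  # -> True on Python 3.8+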
class _EvalAwaitInNewEventLoop(PyDBDaemonThread):
def __init__(self, py_db, compiled, updated_globals, updated_locals):
PyDBDaemonThread.__init__(self, py_db)
self._compiled = compiled
self._updated_globals = updated_globals
self._updated_locals = updated_locals
# Output
self.evaluated_value = None
self.exc = None
async def _async_func(self):
return await eval(self._compiled, self._updated_locals, self._updated_globals)
def _on_run(self):
try:
import asyncio
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.evaluated_value = asyncio.run(self._async_func())
except:
self.exc = sys.exc_info()
@_evaluate_with_timeouts
def evaluate_expression(py_db, frame, expression, is_exec):
'''
:param str expression:
The expression to be evaluated.
Note that if the expression is indented it's automatically dedented (based on the indentation
found on the first non-empty line).
        i.e.: something as:
        `
            def method():
                a = 1
        `
        becomes:
        `
        def method():
            a = 1
        `
Also, it's possible to evaluate calls with a top-level await (currently this is done by
creating a new event loop in a new thread and making the evaluate at that thread -- note
that this is still done synchronously so the evaluation has to finish before this
function returns).
:param is_exec: determines if we should do an exec or an eval.
There are some changes in this function depending on whether it's an exec or an eval.
When it's an exec (i.e.: is_exec==True):
This function returns None.
Any exception that happens during the evaluation is reraised.
If the expression could actually be evaluated, the variable is printed to the console if not None.
When it's an eval (i.e.: is_exec==False):
This function returns the result from the evaluation.
            If some exception happens in this case, the exception is caught and an ExceptionOnEvaluate is returned.
Also, in this case we try to resolve name-mangling (i.e.: to be able to add a self.__my_var watch).
:param py_db:
The debugger. Only needed if some top-level await is detected (for creating a
PyDBDaemonThread).
'''
if frame is None:
return
# This is very tricky. Some statements can change locals and use them in the same
# call (see https://github.com/microsoft/debugpy/issues/815), also, if locals and globals are
# passed separately, it's possible that one gets updated but apparently Python will still
# try to load from the other, so, what's done is that we merge all in a single dict and
# then go on and update the frame with the results afterwards.
# -- see tests in test_evaluate_expression.py
# This doesn't work because the variables aren't updated in the locals in case the
# evaluation tries to set a variable and use it in the same expression.
# updated_globals = frame.f_globals
# updated_locals = frame.f_locals
# This doesn't work because the variables aren't updated in the locals in case the
# evaluation tries to set a variable and use it in the same expression.
# updated_globals = {}
# updated_globals.update(frame.f_globals)
# updated_globals.update(frame.f_locals)
#
# updated_locals = frame.f_locals
# This doesn't work either in the case where the evaluation tries to set a variable and use
# it in the same expression (I really don't know why as it seems like this *should* work
# in theory but doesn't in practice).
# updated_globals = {}
# updated_globals.update(frame.f_globals)
#
# updated_locals = {}
# updated_globals.update(frame.f_locals)
# This is the only case that worked consistently to run the tests in test_evaluate_expression.py
# It's a bit unfortunate because although the exec works in this case, we have to manually
# put the updates in the frame locals afterwards.
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals)
initial_globals = updated_globals.copy()
updated_locals = None
try:
expression = expression.replace('@LINE@', '\n')
if is_exec:
try:
# Try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and
# it will have whatever the user actually did)
compiled = compile_as_eval(expression)
except Exception:
compiled = None
if compiled is None:
try:
compiled = _compile_as_exec(expression)
is_async = inspect.CO_COROUTINE & compiled.co_flags == inspect.CO_COROUTINE
if is_async:
t = _EvalAwaitInNewEventLoop(py_db, compiled, updated_globals, updated_locals)
t.start()
t.join()
if t.exc:
raise t.exc[1].with_traceback(t.exc[2])
else:
Exec(compiled, updated_globals, updated_locals)
finally:
# Update the globals even if it errored as it may have partially worked.
update_globals_and_locals(updated_globals, initial_globals, frame)
else:
is_async = inspect.CO_COROUTINE & compiled.co_flags == inspect.CO_COROUTINE
if is_async:
t = _EvalAwaitInNewEventLoop(py_db, compiled, updated_globals, updated_locals)
t.start()
t.join()
if t.exc:
raise t.exc[1].with_traceback(t.exc[2])
else:
result = t.evaluated_value
else:
result = eval(compiled, updated_globals, updated_locals)
if result is not None: # Only print if it's not None (as python does)
sys.stdout.write('%s\n' % (result,))
return
else:
ret = eval_in_context(expression, updated_globals, updated_locals, py_db)
try:
is_exception_returned = ret.__class__ == ExceptionOnEvaluate
except:
pass
else:
if not is_exception_returned:
# i.e.: by using a walrus assignment (:=), expressions can change the locals,
# so, make sure that we save the locals back to the frame.
update_globals_and_locals(updated_globals, initial_globals, frame)
return ret
finally:
# Should not be kept alive if an exception happens and this frame is kept in the stack.
del updated_globals
del updated_locals
del initial_globals
del frame
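# Illustrative repro (added for clarity; not part of the original module) of why
# locals and globals are merged above: a comprehension body runs in its own
# scope, so a name coming from a separate locals dict is looked up in globals
# and fails:
#
#     exec("out = [x for _ in range(1)]", {}, {'x': 1})  # NameError: name 'x' is not defined
#     exec("out = [x for _ in range(1)]", {'x': 1})      # works: single merged dict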
def change_attr_expression(frame, attr, expression, dbg, value=SENTINEL_VALUE):
'''Changes some attribute in a given frame.
'''
if frame is None:
return
try:
expression = expression.replace('@LINE@', '\n')
if dbg.plugin and value is SENTINEL_VALUE:
result = dbg.plugin.change_variable(frame, attr, expression)
if result:
return result
if attr[:7] == "Globals":
attr = attr[8:]
if attr in frame.f_globals:
if value is SENTINEL_VALUE:
value = eval(expression, frame.f_globals, frame.f_locals)
frame.f_globals[attr] = value
return frame.f_globals[attr]
else:
if '.' not in attr: # i.e.: if we have a '.', we're changing some attribute of a local var.
if pydevd_save_locals.is_save_locals_available():
if value is SENTINEL_VALUE:
value = eval(expression, frame.f_globals, frame.f_locals)
frame.f_locals[attr] = value
pydevd_save_locals.save_locals(frame)
return frame.f_locals[attr]
# i.e.: case with '.' or save locals not available (just exec the assignment in the frame).
if value is SENTINEL_VALUE:
value = eval(expression, frame.f_globals, frame.f_locals)
result = value
Exec('%s=%s' % (attr, expression), frame.f_globals, frame.f_locals)
return result
except Exception:
pydev_log.exception()
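# Illustrative note (added; the exact separator is an assumption, not confirmed
# by this module): attributes from the global scope arrive prefixed with
# 'Globals' plus a one-character separator, so attr[8:] above recovers the plain
# name (e.g. an attr of 'Globals\tcounter' would target frame.f_globals['counter']).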
MAXIMUM_ARRAY_SIZE = 100
MAX_SLICE_SIZE = 1000
def table_like_struct_to_xml(array, name, roffset, coffset, rows, cols, format):
_, type_name, _ = get_type(array)
if type_name == 'ndarray':
array, metaxml, r, c, f = array_to_meta_xml(array, name, format)
xml = metaxml
format = '%' + f
if rows == -1 and cols == -1:
rows = r
cols = c
xml += array_to_xml(array, roffset, coffset, rows, cols, format)
elif type_name == 'DataFrame':
xml = dataframe_to_xml(array, name, roffset, coffset, rows, cols, format)
else:
raise VariableError("Do not know how to convert type %s to table" % (type_name))
return "<xml>%s</xml>" % xml
def array_to_xml(array, roffset, coffset, rows, cols, format):
xml = ""
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(cols, MAXIMUM_ARRAY_SIZE)
# there is no obvious rule for slicing (at least 5 choices)
if len(array) == 1 and (rows > 1 or cols > 1):
array = array[0]
if array.size > len(array):
array = array[roffset:, coffset:]
rows = min(rows, len(array))
cols = min(cols, len(array[0]))
if len(array) == 1:
array = array[0]
elif array.size == len(array):
if roffset == 0 and rows == 1:
array = array[coffset:]
cols = min(cols, len(array))
elif coffset == 0 and cols == 1:
array = array[roffset:]
rows = min(rows, len(array))
xml += "<arraydata rows=\"%s\" cols=\"%s\"/>" % (rows, cols)
for row in range(rows):
xml += "<row index=\"%s\"/>" % to_string(row)
for col in range(cols):
value = array
if rows == 1 or cols == 1:
if rows == 1 and cols == 1:
value = array[0]
else:
if rows == 1:
dim = col
else:
dim = row
value = array[dim]
if "ndarray" in str(type(value)):
value = value[0]
else:
value = array[row][col]
value = format % value
xml += var_to_xml(value, '')
return xml
def array_to_meta_xml(array, name, format):
type = array.dtype.kind
slice = name
l = len(array.shape)
# initial load, compute slice
if format == '%':
if l > 2:
slice += '[0]' * (l - 2)
for r in range(l - 2):
array = array[0]
if type == 'f':
format = '.5f'
elif type == 'i' or type == 'u':
format = 'd'
else:
format = 's'
else:
format = format.replace('%', '')
l = len(array.shape)
reslice = ""
if l > 2:
raise Exception("%s has more than 2 dimensions." % slice)
elif l == 1:
        # special case with 1D arrays: arr[i, :] is a row while arr[:, i] is a column, yet both have equal shape and ndim
        # http://stackoverflow.com/questions/16837946/numpy-a-2-rows-1-column-file-loadtxt-returns-1row-2-columns
        # explanation: http://stackoverflow.com/questions/15165170/how-do-i-maintain-row-column-orientation-of-vectors-in-numpy?rq=1
        # we use kind of a hack here: get memory-layout information from the C_CONTIGUOUS flag
is_row = array.flags['C_CONTIGUOUS']
if is_row:
rows = 1
cols = min(len(array), MAX_SLICE_SIZE)
if cols < len(array):
reslice = '[0:%s]' % (cols)
array = array[0:cols]
else:
cols = 1
rows = min(len(array), MAX_SLICE_SIZE)
if rows < len(array):
reslice = '[0:%s]' % (rows)
array = array[0:rows]
elif l == 2:
rows = min(array.shape[-2], MAX_SLICE_SIZE)
cols = min(array.shape[-1], MAX_SLICE_SIZE)
if cols < array.shape[-1] or rows < array.shape[-2]:
reslice = '[0:%s, 0:%s]' % (rows, cols)
array = array[0:rows, 0:cols]
# avoid slice duplication
if not slice.endswith(reslice):
slice += reslice
bounds = (0, 0)
if type in "biufc":
bounds = (array.min(), array.max())
xml = '<array slice=\"%s\" rows=\"%s\" cols=\"%s\" format=\"%s\" type=\"%s\" max=\"%s\" min=\"%s\"/>' % \
(slice, rows, cols, format, type, bounds[1], bounds[0])
return array, xml, rows, cols, format
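# Illustrative example (added for clarity; not part of the original module):
# telling a 1D row apart from a 1D column by memory layout, as done above.
#
#     import numpy as np
#     a = np.zeros((3, 4))
#     a[0, :].flags['C_CONTIGUOUS']  # True  -> treated as a row
#     a[:, 0].flags['C_CONTIGUOUS']  # False -> treated as a column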
def dataframe_to_xml(df, name, roffset, coffset, rows, cols, format):
"""
:type df: pandas.core.frame.DataFrame
:type name: str
:type coffset: int
:type roffset: int
:type rows: int
:type cols: int
:type format: str
"""
num_rows = min(df.shape[0], MAX_SLICE_SIZE)
num_cols = min(df.shape[1], MAX_SLICE_SIZE)
if (num_rows, num_cols) != df.shape:
df = df.iloc[0:num_rows, 0: num_cols]
slice = '.iloc[0:%s, 0:%s]' % (num_rows, num_cols)
else:
slice = ''
slice = name + slice
xml = '<array slice=\"%s\" rows=\"%s\" cols=\"%s\" format=\"\" type=\"\" max=\"0\" min=\"0\"/>\n' % \
(slice, num_rows, num_cols)
if (rows, cols) == (-1, -1):
rows, cols = num_rows, num_cols
rows = min(rows, MAXIMUM_ARRAY_SIZE)
cols = min(min(cols, MAXIMUM_ARRAY_SIZE), num_cols)
# need to precompute column bounds here before slicing!
col_bounds = [None] * cols
for col in range(cols):
dtype = df.dtypes.iloc[coffset + col].kind
if dtype in "biufc":
cvalues = df.iloc[:, coffset + col]
bounds = (cvalues.min(), cvalues.max())
else:
bounds = (0, 0)
col_bounds[col] = bounds
df = df.iloc[roffset: roffset + rows, coffset: coffset + cols]
rows, cols = df.shape
xml += "<headerdata rows=\"%s\" cols=\"%s\">\n" % (rows, cols)
format = format.replace('%', '')
col_formats = []
get_label = lambda label: str(label) if not isinstance(label, tuple) else '/'.join(map(str, label))
for col in range(cols):
dtype = df.dtypes.iloc[col].kind
if dtype == 'f' and format:
fmt = format
elif dtype == 'f':
fmt = '.5f'
elif dtype == 'i' or dtype == 'u':
fmt = 'd'
else:
fmt = 's'
col_formats.append('%' + fmt)
bounds = col_bounds[col]
xml += '<colheader index=\"%s\" label=\"%s\" type=\"%s\" format=\"%s\" max=\"%s\" min=\"%s\" />\n' % \
(str(col), get_label(df.axes[1].values[col]), dtype, fmt, bounds[1], bounds[0])
for row, label in enumerate(iter(df.axes[0])):
xml += "<rowheader index=\"%s\" label = \"%s\"/>\n" % \
(str(row), get_label(label))
xml += "</headerdata>\n"
xml += "<arraydata rows=\"%s\" cols=\"%s\"/>\n" % (rows, cols)
for row in range(rows):
xml += "<row index=\"%s\"/>\n" % str(row)
for col in range(cols):
value = df.iat[row, col]
value = col_formats[col] % value
xml += var_to_xml(value, '')
return xml
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_defaults.py

'''
This module holds the customization settings for the debugger.
'''
from _pydevd_bundle.pydevd_constants import QUOTED_LINE_PROTOCOL
class PydevdCustomization(object):
DEFAULT_PROTOCOL = QUOTED_LINE_PROTOCOL
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_signature.py

from _pydev_bundle import pydev_log
try:
import trace
except ImportError:
pass
else:
trace._warn = lambda *args: None # workaround for http://bugs.python.org/issue17143 (PY-8706)
import os
from _pydevd_bundle.pydevd_comm import CMD_SIGNATURE_CALL_TRACE, NetCommand
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle.pydevd_utils import get_clsname_for_code
class Signature(object):
def __init__(self, file, name):
self.file = file
self.name = name
self.args = []
self.args_str = []
self.return_type = None
def add_arg(self, name, type):
self.args.append((name, type))
self.args_str.append("%s:%s" % (name, type))
def set_args(self, frame, recursive=False):
self.args = []
code = frame.f_code
locals = frame.f_locals
for i in range(0, code.co_argcount):
name = code.co_varnames[i]
class_name = get_type_of_value(locals[name], recursive=recursive)
self.add_arg(name, class_name)
def __str__(self):
return "%s %s(%s)" % (self.file, self.name, ", ".join(self.args_str))
def get_type_of_value(value, ignore_module_name=('__main__', '__builtin__', 'builtins'), recursive=False):
tp = type(value)
class_name = tp.__name__
if class_name == 'instance': # old-style classes
tp = value.__class__
class_name = tp.__name__
if hasattr(tp, '__module__') and tp.__module__ and tp.__module__ not in ignore_module_name:
class_name = "%s.%s" % (tp.__module__, class_name)
if class_name == 'list':
class_name = 'List'
if len(value) > 0 and recursive:
class_name += '[%s]' % get_type_of_value(value[0], recursive=recursive)
return class_name
if class_name == 'dict':
class_name = 'Dict'
if len(value) > 0 and recursive:
for (k, v) in value.items():
class_name += '[%s, %s]' % (get_type_of_value(k, recursive=recursive),
get_type_of_value(v, recursive=recursive))
break
return class_name
if class_name == 'tuple':
class_name = 'Tuple'
if len(value) > 0 and recursive:
class_name += '['
class_name += ', '.join(get_type_of_value(v, recursive=recursive) for v in value)
class_name += ']'
return class_name
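# Illustrative example (added for clarity; not part of the original module):
# with recursive=True the container type names nest:
#
#     get_type_of_value([1, 2], recursive=True)      # -> 'List[int]'
#     get_type_of_value((1, 'a'), recursive=True)    # -> 'Tuple[int, str]'
#     get_type_of_value({'a': 1.0}, recursive=True)  # -> 'Dict[str, float]'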
def _modname(path):
"""Return a plausible module name for the path"""
base = os.path.basename(path)
filename, ext = os.path.splitext(base)
return filename
class SignatureFactory(object):
def __init__(self):
self._caller_cache = {}
self.cache = CallSignatureCache()
def create_signature(self, frame, filename, with_args=True):
try:
_, modulename, funcname = self.file_module_function_of(frame)
signature = Signature(filename, funcname)
if with_args:
signature.set_args(frame, recursive=True)
return signature
except:
pydev_log.exception()
    def file_module_function_of(self, frame):  # this code is taken from the trace module and fixed to work with new-style classes
code = frame.f_code
filename = code.co_filename
if filename:
modulename = _modname(filename)
else:
modulename = None
funcname = code.co_name
clsname = None
if code in self._caller_cache:
if self._caller_cache[code] is not None:
clsname = self._caller_cache[code]
else:
self._caller_cache[code] = None
clsname = get_clsname_for_code(code, frame)
if clsname is not None:
# cache the result - assumption is that new.* is
# not called later to disturb this relationship
# _caller_cache could be flushed if functions in
# the new module get called.
self._caller_cache[code] = clsname
if clsname is not None:
funcname = "%s.%s" % (clsname, funcname)
return filename, modulename, funcname
def get_signature_info(signature):
return signature.file, signature.name, ' '.join([arg[1] for arg in signature.args])
def get_frame_info(frame):
co = frame.f_code
return co.co_name, frame.f_lineno, co.co_filename
class CallSignatureCache(object):
def __init__(self):
self.cache = {}
def add(self, signature):
filename, name, args_type = get_signature_info(signature)
calls_from_file = self.cache.setdefault(filename, {})
name_calls = calls_from_file.setdefault(name, {})
name_calls[args_type] = None
def is_in_cache(self, signature):
filename, name, args_type = get_signature_info(signature)
if args_type in self.cache.get(filename, {}).get(name, {}):
return True
return False
def create_signature_message(signature):
cmdTextList = ["<xml>"]
cmdTextList.append('<call_signature file="%s" name="%s">' % (pydevd_xml.make_valid_xml_value(signature.file), pydevd_xml.make_valid_xml_value(signature.name)))
for arg in signature.args:
cmdTextList.append('<arg name="%s" type="%s"></arg>' % (pydevd_xml.make_valid_xml_value(arg[0]), pydevd_xml.make_valid_xml_value(arg[1])))
if signature.return_type is not None:
cmdTextList.append('<return type="%s"></return>' % (pydevd_xml.make_valid_xml_value(signature.return_type)))
cmdTextList.append("</call_signature></xml>")
cmdText = ''.join(cmdTextList)
return NetCommand(CMD_SIGNATURE_CALL_TRACE, 0, cmdText)
def send_signature_call_trace(dbg, frame, filename):
if dbg.signature_factory and dbg.in_project_scope(frame):
signature = dbg.signature_factory.create_signature(frame, filename)
if signature is not None:
if dbg.signature_factory.cache is not None:
if not dbg.signature_factory.cache.is_in_cache(signature):
dbg.signature_factory.cache.add(signature)
dbg.writer.add_command(create_signature_message(signature))
return True
else:
# we don't send signature if it is cached
return False
else:
dbg.writer.add_command(create_signature_message(signature))
return True
return False
def send_signature_return_trace(dbg, frame, filename, return_value):
if dbg.signature_factory and dbg.in_project_scope(frame):
signature = dbg.signature_factory.create_signature(frame, filename, with_args=False)
signature.return_type = get_type_of_value(return_value, recursive=True)
dbg.writer.add_command(create_signature_message(signature))
return True
return False
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_timeout.py

from _pydev_bundle._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_daemon_thread import PyDBDaemonThread
from _pydevd_bundle.pydevd_constants import thread_get_ident, IS_CPYTHON, NULL
import ctypes
import time
from _pydev_bundle import pydev_log
import weakref
from _pydevd_bundle.pydevd_utils import is_current_thread_main_thread
from _pydevd_bundle import pydevd_utils
_DEBUG = False # Default should be False as this can be very verbose.
class _TimeoutThread(PyDBDaemonThread):
'''
The idea in this class is that it should be usually stopped waiting
for the next event to be called (paused in a threading.Event.wait).
When a new handle is added it sets the event so that it processes the handles and
then keeps on waiting as needed again.
This is done so that it's a bit more optimized than creating many Timer threads.
'''
def __init__(self, py_db):
PyDBDaemonThread.__init__(self, py_db)
self._event = threading.Event()
self._handles = []
        # We could probably make this work without the lock (so that handles could be
        # added while processing), but the implementation would also be harder to follow,
        # so, for now, we're either processing or adding handles, not both at the same time.
self._lock = threading.Lock()
def _on_run(self):
wait_time = None
while not self._kill_received:
if _DEBUG:
if wait_time is None:
pydev_log.critical('pydevd_timeout: Wait until a new handle is added.')
else:
pydev_log.critical('pydevd_timeout: Next wait time: %s.', wait_time)
self._event.wait(wait_time)
if self._kill_received:
self._handles = []
return
wait_time = self.process_handles()
def process_handles(self):
'''
:return int:
Returns the time we should be waiting for to process the next event properly.
'''
with self._lock:
if _DEBUG:
pydev_log.critical('pydevd_timeout: Processing handles')
self._event.clear()
handles = self._handles
new_handles = self._handles = []
# Do all the processing based on this time (we want to consider snapshots
# of processing time -- anything not processed now may be processed at the
# next snapshot).
curtime = time.time()
min_handle_timeout = None
for handle in handles:
if curtime < handle.abs_timeout and not handle.disposed:
# It still didn't time out.
if _DEBUG:
pydev_log.critical('pydevd_timeout: Handle NOT processed: %s', handle)
new_handles.append(handle)
if min_handle_timeout is None:
min_handle_timeout = handle.abs_timeout
elif handle.abs_timeout < min_handle_timeout:
min_handle_timeout = handle.abs_timeout
else:
if _DEBUG:
pydev_log.critical('pydevd_timeout: Handle processed: %s', handle)
# Timed out (or disposed), so, let's execute it (should be no-op if disposed).
handle.exec_on_timeout()
if min_handle_timeout is None:
return None
else:
timeout = min_handle_timeout - curtime
if timeout <= 0:
pydev_log.critical('pydevd_timeout: Expected timeout to be > 0. Found: %s', timeout)
return timeout
def do_kill_pydev_thread(self):
PyDBDaemonThread.do_kill_pydev_thread(self)
with self._lock:
self._event.set()
def add_on_timeout_handle(self, handle):
with self._lock:
self._handles.append(handle)
self._event.set()
class _OnTimeoutHandle(object):
def __init__(self, tracker, abs_timeout, on_timeout, kwargs):
self._str = '_OnTimeoutHandle(%s)' % (on_timeout,)
self._tracker = weakref.ref(tracker)
self.abs_timeout = abs_timeout
self.on_timeout = on_timeout
if kwargs is None:
kwargs = {}
self.kwargs = kwargs
self.disposed = False
def exec_on_timeout(self):
# Note: lock should already be obtained when executing this function.
kwargs = self.kwargs
on_timeout = self.on_timeout
if not self.disposed:
self.disposed = True
self.kwargs = None
self.on_timeout = None
try:
if _DEBUG:
pydev_log.critical('pydevd_timeout: Calling on timeout: %s with kwargs: %s', on_timeout, kwargs)
on_timeout(**kwargs)
except Exception:
pydev_log.exception('pydevd_timeout: Exception on callback timeout.')
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
tracker = self._tracker()
if tracker is None:
lock = NULL
else:
lock = tracker._lock
with lock:
self.disposed = True
self.kwargs = None
self.on_timeout = None
def __str__(self):
return self._str
__repr__ = __str__
class TimeoutTracker(object):
'''
This is a helper class to track the timeout of something.
'''
def __init__(self, py_db):
self._thread = None
self._lock = threading.Lock()
self._py_db = weakref.ref(py_db)
def call_on_timeout(self, timeout, on_timeout, kwargs=None):
'''
This can be called regularly to always execute the given function after a given timeout:
call_on_timeout(py_db, 10, on_timeout)
Or as a context manager to stop the method from being called if it finishes before the timeout
elapses:
with call_on_timeout(py_db, 10, on_timeout):
...
Note: the callback will be called from a PyDBDaemonThread.
'''
with self._lock:
if self._thread is None:
if _DEBUG:
pydev_log.critical('pydevd_timeout: Created _TimeoutThread.')
self._thread = _TimeoutThread(self._py_db())
self._thread.start()
curtime = time.time()
handle = _OnTimeoutHandle(self, curtime + timeout, on_timeout, kwargs)
if _DEBUG:
pydev_log.critical('pydevd_timeout: Added handle: %s.', handle)
self._thread.add_on_timeout_handle(handle)
return handle
def create_interrupt_this_thread_callback():
'''
The idea here is returning a callback that when called will generate a KeyboardInterrupt
in the thread that called this function.
If this is the main thread, this means that it'll emulate a Ctrl+C (which may stop I/O
and sleep operations).
For other threads, this will call PyThreadState_SetAsyncExc to raise
a KeyboardInterrupt before the next instruction (so, it won't really interrupt I/O or
sleep operations).
:return callable:
Returns a callback that will interrupt the current thread (this may be called
from an auxiliary thread).
'''
tid = thread_get_ident()
if is_current_thread_main_thread():
main_thread = threading.current_thread()
def raise_on_this_thread():
pydev_log.debug('Callback to interrupt main thread.')
pydevd_utils.interrupt_main_thread(main_thread)
else:
# Note: this works in the sense that it can stop some cpu-intensive slow operation,
# but we can't really interrupt the thread out of some sleep or I/O operation
# (this will only be raised when Python is about to execute the next instruction).
def raise_on_this_thread():
if IS_CPYTHON:
pydev_log.debug('Interrupt thread: %s', tid)
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), ctypes.py_object(KeyboardInterrupt))
else:
pydev_log.debug('It is only possible to interrupt non-main threads in CPython.')
return raise_on_this_thread
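# Illustrative usage (added for clarity; 'py_db' and 'slow_operation' are
# stand-ins): pairing the interrupt callback with TimeoutTracker bounds a slow
# operation, mirroring what the evaluation helpers in pydevd_vars.py do:
#
#     interrupt = create_interrupt_this_thread_callback()
#     with py_db.timeout_tracker.call_on_timeout(2.0, interrupt):
#         slow_operation()  # KeyboardInterrupt raised here if this exceeds ~2s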
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_plugin_utils.py

import types
from _pydev_bundle import pydev_log
from _pydevd_bundle import pydevd_trace_api
try:
from pydevd_plugins import django_debug
except:
django_debug = None
pydev_log.debug('Unable to load django_debug plugin')
try:
from pydevd_plugins import jinja2_debug
except:
jinja2_debug = None
pydev_log.debug('Unable to load jinja2_debug plugin')
def load_plugins():
plugins = []
if django_debug is not None:
plugins.append(django_debug)
if jinja2_debug is not None:
plugins.append(jinja2_debug)
return plugins
def bind_func_to_method(func, obj, method_name):
bound_method = types.MethodType(func, obj)
setattr(obj, method_name, bound_method)
return bound_method
class PluginManager(object):
def __init__(self, main_debugger):
self.plugins = load_plugins()
self.active_plugins = []
self.main_debugger = main_debugger
self.rebind_methods()
def add_breakpoint(self, func_name, *args, **kwargs):
# add breakpoint for plugin and remember which plugin to use in tracing
for plugin in self.plugins:
if hasattr(plugin, func_name):
func = getattr(plugin, func_name)
result = func(self, *args, **kwargs)
if result:
self.activate(plugin)
return result
return None
def activate(self, plugin):
if plugin not in self.active_plugins:
self.active_plugins.append(plugin)
self.rebind_methods()
def rebind_methods(self):
if len(self.active_plugins) == 0:
self.bind_functions(pydevd_trace_api, getattr, pydevd_trace_api)
elif len(self.active_plugins) == 1:
self.bind_functions(pydevd_trace_api, getattr, self.active_plugins[0])
else:
self.bind_functions(pydevd_trace_api, create_dispatch, self.active_plugins)
def bind_functions(self, interface, function_factory, arg):
for name in dir(interface):
func = function_factory(arg, name)
if type(func) == types.FunctionType:
bind_func_to_method(func, self, name)
def create_dispatch(obj, name):
def dispatch(self, *args, **kwargs):
result = None
for p in self.active_plugins:
r = getattr(p, name)(self, *args, **kwargs)
if not result:
result = r
return result
return dispatch
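# Illustrative note (added for clarity; not part of the original module): with
# two or more active plugins, each pydevd_trace_api function is rebound to a
# dispatcher that invokes every active plugin and returns the first truthy
# result:
#
#     manager = PluginManager(main_debugger)  # 'main_debugger' is a stand-in
#     manager.activate(django_debug)
#     manager.activate(jinja2_debug)          # calls now dispatch to both plugins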
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_extension_utils.py

import pkgutil
import sys
from _pydev_bundle import pydev_log
try:
import pydevd_plugins.extensions as extensions
except:
pydev_log.exception()
extensions = None
class ExtensionManager(object):
def __init__(self):
self.loaded_extensions = None
self.type_to_instance = {}
def _load_modules(self):
self.loaded_extensions = []
if extensions:
for module_loader, name, ispkg in pkgutil.walk_packages(extensions.__path__,
extensions.__name__ + '.'):
mod_name = name.split('.')[-1]
if not ispkg and mod_name.startswith('pydevd_plugin'):
try:
__import__(name)
module = sys.modules[name]
self.loaded_extensions.append(module)
except ImportError:
pydev_log.critical('Unable to load extension: %s', name)
def _ensure_loaded(self):
if self.loaded_extensions is None:
self._load_modules()
def _iter_attr(self):
for extension in self.loaded_extensions:
dunder_all = getattr(extension, '__all__', None)
for attr_name in dir(extension):
if not attr_name.startswith('_'):
if dunder_all is None or attr_name in dunder_all:
yield attr_name, getattr(extension, attr_name)
def get_extension_classes(self, extension_type):
self._ensure_loaded()
if extension_type in self.type_to_instance:
return self.type_to_instance[extension_type]
handlers = self.type_to_instance.setdefault(extension_type, [])
for attr_name, attr in self._iter_attr():
if isinstance(attr, type) and issubclass(attr, extension_type) and attr is not extension_type:
try:
handlers.append(attr())
except:
pydev_log.exception('Unable to load extension class: %s', attr_name)
return handlers
EXTENSION_MANAGER_INSTANCE = ExtensionManager()
def extensions_of_type(extension_type):
"""
:param T extension_type: The type of the extension hook
:rtype: list[T]
"""
return EXTENSION_MANAGER_INSTANCE.get_extension_classes(extension_type)
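# Illustrative usage (added for clarity; not part of the original module):
# querying the loaded implementations of one of the extension-point types
# defined in _pydevd_bundle.pydevd_extension_api:
#
#     from _pydevd_bundle.pydevd_extension_api import StrPresentationProvider
#     for provider in extensions_of_type(StrPresentationProvider):
#         ...  # each item is an instantiated subclass found under pydevd_plugins.extensions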
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_command_line_handling.py

import os
class ArgHandlerWithParam:
'''
    Handler for arguments which need a value
'''
def __init__(self, arg_name, convert_val=None, default_val=None):
self.arg_name = arg_name
self.arg_v_rep = '--%s' % (arg_name,)
self.convert_val = convert_val
self.default_val = default_val
def to_argv(self, lst, setup):
v = setup.get(self.arg_name)
if v is not None and v != self.default_val:
lst.append(self.arg_v_rep)
lst.append('%s' % (v,))
def handle_argv(self, argv, i, setup):
assert argv[i] == self.arg_v_rep
del argv[i]
val = argv[i]
if self.convert_val:
val = self.convert_val(val)
setup[self.arg_name] = val
del argv[i]
class ArgHandlerBool:
'''
If a given flag is received, mark it as 'True' in setup.
'''
def __init__(self, arg_name, default_val=False):
self.arg_name = arg_name
self.arg_v_rep = '--%s' % (arg_name,)
self.default_val = default_val
def to_argv(self, lst, setup):
v = setup.get(self.arg_name)
if v:
lst.append(self.arg_v_rep)
def handle_argv(self, argv, i, setup):
assert argv[i] == self.arg_v_rep
del argv[i]
setup[self.arg_name] = True
def convert_ppid(ppid):
ret = int(ppid)
if ret != 0:
if ret == os.getpid():
raise AssertionError(
'ppid passed is the same as the current process pid (%s)!' % (ret,))
return ret
ACCEPTED_ARG_HANDLERS = [
ArgHandlerWithParam('port', int, 0),
ArgHandlerWithParam('ppid', convert_ppid, 0),
ArgHandlerWithParam('vm_type'),
ArgHandlerWithParam('client'),
ArgHandlerWithParam('access-token'),
ArgHandlerWithParam('client-access-token'),
ArgHandlerBool('server'),
ArgHandlerBool('DEBUG_RECORD_SOCKET_READS'),
ArgHandlerBool('multiproc'), # Used by PyCharm (reuses connection: ssh tunneling)
ArgHandlerBool('multiprocess'), # Used by PyDev (creates new connection to ide)
ArgHandlerBool('save-signatures'),
ArgHandlerBool('save-threading'),
ArgHandlerBool('save-asyncio'),
ArgHandlerBool('print-in-debugger-startup'),
ArgHandlerBool('cmd-line'),
ArgHandlerBool('module'),
ArgHandlerBool('skip-notify-stdin'),
# The ones below should've been just one setting to specify the protocol, but for compatibility
# reasons they're passed as a flag but are mutually exclusive.
ArgHandlerBool('json-dap'), # Protocol used by ptvsd to communicate with pydevd (a single json message in each read)
ArgHandlerBool('json-dap-http'), # Actual DAP (json messages over http protocol).
ArgHandlerBool('protocol-quoted-line'), # Custom protocol with quoted lines.
ArgHandlerBool('protocol-http'), # Custom protocol with http.
]
ARGV_REP_TO_HANDLER = {}
for handler in ACCEPTED_ARG_HANDLERS:
ARGV_REP_TO_HANDLER[handler.arg_v_rep] = handler
def get_pydevd_file():
import pydevd
f = pydevd.__file__
if f.endswith('.pyc'):
f = f[:-1]
elif f.endswith('$py.class'):
f = f[:-len('$py.class')] + '.py'
return f
def setup_to_argv(setup, skip_names=None):
'''
:param dict setup:
A dict previously gotten from process_command_line.
:param set skip_names:
The names in the setup which shouldn't be converted to argv.
:note: does not handle --file nor --DEBUG.
'''
if skip_names is None:
skip_names = set()
ret = [get_pydevd_file()]
for handler in ACCEPTED_ARG_HANDLERS:
if handler.arg_name in setup and handler.arg_name not in skip_names:
handler.to_argv(ret, setup)
return ret
def process_command_line(argv):
""" parses the arguments.
removes our arguments from the command line """
setup = {}
for handler in ACCEPTED_ARG_HANDLERS:
setup[handler.arg_name] = handler.default_val
setup['file'] = ''
setup['qt-support'] = ''
i = 0
del argv[0]
while i < len(argv):
handler = ARGV_REP_TO_HANDLER.get(argv[i])
if handler is not None:
handler.handle_argv(argv, i, setup)
elif argv[i].startswith('--qt-support'):
# The --qt-support is special because we want to keep backward compatibility:
# Previously, just passing '--qt-support' meant that we should use the auto-discovery mode
# whereas now, if --qt-support is passed, it should be passed as --qt-support=<mode>, where
# mode can be one of 'auto', 'none', 'pyqt5', 'pyqt4', 'pyside', 'pyside2'.
if argv[i] == '--qt-support':
setup['qt-support'] = 'auto'
elif argv[i].startswith('--qt-support='):
qt_support = argv[i][len('--qt-support='):]
valid_modes = ('none', 'auto', 'pyqt5', 'pyqt4', 'pyside', 'pyside2')
if qt_support not in valid_modes:
raise ValueError("qt-support mode invalid: " + qt_support)
if qt_support == 'none':
# On none, actually set an empty string to evaluate to False.
setup['qt-support'] = ''
else:
setup['qt-support'] = qt_support
else:
raise ValueError("Unexpected definition for qt-support flag: " + argv[i])
del argv[i]
elif argv[i] == '--file':
# --file is special because it's the last one (so, no handler for it).
del argv[i]
setup['file'] = argv[i]
i = len(argv) # pop out, file is our last argument
elif argv[i] == '--DEBUG':
from pydevd import set_debug
del argv[i]
set_debug(setup)
else:
raise ValueError("Unexpected option: " + argv[i])
return setup
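# Illustrative example (added for clarity; not part of the original module):
# round-tripping arguments (note that setup_to_argv intentionally does not
# emit --file):
#
#     argv = ['pydevd.py', '--port', '5678', '--server', '--file', 'main.py']
#     setup = process_command_line(argv)  # consumes argv in place
#     setup['port']         # -> 5678
#     setup['file']         # -> 'main.py'
#     setup_to_argv(setup)  # -> [<path to pydevd.py>, '--port', '5678', '--server']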
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_traceproperty.py

'''For debugging purposes we replace the actual builtin property with the debug property
'''
from _pydevd_bundle.pydevd_comm import get_global_debugger
from _pydev_bundle import pydev_log
#=======================================================================================================================
# replace_builtin_property
#=======================================================================================================================
def replace_builtin_property(new_property=None):
if new_property is None:
new_property = DebugProperty
original = property
try:
import builtins
builtins.__dict__['property'] = new_property
except:
pydev_log.exception() # @Reimport
return original
#=======================================================================================================================
# DebugProperty
#=======================================================================================================================
class DebugProperty(object):
"""A custom property which allows python property to get
controlled by the debugger and selectively disable/re-enable
the tracing.
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
self.__doc__ = doc
def __get__(self, obj, objtype=None):
if obj is None:
return self
global_debugger = get_global_debugger()
try:
if global_debugger is not None and global_debugger.disable_property_getter_trace:
global_debugger.disable_tracing()
if self.fget is None:
raise AttributeError("unreadable attribute")
return self.fget(obj)
finally:
if global_debugger is not None:
global_debugger.enable_tracing()
def __set__(self, obj, value):
global_debugger = get_global_debugger()
try:
if global_debugger is not None and global_debugger.disable_property_setter_trace:
global_debugger.disable_tracing()
if self.fset is None:
raise AttributeError("can't set attribute")
self.fset(obj, value)
finally:
if global_debugger is not None:
global_debugger.enable_tracing()
def __delete__(self, obj):
global_debugger = get_global_debugger()
try:
if global_debugger is not None and global_debugger.disable_property_deleter_trace:
global_debugger.disable_tracing()
if self.fdel is None:
raise AttributeError("can't delete attribute")
self.fdel(obj)
finally:
if global_debugger is not None:
global_debugger.enable_tracing()
def getter(self, fget):
"""Overriding getter decorator for the property
"""
self.fget = fget
return self
def setter(self, fset):
"""Overriding setter decorator for the property
"""
self.fset = fset
return self
def deleter(self, fdel):
"""Overriding deleter decorator for the property
"""
self.fdel = fdel
return self
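# Illustrative usage (added for clarity; not part of the original module):
# installing the debug property and restoring the builtin afterwards.
#
#     original_property = replace_builtin_property()
#     try:
#         class Point(object):
#
#             @property  # now a DebugProperty: the getter runs with tracing paused
#             def x(self):
#                 return 10
#     finally:
#         import builtins
#         builtins.property = original_property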
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command.py

from _pydevd_bundle.pydevd_constants import DebugInfoHolder, \
get_global_debugger, GetGlobalDebugger, set_global_debugger # Keep for backward compatibility @UnusedImport
from _pydevd_bundle.pydevd_utils import quote_smart as quote, to_string
from _pydevd_bundle.pydevd_comm_constants import ID_TO_MEANING, CMD_EXIT
from _pydevd_bundle.pydevd_constants import HTTP_PROTOCOL, HTTP_JSON_PROTOCOL, \
get_protocol, IS_JYTHON, ForkSafeLock
import json
from _pydev_bundle import pydev_log
class _BaseNetCommand(object):
# Command id. Should be set in instance.
id = -1
# Dict representation of the command to be set in instance. Only set for json commands.
as_dict = None
def send(self, *args, **kwargs):
pass
def call_after_send(self, callback):
pass
class _NullNetCommand(_BaseNetCommand):
pass
class _NullExitCommand(_NullNetCommand):
id = CMD_EXIT
# Constant meant to be passed to the writer when the command is meant to be ignored.
NULL_NET_COMMAND = _NullNetCommand()
# Exit command -- only internal (we don't want/need to send this to the IDE).
NULL_EXIT_COMMAND = _NullExitCommand()
class NetCommand(_BaseNetCommand):
"""
Commands received/sent over the network.
Command can represent command received from the debugger,
or one to be sent by daemon.
"""
next_seq = 0 # sequence numbers
_showing_debug_info = 0
_show_debug_info_lock = ForkSafeLock(rlock=True)
_after_send = None
def __init__(self, cmd_id, seq, text, is_json=False):
"""
If sequence is 0, new sequence will be generated (otherwise, this was the response
to a command from the client).
"""
protocol = get_protocol()
self.id = cmd_id
if seq == 0:
NetCommand.next_seq += 2
seq = NetCommand.next_seq
self.seq = seq
if is_json:
if hasattr(text, 'to_dict'):
as_dict = text.to_dict(update_ids_to_dap=True)
else:
assert isinstance(text, dict)
as_dict = text
as_dict['pydevd_cmd_id'] = cmd_id
as_dict['seq'] = seq
self.as_dict = as_dict
text = json.dumps(as_dict)
assert isinstance(text, str)
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
self._show_debug_info(cmd_id, seq, text)
if is_json:
msg = text
else:
if protocol not in (HTTP_PROTOCOL, HTTP_JSON_PROTOCOL):
encoded = quote(to_string(text), '/<>_=" \t')
msg = '%s\t%s\t%s\n' % (cmd_id, seq, encoded)
else:
msg = '%s\t%s\t%s' % (cmd_id, seq, text)
if isinstance(msg, str):
msg = msg.encode('utf-8')
assert isinstance(msg, bytes)
as_bytes = msg
self._as_bytes = as_bytes
def send(self, sock):
as_bytes = self._as_bytes
try:
if get_protocol() in (HTTP_PROTOCOL, HTTP_JSON_PROTOCOL):
sock.sendall(('Content-Length: %s\r\n\r\n' % len(as_bytes)).encode('ascii'))
sock.sendall(as_bytes)
if self._after_send:
for method in self._after_send:
method(sock)
except:
if IS_JYTHON:
# Ignore errors in sock.sendall in Jython (seems to be common for Jython to
# give spurious exceptions at interpreter shutdown here).
pass
else:
raise
def call_after_send(self, callback):
if not self._after_send:
self._after_send = [callback]
else:
self._after_send.append(callback)
@classmethod
def _show_debug_info(cls, cmd_id, seq, text):
with cls._show_debug_info_lock:
# Only one thread each time (rlock).
if cls._showing_debug_info:
# avoid recursing in the same thread (just printing could create
# a new command when redirecting output).
return
cls._showing_debug_info += 1
try:
out_message = 'sending cmd (%s) --> ' % (get_protocol(),)
out_message += "%20s" % ID_TO_MEANING.get(str(cmd_id), 'UNKNOWN')
out_message += ' '
out_message += text.replace('\n', ' ')
try:
pydev_log.critical('%s\n', out_message)
except:
pass
finally:
cls._showing_debug_info -= 1
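# Illustrative note (added for clarity; not part of the original module): under
# the quoted-line protocol a command is serialized as
# '<cmd_id>\t<seq>\t<quoted text>\n', while a JSON command instead embeds the
# 'pydevd_cmd_id' and 'seq' keys in its payload dict.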
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_exec2.py

def Exec(exp, global_vars, local_vars=None):
if local_vars is not None:
exec(exp, global_vars, local_vars)
else:
        exec(exp, global_vars)
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_xml.py

from _pydev_bundle import pydev_log
from _pydevd_bundle import pydevd_extension_utils
from _pydevd_bundle import pydevd_resolver
import sys
from _pydevd_bundle.pydevd_constants import BUILTINS_MODULE_NAME, MAXIMUM_VARIABLE_REPRESENTATION_SIZE, \
RETURN_VALUES_DICT, LOAD_VALUES_ASYNC, DEFAULT_VALUE
from _pydev_bundle.pydev_imports import quote
from _pydevd_bundle.pydevd_extension_api import TypeResolveProvider, StrPresentationProvider
from _pydevd_bundle.pydevd_utils import isinstance_checked, hasattr_checked, DAPGrouper
from _pydevd_bundle.pydevd_resolver import get_var_scope
try:
import types
frame_type = types.FrameType
except:
frame_type = None
def make_valid_xml_value(s):
# Same thing as xml.sax.saxutils.escape but also escaping double quotes.
return s.replace("&", "&").replace('<', '<').replace('>', '>').replace('"', '"')
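# Illustrative example (added for clarity; not part of the original module):
#
#     make_valid_xml_value('a < "b" & c')  # -> 'a &lt; &quot;b&quot; &amp; c'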
class ExceptionOnEvaluate:
def __init__(self, result, etype, tb):
self.result = result
self.etype = etype
self.tb = tb
_IS_JYTHON = sys.platform.startswith("java")
def _create_default_type_map():
default_type_map = [
# None means that it should not be treated as a compound variable
        # isinstance does not accept a tuple on some versions of Python, so, we must declare it expanded
(type(None), None,),
(int, None),
(float, None),
(complex, None),
(str, None),
(tuple, pydevd_resolver.tupleResolver),
(list, pydevd_resolver.tupleResolver),
(dict, pydevd_resolver.dictResolver),
]
try:
from collections import OrderedDict
default_type_map.insert(0, (OrderedDict, pydevd_resolver.orderedDictResolver))
# we should put it before dict
except:
pass
try:
default_type_map.append((long, None)) # @UndefinedVariable
except:
pass # not available on all python versions
default_type_map.append((DAPGrouper, pydevd_resolver.dapGrouperResolver))
try:
default_type_map.append((set, pydevd_resolver.setResolver))
except:
pass # not available on all python versions
try:
default_type_map.append((frozenset, pydevd_resolver.setResolver))
except:
pass # not available on all python versions
try:
from django.utils.datastructures import MultiValueDict
default_type_map.insert(0, (MultiValueDict, pydevd_resolver.multiValueDictResolver))
# we should put it before dict
except:
pass # django may not be installed
try:
from django.forms import BaseForm
default_type_map.insert(0, (BaseForm, pydevd_resolver.djangoFormResolver))
# we should put it before instance resolver
except:
pass # django may not be installed
try:
from collections import deque
default_type_map.append((deque, pydevd_resolver.dequeResolver))
except:
pass
if frame_type is not None:
default_type_map.append((frame_type, pydevd_resolver.frameResolver))
if _IS_JYTHON:
from org.python import core # @UnresolvedImport
default_type_map.append((core.PyNone, None))
default_type_map.append((core.PyInteger, None))
default_type_map.append((core.PyLong, None))
default_type_map.append((core.PyFloat, None))
default_type_map.append((core.PyComplex, None))
default_type_map.append((core.PyString, None))
default_type_map.append((core.PyTuple, pydevd_resolver.tupleResolver))
default_type_map.append((core.PyList, pydevd_resolver.tupleResolver))
default_type_map.append((core.PyDictionary, pydevd_resolver.dictResolver))
default_type_map.append((core.PyStringMap, pydevd_resolver.dictResolver))
if hasattr(core, 'PyJavaInstance'):
# Jython 2.5b3 removed it.
default_type_map.append((core.PyJavaInstance, pydevd_resolver.instanceResolver))
return default_type_map
class TypeResolveHandler(object):
NO_PROVIDER = [] # Sentinel value (any mutable object to be used as a constant would be valid).
def __init__(self):
# Note: don't initialize with the types we already know about so that the extensions can override
# the default resolvers that are already available if they want.
self._type_to_resolver_cache = {}
self._type_to_str_provider_cache = {}
self._initialized = False
def _initialize(self):
self._default_type_map = _create_default_type_map()
self._resolve_providers = pydevd_extension_utils.extensions_of_type(TypeResolveProvider)
self._str_providers = pydevd_extension_utils.extensions_of_type(StrPresentationProvider)
self._initialized = True
def get_type(self, o):
try:
try:
# Faster than type(o) as we don't need the function call.
type_object = o.__class__ # could fail here
type_name = type_object.__name__
return self._get_type(o, type_object, type_name) # could fail here
except:
# Not all objects have __class__ (i.e.: there are bad bindings around).
type_object = type(o)
type_name = type_object.__name__
try:
return self._get_type(o, type_object, type_name)
except:
if isinstance(type_object, type):
# If it's still something manageable, use the default resolver, otherwise
# fallback to saying that it wasn't possible to get any info on it.
return type_object, str(type_name), pydevd_resolver.defaultResolver
return 'Unable to get Type', 'Unable to get Type', None
except:
# This happens for org.python.core.InitModule
return 'Unable to get Type', 'Unable to get Type', None
def _get_type(self, o, type_object, type_name):
# Note: we could have an exception here if the type_object is not hashable...
resolver = self._type_to_resolver_cache.get(type_object)
if resolver is not None:
return type_object, type_name, resolver
if not self._initialized:
self._initialize()
try:
for resolver in self._resolve_providers:
if resolver.can_provide(type_object, type_name):
# Cache it
self._type_to_resolver_cache[type_object] = resolver
return type_object, type_name, resolver
for t in self._default_type_map:
if isinstance_checked(o, t[0]):
# Cache it
resolver = t[1]
self._type_to_resolver_cache[type_object] = resolver
return (type_object, type_name, resolver)
except:
pydev_log.exception()
        # No match: return the default resolver (and cache it).
resolver = pydevd_resolver.defaultResolver
self._type_to_resolver_cache[type_object] = resolver
return type_object, type_name, resolver
if _IS_JYTHON:
_base_get_type = _get_type
def _get_type(self, o, type_object, type_name):
if type_name == 'org.python.core.PyJavaInstance':
return type_object, type_name, pydevd_resolver.instanceResolver
if type_name == 'org.python.core.PyArray':
return type_object, type_name, pydevd_resolver.jyArrayResolver
return self._base_get_type(o, type_object, type_name)
def str_from_providers(self, o, type_object, type_name):
provider = self._type_to_str_provider_cache.get(type_object)
if provider is self.NO_PROVIDER:
return None
if provider is not None:
return provider.get_str(o)
if not self._initialized:
self._initialize()
for provider in self._str_providers:
if provider.can_provide(type_object, type_name):
self._type_to_str_provider_cache[type_object] = provider
try:
return provider.get_str(o)
except:
pydev_log.exception("Error when getting str with custom provider: %s." % (provider,))
self._type_to_str_provider_cache[type_object] = self.NO_PROVIDER
return None
_TYPE_RESOLVE_HANDLER = TypeResolveHandler()
"""
def get_type(o):
Receives object and returns a triple (type_object, type_string, resolver).
resolver != None means that variable is a container, and should be displayed as a hierarchy.
Use the resolver to get its attributes.
All container objects (i.e.: dict, list, tuple, object, etc) should have a resolver.
"""
get_type = _TYPE_RESOLVE_HANDLER.get_type
_str_from_providers = _TYPE_RESOLVE_HANDLER.str_from_providers
def is_builtin(x):
return getattr(x, '__module__', None) == BUILTINS_MODULE_NAME
def should_evaluate_full_value(val):
return not LOAD_VALUES_ASYNC or (is_builtin(type(val)) and not isinstance_checked(val, (list, tuple, dict)))
def return_values_from_dict_to_xml(return_dict):
res = []
for name, val in return_dict.items():
res.append(var_to_xml(val, name, additional_in_xml=' isRetVal="True"'))
return ''.join(res)
def frame_vars_to_xml(frame_f_locals, hidden_ns=None):
""" dumps frame variables to XML
<var name="var_name" scope="local" type="type" value="value"/>
"""
xml = []
keys = sorted(frame_f_locals)
return_values_xml = []
for k in keys:
try:
v = frame_f_locals[k]
eval_full_val = should_evaluate_full_value(v)
if k == '_pydev_stop_at_break':
continue
if k == RETURN_VALUES_DICT:
for name, val in v.items():
return_values_xml.append(var_to_xml(val, name, additional_in_xml=' isRetVal="True"'))
else:
if hidden_ns is not None and k in hidden_ns:
xml.append(var_to_xml(v, str(k), additional_in_xml=' isIPythonHidden="True"',
evaluate_full_value=eval_full_val))
else:
xml.append(var_to_xml(v, str(k), evaluate_full_value=eval_full_val))
except Exception:
pydev_log.exception("Unexpected error, recovered safely.")
# Show return values as the first entry.
return_values_xml.extend(xml)
return ''.join(return_values_xml)
def get_variable_details(val, evaluate_full_value=True, to_string=None):
try:
# This should be faster than isinstance (but we have to protect against not having a '__class__' attribute).
is_exception_on_eval = val.__class__ == ExceptionOnEvaluate
except:
is_exception_on_eval = False
if is_exception_on_eval:
v = val.result
else:
v = val
_type, type_name, resolver = get_type(v)
type_qualifier = getattr(_type, "__module__", "")
if not evaluate_full_value:
value = DEFAULT_VALUE
else:
try:
str_from_provider = _str_from_providers(v, _type, type_name)
if str_from_provider is not None:
value = str_from_provider
elif to_string is not None:
value = to_string(v)
elif hasattr_checked(v, '__class__'):
if v.__class__ == frame_type:
value = pydevd_resolver.frameResolver.get_frame_name(v)
elif v.__class__ in (list, tuple):
if len(v) > 300:
value = '%s: %s' % (str(v.__class__), '<Too big to print. Len: %s>' % (len(v),))
else:
value = '%s: %s' % (str(v.__class__), v)
else:
try:
cName = str(v.__class__)
if cName.find('.') != -1:
cName = cName.split('.')[-1]
elif cName.find("'") != -1: # does not have '.' (could be something like <type 'int'>)
cName = cName[cName.index("'") + 1:]
if cName.endswith("'>"):
cName = cName[:-2]
except:
cName = str(v.__class__)
value = '%s: %s' % (cName, v)
else:
value = str(v)
except:
try:
value = repr(v)
except:
value = 'Unable to get repr for %s' % v.__class__
    # Decode bytes values so that the returned value is always str.
try:
if value.__class__ == bytes:
value = value.decode('utf-8', 'replace')
except TypeError:
pass
return type_name, type_qualifier, is_exception_on_eval, resolver, value
def var_to_xml(val, name, trim_if_too_big=True, additional_in_xml='', evaluate_full_value=True):
""" single variable or dictionary to xml representation """
type_name, type_qualifier, is_exception_on_eval, resolver, value = get_variable_details(
val, evaluate_full_value)
scope = get_var_scope(name, val, '', True)
try:
name = quote(name, '/>_= ') # TODO: Fix PY-5834 without using quote
except:
pass
xml = '<var name="%s" type="%s" ' % (make_valid_xml_value(name), make_valid_xml_value(type_name))
if type_qualifier:
xml_qualifier = 'qualifier="%s"' % make_valid_xml_value(type_qualifier)
else:
xml_qualifier = ''
if value:
# cannot be too big... communication may not handle it.
if len(value) > MAXIMUM_VARIABLE_REPRESENTATION_SIZE and trim_if_too_big:
value = value[0:MAXIMUM_VARIABLE_REPRESENTATION_SIZE]
value += '...'
xml_value = ' value="%s"' % (make_valid_xml_value(quote(value, '/>_= ')))
else:
xml_value = ''
if is_exception_on_eval:
xml_container = ' isErrorOnEval="True"'
else:
if resolver is not None:
xml_container = ' isContainer="True"'
else:
xml_container = ''
if scope:
return ''.join((xml, xml_qualifier, xml_value, xml_container, additional_in_xml, ' scope="', scope, '"', ' />\n'))
else:
return ''.join((xml, xml_qualifier, xml_value, xml_container, additional_in_xml, ' />\n'))
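# Illustrative output of var_to_xml (a sketch; the name and value are made up).
# For var_to_xml(10, 'x') one would expect something along the lines of:
#
#     <var name="x" type="int" qualifier="builtins" value="int%3A 10" />
#
# where the value is url-quoted and a resolver-backed (container) variable
# would additionally carry isContainer="True".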
| 14,443 | Python | 35.11 | 122 | 0.589767 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_runpy.py | """
Vendored copy of runpy from the standard library.
It's vendored so that we can properly ignore it when used to start user code
while still making it possible for the user to debug runpy itself.
runpy.py - locating and running Python code using the module namespace
Provides support for locating and running Python scripts using the Python
module namespace instead of the native filesystem.
This allows Python code to play nicely with non-filesystem based PEP 302
importers when locating support scripts as well as when importing modules.
"""
# Written by Nick Coghlan <ncoghlan at gmail.com>
# to implement PEP 338 (Executing Modules as Scripts)
import sys
import importlib.machinery # importlib first so we can test #15386 via -m
import importlib.util
import io
import types
import os
__all__ = [
"run_module", "run_path",
]
# Note: fabioz: Don't use pkgutil directly (when handling caught exceptions we could
# end up showing exceptions raised inside pkgutil.get_importer -- specifically the
# KeyError), so, create a copy of the function we need to properly ignore this
# exception when running the program.
def pkgutil_get_importer(path_item):
"""Retrieve a finder for the given path item
The returned finder is cached in sys.path_importer_cache
if it was newly created by a path hook.
The cache (or part of it) can be cleared manually if a
rescan of sys.path_hooks is necessary.
"""
try:
importer = sys.path_importer_cache[path_item]
except KeyError:
for path_hook in sys.path_hooks:
try:
importer = path_hook(path_item)
sys.path_importer_cache.setdefault(path_item, importer)
break
except ImportError:
pass
else:
importer = None
return importer
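# Illustrative usage (a sketch; the path below is hypothetical):
#
#     finder = pkgutil_get_importer('/usr/lib/python3/dist-packages')
#     # Returns a finder cached in sys.path_importer_cache, or None if no
#     # path hook accepts the entry.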
class _TempModule(object):
"""Temporarily replace a module in sys.modules with an empty namespace"""
def __init__(self, mod_name):
self.mod_name = mod_name
self.module = types.ModuleType(mod_name)
self._saved_module = []
def __enter__(self):
mod_name = self.mod_name
try:
self._saved_module.append(sys.modules[mod_name])
except KeyError:
pass
sys.modules[mod_name] = self.module
return self
def __exit__(self, *args):
if self._saved_module:
sys.modules[self.mod_name] = self._saved_module[0]
else:
del sys.modules[self.mod_name]
self._saved_module = []
class _ModifiedArgv0(object):
def __init__(self, value):
self.value = value
self._saved_value = self._sentinel = object()
def __enter__(self):
if self._saved_value is not self._sentinel:
raise RuntimeError("Already preserving saved value")
self._saved_value = sys.argv[0]
sys.argv[0] = self.value
def __exit__(self, *args):
self.value = self._sentinel
sys.argv[0] = self._saved_value
# TODO: Replace these helpers with importlib._bootstrap_external functions.
def _run_code(code, run_globals, init_globals=None,
mod_name=None, mod_spec=None,
pkg_name=None, script_name=None):
"""Helper to run code in nominated namespace"""
if init_globals is not None:
run_globals.update(init_globals)
if mod_spec is None:
loader = None
fname = script_name
cached = None
else:
loader = mod_spec.loader
fname = mod_spec.origin
cached = mod_spec.cached
if pkg_name is None:
pkg_name = mod_spec.parent
run_globals.update(__name__=mod_name,
__file__=fname,
__cached__=cached,
__doc__=None,
__loader__=loader,
__package__=pkg_name,
__spec__=mod_spec)
exec(code, run_globals)
return run_globals
def _run_module_code(code, init_globals=None,
mod_name=None, mod_spec=None,
pkg_name=None, script_name=None):
"""Helper to run code in new namespace with sys modified"""
fname = script_name if mod_spec is None else mod_spec.origin
with _TempModule(mod_name) as temp_module, _ModifiedArgv0(fname):
mod_globals = temp_module.module.__dict__
_run_code(code, mod_globals, init_globals,
mod_name, mod_spec, pkg_name, script_name)
# Copy the globals of the temporary module, as they
# may be cleared when the temporary module goes away
return mod_globals.copy()
# Helper to get the full name, spec and code for a module
def _get_module_details(mod_name, error=ImportError):
if mod_name.startswith("."):
raise error("Relative module names not supported")
pkg_name, _, _ = mod_name.rpartition(".")
if pkg_name:
# Try importing the parent to avoid catching initialization errors
try:
__import__(pkg_name)
except ImportError as e:
# If the parent or higher ancestor package is missing, let the
# error be raised by find_spec() below and then be caught. But do
# not allow other errors to be caught.
if e.name is None or (e.name != pkg_name and
not pkg_name.startswith(e.name + ".")):
raise
# Warn if the module has already been imported under its normal name
existing = sys.modules.get(mod_name)
if existing is not None and not hasattr(existing, "__path__"):
from warnings import warn
msg = "{mod_name!r} found in sys.modules after import of " \
"package {pkg_name!r}, but prior to execution of " \
"{mod_name!r}; this may result in unpredictable " \
"behaviour".format(mod_name=mod_name, pkg_name=pkg_name)
warn(RuntimeWarning(msg))
try:
spec = importlib.util.find_spec(mod_name)
except (ImportError, AttributeError, TypeError, ValueError) as ex:
# This hack fixes an impedance mismatch between pkgutil and
# importlib, where the latter raises other errors for cases where
# pkgutil previously raised ImportError
msg = "Error while finding module specification for {!r} ({}: {})"
if mod_name.endswith(".py"):
msg += (f". Try using '{mod_name[:-3]}' instead of "
f"'{mod_name}' as the module name.")
raise error(msg.format(mod_name, type(ex).__name__, ex)) from ex
if spec is None:
raise error("No module named %s" % mod_name)
if spec.submodule_search_locations is not None:
if mod_name == "__main__" or mod_name.endswith(".__main__"):
raise error("Cannot use package as __main__ module")
try:
pkg_main_name = mod_name + ".__main__"
return _get_module_details(pkg_main_name, error)
except error as e:
if mod_name not in sys.modules:
raise # No module loaded; being a package is irrelevant
raise error(("%s; %r is a package and cannot " +
"be directly executed") % (e, mod_name))
loader = spec.loader
if loader is None:
raise error("%r is a namespace package and cannot be executed"
% mod_name)
try:
code = loader.get_code(mod_name)
except ImportError as e:
raise error(format(e)) from e
if code is None:
raise error("No code object available for %s" % mod_name)
return mod_name, spec, code
class _Error(Exception):
"""Error that _run_module_as_main() should report without a traceback"""
# XXX ncoghlan: Should this be documented and made public?
# (Current thoughts: don't repeat the mistake that led to its
# creation when run_module() no longer met the needs of
# mainmodule.c, but couldn't be changed because it was public)
def _run_module_as_main(mod_name, alter_argv=True):
"""Runs the designated module in the __main__ namespace
Note that the executed module will have full access to the
__main__ namespace. If this is not desirable, the run_module()
function should be used to run the module code in a fresh namespace.
At the very least, these variables in __main__ will be overwritten:
__name__
__file__
__cached__
__loader__
__package__
"""
try:
if alter_argv or mod_name != "__main__": # i.e. -m switch
mod_name, mod_spec, code = _get_module_details(mod_name, _Error)
else: # i.e. directory or zipfile execution
mod_name, mod_spec, code = _get_main_module_details(_Error)
except _Error as exc:
msg = "%s: %s" % (sys.executable, exc)
sys.exit(msg)
main_globals = sys.modules["__main__"].__dict__
if alter_argv:
sys.argv[0] = mod_spec.origin
return _run_code(code, main_globals, None,
"__main__", mod_spec)
def run_module(mod_name, init_globals=None,
run_name=None, alter_sys=False):
"""Execute a module's code without importing it
Returns the resulting top level namespace dictionary
"""
mod_name, mod_spec, code = _get_module_details(mod_name)
if run_name is None:
run_name = mod_name
if alter_sys:
return _run_module_code(code, init_globals, run_name, mod_spec)
else:
# Leave the sys module alone
return _run_code(code, {}, init_globals, run_name, mod_spec)
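# Illustrative usage of run_module (a sketch; 'json.tool' is just an example):
#
#     ns = run_module('json.tool', run_name='__main__', alter_sys=True)
#     # Runs the module with `python -m json.tool` semantics and returns the
#     # resulting top-level namespace dictionary.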
def _get_main_module_details(error=ImportError):
# Helper that gives a nicer error message when attempting to
# execute a zipfile or directory by invoking __main__.py
# Also moves the standard __main__ out of the way so that the
# preexisting __loader__ entry doesn't cause issues
main_name = "__main__"
saved_main = sys.modules[main_name]
del sys.modules[main_name]
try:
return _get_module_details(main_name)
except ImportError as exc:
if main_name in str(exc):
raise error("can't find %r module in %r" %
(main_name, sys.path[0])) from exc
raise
finally:
sys.modules[main_name] = saved_main
try:
io_open_code = io.open_code
except AttributeError:
# Compatibility with Python 3.6/3.7
import tokenize
io_open_code = tokenize.open
def _get_code_from_file(run_name, fname):
# Check for a compiled file first
from pkgutil import read_code
decoded_path = os.path.abspath(os.fsdecode(fname))
with io_open_code(decoded_path) as f:
code = read_code(f)
if code is None:
# That didn't work, so try it as normal source code
with io_open_code(decoded_path) as f:
code = compile(f.read(), fname, 'exec')
return code, fname
def run_path(path_name, init_globals=None, run_name=None):
"""Execute code located at the specified filesystem location
Returns the resulting top level namespace dictionary
The file path may refer directly to a Python script (i.e.
one that could be directly executed with execfile) or else
it may refer to a zipfile or directory containing a top
level __main__.py script.
"""
if run_name is None:
run_name = "<run_path>"
pkg_name = run_name.rpartition(".")[0]
importer = pkgutil_get_importer(path_name)
    # Avoid importing imp so as not to trigger its deprecation warning.
is_NullImporter = False
if type(importer).__module__ == 'imp':
if type(importer).__name__ == 'NullImporter':
is_NullImporter = True
if isinstance(importer, type(None)) or is_NullImporter:
# Not a valid sys.path entry, so run the code directly
# execfile() doesn't help as we want to allow compiled files
code, fname = _get_code_from_file(run_name, path_name)
return _run_module_code(code, init_globals, run_name,
pkg_name=pkg_name, script_name=fname)
else:
# Finder is defined for path, so add it to
# the start of sys.path
sys.path.insert(0, path_name)
try:
# Here's where things are a little different from the run_module
# case. There, we only had to replace the module in sys while the
# code was running and doing so was somewhat optional. Here, we
# have no choice and we have to remove it even while we read the
# code. If we don't do this, a __loader__ attribute in the
# existing __main__ module may prevent location of the new module.
mod_name, mod_spec, code = _get_main_module_details()
with _TempModule(run_name) as temp_module, \
_ModifiedArgv0(path_name):
mod_globals = temp_module.module.__dict__
return _run_code(code, mod_globals, init_globals,
run_name, mod_spec, pkg_name).copy()
finally:
try:
sys.path.remove(path_name)
except ValueError:
pass
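# Illustrative usage of run_path (a sketch; the script path is hypothetical):
#
#     ns = run_path('/tmp/script.py', run_name='__main__')
#     # Also works when the path points at a directory or zipfile containing
#     # a top level __main__.py.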
if __name__ == "__main__":
# Run the module specified as the next command line argument
if len(sys.argv) < 2:
print("No module specified for execution", file=sys.stderr)
else:
del sys.argv[0] # Make the requested module sys.argv[0]
_run_module_as_main(sys.argv[0])
| 13,521 | Python | 37.19774 | 82 | 0.607425 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_dont_trace_files.py | # Important: Autogenerated file.
# DO NOT edit manually!
LIB_FILE = 1
PYDEV_FILE = 2
DONT_TRACE_DIRS = {
'_pydev_bundle': PYDEV_FILE,
'_pydev_runfiles': PYDEV_FILE,
'_pydevd_bundle': PYDEV_FILE,
'_pydevd_frame_eval': PYDEV_FILE,
'pydev_ipython': PYDEV_FILE,
'pydev_sitecustomize': PYDEV_FILE,
'pydevd_attach_to_process': PYDEV_FILE,
'pydevd_concurrency_analyser': PYDEV_FILE,
'pydevd_plugins': PYDEV_FILE,
'test_pydevd_reload': PYDEV_FILE,
}
DONT_TRACE = {
# commonly used things from the stdlib that we don't want to trace
'Queue.py':LIB_FILE,
'queue.py':LIB_FILE,
'socket.py':LIB_FILE,
'weakref.py':LIB_FILE,
'_weakrefset.py':LIB_FILE,
'linecache.py':LIB_FILE,
'threading.py':LIB_FILE,
'dis.py':LIB_FILE,
# things from pydev that we don't want to trace
'__main__pydevd_gen_debug_adapter_protocol.py': PYDEV_FILE,
'_pydev_calltip_util.py': PYDEV_FILE,
'_pydev_completer.py': PYDEV_FILE,
'_pydev_execfile.py': PYDEV_FILE,
'_pydev_filesystem_encoding.py': PYDEV_FILE,
'_pydev_getopt.py': PYDEV_FILE,
'_pydev_imports_tipper.py': PYDEV_FILE,
'_pydev_jy_imports_tipper.py': PYDEV_FILE,
'_pydev_log.py': PYDEV_FILE,
'_pydev_saved_modules.py': PYDEV_FILE,
'_pydev_sys_patch.py': PYDEV_FILE,
'_pydev_tipper_common.py': PYDEV_FILE,
'django_debug.py': PYDEV_FILE,
'jinja2_debug.py': PYDEV_FILE,
'pycompletionserver.py': PYDEV_FILE,
'pydev_app_engine_debug_startup.py': PYDEV_FILE,
'pydev_console_utils.py': PYDEV_FILE,
'pydev_import_hook.py': PYDEV_FILE,
'pydev_imports.py': PYDEV_FILE,
'pydev_ipython_console.py': PYDEV_FILE,
'pydev_ipython_console_011.py': PYDEV_FILE,
'pydev_is_thread_alive.py': PYDEV_FILE,
'pydev_localhost.py': PYDEV_FILE,
'pydev_log.py': PYDEV_FILE,
'pydev_monkey.py': PYDEV_FILE,
'pydev_monkey_qt.py': PYDEV_FILE,
'pydev_override.py': PYDEV_FILE,
'pydev_run_in_console.py': PYDEV_FILE,
'pydev_runfiles.py': PYDEV_FILE,
'pydev_runfiles_coverage.py': PYDEV_FILE,
'pydev_runfiles_nose.py': PYDEV_FILE,
'pydev_runfiles_parallel.py': PYDEV_FILE,
'pydev_runfiles_parallel_client.py': PYDEV_FILE,
'pydev_runfiles_pytest2.py': PYDEV_FILE,
'pydev_runfiles_unittest.py': PYDEV_FILE,
'pydev_runfiles_xml_rpc.py': PYDEV_FILE,
'pydev_umd.py': PYDEV_FILE,
'pydev_versioncheck.py': PYDEV_FILE,
'pydevconsole.py': PYDEV_FILE,
'pydevconsole_code.py': PYDEV_FILE,
'pydevd.py': PYDEV_FILE,
'pydevd_additional_thread_info.py': PYDEV_FILE,
'pydevd_additional_thread_info_regular.py': PYDEV_FILE,
'pydevd_api.py': PYDEV_FILE,
'pydevd_base_schema.py': PYDEV_FILE,
'pydevd_breakpoints.py': PYDEV_FILE,
'pydevd_bytecode_utils.py': PYDEV_FILE,
'pydevd_code_to_source.py': PYDEV_FILE,
'pydevd_collect_bytecode_info.py': PYDEV_FILE,
'pydevd_comm.py': PYDEV_FILE,
'pydevd_comm_constants.py': PYDEV_FILE,
'pydevd_command_line_handling.py': PYDEV_FILE,
'pydevd_concurrency_logger.py': PYDEV_FILE,
'pydevd_console.py': PYDEV_FILE,
'pydevd_constants.py': PYDEV_FILE,
'pydevd_custom_frames.py': PYDEV_FILE,
'pydevd_cython_wrapper.py': PYDEV_FILE,
'pydevd_daemon_thread.py': PYDEV_FILE,
'pydevd_defaults.py': PYDEV_FILE,
'pydevd_dont_trace.py': PYDEV_FILE,
'pydevd_dont_trace_files.py': PYDEV_FILE,
'pydevd_exec2.py': PYDEV_FILE,
'pydevd_extension_api.py': PYDEV_FILE,
'pydevd_extension_utils.py': PYDEV_FILE,
'pydevd_file_utils.py': PYDEV_FILE,
'pydevd_filtering.py': PYDEV_FILE,
'pydevd_frame.py': PYDEV_FILE,
'pydevd_frame_eval_cython_wrapper.py': PYDEV_FILE,
'pydevd_frame_eval_main.py': PYDEV_FILE,
'pydevd_frame_tracing.py': PYDEV_FILE,
'pydevd_frame_utils.py': PYDEV_FILE,
'pydevd_gevent_integration.py': PYDEV_FILE,
'pydevd_helpers.py': PYDEV_FILE,
'pydevd_import_class.py': PYDEV_FILE,
'pydevd_io.py': PYDEV_FILE,
'pydevd_json_debug_options.py': PYDEV_FILE,
'pydevd_line_validation.py': PYDEV_FILE,
'pydevd_modify_bytecode.py': PYDEV_FILE,
'pydevd_net_command.py': PYDEV_FILE,
'pydevd_net_command_factory_json.py': PYDEV_FILE,
'pydevd_net_command_factory_xml.py': PYDEV_FILE,
'pydevd_plugin_numpy_types.py': PYDEV_FILE,
'pydevd_plugin_pandas_types.py': PYDEV_FILE,
'pydevd_plugin_utils.py': PYDEV_FILE,
'pydevd_plugins_django_form_str.py': PYDEV_FILE,
'pydevd_process_net_command.py': PYDEV_FILE,
'pydevd_process_net_command_json.py': PYDEV_FILE,
'pydevd_referrers.py': PYDEV_FILE,
'pydevd_reload.py': PYDEV_FILE,
'pydevd_resolver.py': PYDEV_FILE,
'pydevd_runpy.py': PYDEV_FILE,
'pydevd_safe_repr.py': PYDEV_FILE,
'pydevd_save_locals.py': PYDEV_FILE,
'pydevd_schema.py': PYDEV_FILE,
'pydevd_schema_log.py': PYDEV_FILE,
'pydevd_signature.py': PYDEV_FILE,
'pydevd_source_mapping.py': PYDEV_FILE,
'pydevd_stackless.py': PYDEV_FILE,
'pydevd_suspended_frames.py': PYDEV_FILE,
'pydevd_thread_lifecycle.py': PYDEV_FILE,
'pydevd_thread_wrappers.py': PYDEV_FILE,
'pydevd_timeout.py': PYDEV_FILE,
'pydevd_trace_api.py': PYDEV_FILE,
'pydevd_trace_dispatch.py': PYDEV_FILE,
'pydevd_trace_dispatch_regular.py': PYDEV_FILE,
'pydevd_traceproperty.py': PYDEV_FILE,
'pydevd_tracing.py': PYDEV_FILE,
'pydevd_utils.py': PYDEV_FILE,
'pydevd_vars.py': PYDEV_FILE,
'pydevd_vm_type.py': PYDEV_FILE,
'pydevd_xml.py': PYDEV_FILE,
}
# if we try to trace io.py it seems it can get halted (see http://bugs.python.org/issue4716)
DONT_TRACE['io.py'] = LIB_FILE
# Don't trace common encodings too
DONT_TRACE['cp1252.py'] = LIB_FILE
DONT_TRACE['utf_8.py'] = LIB_FILE
DONT_TRACE['codecs.py'] = LIB_FILE
| 5,814 | Python | 36.75974 | 92 | 0.665807 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_referrers.py | import sys
from _pydevd_bundle import pydevd_xml
from os.path import basename
from _pydev_bundle import pydev_log
from urllib.parse import unquote_plus
from _pydevd_bundle.pydevd_constants import IS_PY311_OR_GREATER
#===================================================================================================
# print_var_node
#===================================================================================================
def print_var_node(xml_node, stream):
name = xml_node.getAttribute('name')
value = xml_node.getAttribute('value')
val_type = xml_node.getAttribute('type')
found_as = xml_node.getAttribute('found_as')
stream.write('Name: ')
stream.write(unquote_plus(name))
stream.write(', Value: ')
stream.write(unquote_plus(value))
stream.write(', Type: ')
stream.write(unquote_plus(val_type))
if found_as:
stream.write(', Found as: %s' % (unquote_plus(found_as),))
stream.write('\n')
#===================================================================================================
# print_referrers
#===================================================================================================
def print_referrers(obj, stream=None):
if stream is None:
stream = sys.stdout
result = get_referrer_info(obj)
from xml.dom.minidom import parseString
dom = parseString(result)
xml = dom.getElementsByTagName('xml')[0]
for node in xml.childNodes:
if node.nodeType == node.TEXT_NODE:
continue
if node.localName == 'for':
stream.write('Searching references for: ')
for child in node.childNodes:
if child.nodeType == node.TEXT_NODE:
continue
print_var_node(child, stream)
elif node.localName == 'var':
stream.write('Referrer found: ')
print_var_node(node, stream)
else:
sys.stderr.write('Unhandled node: %s\n' % (node,))
return result
#===================================================================================================
# get_referrer_info
#===================================================================================================
def get_referrer_info(searched_obj):
DEBUG = 0
if DEBUG:
sys.stderr.write('Getting referrers info.\n')
try:
try:
if searched_obj is None:
ret = ['<xml>\n']
ret.append('<for>\n')
ret.append(pydevd_xml.var_to_xml(
searched_obj,
'Skipping getting referrers for None',
additional_in_xml=' id="%s"' % (id(searched_obj),)))
ret.append('</for>\n')
ret.append('</xml>')
ret = ''.join(ret)
return ret
obj_id = id(searched_obj)
try:
if DEBUG:
sys.stderr.write('Getting referrers...\n')
import gc
referrers = gc.get_referrers(searched_obj)
except:
pydev_log.exception()
ret = ['<xml>\n']
ret.append('<for>\n')
ret.append(pydevd_xml.var_to_xml(
searched_obj,
'Exception raised while trying to get_referrers.',
additional_in_xml=' id="%s"' % (id(searched_obj),)))
ret.append('</for>\n')
ret.append('</xml>')
ret = ''.join(ret)
return ret
if DEBUG:
sys.stderr.write('Found %s referrers.\n' % (len(referrers),))
curr_frame = sys._getframe()
frame_type = type(curr_frame)
# Ignore this frame and any caller frame of this frame
            ignore_frames = set()
while curr_frame is not None:
if basename(curr_frame.f_code.co_filename).startswith('pydev'):
                    ignore_frames.add(curr_frame)
curr_frame = curr_frame.f_back
ret = ['<xml>\n']
ret.append('<for>\n')
if DEBUG:
sys.stderr.write('Searching Referrers of obj with id="%s"\n' % (obj_id,))
ret.append(pydevd_xml.var_to_xml(
searched_obj,
'Referrers of obj with id="%s"' % (obj_id,)))
ret.append('</for>\n')
curr_frame = sys._getframe()
all_objects = None
for r in referrers:
try:
if r in ignore_frames:
continue # Skip the references we may add ourselves
except:
pass # Ok: unhashable type checked...
if r is referrers:
continue
if r is curr_frame.f_locals:
continue
r_type = type(r)
r_id = str(id(r))
representation = str(r_type)
found_as = ''
if r_type == frame_type:
if DEBUG:
sys.stderr.write('Found frame referrer: %r\n' % (r,))
for key, val in r.f_locals.items():
if val is searched_obj:
found_as = key
break
elif r_type == dict:
if DEBUG:
sys.stderr.write('Found dict referrer: %r\n' % (r,))
# Try to check if it's a value in the dict (and under which key it was found)
for key, val in r.items():
if val is searched_obj:
found_as = key
if DEBUG:
sys.stderr.write(' Found as %r in dict\n' % (found_as,))
break
                    # Ok, there's one annoying thing: many times we find it in a dict from an instance,
                    # but with this we don't directly have the class, only the dict, so, to work around that
                    # we iterate over all reachable objects and check if one of those has the given dict.
if all_objects is None:
all_objects = gc.get_objects()
for x in all_objects:
try:
if getattr(x, '__dict__', None) is r:
r = x
r_type = type(x)
r_id = str(id(r))
representation = str(r_type)
break
except:
pass # Just ignore any error here (i.e.: ReferenceError, etc.)
elif r_type in (tuple, list):
if DEBUG:
sys.stderr.write('Found tuple referrer: %r\n' % (r,))
for i, x in enumerate(r):
if x is searched_obj:
found_as = '%s[%s]' % (r_type.__name__, i)
if DEBUG:
sys.stderr.write(' Found as %s in tuple: \n' % (found_as,))
break
elif IS_PY311_OR_GREATER:
# Up to Python 3.10, gc.get_referrers for an instance actually returned the
# object.__dict__, but on Python 3.11 it returns the actual object, so,
# handling is a bit easier (we don't need the workaround from the dict
# case to find the actual instance, we just need to find the attribute name).
if DEBUG:
                        sys.stderr.write('Found object referrer: %r\n' % (r,))
dct = getattr(r, '__dict__', None)
if dct:
# Try to check if it's a value in the dict (and under which key it was found)
for key, val in dct.items():
if val is searched_obj:
found_as = key
if DEBUG:
sys.stderr.write(' Found as %r in object instance\n' % (found_as,))
break
if found_as:
if not isinstance(found_as, str):
found_as = str(found_as)
found_as = ' found_as="%s"' % (pydevd_xml.make_valid_xml_value(found_as),)
ret.append(pydevd_xml.var_to_xml(
r,
representation,
additional_in_xml=' id="%s"%s' % (r_id, found_as)))
finally:
if DEBUG:
sys.stderr.write('Done searching for references.\n')
# If we have any exceptions, don't keep dangling references from this frame to any of our objects.
all_objects = None
referrers = None
searched_obj = None
r = None
x = None
key = None
val = None
curr_frame = None
ignore_frames = None
except:
pydev_log.exception()
ret = ['<xml>\n']
ret.append('<for>\n')
ret.append(pydevd_xml.var_to_xml(
searched_obj,
'Error getting referrers for:',
additional_in_xml=' id="%s"' % (id(searched_obj),)))
ret.append('</for>\n')
ret.append('</xml>')
ret = ''.join(ret)
return ret
ret.append('</xml>')
ret = ''.join(ret)
return ret
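# Illustrative usage (a sketch; the Holder class is hypothetical):
#
#     class Holder(object):
#         pass
#     h = Holder()
#     h.target = object()
#     print_referrers(h.target)
#     # Expected to report a referrer with found_as='target' (the Holder
#     # instance on Python 3.11+, its __dict__ on older versions).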
| 9,756 | Python | 36.817829 | 110 | 0.432554 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command_factory_json.py | from functools import partial
import itertools
import os
import sys
import socket as socket_module
from _pydev_bundle._pydev_imports_tipper import TYPE_IMPORT, TYPE_CLASS, TYPE_FUNCTION, TYPE_ATTR, \
TYPE_BUILTIN, TYPE_PARAM
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_bundle.pydev_override import overrides
from _pydevd_bundle._debug_adapter import pydevd_schema
from _pydevd_bundle._debug_adapter.pydevd_schema import ModuleEvent, ModuleEventBody, Module, \
OutputEventBody, OutputEvent, ContinuedEventBody, ExitedEventBody, \
ExitedEvent
from _pydevd_bundle.pydevd_comm_constants import CMD_THREAD_CREATE, CMD_RETURN, CMD_MODULE_EVENT, \
CMD_WRITE_TO_CONSOLE, CMD_STEP_INTO, CMD_STEP_INTO_MY_CODE, CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE, \
CMD_STEP_RETURN, CMD_STEP_CAUGHT_EXCEPTION, CMD_ADD_EXCEPTION_BREAK, CMD_SET_BREAK, \
CMD_SET_NEXT_STATEMENT, CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION, \
CMD_THREAD_RESUME_SINGLE_NOTIFICATION, CMD_THREAD_KILL, CMD_STOP_ON_START, CMD_INPUT_REQUESTED, \
CMD_EXIT, CMD_STEP_INTO_COROUTINE, CMD_STEP_RETURN_MY_CODE, CMD_SMART_STEP_INTO, \
CMD_SET_FUNCTION_BREAK
from _pydevd_bundle.pydevd_constants import get_thread_id, ForkSafeLock
from _pydevd_bundle.pydevd_net_command import NetCommand, NULL_NET_COMMAND
from _pydevd_bundle.pydevd_net_command_factory_xml import NetCommandFactory
from _pydevd_bundle.pydevd_utils import get_non_pydevd_threads
import pydevd_file_utils
from _pydevd_bundle.pydevd_comm import build_exception_info_response
from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info
from _pydevd_bundle import pydevd_frame_utils, pydevd_constants, pydevd_utils
import linecache
from _pydevd_bundle.pydevd_thread_lifecycle import pydevd_find_thread_by_id
from io import StringIO
class ModulesManager(object):
def __init__(self):
self._lock = ForkSafeLock()
self._modules = {}
self._next_id = partial(next, itertools.count(0))
def track_module(self, filename_in_utf8, module_name, frame):
'''
:return list(NetCommand):
Returns a list with the module events to be sent.
'''
if filename_in_utf8 in self._modules:
return []
module_events = []
with self._lock:
# Must check again after getting the lock.
if filename_in_utf8 in self._modules:
                return []
try:
version = str(frame.f_globals.get('__version__', ''))
except:
version = '<unknown>'
try:
package_name = str(frame.f_globals.get('__package__', ''))
except:
package_name = '<unknown>'
module_id = self._next_id()
module = Module(module_id, module_name, filename_in_utf8)
if version:
module.version = version
if package_name:
# Note: package doesn't appear in the docs but seems to be expected?
module.kwargs['package'] = package_name
module_event = ModuleEvent(ModuleEventBody('new', module))
module_events.append(NetCommand(CMD_MODULE_EVENT, 0, module_event, is_json=True))
self._modules[filename_in_utf8] = module.to_dict()
return module_events
def get_modules_info(self):
'''
:return list(Module)
'''
with self._lock:
return list(self._modules.values())
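# Illustrative behavior of ModulesManager.track_module (a sketch; the names
# below are hypothetical): the first call for a given file is expected to
# produce one CMD_MODULE_EVENT command, while later calls for the same file
# return an empty list:
#
#     mm = ModulesManager()
#     mm.track_module('/proj/app.py', 'app', frame)  # one module event
#     mm.track_module('/proj/app.py', 'app', frame)  # []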
class NetCommandFactoryJson(NetCommandFactory):
'''
Factory for commands which will provide messages as json (they should be
similar to the debug adapter where possible, although some differences
are currently Ok).
    Note that it currently overrides the xml version so that messages
    can be migrated one at a time (any message not overridden will currently
    use the xml version) -- after all messages are handled, it should
    no longer use NetCommandFactory as the base class.
'''
def __init__(self):
NetCommandFactory.__init__(self)
self.modules_manager = ModulesManager()
@overrides(NetCommandFactory.make_version_message)
def make_version_message(self, seq):
return NULL_NET_COMMAND # Not a part of the debug adapter protocol
@overrides(NetCommandFactory.make_protocol_set_message)
def make_protocol_set_message(self, seq):
return NULL_NET_COMMAND # Not a part of the debug adapter protocol
@overrides(NetCommandFactory.make_thread_created_message)
def make_thread_created_message(self, thread):
# Note: the thread id for the debug adapter must be an int
# (make the actual id from get_thread_id respect that later on).
msg = pydevd_schema.ThreadEvent(
pydevd_schema.ThreadEventBody('started', get_thread_id(thread)),
)
return NetCommand(CMD_THREAD_CREATE, 0, msg, is_json=True)
@overrides(NetCommandFactory.make_custom_frame_created_message)
def make_custom_frame_created_message(self, frame_id, frame_description):
self._additional_thread_id_to_thread_name[frame_id] = frame_description
msg = pydevd_schema.ThreadEvent(
pydevd_schema.ThreadEventBody('started', frame_id),
)
return NetCommand(CMD_THREAD_CREATE, 0, msg, is_json=True)
@overrides(NetCommandFactory.make_thread_killed_message)
def make_thread_killed_message(self, tid):
self._additional_thread_id_to_thread_name.pop(tid, None)
msg = pydevd_schema.ThreadEvent(
pydevd_schema.ThreadEventBody('exited', tid),
)
return NetCommand(CMD_THREAD_KILL, 0, msg, is_json=True)
@overrides(NetCommandFactory.make_list_threads_message)
def make_list_threads_message(self, py_db, seq):
threads = []
for thread in get_non_pydevd_threads():
if is_thread_alive(thread):
thread_id = get_thread_id(thread)
# Notify that it's created (no-op if we already notified before).
py_db.notify_thread_created(thread_id, thread)
thread_schema = pydevd_schema.Thread(id=thread_id, name=thread.name)
threads.append(thread_schema.to_dict())
for thread_id, thread_name in list(self._additional_thread_id_to_thread_name.items()):
thread_schema = pydevd_schema.Thread(id=thread_id, name=thread_name)
threads.append(thread_schema.to_dict())
body = pydevd_schema.ThreadsResponseBody(threads)
response = pydevd_schema.ThreadsResponse(
request_seq=seq, success=True, command='threads', body=body)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
@overrides(NetCommandFactory.make_get_completions_message)
def make_get_completions_message(self, seq, completions, qualifier, start):
COMPLETION_TYPE_LOOK_UP = {
TYPE_IMPORT: pydevd_schema.CompletionItemType.MODULE,
TYPE_CLASS: pydevd_schema.CompletionItemType.CLASS,
TYPE_FUNCTION: pydevd_schema.CompletionItemType.FUNCTION,
TYPE_ATTR: pydevd_schema.CompletionItemType.FIELD,
TYPE_BUILTIN: pydevd_schema.CompletionItemType.KEYWORD,
TYPE_PARAM: pydevd_schema.CompletionItemType.VARIABLE,
}
qualifier = qualifier.lower()
qualifier_len = len(qualifier)
targets = []
for completion in completions:
label = completion[0]
if label.lower().startswith(qualifier):
completion = pydevd_schema.CompletionItem(
label=label, type=COMPLETION_TYPE_LOOK_UP[completion[3]], start=start, length=qualifier_len)
targets.append(completion.to_dict())
body = pydevd_schema.CompletionsResponseBody(targets)
response = pydevd_schema.CompletionsResponse(
request_seq=seq, success=True, command='completions', body=body)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def _format_frame_name(self, fmt, initial_name, module_name, line, path):
if fmt is None:
return initial_name
frame_name = initial_name
if fmt.get('module', False):
if module_name:
if initial_name == '<module>':
frame_name = module_name
else:
frame_name = '%s.%s' % (module_name, initial_name)
else:
basename = os.path.basename(path)
basename = basename[0:-3] if basename.lower().endswith('.py') else basename
if initial_name == '<module>':
frame_name = '%s in %s' % (initial_name, basename)
else:
frame_name = '%s.%s' % (basename, initial_name)
if fmt.get('line', False):
frame_name = '%s : %d' % (frame_name, line)
return frame_name
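
    # Illustrative formatting (a sketch): for a function 'run' in module
    # 'my.pkg' at line 10, fmt={'module': True, 'line': True} is expected to
    # yield 'my.pkg.run : 10', while fmt=None keeps the original name 'run'.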
@overrides(NetCommandFactory.make_get_thread_stack_message)
def make_get_thread_stack_message(self, py_db, seq, thread_id, topmost_frame, fmt, must_be_suspended=False, start_frame=0, levels=0):
frames = []
module_events = []
try:
            # :type suspended_frames_manager: SuspendedFramesManager
suspended_frames_manager = py_db.suspended_frames_manager
frames_list = suspended_frames_manager.get_frames_list(thread_id)
if frames_list is None:
# Could not find stack of suspended frame...
if must_be_suspended:
return None
else:
frames_list = pydevd_frame_utils.create_frames_list_from_frame(topmost_frame)
for frame_id, frame, method_name, original_filename, filename_in_utf8, lineno, applied_mapping, show_as_current_frame in self._iter_visible_frames_info(
py_db, frames_list
):
try:
module_name = str(frame.f_globals.get('__name__', ''))
except:
module_name = '<unknown>'
module_events.extend(self.modules_manager.track_module(filename_in_utf8, module_name, frame))
presentation_hint = None
if not getattr(frame, 'IS_PLUGIN_FRAME', False): # Never filter out plugin frames!
if py_db.is_files_filter_enabled and py_db.apply_files_filter(frame, original_filename, False):
continue
if not py_db.in_project_scope(frame):
presentation_hint = 'subtle'
formatted_name = self._format_frame_name(fmt, method_name, module_name, lineno, filename_in_utf8)
if show_as_current_frame:
formatted_name += ' (Current frame)'
source_reference = pydevd_file_utils.get_client_filename_source_reference(filename_in_utf8)
if not source_reference and not applied_mapping and not os.path.exists(original_filename):
if getattr(frame.f_code, 'co_lnotab', None):
# Create a source-reference to be used where we provide the source by decompiling the code.
# Note: When the time comes to retrieve the source reference in this case, we'll
# check the linecache first (see: get_decompiled_source_from_frame_id).
source_reference = pydevd_file_utils.create_source_reference_for_frame_id(frame_id, original_filename)
else:
# Check if someone added a source reference to the linecache (Python attrs does this).
if linecache.getline(original_filename, 1):
source_reference = pydevd_file_utils.create_source_reference_for_linecache(
original_filename)
frames.append(pydevd_schema.StackFrame(
frame_id, formatted_name, lineno, column=1, source={
'path': filename_in_utf8,
'sourceReference': source_reference,
},
presentationHint=presentation_hint).to_dict())
finally:
topmost_frame = None
for module_event in module_events:
py_db.writer.add_command(module_event)
total_frames = len(frames)
stack_frames = frames
        if levels:
start = start_frame
end = min(start + levels, total_frames)
stack_frames = frames[start:end]
response = pydevd_schema.StackTraceResponse(
request_seq=seq,
success=True,
command='stackTrace',
body=pydevd_schema.StackTraceResponseBody(stackFrames=stack_frames, totalFrames=total_frames))
return NetCommand(CMD_RETURN, 0, response, is_json=True)
@overrides(NetCommandFactory.make_warning_message)
def make_warning_message(self, msg):
category = 'important'
body = OutputEventBody(msg, category)
event = OutputEvent(body)
return NetCommand(CMD_WRITE_TO_CONSOLE, 0, event, is_json=True)
@overrides(NetCommandFactory.make_io_message)
def make_io_message(self, msg, ctx):
category = 'stdout' if int(ctx) == 1 else 'stderr'
body = OutputEventBody(msg, category)
event = OutputEvent(body)
return NetCommand(CMD_WRITE_TO_CONSOLE, 0, event, is_json=True)
_STEP_REASONS = set([
CMD_STEP_INTO,
CMD_STEP_INTO_MY_CODE,
CMD_STEP_OVER,
CMD_STEP_OVER_MY_CODE,
CMD_STEP_RETURN,
CMD_STEP_RETURN_MY_CODE,
CMD_STEP_INTO_MY_CODE,
CMD_STOP_ON_START,
CMD_STEP_INTO_COROUTINE,
CMD_SMART_STEP_INTO,
])
_EXCEPTION_REASONS = set([
CMD_STEP_CAUGHT_EXCEPTION,
CMD_ADD_EXCEPTION_BREAK,
])
@overrides(NetCommandFactory.make_thread_suspend_single_notification)
def make_thread_suspend_single_notification(self, py_db, thread_id, stop_reason):
exc_desc = None
exc_name = None
thread = pydevd_find_thread_by_id(thread_id)
info = set_additional_thread_info(thread)
preserve_focus_hint = False
if stop_reason in self._STEP_REASONS:
if info.pydev_original_step_cmd == CMD_STOP_ON_START:
# Just to make sure that's not set as the original reason anymore.
info.pydev_original_step_cmd = -1
stop_reason = 'entry'
else:
stop_reason = 'step'
elif stop_reason in self._EXCEPTION_REASONS:
stop_reason = 'exception'
elif stop_reason == CMD_SET_BREAK:
stop_reason = 'breakpoint'
elif stop_reason == CMD_SET_FUNCTION_BREAK:
stop_reason = 'function breakpoint'
elif stop_reason == CMD_SET_NEXT_STATEMENT:
stop_reason = 'goto'
else:
stop_reason = 'pause'
preserve_focus_hint = True
if stop_reason == 'exception':
exception_info_response = build_exception_info_response(
py_db, thread_id, -1, set_additional_thread_info, self._iter_visible_frames_info, max_frames=-1)
exc_name = exception_info_response.body.exceptionId
exc_desc = exception_info_response.body.description
body = pydevd_schema.StoppedEventBody(
reason=stop_reason,
description=exc_desc,
threadId=thread_id,
text=exc_name,
allThreadsStopped=True,
preserveFocusHint=preserve_focus_hint,
)
event = pydevd_schema.StoppedEvent(body)
return NetCommand(CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION, 0, event, is_json=True)
@overrides(NetCommandFactory.make_thread_resume_single_notification)
def make_thread_resume_single_notification(self, thread_id):
body = ContinuedEventBody(threadId=thread_id, allThreadsContinued=True)
event = pydevd_schema.ContinuedEvent(body)
return NetCommand(CMD_THREAD_RESUME_SINGLE_NOTIFICATION, 0, event, is_json=True)
@overrides(NetCommandFactory.make_set_next_stmnt_status_message)
def make_set_next_stmnt_status_message(self, seq, is_success, exception_msg):
response = pydevd_schema.GotoResponse(
request_seq=int(seq),
success=is_success,
command='goto',
body={},
message=(None if is_success else exception_msg))
return NetCommand(CMD_RETURN, 0, response, is_json=True)
@overrides(NetCommandFactory.make_send_curr_exception_trace_message)
def make_send_curr_exception_trace_message(self, *args, **kwargs):
return NULL_NET_COMMAND # Not a part of the debug adapter protocol
@overrides(NetCommandFactory.make_send_curr_exception_trace_proceeded_message)
def make_send_curr_exception_trace_proceeded_message(self, *args, **kwargs):
return NULL_NET_COMMAND # Not a part of the debug adapter protocol
@overrides(NetCommandFactory.make_send_breakpoint_exception_message)
def make_send_breakpoint_exception_message(self, *args, **kwargs):
return NULL_NET_COMMAND # Not a part of the debug adapter protocol
@overrides(NetCommandFactory.make_process_created_message)
def make_process_created_message(self, *args, **kwargs):
return NULL_NET_COMMAND # Not a part of the debug adapter protocol
@overrides(NetCommandFactory.make_process_about_to_be_replaced_message)
def make_process_about_to_be_replaced_message(self):
event = ExitedEvent(ExitedEventBody(-1, pydevdReason="processReplaced"))
cmd = NetCommand(CMD_RETURN, 0, event, is_json=True)
def after_send(socket):
socket.setsockopt(socket_module.IPPROTO_TCP, socket_module.TCP_NODELAY, 1)
cmd.call_after_send(after_send)
return cmd
@overrides(NetCommandFactory.make_thread_suspend_message)
def make_thread_suspend_message(self, *args, **kwargs):
return NULL_NET_COMMAND # Not a part of the debug adapter protocol
@overrides(NetCommandFactory.make_thread_run_message)
def make_thread_run_message(self, *args, **kwargs):
return NULL_NET_COMMAND # Not a part of the debug adapter protocol
@overrides(NetCommandFactory.make_reloaded_code_message)
def make_reloaded_code_message(self, *args, **kwargs):
return NULL_NET_COMMAND # Not a part of the debug adapter protocol
@overrides(NetCommandFactory.make_input_requested_message)
def make_input_requested_message(self, started):
event = pydevd_schema.PydevdInputRequestedEvent(body={})
return NetCommand(CMD_INPUT_REQUESTED, 0, event, is_json=True)
@overrides(NetCommandFactory.make_skipped_step_in_because_of_filters)
def make_skipped_step_in_because_of_filters(self, py_db, frame):
msg = 'Frame skipped from debugging during step-in.'
if py_db.get_use_libraries_filter():
msg += ('\nNote: may have been skipped because of "justMyCode" option (default == true). '
'Try setting \"justMyCode\": false in the debug configuration (e.g., launch.json).\n')
return self.make_warning_message(msg)
@overrides(NetCommandFactory.make_evaluation_timeout_msg)
def make_evaluation_timeout_msg(self, py_db, expression, curr_thread):
msg = '''Evaluating: %s did not finish after %.2f seconds.
This may mean a number of things:
- This evaluation is really slow and this is expected.
In this case it's possible to silence this error by raising the timeout, setting the
PYDEVD_WARN_EVALUATION_TIMEOUT environment variable to a bigger value.
- The evaluation may need other threads running while it's running:
In this case, it's possible to set the PYDEVD_UNBLOCK_THREADS_TIMEOUT
environment variable so that if after a given timeout an evaluation doesn't finish,
other threads are unblocked or you can manually resume all threads.
Alternatively, it's also possible to skip breaking on a particular thread by setting a
`pydev_do_not_trace = True` attribute in the related threading.Thread instance
(if some thread should always be running and no breakpoints are expected to be hit in it).
- The evaluation is deadlocked:
In this case you may set the PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT
environment variable to true so that a thread dump is shown along with this message and
optionally, set the PYDEVD_INTERRUPT_THREAD_TIMEOUT to some value so that the debugger
tries to interrupt the evaluation (if possible) when this happens.
''' % (expression, pydevd_constants.PYDEVD_WARN_EVALUATION_TIMEOUT)
if pydevd_constants.PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT:
stream = StringIO()
pydevd_utils.dump_threads(stream, show_pydevd_threads=False)
msg += '\n\n%s\n' % stream.getvalue()
return self.make_warning_message(msg)
@overrides(NetCommandFactory.make_exit_command)
def make_exit_command(self, py_db):
event = pydevd_schema.TerminatedEvent(pydevd_schema.TerminatedEventBody())
return NetCommand(CMD_EXIT, 0, event, is_json=True)
| 21,328 | Python | 43.903158 | 164 | 0.644599 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_api.py | import sys
import bisect
import types
from _pydev_bundle._pydev_saved_modules import threading
from _pydevd_bundle import pydevd_utils, pydevd_source_mapping
from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info
from _pydevd_bundle.pydevd_comm import (InternalGetThreadStack, internal_get_completions,
InternalSetNextStatementThread, internal_reload_code,
InternalGetVariable, InternalGetArray, InternalLoadFullValue,
internal_get_description, internal_get_frame, internal_evaluate_expression, InternalConsoleExec,
internal_get_variable_json, internal_change_variable, internal_change_variable_json,
internal_evaluate_expression_json, internal_set_expression_json, internal_get_exception_details_json,
internal_step_in_thread, internal_smart_step_into)
from _pydevd_bundle.pydevd_comm_constants import (CMD_THREAD_SUSPEND, file_system_encoding,
CMD_STEP_INTO_MY_CODE, CMD_STOP_ON_START, CMD_SMART_STEP_INTO)
from _pydevd_bundle.pydevd_constants import (get_current_thread_id, set_protocol, get_protocol,
HTTP_JSON_PROTOCOL, JSON_PROTOCOL, DebugInfoHolder, IS_WINDOWS)
from _pydevd_bundle.pydevd_net_command_factory_json import NetCommandFactoryJson
from _pydevd_bundle.pydevd_net_command_factory_xml import NetCommandFactory
import pydevd_file_utils
from _pydev_bundle import pydev_log
from _pydevd_bundle.pydevd_breakpoints import LineBreakpoint
from pydevd_tracing import get_exception_traceback_str
import os
import subprocess
import ctypes
from _pydevd_bundle.pydevd_collect_bytecode_info import code_to_bytecode_representation
import itertools
import linecache
from _pydevd_bundle.pydevd_utils import DAPGrouper
from _pydevd_bundle.pydevd_daemon_thread import run_as_pydevd_daemon_thread
from _pydevd_bundle.pydevd_thread_lifecycle import pydevd_find_thread_by_id, resume_threads
import tokenize
try:
import dis
except ImportError:
def _get_code_lines(code):
raise NotImplementedError
else:
def _get_code_lines(code):
if not isinstance(code, types.CodeType):
path = code
with tokenize.open(path) as f:
src = f.read()
code = compile(src, path, 'exec', 0, dont_inherit=True)
return _get_code_lines(code)
def iterate():
# First, get all line starts for this code object. This does not include
# bodies of nested class and function definitions, as they have their
# own objects.
for _, lineno in dis.findlinestarts(code):
yield lineno
# For nested class and function definitions, their respective code objects
# are constants referenced by this object.
for const in code.co_consts:
if isinstance(const, types.CodeType) and const.co_filename == code.co_filename:
for lineno in _get_code_lines(const):
yield lineno
return iterate()
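# Illustrative usage of _get_code_lines (a sketch; the path is hypothetical):
#
#     lines = set(_get_code_lines('/tmp/module.py'))
#     # Expected to contain the starting line of every statement, including
#     # statements inside nested function and class definitions.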
class PyDevdAPI(object):
class VariablePresentation(object):
def __init__(self, special='group', function='group', class_='group', protected='inline'):
self._presentation = {
DAPGrouper.SCOPE_SPECIAL_VARS: special,
DAPGrouper.SCOPE_FUNCTION_VARS: function,
DAPGrouper.SCOPE_CLASS_VARS: class_,
DAPGrouper.SCOPE_PROTECTED_VARS: protected,
}
def get_presentation(self, scope):
return self._presentation[scope]
def run(self, py_db):
py_db.ready_to_run = True
def notify_initialize(self, py_db):
py_db.on_initialize()
def notify_configuration_done(self, py_db):
py_db.on_configuration_done()
def notify_disconnect(self, py_db):
py_db.on_disconnect()
def set_protocol(self, py_db, seq, protocol):
set_protocol(protocol.strip())
if get_protocol() in (HTTP_JSON_PROTOCOL, JSON_PROTOCOL):
cmd_factory_class = NetCommandFactoryJson
else:
cmd_factory_class = NetCommandFactory
if not isinstance(py_db.cmd_factory, cmd_factory_class):
py_db.cmd_factory = cmd_factory_class()
return py_db.cmd_factory.make_protocol_set_message(seq)
def set_ide_os_and_breakpoints_by(self, py_db, seq, ide_os, breakpoints_by):
'''
:param ide_os: 'WINDOWS' or 'UNIX'
:param breakpoints_by: 'ID' or 'LINE'
'''
if breakpoints_by == 'ID':
py_db._set_breakpoints_with_id = True
else:
py_db._set_breakpoints_with_id = False
self.set_ide_os(ide_os)
return py_db.cmd_factory.make_version_message(seq)
def set_ide_os(self, ide_os):
'''
:param ide_os: 'WINDOWS' or 'UNIX'
'''
pydevd_file_utils.set_ide_os(ide_os)
def set_gui_event_loop(self, py_db, gui_event_loop):
py_db._gui_event_loop = gui_event_loop
def send_error_message(self, py_db, msg):
cmd = py_db.cmd_factory.make_warning_message('pydevd: %s\n' % (msg,))
py_db.writer.add_command(cmd)
def set_show_return_values(self, py_db, show_return_values):
if show_return_values:
py_db.show_return_values = True
else:
if py_db.show_return_values:
# We should remove saved return values
py_db.remove_return_values_flag = True
py_db.show_return_values = False
pydev_log.debug("Show return values: %s", py_db.show_return_values)
def list_threads(self, py_db, seq):
# Response is the command with the list of threads to be added to the writer thread.
return py_db.cmd_factory.make_list_threads_message(py_db, seq)
def request_suspend_thread(self, py_db, thread_id='*'):
# Yes, thread suspend is done at this point, not through an internal command.
threads = []
suspend_all = thread_id.strip() == '*'
if suspend_all:
threads = pydevd_utils.get_non_pydevd_threads()
elif thread_id.startswith('__frame__:'):
sys.stderr.write("Can't suspend tasklet: %s\n" % (thread_id,))
else:
threads = [pydevd_find_thread_by_id(thread_id)]
for t in threads:
if t is None:
continue
py_db.set_suspend(
t,
CMD_THREAD_SUSPEND,
suspend_other_threads=suspend_all,
is_pause=True,
)
# Break here (even if it's suspend all) as py_db.set_suspend will
# take care of suspending other threads.
break
def set_enable_thread_notifications(self, py_db, enable):
'''
When disabled, no thread notifications (for creation/removal) will be
issued until it's re-enabled.
Note that when it's re-enabled, a creation notification will be sent for
all existing threads even if it was previously sent (this is meant to
be used on disconnect/reconnect).
'''
py_db.set_enable_thread_notifications(enable)
def request_disconnect(self, py_db, resume_threads):
self.set_enable_thread_notifications(py_db, False)
self.remove_all_breakpoints(py_db, '*')
self.remove_all_exception_breakpoints(py_db)
self.notify_disconnect(py_db)
if resume_threads:
self.request_resume_thread(thread_id='*')
def request_resume_thread(self, thread_id):
resume_threads(thread_id)
def request_completions(self, py_db, seq, thread_id, frame_id, act_tok, line=-1, column=-1):
py_db.post_method_as_internal_command(
thread_id, internal_get_completions, seq, thread_id, frame_id, act_tok, line=line, column=column)
def request_stack(self, py_db, seq, thread_id, fmt=None, timeout=.5, start_frame=0, levels=0):
# If it's already suspended, get it right away.
internal_get_thread_stack = InternalGetThreadStack(
seq, thread_id, py_db, set_additional_thread_info, fmt=fmt, timeout=timeout, start_frame=start_frame, levels=levels)
if internal_get_thread_stack.can_be_executed_by(get_current_thread_id(threading.current_thread())):
internal_get_thread_stack.do_it(py_db)
else:
py_db.post_internal_command(internal_get_thread_stack, '*')
def request_exception_info_json(self, py_db, request, thread_id, max_frames):
py_db.post_method_as_internal_command(
thread_id,
internal_get_exception_details_json,
request,
thread_id,
max_frames,
set_additional_thread_info=set_additional_thread_info,
iter_visible_frames_info=py_db.cmd_factory._iter_visible_frames_info,
)
def request_step(self, py_db, thread_id, step_cmd_id):
t = pydevd_find_thread_by_id(thread_id)
if t:
py_db.post_method_as_internal_command(
thread_id,
internal_step_in_thread,
thread_id,
step_cmd_id,
set_additional_thread_info=set_additional_thread_info,
)
elif thread_id.startswith('__frame__:'):
sys.stderr.write("Can't make tasklet step command: %s\n" % (thread_id,))
def request_smart_step_into(self, py_db, seq, thread_id, offset, child_offset):
t = pydevd_find_thread_by_id(thread_id)
if t:
py_db.post_method_as_internal_command(
thread_id, internal_smart_step_into, thread_id, offset, child_offset, set_additional_thread_info=set_additional_thread_info)
elif thread_id.startswith('__frame__:'):
sys.stderr.write("Can't set next statement in tasklet: %s\n" % (thread_id,))
def request_smart_step_into_by_func_name(self, py_db, seq, thread_id, line, func_name):
# Same thing as set next, just with a different cmd id.
self.request_set_next(py_db, seq, thread_id, CMD_SMART_STEP_INTO, None, line, func_name)
def request_set_next(self, py_db, seq, thread_id, set_next_cmd_id, original_filename, line, func_name):
'''
set_next_cmd_id may actually be one of:
CMD_RUN_TO_LINE
CMD_SET_NEXT_STATEMENT
CMD_SMART_STEP_INTO -- note: request_smart_step_into is preferred if it's possible
to work with bytecode offset.
:param Optional[str] original_filename:
If available, the filename may be source translated, otherwise no translation will take
place (the set next just needs the line afterwards as it executes locally, but for
the Jupyter integration, the source mapping may change the actual lines and not only
the filename).
'''
t = pydevd_find_thread_by_id(thread_id)
if t:
if original_filename is not None:
translated_filename = self.filename_to_server(original_filename) # Apply user path mapping.
pydev_log.debug('Set next (after path translation) in: %s line: %s', translated_filename, line)
func_name = self.to_str(func_name)
                assert translated_filename.__class__ == str
                assert func_name.__class__ == str
# Apply source mapping (i.e.: ipython).
_source_mapped_filename, new_line, multi_mapping_applied = py_db.source_mapping.map_to_server(
translated_filename, line)
if multi_mapping_applied:
pydev_log.debug('Set next (after source mapping) in: %s line: %s', translated_filename, line)
line = new_line
int_cmd = InternalSetNextStatementThread(thread_id, set_next_cmd_id, line, func_name, seq=seq)
py_db.post_internal_command(int_cmd, thread_id)
elif thread_id.startswith('__frame__:'):
sys.stderr.write("Can't set next statement in tasklet: %s\n" % (thread_id,))
def request_reload_code(self, py_db, seq, module_name, filename):
'''
:param seq: if -1 no message will be sent back when the reload is done.
Note: either module_name or filename may be None (but not both at the same time).
'''
thread_id = '*' # Any thread
# Note: not going for the main thread because in this case it'd only do the load
# when we stopped on a breakpoint.
py_db.post_method_as_internal_command(
thread_id, internal_reload_code, seq, module_name, filename)
def request_change_variable(self, py_db, seq, thread_id, frame_id, scope, attr, value):
'''
:param scope: 'FRAME' or 'GLOBAL'
'''
py_db.post_method_as_internal_command(
thread_id, internal_change_variable, seq, thread_id, frame_id, scope, attr, value)
def request_get_variable(self, py_db, seq, thread_id, frame_id, scope, attrs):
'''
:param scope: 'FRAME' or 'GLOBAL'
'''
int_cmd = InternalGetVariable(seq, thread_id, frame_id, scope, attrs)
py_db.post_internal_command(int_cmd, thread_id)
def request_get_array(self, py_db, seq, roffset, coffset, rows, cols, fmt, thread_id, frame_id, scope, attrs):
int_cmd = InternalGetArray(seq, roffset, coffset, rows, cols, fmt, thread_id, frame_id, scope, attrs)
py_db.post_internal_command(int_cmd, thread_id)
def request_load_full_value(self, py_db, seq, thread_id, frame_id, vars):
int_cmd = InternalLoadFullValue(seq, thread_id, frame_id, vars)
py_db.post_internal_command(int_cmd, thread_id)
def request_get_description(self, py_db, seq, thread_id, frame_id, expression):
py_db.post_method_as_internal_command(
thread_id, internal_get_description, seq, thread_id, frame_id, expression)
def request_get_frame(self, py_db, seq, thread_id, frame_id):
py_db.post_method_as_internal_command(
thread_id, internal_get_frame, seq, thread_id, frame_id)
def to_str(self, s):
'''
        Raises an AssertionError if it's not str already.
'''
if s.__class__ != str:
raise AssertionError('Expected to have str on Python 3. Found: %s (%s)' % (s, s.__class__))
return s
def filename_to_str(self, filename):
'''
        Raises an AssertionError if it's not str already.
'''
if filename.__class__ != str:
raise AssertionError('Expected to have str on Python 3. Found: %s (%s)' % (filename, filename.__class__))
return filename
def filename_to_server(self, filename):
filename = self.filename_to_str(filename)
filename = pydevd_file_utils.map_file_to_server(filename)
return filename
class _DummyFrame(object):
'''
Dummy frame to be used with PyDB.apply_files_filter (as we don't really have the
related frame as breakpoints are added before execution).
'''
class _DummyCode(object):
def __init__(self, filename):
self.co_firstlineno = 1
self.co_filename = filename
self.co_name = 'invalid func name'
def __init__(self, filename):
self.f_code = self._DummyCode(filename)
self.f_globals = {}
ADD_BREAKPOINT_NO_ERROR = 0
ADD_BREAKPOINT_FILE_NOT_FOUND = 1
ADD_BREAKPOINT_FILE_EXCLUDED_BY_FILTERS = 2
# This means that the breakpoint couldn't be fully validated (more runtime
# information may be needed).
ADD_BREAKPOINT_LAZY_VALIDATION = 3
ADD_BREAKPOINT_INVALID_LINE = 4
class _AddBreakpointResult(object):
# :see: ADD_BREAKPOINT_NO_ERROR = 0
# :see: ADD_BREAKPOINT_FILE_NOT_FOUND = 1
# :see: ADD_BREAKPOINT_FILE_EXCLUDED_BY_FILTERS = 2
# :see: ADD_BREAKPOINT_LAZY_VALIDATION = 3
# :see: ADD_BREAKPOINT_INVALID_LINE = 4
__slots__ = ['error_code', 'breakpoint_id', 'translated_filename', 'translated_line', 'original_line']
def __init__(self, breakpoint_id, translated_filename, translated_line, original_line):
self.error_code = PyDevdAPI.ADD_BREAKPOINT_NO_ERROR
self.breakpoint_id = breakpoint_id
self.translated_filename = translated_filename
self.translated_line = translated_line
self.original_line = original_line
def add_breakpoint(
self, py_db, original_filename, breakpoint_type, breakpoint_id, line, condition, func_name,
expression, suspend_policy, hit_condition, is_logpoint, adjust_line=False, on_changed_breakpoint_state=None):
'''
:param str original_filename:
Note: must be sent as it was received in the protocol. It may be translated in this
function and its final value will be available in the returned _AddBreakpointResult.
:param str breakpoint_type:
One of: 'python-line', 'django-line', 'jinja2-line'.
:param int breakpoint_id:
:param int line:
Note: it's possible that a new line was actually used. If that's the case its
final value will be available in the returned _AddBreakpointResult.
:param condition:
Either None or the condition to activate the breakpoint.
:param str func_name:
If "None" (str), may hit in any context.
Empty string will hit only top level.
Any other value must match the scope of the method to be matched.
:param str expression:
None or the expression to be evaluated.
:param suspend_policy:
Either "NONE" (to suspend only the current thread when the breakpoint is hit) or
"ALL" (to suspend all threads when a breakpoint is hit).
:param str hit_condition:
An expression where `@HIT@` will be replaced by the number of hits.
i.e.: `@HIT@ == x` or `@HIT@ >= x`
:param bool is_logpoint:
If True and an expression is passed, pydevd will create an io message command with the
result of the evaluation.
:param bool adjust_line:
If True, the breakpoint line should be adjusted if the current line doesn't really
match an executable line (if possible).
:param callable on_changed_breakpoint_state:
This is called when something changed internally on the breakpoint after it was initially
added (for instance, template breakpoints could be signaled as invalid initially and
later, when the related template is loaded, if the line is valid it could be marked as valid).
The signature for the callback should be:
on_changed_breakpoint_state(breakpoint_id: int, add_breakpoint_result: _AddBreakpointResult)
Note that the add_breakpoint_result should not be modified by the callback (the
implementation may internally reuse the same instance multiple times).
:return _AddBreakpointResult:
'''
assert original_filename.__class__ == str, 'Expected str, found: %s' % (original_filename.__class__,)
original_filename_normalized = pydevd_file_utils.normcase_from_client(original_filename)
pydev_log.debug('Request for breakpoint in: %s line: %s', original_filename, line)
original_line = line
# Parameters to reapply breakpoint.
api_add_breakpoint_params = (original_filename, breakpoint_type, breakpoint_id, line, condition, func_name,
expression, suspend_policy, hit_condition, is_logpoint)
translated_filename = self.filename_to_server(original_filename) # Apply user path mapping.
pydev_log.debug('Breakpoint (after path translation) in: %s line: %s', translated_filename, line)
func_name = self.to_str(func_name)
assert translated_filename.__class__ == str  # Only str is expected (py2 is no longer supported).
assert func_name.__class__ == str  # Only str is expected (py2 is no longer supported).
# Apply source mapping (i.e.: ipython).
source_mapped_filename, new_line, multi_mapping_applied = py_db.source_mapping.map_to_server(
translated_filename, line)
if multi_mapping_applied:
pydev_log.debug('Breakpoint (after source mapping) in: %s line: %s', source_mapped_filename, new_line)
# Note that source mapping is internal and does not change the resulting filename nor line
# (we want the outside world to see the line in the original file and not in the ipython
# cell, otherwise the editor wouldn't be correct as the returned line is the line to
# which the breakpoint will be moved in the editor).
result = self._AddBreakpointResult(breakpoint_id, original_filename, line, original_line)
# If a multi-mapping was applied, consider it the canonical / source mapped version (translated to ipython cell).
translated_absolute_filename = source_mapped_filename
canonical_normalized_filename = pydevd_file_utils.normcase(source_mapped_filename)
line = new_line
else:
translated_absolute_filename = pydevd_file_utils.absolute_path(translated_filename)
canonical_normalized_filename = pydevd_file_utils.canonical_normalized_path(translated_filename)
if adjust_line and not translated_absolute_filename.startswith('<'):
# Validate the breakpoint line and adjust its position if needed.
try:
lines = sorted(_get_code_lines(translated_absolute_filename))
except Exception:
pass
else:
if line not in lines:
# Adjust to the first preceding valid line.
idx = bisect.bisect_left(lines, line)
if idx > 0:
line = lines[idx - 1]
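# e.g. (hypothetical values): executable lines [2, 5, 9] and a request
# for line 7 -> bisect_left gives idx == 2, so the breakpoint moves
# back to line 5.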
result = self._AddBreakpointResult(breakpoint_id, original_filename, line, original_line)
py_db.api_received_breakpoints[(original_filename_normalized, breakpoint_id)] = (canonical_normalized_filename, api_add_breakpoint_params)
if not translated_absolute_filename.startswith('<'):
# Note: if a mapping pointed to a file starting with '<', don't validate.
if not pydevd_file_utils.exists(translated_absolute_filename):
result.error_code = self.ADD_BREAKPOINT_FILE_NOT_FOUND
return result
if (
py_db.is_files_filter_enabled and
not py_db.get_require_module_for_filters() and
py_db.apply_files_filter(self._DummyFrame(translated_absolute_filename), translated_absolute_filename, False)
):
# Note that if `get_require_module_for_filters()` returns True, we don't do this check.
# This is because we don't have the module name given a file at this point (in
# runtime it's gotten from the frame.f_globals).
# An option could be calculate it based on the filename and current sys.path,
# but on some occasions that may be wrong (for instance with `__main__` or if
# the user dynamically changes the PYTHONPATH).
# Note: depending on the use-case, filters may be changed, so, keep on going and add the
# breakpoint even with the error code.
result.error_code = self.ADD_BREAKPOINT_FILE_EXCLUDED_BY_FILTERS
if breakpoint_type == 'python-line':
added_breakpoint = LineBreakpoint(
breakpoint_id, line, condition, func_name, expression, suspend_policy, hit_condition=hit_condition, is_logpoint=is_logpoint)
file_to_line_to_breakpoints = py_db.breakpoints
file_to_id_to_breakpoint = py_db.file_to_id_to_line_breakpoint
supported_type = True
else:
add_plugin_breakpoint_result = None
plugin = py_db.get_plugin_lazy_init()
if plugin is not None:
add_plugin_breakpoint_result = plugin.add_breakpoint(
'add_line_breakpoint', py_db, breakpoint_type, canonical_normalized_filename,
breakpoint_id, line, condition, expression, func_name, hit_condition=hit_condition, is_logpoint=is_logpoint,
add_breakpoint_result=result, on_changed_breakpoint_state=on_changed_breakpoint_state)
if add_plugin_breakpoint_result is not None:
supported_type = True
added_breakpoint, file_to_line_to_breakpoints = add_plugin_breakpoint_result
file_to_id_to_breakpoint = py_db.file_to_id_to_plugin_breakpoint
else:
supported_type = False
if not supported_type:
raise NameError(breakpoint_type)
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.debug('Added breakpoint:%s - line:%s - func_name:%s\n', canonical_normalized_filename, line, func_name)
if canonical_normalized_filename in file_to_id_to_breakpoint:
id_to_pybreakpoint = file_to_id_to_breakpoint[canonical_normalized_filename]
else:
id_to_pybreakpoint = file_to_id_to_breakpoint[canonical_normalized_filename] = {}
id_to_pybreakpoint[breakpoint_id] = added_breakpoint
py_db.consolidate_breakpoints(canonical_normalized_filename, id_to_pybreakpoint, file_to_line_to_breakpoints)
if py_db.plugin is not None:
py_db.has_plugin_line_breaks = py_db.plugin.has_line_breaks()
py_db.plugin.after_breakpoints_consolidated(py_db, canonical_normalized_filename, id_to_pybreakpoint, file_to_line_to_breakpoints)
py_db.on_breakpoints_changed()
return result
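# Usage sketch (argument values are hypothetical, not part of the protocol):
#
#     result = api.add_breakpoint(
#         py_db, r'C:\project\app.py', 'python-line', breakpoint_id=3, line=12,
#         condition=None, func_name='None', expression=None, suspend_policy='NONE',
#         hit_condition=None, is_logpoint=False, adjust_line=True)
#     if result.error_code == PyDevdAPI.ADD_BREAKPOINT_FILE_NOT_FOUND:
#         ...  # Report back to the client (result.translated_line has the final line).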
def reapply_breakpoints(self, py_db):
'''
Reapplies all the received breakpoints as they were received by the API (so, new
translations are applied).
'''
pydev_log.debug('Reapplying breakpoints.')
values = list(py_db.api_received_breakpoints.values()) # Create a copy with items to reapply.
self.remove_all_breakpoints(py_db, '*')
for val in values:
_new_filename, api_add_breakpoint_params = val
self.add_breakpoint(py_db, *api_add_breakpoint_params)
def remove_all_breakpoints(self, py_db, received_filename):
'''
Removes all the breakpoints from a given file or from all files if received_filename == '*'.
:param str received_filename:
Note: must be sent as it was received in the protocol. It may be translated in this
function.
'''
assert received_filename.__class__ == str  # Only str is expected (py2 is no longer supported).
changed = False
lst = [
py_db.file_to_id_to_line_breakpoint,
py_db.file_to_id_to_plugin_breakpoint,
py_db.breakpoints
]
if hasattr(py_db, 'django_breakpoints'):
lst.append(py_db.django_breakpoints)
if hasattr(py_db, 'jinja2_breakpoints'):
lst.append(py_db.jinja2_breakpoints)
if received_filename == '*':
py_db.api_received_breakpoints.clear()
for file_to_id_to_breakpoint in lst:
if file_to_id_to_breakpoint:
file_to_id_to_breakpoint.clear()
changed = True
else:
received_filename_normalized = pydevd_file_utils.normcase_from_client(received_filename)
items = list(py_db.api_received_breakpoints.items()) # Create a copy to remove items.
translated_filenames = []
for key, val in items:
original_filename_normalized, _breakpoint_id = key
if original_filename_normalized == received_filename_normalized:
canonical_normalized_filename, _api_add_breakpoint_params = val
# Note: there can be actually 1:N mappings due to source mapping (i.e.: ipython).
translated_filenames.append(canonical_normalized_filename)
del py_db.api_received_breakpoints[key]
for canonical_normalized_filename in translated_filenames:
for file_to_id_to_breakpoint in lst:
if canonical_normalized_filename in file_to_id_to_breakpoint:
file_to_id_to_breakpoint.pop(canonical_normalized_filename, None)
changed = True
if changed:
py_db.on_breakpoints_changed(removed=True)
def remove_breakpoint(self, py_db, received_filename, breakpoint_type, breakpoint_id):
'''
:param str received_filename:
Note: must be sent as it was received in the protocol. It may be translated in this
function.
:param str breakpoint_type:
One of: 'python-line', 'django-line', 'jinja2-line'.
:param int breakpoint_id:
'''
received_filename_normalized = pydevd_file_utils.normcase_from_client(received_filename)
for key, val in list(py_db.api_received_breakpoints.items()):
original_filename_normalized, existing_breakpoint_id = key
_new_filename, _api_add_breakpoint_params = val
if received_filename_normalized == original_filename_normalized and existing_breakpoint_id == breakpoint_id:
del py_db.api_received_breakpoints[key]
break
else:
pydev_log.info(
'Did not find breakpoint to remove: %s (breakpoint id: %s)', received_filename, breakpoint_id)
file_to_id_to_breakpoint = None
received_filename = self.filename_to_server(received_filename)
canonical_normalized_filename = pydevd_file_utils.canonical_normalized_path(received_filename)
if breakpoint_type == 'python-line':
file_to_line_to_breakpoints = py_db.breakpoints
file_to_id_to_breakpoint = py_db.file_to_id_to_line_breakpoint
elif py_db.plugin is not None:
result = py_db.plugin.get_breakpoints(py_db, breakpoint_type)
if result is not None:
file_to_id_to_breakpoint = py_db.file_to_id_to_plugin_breakpoint
file_to_line_to_breakpoints = result
if file_to_id_to_breakpoint is None:
pydev_log.critical('Error removing breakpoint. Cannot handle breakpoint of type %s', breakpoint_type)
else:
try:
id_to_pybreakpoint = file_to_id_to_breakpoint.get(canonical_normalized_filename, {})
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
existing = id_to_pybreakpoint[breakpoint_id]
pydev_log.info('Removed breakpoint:%s - line:%s - func_name:%s (id: %s)\n',
canonical_normalized_filename, existing.line, existing.func_name, breakpoint_id)
del id_to_pybreakpoint[breakpoint_id]
py_db.consolidate_breakpoints(canonical_normalized_filename, id_to_pybreakpoint, file_to_line_to_breakpoints)
if py_db.plugin is not None:
py_db.has_plugin_line_breaks = py_db.plugin.has_line_breaks()
py_db.plugin.after_breakpoints_consolidated(py_db, canonical_normalized_filename, id_to_pybreakpoint, file_to_line_to_breakpoints)
except KeyError:
pydev_log.info("Error removing breakpoint: Breakpoint id not found: %s id: %s. Available ids: %s\n",
canonical_normalized_filename, breakpoint_id, list(id_to_pybreakpoint))
py_db.on_breakpoints_changed(removed=True)
def set_function_breakpoints(self, py_db, function_breakpoints):
function_breakpoint_name_to_breakpoint = {}
for function_breakpoint in function_breakpoints:
function_breakpoint_name_to_breakpoint[function_breakpoint.func_name] = function_breakpoint
py_db.function_breakpoint_name_to_breakpoint = function_breakpoint_name_to_breakpoint
py_db.on_breakpoints_changed()
def request_exec_or_evaluate(
self, py_db, seq, thread_id, frame_id, expression, is_exec, trim_if_too_big, attr_to_set_result):
py_db.post_method_as_internal_command(
thread_id, internal_evaluate_expression,
seq, thread_id, frame_id, expression, is_exec, trim_if_too_big, attr_to_set_result)
def request_exec_or_evaluate_json(
self, py_db, request, thread_id):
py_db.post_method_as_internal_command(
thread_id, internal_evaluate_expression_json, request, thread_id)
def request_set_expression_json(self, py_db, request, thread_id):
py_db.post_method_as_internal_command(
thread_id, internal_set_expression_json, request, thread_id)
def request_console_exec(self, py_db, seq, thread_id, frame_id, expression):
int_cmd = InternalConsoleExec(seq, thread_id, frame_id, expression)
py_db.post_internal_command(int_cmd, thread_id)
def request_load_source(self, py_db, seq, filename):
'''
:param str filename:
Note: must be sent as it was received in the protocol. It may be translated in this
function.
'''
try:
filename = self.filename_to_server(filename)
assert filename.__class__ == str  # Only str is expected (py2 is no longer supported).
with tokenize.open(filename) as stream:
source = stream.read()
cmd = py_db.cmd_factory.make_load_source_message(seq, source)
except:
cmd = py_db.cmd_factory.make_error_message(seq, get_exception_traceback_str())
py_db.writer.add_command(cmd)
def get_decompiled_source_from_frame_id(self, py_db, frame_id):
'''
:param py_db:
:param frame_id:
:throws Exception:
If unable to get the frame in the currently paused frames or if some error happened
when decompiling.
'''
variable = py_db.suspended_frames_manager.get_variable(int(frame_id))
frame = variable.value
# Check if it's in the linecache first.
lines = (linecache.getline(frame.f_code.co_filename, i) for i in itertools.count(1))
lines = itertools.takewhile(bool, lines) # empty lines are '\n', EOF is ''
source = ''.join(lines)
if not source:
source = code_to_bytecode_representation(frame.f_code)
return source
def request_load_source_from_frame_id(self, py_db, seq, frame_id):
try:
source = self.get_decompiled_source_from_frame_id(py_db, frame_id)
cmd = py_db.cmd_factory.make_load_source_from_frame_id_message(seq, source)
except:
cmd = py_db.cmd_factory.make_error_message(seq, get_exception_traceback_str())
py_db.writer.add_command(cmd)
def add_python_exception_breakpoint(
self,
py_db,
exception,
condition,
expression,
notify_on_handled_exceptions,
notify_on_unhandled_exceptions,
notify_on_user_unhandled_exceptions,
notify_on_first_raise_only,
ignore_libraries,
):
exception_breakpoint = py_db.add_break_on_exception(
exception,
condition=condition,
expression=expression,
notify_on_handled_exceptions=notify_on_handled_exceptions,
notify_on_unhandled_exceptions=notify_on_unhandled_exceptions,
notify_on_user_unhandled_exceptions=notify_on_user_unhandled_exceptions,
notify_on_first_raise_only=notify_on_first_raise_only,
ignore_libraries=ignore_libraries,
)
if exception_breakpoint is not None:
py_db.on_breakpoints_changed()
def add_plugins_exception_breakpoint(self, py_db, breakpoint_type, exception):
supported_type = False
plugin = py_db.get_plugin_lazy_init()
if plugin is not None:
supported_type = plugin.add_breakpoint('add_exception_breakpoint', py_db, breakpoint_type, exception)
if supported_type:
py_db.has_plugin_exception_breaks = py_db.plugin.has_exception_breaks()
py_db.on_breakpoints_changed()
else:
raise NameError(breakpoint_type)
def remove_python_exception_breakpoint(self, py_db, exception):
try:
cp = py_db.break_on_uncaught_exceptions.copy()
cp.pop(exception, None)
py_db.break_on_uncaught_exceptions = cp
cp = py_db.break_on_caught_exceptions.copy()
cp.pop(exception, None)
py_db.break_on_caught_exceptions = cp
cp = py_db.break_on_user_uncaught_exceptions.copy()
cp.pop(exception, None)
py_db.break_on_user_uncaught_exceptions = cp
except:
pydev_log.exception("Error while removing exception %s", sys.exc_info()[0])
py_db.on_breakpoints_changed(removed=True)
def remove_plugins_exception_breakpoint(self, py_db, exception_type, exception):
# I.e.: no need to initialize lazy (if we didn't have it in the first place, we can't remove
# anything from it anyways).
plugin = py_db.plugin
if plugin is None:
return
supported_type = plugin.remove_exception_breakpoint(py_db, exception_type, exception)
if supported_type:
py_db.has_plugin_exception_breaks = py_db.plugin.has_exception_breaks()
else:
pydev_log.info('No exception of type: %s was previously registered.', exception_type)
py_db.on_breakpoints_changed(removed=True)
def remove_all_exception_breakpoints(self, py_db):
py_db.break_on_uncaught_exceptions = {}
py_db.break_on_caught_exceptions = {}
py_db.break_on_user_uncaught_exceptions = {}
plugin = py_db.plugin
if plugin is not None:
plugin.remove_all_exception_breakpoints(py_db)
py_db.on_breakpoints_changed(removed=True)
def set_project_roots(self, py_db, project_roots):
'''
:param str project_roots:
'''
py_db.set_project_roots(project_roots)
def set_stepping_resumes_all_threads(self, py_db, stepping_resumes_all_threads):
py_db.stepping_resumes_all_threads = stepping_resumes_all_threads
# Add it to the namespace so that it's available as PyDevdAPI.ExcludeFilter
from _pydevd_bundle.pydevd_filtering import ExcludeFilter # noqa
def set_exclude_filters(self, py_db, exclude_filters):
'''
:param list(PyDevdAPI.ExcludeFilter) exclude_filters:
'''
py_db.set_exclude_filters(exclude_filters)
def set_use_libraries_filter(self, py_db, use_libraries_filter):
py_db.set_use_libraries_filter(use_libraries_filter)
def request_get_variable_json(self, py_db, request, thread_id):
'''
:param VariablesRequest request:
'''
py_db.post_method_as_internal_command(
thread_id, internal_get_variable_json, request)
def request_change_variable_json(self, py_db, request, thread_id):
'''
:param SetVariableRequest request:
'''
py_db.post_method_as_internal_command(
thread_id, internal_change_variable_json, request)
def set_dont_trace_start_end_patterns(self, py_db, start_patterns, end_patterns):
# Note: start/end patterns normalized internally.
start_patterns = tuple(pydevd_file_utils.normcase(x) for x in start_patterns)
end_patterns = tuple(pydevd_file_utils.normcase(x) for x in end_patterns)
# After it's set the first time, we can still change it, but we need to reset the
# related caches.
reset_caches = False
dont_trace_start_end_patterns_previously_set = \
py_db.dont_trace_external_files.__name__ == 'custom_dont_trace_external_files'
if not dont_trace_start_end_patterns_previously_set and not start_patterns and not end_patterns:
# If it wasn't set previously and start and end patterns are empty we don't need to do anything.
return
if not py_db.is_cache_file_type_empty():
# i.e.: custom function set in set_dont_trace_start_end_patterns.
if dont_trace_start_end_patterns_previously_set:
reset_caches = py_db.dont_trace_external_files.start_patterns != start_patterns or \
py_db.dont_trace_external_files.end_patterns != end_patterns
else:
reset_caches = True
def custom_dont_trace_external_files(abs_path):
normalized_abs_path = pydevd_file_utils.normcase(abs_path)
return normalized_abs_path.startswith(start_patterns) or normalized_abs_path.endswith(end_patterns)
custom_dont_trace_external_files.start_patterns = start_patterns
custom_dont_trace_external_files.end_patterns = end_patterns
py_db.dont_trace_external_files = custom_dont_trace_external_files
if reset_caches:
py_db.clear_dont_trace_start_end_patterns_caches()
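# The custom function above relies on str.startswith/str.endswith accepting a
# tuple of alternatives, e.g. (hypothetical patterns):
#
#     '/venv/lib/site.py'.endswith(('site.py', 'sitecustomize.py'))  # -> True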
def stop_on_entry(self):
main_thread = pydevd_utils.get_main_thread()
if main_thread is None:
pydev_log.critical('Could not find main thread while setting Stop on Entry.')
else:
info = set_additional_thread_info(main_thread)
info.pydev_original_step_cmd = CMD_STOP_ON_START
info.pydev_step_cmd = CMD_STEP_INTO_MY_CODE
def set_ignore_system_exit_codes(self, py_db, ignore_system_exit_codes):
py_db.set_ignore_system_exit_codes(ignore_system_exit_codes)
SourceMappingEntry = pydevd_source_mapping.SourceMappingEntry
def set_source_mapping(self, py_db, source_filename, mapping):
'''
:param str source_filename:
The filename for the source mapping (bytes on py2 and str on py3).
This filename will be made absolute in this function.
:param list(SourceMappingEntry) mapping:
A list with the source mapping entries to be applied to the given filename.
:return str:
An error message if it was not possible to set the mapping or an empty string if
everything is ok.
'''
source_filename = self.filename_to_server(source_filename)
absolute_source_filename = pydevd_file_utils.absolute_path(source_filename)
for map_entry in mapping:
map_entry.source_filename = absolute_source_filename
error_msg = py_db.source_mapping.set_source_mapping(absolute_source_filename, mapping)
if error_msg:
return error_msg
self.reapply_breakpoints(py_db)
return ''
def set_variable_presentation(self, py_db, variable_presentation):
assert isinstance(variable_presentation, self.VariablePresentation)
py_db.variable_presentation = variable_presentation
def get_ppid(self):
'''
Provides the parent pid (even for older versions of Python on Windows).
'''
ppid = None
try:
ppid = os.getppid()
except AttributeError:
pass
if ppid is None and IS_WINDOWS:
ppid = self._get_windows_ppid()
return ppid
def _get_windows_ppid(self):
this_pid = os.getpid()
for ppid, pid in _list_ppid_and_pid():
if pid == this_pid:
return ppid
return None
def _terminate_child_processes_windows(self, dont_terminate_child_pids):
this_pid = os.getpid()
for _ in range(50): # Try this at most 50 times before giving up.
# Note: we can't kill the process itself with taskkill, so, we
# list immediate children, kill that tree and then exit this process.
children_pids = []
for ppid, pid in _list_ppid_and_pid():
if ppid == this_pid:
if pid not in dont_terminate_child_pids:
children_pids.append(pid)
if not children_pids:
break
else:
for pid in children_pids:
self._call(
['taskkill', '/F', '/PID', str(pid), '/T'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
del children_pids[:]
def _terminate_child_processes_linux_and_mac(self, dont_terminate_child_pids):
this_pid = os.getpid()
def list_children_and_stop_forking(initial_pid, stop=True):
children_pids = []
if stop:
# Ask to stop forking (shouldn't be called for this process, only subprocesses).
self._call(
['kill', '-STOP', str(initial_pid)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
list_popen = self._popen(
['pgrep', '-P', str(initial_pid)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
if list_popen is not None:
stdout, _ = list_popen.communicate()
for line in stdout.splitlines():
line = line.decode('ascii').strip()
if line:
pid = str(line)
if pid in dont_terminate_child_pids:
continue
children_pids.append(pid)
# Recursively get children.
children_pids.extend(list_children_and_stop_forking(pid))
return children_pids
previously_found = set()
for _ in range(50): # Try this at most 50 times before giving up.
children_pids = list_children_and_stop_forking(this_pid, stop=False)
found_new = False
for pid in children_pids:
if pid not in previously_found:
found_new = True
previously_found.add(pid)
self._call(
['kill', '-KILL', str(pid)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
if not found_new:
break
def _popen(self, cmdline, **kwargs):
try:
return subprocess.Popen(cmdline, **kwargs)
except:
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
pydev_log.exception('Error running: %s' % (' '.join(cmdline)))
return None
def _call(self, cmdline, **kwargs):
try:
subprocess.check_call(cmdline, **kwargs)
except:
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
pydev_log.exception('Error running: %s' % (' '.join(cmdline)))
def set_terminate_child_processes(self, py_db, terminate_child_processes):
py_db.terminate_child_processes = terminate_child_processes
def terminate_process(self, py_db):
'''
Terminates the current process (and child processes if the option to also terminate
child processes is enabled).
'''
try:
if py_db.terminate_child_processes:
pydev_log.debug('Terminating child processes.')
if IS_WINDOWS:
self._terminate_child_processes_windows(py_db.dont_terminate_child_pids)
else:
self._terminate_child_processes_linux_and_mac(py_db.dont_terminate_child_pids)
finally:
pydev_log.debug('Exiting process (os._exit(0)).')
os._exit(0)
def _terminate_if_commands_processed(self, py_db):
py_db.dispose_and_kill_all_pydevd_threads()
self.terminate_process(py_db)
def request_terminate_process(self, py_db):
# We mark with a terminate_requested to avoid that paused threads start running
# (we should terminate as is without letting any paused thread run).
py_db.terminate_requested = True
run_as_pydevd_daemon_thread(py_db, self._terminate_if_commands_processed, py_db)
def setup_auto_reload_watcher(self, py_db, enable_auto_reload, watch_dirs, poll_target_time, exclude_patterns, include_patterns):
py_db.setup_auto_reload_watcher(enable_auto_reload, watch_dirs, poll_target_time, exclude_patterns, include_patterns)
def _list_ppid_and_pid():
_TH32CS_SNAPPROCESS = 0x00000002
class PROCESSENTRY32(ctypes.Structure):
_fields_ = [("dwSize", ctypes.c_uint32),
("cntUsage", ctypes.c_uint32),
("th32ProcessID", ctypes.c_uint32),
("th32DefaultHeapID", ctypes.c_size_t),
("th32ModuleID", ctypes.c_uint32),
("cntThreads", ctypes.c_uint32),
("th32ParentProcessID", ctypes.c_uint32),
("pcPriClassBase", ctypes.c_long),
("dwFlags", ctypes.c_uint32),
("szExeFile", ctypes.c_char * 260)]
kernel32 = ctypes.windll.kernel32
snapshot = kernel32.CreateToolhelp32Snapshot(_TH32CS_SNAPPROCESS, 0)
ppid_and_pids = []
try:
process_entry = PROCESSENTRY32()
process_entry.dwSize = ctypes.sizeof(PROCESSENTRY32)
if not kernel32.Process32First(ctypes.c_void_p(snapshot), ctypes.byref(process_entry)):
pydev_log.critical('Process32First failed (getting process from CreateToolhelp32Snapshot).')
else:
while True:
ppid_and_pids.append((process_entry.th32ParentProcessID, process_entry.th32ProcessID))
if not kernel32.Process32Next(ctypes.c_void_p(snapshot), ctypes.byref(process_entry)):
break
finally:
kernel32.CloseHandle(snapshot)
return ppid_and_pids
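# Illustrative sketch: the (ppid, pid) pairs can be turned into a child lookup
# (hypothetical helper, not used by pydevd itself):
#
#     def _children_of(target_pid):
#         return [pid for ppid, pid in _list_ppid_and_pid() if ppid == target_pid]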
# File: omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_collect_bytecode_info.py
import dis
import inspect
import sys
from collections import namedtuple
from _pydev_bundle import pydev_log
from opcode import (EXTENDED_ARG, HAVE_ARGUMENT, cmp_op, hascompare, hasconst,
hasfree, hasjrel, haslocal, hasname, opname)
from io import StringIO
class TryExceptInfo(object):
def __init__(self, try_line, ignore=False):
'''
:param try_line:
:param ignore:
Usually we should ignore any block that's not a try..except
(this can happen for finally blocks, with statements, etc, for
which we create temporary entries).
'''
self.try_line = try_line
self.ignore = ignore
self.except_line = -1
self.except_end_line = -1
self.raise_lines_in_except = []
# Note: these may not be available if generated from source instead of bytecode.
self.except_bytecode_offset = -1
self.except_end_bytecode_offset = -1
def is_line_in_try_block(self, line):
return self.try_line <= line < self.except_line
def is_line_in_except_block(self, line):
return self.except_line <= line <= self.except_end_line
def __str__(self):
lst = [
'{try:',
str(self.try_line),
' except ',
str(self.except_line),
' end block ',
str(self.except_end_line),
]
if self.raise_lines_in_except:
lst.append(' raises: %s' % (', '.join(str(x) for x in self.raise_lines_in_except),))
lst.append('}')
return ''.join(lst)
__repr__ = __str__
class ReturnInfo(object):
def __init__(self, return_line):
self.return_line = return_line
def __str__(self):
return '{return: %s}' % (self.return_line,)
__repr__ = __str__
def _get_line(op_offset_to_line, op_offset, firstlineno, search=False):
op_offset_original = op_offset
while op_offset >= 0:
ret = op_offset_to_line.get(op_offset)
if ret is not None:
return ret - firstlineno
if not search:
return ret
else:
op_offset -= 1
raise AssertionError('Unable to find line for offset: %s. Info: %s' % (
op_offset_original, op_offset_to_line))
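# e.g. (hypothetical values): with op_offset_to_line == {0: 10, 6: 12} and
# firstlineno == 10, _get_line(..., op_offset=6) -> 2; with search=True,
# op_offset=8 also -> 2 (it walks back to the closest preceding offset that
# starts a line).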
def debug(s):
pass
_Instruction = namedtuple('_Instruction', 'opname, opcode, starts_line, argval, is_jump_target, offset, argrepr')
def _iter_as_bytecode_as_instructions_py2(co):
code = co.co_code
op_offset_to_line = dict(dis.findlinestarts(co))
labels = set(dis.findlabels(code))
bytecode_len = len(code)
i = 0
extended_arg = 0
free = None
op_to_name = opname
while i < bytecode_len:
c = code[i]
op = ord(c)
is_jump_target = i in labels
curr_op_name = op_to_name[op]
initial_bytecode_offset = i
i = i + 1
if op < HAVE_ARGUMENT:
yield _Instruction(curr_op_name, op, _get_line(op_offset_to_line, initial_bytecode_offset, 0), None, is_jump_target, initial_bytecode_offset, '')
else:
oparg = ord(code[i]) + ord(code[i + 1]) * 256 + extended_arg
extended_arg = 0
i = i + 2
if op == EXTENDED_ARG:
extended_arg = oparg * 65536
if op in hasconst:
yield _Instruction(curr_op_name, op, _get_line(op_offset_to_line, initial_bytecode_offset, 0), co.co_consts[oparg], is_jump_target, initial_bytecode_offset, repr(co.co_consts[oparg]))
elif op in hasname:
yield _Instruction(curr_op_name, op, _get_line(op_offset_to_line, initial_bytecode_offset, 0), co.co_names[oparg], is_jump_target, initial_bytecode_offset, str(co.co_names[oparg]))
elif op in hasjrel:
argval = i + oparg
yield _Instruction(curr_op_name, op, _get_line(op_offset_to_line, initial_bytecode_offset, 0), argval, is_jump_target, initial_bytecode_offset, "to " + repr(argval))
elif op in haslocal:
yield _Instruction(curr_op_name, op, _get_line(op_offset_to_line, initial_bytecode_offset, 0), co.co_varnames[oparg], is_jump_target, initial_bytecode_offset, str(co.co_varnames[oparg]))
elif op in hascompare:
yield _Instruction(curr_op_name, op, _get_line(op_offset_to_line, initial_bytecode_offset, 0), cmp_op[oparg], is_jump_target, initial_bytecode_offset, cmp_op[oparg])
elif op in hasfree:
if free is None:
free = co.co_cellvars + co.co_freevars
yield _Instruction(curr_op_name, op, _get_line(op_offset_to_line, initial_bytecode_offset, 0), free[oparg], is_jump_target, initial_bytecode_offset, str(free[oparg]))
else:
yield _Instruction(curr_op_name, op, _get_line(op_offset_to_line, initial_bytecode_offset, 0), oparg, is_jump_target, initial_bytecode_offset, str(oparg))
def iter_instructions(co):
if sys.version_info[0] < 3:
iter_in = _iter_as_bytecode_as_instructions_py2(co)
else:
iter_in = dis.Bytecode(co)
iter_in = list(iter_in)
bytecode_to_instruction = {}
for instruction in iter_in:
bytecode_to_instruction[instruction.offset] = instruction
if iter_in:
for instruction in iter_in:
yield instruction
def collect_return_info(co, use_func_first_line=False):
if not hasattr(co, 'co_lnotab'):
return []
if use_func_first_line:
firstlineno = co.co_firstlineno
else:
firstlineno = 0
lst = []
op_offset_to_line = dict(dis.findlinestarts(co))
for instruction in iter_instructions(co):
curr_op_name = instruction.opname
if curr_op_name == 'RETURN_VALUE':
lst.append(ReturnInfo(_get_line(op_offset_to_line, instruction.offset, firstlineno, search=True)))
return lst
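# Usage sketch (illustrative; exact opcodes vary across Python versions):
#
#     def _sample():
#         return 1
#
#     collect_return_info(_sample.__code__, use_func_first_line=True)
#     # -> [{return: 1}] (one ReturnInfo per RETURN_VALUE, with the line
#     # relative to the function's first line)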
if sys.version_info[:2] <= (3, 9):
class _TargetInfo(object):
def __init__(self, except_end_instruction, jump_if_not_exc_instruction=None):
self.except_end_instruction = except_end_instruction
self.jump_if_not_exc_instruction = jump_if_not_exc_instruction
def __str__(self):
msg = ['_TargetInfo(']
msg.append(self.except_end_instruction.opname)
if self.jump_if_not_exc_instruction:
msg.append(' - ')
msg.append(self.jump_if_not_exc_instruction.opname)
msg.append('(')
msg.append(str(self.jump_if_not_exc_instruction.argval))
msg.append(')')
msg.append(')')
return ''.join(msg)
def _get_except_target_info(instructions, exception_end_instruction_index, offset_to_instruction_idx):
next_3 = [j_instruction.opname for j_instruction in instructions[exception_end_instruction_index:exception_end_instruction_index + 3]]
# print('next_3:', [(j_instruction.opname, j_instruction.argval) for j_instruction in instructions[exception_end_instruction_index:exception_end_instruction_index + 3]])
if next_3 == ['POP_TOP', 'POP_TOP', 'POP_TOP']: # try..except without checking exception.
try:
jump_instruction = instructions[exception_end_instruction_index - 1]
if jump_instruction.opname not in ('JUMP_FORWARD', 'JUMP_ABSOLUTE'):
return None
except IndexError:
return None  # No previous instruction to inspect (avoids an unbound jump_instruction below).
if jump_instruction.opname == 'JUMP_ABSOLUTE':
# On latest versions of Python 3 the interpreter has a go-backwards step,
# used to show the initial line of a for/while, etc (which is this
# JUMP_ABSOLUTE)... we're not really interested in it, but rather on where
# it points to.
except_end_instruction = instructions[offset_to_instruction_idx[jump_instruction.argval]]
idx = offset_to_instruction_idx[except_end_instruction.argval]
# Search for the POP_EXCEPT which should be at the end of the block.
for pop_except_instruction in reversed(instructions[:idx]):
if pop_except_instruction.opname == 'POP_EXCEPT':
except_end_instruction = pop_except_instruction
return _TargetInfo(except_end_instruction)
else:
return None # i.e.: Continue outer loop
else:
# JUMP_FORWARD
i = offset_to_instruction_idx[jump_instruction.argval]
try:
# i.e.: the jump is to the instruction after the block finishes (so, we need to
# get the previous instruction as that should be the place where the exception
# block finishes).
except_end_instruction = instructions[i - 1]
except:
pydev_log.critical('Error when computing try..except block end.')
return None
return _TargetInfo(except_end_instruction)
elif next_3 and next_3[0] == 'DUP_TOP':  # try..except SomeException (i.e.: the exception type is checked).
iter_in = instructions[exception_end_instruction_index + 1:]
for j, jump_if_not_exc_instruction in enumerate(iter_in):
if jump_if_not_exc_instruction.opname == 'JUMP_IF_NOT_EXC_MATCH':
# Python 3.9
except_end_instruction = instructions[offset_to_instruction_idx[jump_if_not_exc_instruction.argval]]
return _TargetInfo(except_end_instruction, jump_if_not_exc_instruction)
elif jump_if_not_exc_instruction.opname == 'COMPARE_OP' and jump_if_not_exc_instruction.argval == 'exception match':
# Python 3.8 and before
try:
next_instruction = iter_in[j + 1]
except:
continue
if next_instruction.opname == 'POP_JUMP_IF_FALSE':
except_end_instruction = instructions[offset_to_instruction_idx[next_instruction.argval]]
return _TargetInfo(except_end_instruction, next_instruction)
else:
return None # i.e.: Continue outer loop
else:
# i.e.: we're not interested in try..finally statements, only try..except.
return None
def collect_try_except_info(co, use_func_first_line=False):
# We no longer have 'END_FINALLY', so, we need to do things differently in Python 3.9
if not hasattr(co, 'co_lnotab'):
return []
if use_func_first_line:
firstlineno = co.co_firstlineno
else:
firstlineno = 0
try_except_info_lst = []
op_offset_to_line = dict(dis.findlinestarts(co))
offset_to_instruction_idx = {}
instructions = list(iter_instructions(co))
for i, instruction in enumerate(instructions):
offset_to_instruction_idx[instruction.offset] = i
for i, instruction in enumerate(instructions):
curr_op_name = instruction.opname
if curr_op_name in ('SETUP_FINALLY', 'SETUP_EXCEPT'): # SETUP_EXCEPT before Python 3.8, SETUP_FINALLY Python 3.8 onwards.
exception_end_instruction_index = offset_to_instruction_idx[instruction.argval]
jump_instruction = instructions[exception_end_instruction_index - 1]
if jump_instruction.opname not in ('JUMP_FORWARD', 'JUMP_ABSOLUTE'):
continue
except_end_instruction = None
indexes_checked = set()
indexes_checked.add(exception_end_instruction_index)
target_info = _get_except_target_info(instructions, exception_end_instruction_index, offset_to_instruction_idx)
while target_info is not None:
# Handle a try..except..except..except.
jump_instruction = target_info.jump_if_not_exc_instruction
except_end_instruction = target_info.except_end_instruction
if jump_instruction is not None:
check_index = offset_to_instruction_idx[jump_instruction.argval]
if check_index in indexes_checked:
break
indexes_checked.add(check_index)
target_info = _get_except_target_info(instructions, check_index, offset_to_instruction_idx)
else:
break
if except_end_instruction is not None:
try_except_info = TryExceptInfo(
_get_line(op_offset_to_line, instruction.offset, firstlineno, search=True),
ignore=False
)
try_except_info.except_bytecode_offset = instruction.argval
try_except_info.except_line = _get_line(
op_offset_to_line,
try_except_info.except_bytecode_offset,
firstlineno,
search=True
)
try_except_info.except_end_bytecode_offset = except_end_instruction.offset
try_except_info.except_end_line = _get_line(op_offset_to_line, except_end_instruction.offset, firstlineno, search=True)
try_except_info_lst.append(try_except_info)
for raise_instruction in instructions[i:offset_to_instruction_idx[try_except_info.except_end_bytecode_offset]]:
if raise_instruction.opname == 'RAISE_VARARGS':
if raise_instruction.argval == 0:
try_except_info.raise_lines_in_except.append(
_get_line(op_offset_to_line, raise_instruction.offset, firstlineno, search=True))
return try_except_info_lst
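# Illustrative result on Python <= 3.9 (lines relative to the function):
#
#     def _sample():
#         try:
#             x = 1
#         except Exception:
#             raise
#
#     collect_try_except_info(_sample.__code__, use_func_first_line=True)
#     # -> [{try: 1 except 3 end block 4 raises: 4}]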
elif sys.version_info[:2] == (3, 10):
class _TargetInfo(object):
def __init__(self, except_end_instruction, jump_if_not_exc_instruction=None):
self.except_end_instruction = except_end_instruction
self.jump_if_not_exc_instruction = jump_if_not_exc_instruction
def __str__(self):
msg = ['_TargetInfo(']
msg.append(self.except_end_instruction.opname)
if self.jump_if_not_exc_instruction:
msg.append(' - ')
msg.append(self.jump_if_not_exc_instruction.opname)
msg.append('(')
msg.append(str(self.jump_if_not_exc_instruction.argval))
msg.append(')')
msg.append(')')
return ''.join(msg)
def _get_except_target_info(instructions, exception_end_instruction_index, offset_to_instruction_idx):
next_3 = [j_instruction.opname for j_instruction in instructions[exception_end_instruction_index:exception_end_instruction_index + 3]]
# print('next_3:', [(j_instruction.opname, j_instruction.argval) for j_instruction in instructions[exception_end_instruction_index:exception_end_instruction_index + 3]])
if next_3 == ['POP_TOP', 'POP_TOP', 'POP_TOP']: # try..except without checking exception.
# Previously there was a jump which was able to point where the exception would end. This
# is no longer true, now a bare except doesn't really have any indication in the bytecode
# where the end would be expected if the exception wasn't raised, so, we just blindly
# search for a POP_EXCEPT from the current position.
for pop_except_instruction in instructions[exception_end_instruction_index + 3:]:
if pop_except_instruction.opname == 'POP_EXCEPT':
except_end_instruction = pop_except_instruction
return _TargetInfo(except_end_instruction)
elif next_3 and next_3[0] == 'DUP_TOP':  # try..except SomeException (i.e.: the exception type is checked).
iter_in = instructions[exception_end_instruction_index + 1:]
for jump_if_not_exc_instruction in iter_in:
if jump_if_not_exc_instruction.opname == 'JUMP_IF_NOT_EXC_MATCH':
# Python 3.9
except_end_instruction = instructions[offset_to_instruction_idx[jump_if_not_exc_instruction.argval]]
return _TargetInfo(except_end_instruction, jump_if_not_exc_instruction)
else:
return None # i.e.: Continue outer loop
else:
# i.e.: we're not interested in try..finally statements, only try..except.
return None
def collect_try_except_info(co, use_func_first_line=False):
# We no longer have 'END_FINALLY', so, we need to do things differently in Python 3.9
if not hasattr(co, 'co_lnotab'):
return []
if use_func_first_line:
firstlineno = co.co_firstlineno
else:
firstlineno = 0
try_except_info_lst = []
op_offset_to_line = dict(dis.findlinestarts(co))
offset_to_instruction_idx = {}
instructions = list(iter_instructions(co))
for i, instruction in enumerate(instructions):
offset_to_instruction_idx[instruction.offset] = i
for i, instruction in enumerate(instructions):
curr_op_name = instruction.opname
if curr_op_name == 'SETUP_FINALLY':
exception_end_instruction_index = offset_to_instruction_idx[instruction.argval]
jump_instruction = instructions[exception_end_instruction_index]
if jump_instruction.opname != 'DUP_TOP':
continue
except_end_instruction = None
indexes_checked = set()
indexes_checked.add(exception_end_instruction_index)
target_info = _get_except_target_info(instructions, exception_end_instruction_index, offset_to_instruction_idx)
while target_info is not None:
# Handle a try..except..except..except.
jump_instruction = target_info.jump_if_not_exc_instruction
except_end_instruction = target_info.except_end_instruction
if jump_instruction is not None:
check_index = offset_to_instruction_idx[jump_instruction.argval]
if check_index in indexes_checked:
break
indexes_checked.add(check_index)
target_info = _get_except_target_info(instructions, check_index, offset_to_instruction_idx)
else:
break
if except_end_instruction is not None:
try_except_info = TryExceptInfo(
_get_line(op_offset_to_line, instruction.offset, firstlineno, search=True),
ignore=False
)
try_except_info.except_bytecode_offset = instruction.argval
try_except_info.except_line = _get_line(
op_offset_to_line,
try_except_info.except_bytecode_offset,
firstlineno,
search=True
)
try_except_info.except_end_bytecode_offset = except_end_instruction.offset
# On Python 3.10 the final line of the except end isn't really correct, rather,
# it's engineered to be the same line of the except and not the end line of the
# block, so, the approach taken is to search for the biggest line between the
# except and the end instruction.
except_end_line = -1
start_i = offset_to_instruction_idx[try_except_info.except_bytecode_offset]
end_i = offset_to_instruction_idx[except_end_instruction.offset]
for inner_instruction in instructions[start_i: end_i + 1]:  # Don't shadow the outer `instruction`.
found_at_line = op_offset_to_line.get(inner_instruction.offset)
if found_at_line is not None and found_at_line > except_end_line:
except_end_line = found_at_line
try_except_info.except_end_line = except_end_line - firstlineno
try_except_info_lst.append(try_except_info)
for raise_instruction in instructions[i:offset_to_instruction_idx[try_except_info.except_end_bytecode_offset]]:
if raise_instruction.opname == 'RAISE_VARARGS':
if raise_instruction.argval == 0:
try_except_info.raise_lines_in_except.append(
_get_line(op_offset_to_line, raise_instruction.offset, firstlineno, search=True))
return try_except_info_lst
elif sys.version_info[:2] >= (3, 11):
def collect_try_except_info(co, use_func_first_line=False):
'''
Note: if the filename is available and we can get the source,
`collect_try_except_info_from_source` is preferred (this is kept as
a fallback for cases where sources aren't available).
'''
return []
import ast as ast_module
class _Visitor(ast_module.NodeVisitor):
def __init__(self):
self.try_except_infos = []
self._stack = []
self._in_except_stack = []
self.max_line = -1
def generic_visit(self, node):
if hasattr(node, 'lineno'):
if node.lineno > self.max_line:
self.max_line = node.lineno
return ast_module.NodeVisitor.generic_visit(self, node)
def visit_Try(self, node):
info = TryExceptInfo(node.lineno, ignore=True)
self._stack.append(info)
self.generic_visit(node)
assert info is self._stack.pop()
if not info.ignore:
self.try_except_infos.insert(0, info)
if sys.version_info[0] < 3:
visit_TryExcept = visit_Try
def visit_ExceptHandler(self, node):
info = self._stack[-1]
info.ignore = False
if info.except_line == -1:
info.except_line = node.lineno
self._in_except_stack.append(info)
self.generic_visit(node)
if hasattr(node, 'end_lineno'):
info.except_end_line = node.end_lineno
else:
info.except_end_line = self.max_line
self._in_except_stack.pop()
if sys.version_info[0] >= 3:
def visit_Raise(self, node):
for info in self._in_except_stack:
if node.exc is None:
info.raise_lines_in_except.append(node.lineno)
self.generic_visit(node)
else:
def visit_Raise(self, node):
for info in self._in_except_stack:
if node.type is None and node.tback is None:
info.raise_lines_in_except.append(node.lineno)
self.generic_visit(node)
def collect_try_except_info_from_source(filename):
with open(filename, 'rb') as stream:
contents = stream.read()
return collect_try_except_info_from_contents(contents, filename)
def collect_try_except_info_from_contents(contents, filename='<unknown>'):
ast = ast_module.parse(contents, filename)
visitor = _Visitor()
visitor.visit(ast)
return visitor.try_except_infos
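# Usage sketch (illustrative): the AST-based variant works from source text, so
# it also covers Python 3.11+, where the bytecode-based collection returns []:
#
#     contents = 'try:\n    x = 1\nexcept Exception:\n    raise\n'
#     collect_try_except_info_from_contents(contents)
#     # -> [{try: 1 except 3 end block 4 raises: 4}]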
RESTART_FROM_LOOKAHEAD = object()
SEPARATOR = object()
class _MsgPart(object):
def __init__(self, line, tok):
assert line >= 0
self.line = line
self.tok = tok
@classmethod
def add_to_line_to_contents(cls, obj, line_to_contents, line=None):
if isinstance(obj, (list, tuple)):
for o in obj:
cls.add_to_line_to_contents(o, line_to_contents, line=line)
return
if isinstance(obj, str):
assert line is not None
line = int(line)
lst = line_to_contents.setdefault(line, [])
lst.append(obj)
return
if isinstance(obj, _MsgPart):
if isinstance(obj.tok, (list, tuple)):
cls.add_to_line_to_contents(obj.tok, line_to_contents, line=obj.line)
return
if isinstance(obj.tok, str):
lst = line_to_contents.setdefault(obj.line, [])
lst.append(obj.tok)
return
raise AssertionError("Unhandled: %" % (obj,))
class _Disassembler(object):
def __init__(self, co, firstlineno, level=0):
self.co = co
self.firstlineno = firstlineno
self.level = level
self.instructions = list(iter_instructions(co))
op_offset_to_line = self.op_offset_to_line = dict(dis.findlinestarts(co))
# Update offsets so that all offsets have the line index (and update it based on
# the passed firstlineno).
line_index = co.co_firstlineno - firstlineno
for instruction in self.instructions:
new_line_index = op_offset_to_line.get(instruction.offset)
if new_line_index is not None:
line_index = new_line_index - firstlineno
op_offset_to_line[instruction.offset] = line_index
else:
op_offset_to_line[instruction.offset] = line_index
BIG_LINE_INT = 9999999
SMALL_LINE_INT = -1
def min_line(self, *args):
m = self.BIG_LINE_INT
for arg in args:
if isinstance(arg, (list, tuple)):
m = min(m, self.min_line(*arg))
elif isinstance(arg, _MsgPart):
m = min(m, arg.line)
elif hasattr(arg, 'offset'):
m = min(m, self.op_offset_to_line[arg.offset])
return m
def max_line(self, *args):
m = self.SMALL_LINE_INT
for arg in args:
if isinstance(arg, (list, tuple)):
m = max(m, self.max_line(*arg))
elif isinstance(arg, _MsgPart):
m = max(m, arg.line)
elif hasattr(arg, 'offset'):
m = max(m, self.op_offset_to_line[arg.offset])
return m
def _lookahead(self):
'''
This handles and converts some common constructs from bytecode to actual source code.
It may change the list of instructions.
'''
msg = self._create_msg_part
found = []
fullrepr = None
# Collect all the load instructions
for next_instruction in self.instructions:
if next_instruction.opname in ('LOAD_GLOBAL', 'LOAD_FAST', 'LOAD_CONST', 'LOAD_NAME'):
found.append(next_instruction)
else:
break
if not found:
return None
if next_instruction.opname == 'LOAD_ATTR':
prev_instruction = found[-1]
# Remove the current LOAD_ATTR
assert self.instructions.pop(len(found)) is next_instruction
# Add the LOAD_ATTR to the previous LOAD
self.instructions[len(found) - 1] = _Instruction(
prev_instruction.opname,
prev_instruction.opcode,
prev_instruction.starts_line,
prev_instruction.argval,
False, # prev_instruction.is_jump_target,
prev_instruction.offset,
(
msg(prev_instruction),
msg(prev_instruction, '.'),
msg(next_instruction)
),
)
return RESTART_FROM_LOOKAHEAD
if next_instruction.opname in ('CALL_FUNCTION', 'PRECALL'):
if len(found) == next_instruction.argval + 1:
force_restart = False
delta = 0
else:
force_restart = True
if len(found) > next_instruction.argval + 1:
delta = len(found) - (next_instruction.argval + 1)
else:
return None # This is odd
del_upto = delta + next_instruction.argval + 2 # +2 = NAME / CALL_FUNCTION
if next_instruction.opname == 'PRECALL':
del_upto += 1 # Also remove the CALL right after the PRECALL.
del self.instructions[delta:del_upto]
found = iter(found[delta:])
call_func = next(found)
args = list(found)
fullrepr = [
msg(call_func),
msg(call_func, '('),
]
prev = call_func
for i, arg in enumerate(args):
if i > 0:
fullrepr.append(msg(prev, ', '))
prev = arg
fullrepr.append(msg(arg))
fullrepr.append(msg(prev, ')'))
if force_restart:
self.instructions.insert(delta, _Instruction(
call_func.opname,
call_func.opcode,
call_func.starts_line,
call_func.argval,
False, # call_func.is_jump_target,
call_func.offset,
tuple(fullrepr),
))
return RESTART_FROM_LOOKAHEAD
elif next_instruction.opname == 'BUILD_TUPLE':
if len(found) == next_instruction.argval:
force_restart = False
delta = 0
else:
force_restart = True
if len(found) > next_instruction.argval:
delta = len(found) - (next_instruction.argval)
else:
return None # This is odd
del self.instructions[delta:delta + next_instruction.argval + 1] # +1 = BUILD_TUPLE
found = iter(found[delta:])
args = [instruction for instruction in found]
if args:
first_instruction = args[0]
else:
first_instruction = next_instruction
prev = first_instruction
fullrepr = []
fullrepr.append(msg(prev, '('))
for i, arg in enumerate(args):
if i > 0:
fullrepr.append(msg(prev, ', '))
prev = arg
fullrepr.append(msg(arg))
fullrepr.append(msg(prev, ')'))
if force_restart:
self.instructions.insert(delta, _Instruction(
first_instruction.opname,
first_instruction.opcode,
first_instruction.starts_line,
first_instruction.argval,
False, # first_instruction.is_jump_target,
first_instruction.offset,
tuple(fullrepr),
))
return RESTART_FROM_LOOKAHEAD
if fullrepr is not None and self.instructions:
if self.instructions[0].opname == 'POP_TOP':
self.instructions.pop(0)
if self.instructions[0].opname in ('STORE_FAST', 'STORE_NAME'):
next_instruction = self.instructions.pop(0)
return msg(next_instruction), msg(next_instruction, ' = '), fullrepr
if self.instructions[0].opname == 'RETURN_VALUE':
next_instruction = self.instructions.pop(0)
return msg(next_instruction, 'return ', line=self.min_line(next_instruction, fullrepr)), fullrepr
return fullrepr
def _decorate_jump_target(self, instruction, instruction_repr):
if instruction.is_jump_target:
return ('|', str(instruction.offset), '|', instruction_repr)
return instruction_repr
def _create_msg_part(self, instruction, tok=None, line=None):
dec = self._decorate_jump_target
if line is None or line in (self.BIG_LINE_INT, self.SMALL_LINE_INT):
line = self.op_offset_to_line[instruction.offset]
argrepr = instruction.argrepr
if isinstance(argrepr, str) and argrepr.startswith('NULL + '):
argrepr = argrepr[7:]
return _MsgPart(
line, tok if tok is not None else dec(instruction, argrepr))
def _next_instruction_to_str(self, line_to_contents):
# indent = ''
# if self.level > 0:
# indent += ' ' * self.level
# print(indent, 'handle', self.instructions[0])
if self.instructions:
ret = self._lookahead()
if ret:
return ret
msg = self._create_msg_part
instruction = self.instructions.pop(0)
if instruction.opname == 'RESUME':
return None
if instruction.opname in ('LOAD_GLOBAL', 'LOAD_FAST', 'LOAD_CONST', 'LOAD_NAME'):
next_instruction = self.instructions[0]
if next_instruction.opname in ('STORE_FAST', 'STORE_NAME'):
self.instructions.pop(0)
return (
msg(next_instruction),
msg(next_instruction, ' = '),
msg(instruction))
if next_instruction.opname == 'RETURN_VALUE':
self.instructions.pop(0)
return (msg(instruction, 'return ', line=self.min_line(instruction)), msg(instruction))
if next_instruction.opname == 'RAISE_VARARGS' and next_instruction.argval == 1:
self.instructions.pop(0)
return (msg(instruction, 'raise ', line=self.min_line(instruction)), msg(instruction))
if instruction.opname == 'LOAD_CONST':
if inspect.iscode(instruction.argval):
code_line_to_contents = _Disassembler(
instruction.argval, self.firstlineno, self.level + 1
).build_line_to_contents()
for contents in code_line_to_contents.values():
contents.insert(0, ' ')
for line, contents in code_line_to_contents.items():
line_to_contents.setdefault(line, []).extend(contents)
return msg(instruction, 'LOAD_CONST(code)')
if instruction.opname == 'RAISE_VARARGS':
if instruction.argval == 0:
return msg(instruction, 'raise')
if instruction.opname == 'SETUP_FINALLY':
return msg(instruction, ('try(', instruction.argrepr, '):'))
if instruction.argrepr:
return msg(instruction, (instruction.opname, '(', instruction.argrepr, ')'))
if instruction.argval:
return msg(instruction, '%s{%s}' % (instruction.opname, instruction.argval,))
return msg(instruction, instruction.opname)
def build_line_to_contents(self):
# print('----')
# for instruction in self.instructions:
# print(instruction)
# print('----\n\n')
line_to_contents = {}
instructions = self.instructions
while instructions:
s = self._next_instruction_to_str(line_to_contents)
if s is RESTART_FROM_LOOKAHEAD:
continue
if s is None:
continue
_MsgPart.add_to_line_to_contents(s, line_to_contents)
m = self.max_line(s)
if m != self.SMALL_LINE_INT:
line_to_contents.setdefault(m, []).append(SEPARATOR)
return line_to_contents
def disassemble(self):
line_to_contents = self.build_line_to_contents()
stream = StringIO()
last_line = 0
show_lines = False
for line, contents in sorted(line_to_contents.items()):
while last_line < line - 1:
if show_lines:
stream.write('%s.\n' % (last_line + 1,))
else:
stream.write('\n')
last_line += 1
if show_lines:
stream.write('%s. ' % (line,))
for i, content in enumerate(contents):
if content == SEPARATOR:
if i != len(contents) - 1:
stream.write(', ')
else:
stream.write(content)
stream.write('\n')
last_line = line
return stream.getvalue()
def code_to_bytecode_representation(co, use_func_first_line=False):
'''
A simple disassemble of bytecode.
It does not attempt to provide the full Python source code, rather, it provides a low-level
representation of the bytecode, respecting the lines (so, its target is making the bytecode
easier to grasp and not providing the original source code).
Note that it does show jump locations/targets and converts some common bytecode constructs to
Python code to make it a bit easier to understand.
'''
# Reference for bytecodes:
# https://docs.python.org/3/library/dis.html
if use_func_first_line:
firstlineno = co.co_firstlineno
else:
firstlineno = 0
return _Disassembler(co, firstlineno).disassemble()
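# Usage sketch (illustrative):
#
#     def _sample(a):
#         return a + 1
#
#     print(code_to_bytecode_representation(_sample.__code__))
#     # Prints a line-oriented, low-level rendering of the bytecode (the exact
#     # output depends on the Python version).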
# File: omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_frame_utils.py
from _pydevd_bundle.pydevd_constants import EXCEPTION_TYPE_USER_UNHANDLED, EXCEPTION_TYPE_UNHANDLED
from _pydev_bundle import pydev_log
class Frame(object):
def __init__(
self,
f_back,
f_fileno,
f_code,
f_locals,
f_globals=None,
f_trace=None):
self.f_back = f_back
self.f_lineno = f_fileno
self.f_code = f_code
self.f_locals = f_locals
self.f_globals = f_globals
self.f_trace = f_trace
if self.f_globals is None:
self.f_globals = {}
class FCode(object):
def __init__(self, name, filename):
self.co_name = name
self.co_filename = filename
self.co_firstlineno = 1
self.co_flags = 0
def add_exception_to_frame(frame, exception_info):
frame.f_locals['__exception__'] = exception_info
def remove_exception_from_frame(frame):
frame.f_locals.pop('__exception__', None)
FILES_WITH_IMPORT_HOOKS = ['pydev_monkey_qt.py', 'pydev_import_hook.py']
def just_raised(trace):
if trace is None:
return False
return trace.tb_next is None
def ignore_exception_trace(trace):
while trace is not None:
filename = trace.tb_frame.f_code.co_filename
if filename in (
'<frozen importlib._bootstrap>', '<frozen importlib._bootstrap_external>'):
# Do not stop on inner exceptions in py3 while importing
return True
# ImportError should appear in a user's code, not inside debugger
for file in FILES_WITH_IMPORT_HOOKS:
if filename.endswith(file):
return True
trace = trace.tb_next
return False
def cached_call(obj, func, *args):
cached_name = '_cached_' + func.__name__
if not hasattr(obj, cached_name):
setattr(obj, cached_name, func(*args))
return getattr(obj, cached_name)
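# Hedged usage sketch for cached_call (everything below is illustrative and not
# used by pydevd itself): only the first call evaluates the function; later
# calls return the value stored on `obj` and ignore the arguments entirely.
def _example_cached_call():
    class _Holder(object):
        pass
    def _double(x):
        return x * 2
    holder = _Holder()
    assert cached_call(holder, _double, 21) == 42  # computed and stored on holder
    assert cached_call(holder, _double, 99) == 42  # cached result reused, args ignored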
class FramesList(object):
def __init__(self):
self._frames = []
        # If available, the line number for the frame will be taken from this dict;
        # otherwise, frame.f_lineno will be used (needed for unhandled exceptions, as
        # the place where we report may differ from the place where the exception was raised).
self.frame_id_to_lineno = {}
self.exc_type = None
self.exc_desc = None
self.trace_obj = None
        # This may be set to indicate the current frame (for the case where we have
        # an unhandled exception where we want to show the root, but we have a different
        # executing frame).
self.current_frame = None
# This is to know whether an exception was extracted from a __cause__ or __context__.
self.exc_context_msg = ''
def append(self, frame):
self._frames.append(frame)
def last_frame(self):
return self._frames[-1]
def __len__(self):
return len(self._frames)
def __iter__(self):
return iter(self._frames)
def __repr__(self):
lst = ['FramesList(']
lst.append('\n exc_type: ')
lst.append(str(self.exc_type))
lst.append('\n exc_desc: ')
lst.append(str(self.exc_desc))
lst.append('\n trace_obj: ')
lst.append(str(self.trace_obj))
lst.append('\n current_frame: ')
lst.append(str(self.current_frame))
for frame in self._frames:
lst.append('\n ')
lst.append(repr(frame))
lst.append(',')
lst.append('\n)')
return ''.join(lst)
__str__ = __repr__
class _DummyFrameWrapper(object):
def __init__(self, frame, f_lineno, f_back):
self._base_frame = frame
self.f_lineno = f_lineno
self.f_back = f_back
self.f_trace = None
original_code = frame.f_code
        self.f_code = FCode(original_code.co_name, original_code.co_filename)
@property
def f_locals(self):
return self._base_frame.f_locals
@property
def f_globals(self):
return self._base_frame.f_globals
_cause_message = (
"\nThe above exception was the direct cause "
"of the following exception:\n\n")
_context_message = (
"\nDuring handling of the above exception, "
"another exception occurred:\n\n")
def create_frames_list_from_exception_cause(trace_obj, frame, exc_type, exc_desc, memo):
lst = []
msg = '<Unknown context>'
try:
exc_cause = getattr(exc_desc, '__cause__', None)
msg = _cause_message
except Exception:
exc_cause = None
if exc_cause is None:
try:
exc_cause = getattr(exc_desc, '__context__', None)
msg = _context_message
except Exception:
exc_cause = None
if exc_cause is None or id(exc_cause) in memo:
return None
# The traceback module does this, so, let's play safe here too...
memo.add(id(exc_cause))
tb = exc_cause.__traceback__
frames_list = FramesList()
frames_list.exc_type = type(exc_cause)
frames_list.exc_desc = exc_cause
frames_list.trace_obj = tb
frames_list.exc_context_msg = msg
while tb is not None:
# Note: we don't use the actual tb.tb_frame because if the cause of the exception
# uses the same frame object, the id(frame) would be the same and the frame_id_to_lineno
# would be wrong as the same frame needs to appear with 2 different lines.
lst.append((_DummyFrameWrapper(tb.tb_frame, tb.tb_lineno, None), tb.tb_lineno))
tb = tb.tb_next
for tb_frame, tb_lineno in lst:
frames_list.append(tb_frame)
frames_list.frame_id_to_lineno[id(tb_frame)] = tb_lineno
return frames_list
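# Illustration of the __cause__/__context__ chaining that the function above
# walks (plain CPython exception semantics; the names here are illustrative):
def _example_exception_chain():
    try:
        try:
            raise ValueError('original')
        except ValueError as e:
            raise KeyError('derived') from e  # 'raise ... from' sets __cause__
    except KeyError as exc:
        assert isinstance(exc.__cause__, ValueError)
        assert exc.__traceback__ is not None  # the traceback iterated above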
def create_frames_list_from_traceback(trace_obj, frame, exc_type, exc_desc, exception_type=None):
'''
:param trace_obj:
This is the traceback from which the list should be created.
:param frame:
This is the first frame to be considered (i.e.: topmost frame). If None is passed, all
the frames from the traceback are shown (so, None should be passed for unhandled exceptions).
:param exception_type:
If this is an unhandled exception or user unhandled exception, we'll not trim the stack to create from the passed
frame, rather, we'll just mark the frame in the frames list.
'''
lst = []
tb = trace_obj
if tb is not None and tb.tb_frame is not None:
f = tb.tb_frame.f_back
while f is not None:
lst.insert(0, (f, f.f_lineno))
f = f.f_back
while tb is not None:
lst.append((tb.tb_frame, tb.tb_lineno))
tb = tb.tb_next
curr = exc_desc
memo = set()
while True:
initial = curr
try:
curr = getattr(initial, '__cause__', None)
except Exception:
curr = None
if curr is None:
try:
curr = getattr(initial, '__context__', None)
except Exception:
curr = None
if curr is None or id(curr) in memo:
break
# The traceback module does this, so, let's play safe here too...
memo.add(id(curr))
tb = getattr(curr, '__traceback__', None)
while tb is not None:
# Note: we don't use the actual tb.tb_frame because if the cause of the exception
# uses the same frame object, the id(frame) would be the same and the frame_id_to_lineno
# would be wrong as the same frame needs to appear with 2 different lines.
lst.append((_DummyFrameWrapper(tb.tb_frame, tb.tb_lineno, None), tb.tb_lineno))
tb = tb.tb_next
frames_list = None
for tb_frame, tb_lineno in reversed(lst):
if frames_list is None and (
(frame is tb_frame) or
(frame is None) or
(exception_type == EXCEPTION_TYPE_USER_UNHANDLED)
):
frames_list = FramesList()
if frames_list is not None:
frames_list.append(tb_frame)
frames_list.frame_id_to_lineno[id(tb_frame)] = tb_lineno
if frames_list is None and frame is not None:
# Fallback (shouldn't happen in practice).
pydev_log.info('create_frames_list_from_traceback did not find topmost frame in list.')
frames_list = create_frames_list_from_frame(frame)
frames_list.exc_type = exc_type
frames_list.exc_desc = exc_desc
frames_list.trace_obj = trace_obj
if exception_type == EXCEPTION_TYPE_USER_UNHANDLED:
frames_list.current_frame = frame
elif exception_type == EXCEPTION_TYPE_UNHANDLED:
if len(frames_list) > 0:
frames_list.current_frame = frames_list.last_frame()
return frames_list
def create_frames_list_from_frame(frame):
lst = FramesList()
while frame is not None:
lst.append(frame)
frame = frame.f_back
return lst
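# Minimal sketch (illustrative, not pydevd API): build a synthetic frame chain
# with the fakes above and flatten it as done for suspended threads.
def _example_frames_list():
    code = FCode('handler', 'example.py')  # hypothetical name/filename
    root = Frame(None, 1, code, {'a': 1})
    leaf = Frame(root, 5, code, {'b': 2})
    frames = create_frames_list_from_frame(leaf)
    assert len(frames) == 2 and frames.last_frame() is root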
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_code_to_source.py
"""
Decompiler that can be used with the debugger (where statements correctly represent the
line numbers).
Note: this is a work in progress / proof of concept / not ready to be used.
"""
import dis
from _pydevd_bundle.pydevd_collect_bytecode_info import iter_instructions
from _pydev_bundle import pydev_log
import sys
import inspect
from io import StringIO
class _Stack(object):
def __init__(self):
self._contents = []
def push(self, obj):
# print('push', obj)
self._contents.append(obj)
def pop(self):
return self._contents.pop(-1)
INDENT_MARKER = object()
DEDENT_MARKER = object()
_SENTINEL = object()
DEBUG = False
class _Token(object):
def __init__(self, i_line, instruction=None, tok=_SENTINEL, priority=0, after=None, end_of_line=False):
'''
:param i_line:
:param instruction:
:param tok:
:param priority:
:param after:
:param end_of_line:
            Marker signaling that this token should only be written after all the other tokens on the line.
'''
self.i_line = i_line
if tok is not _SENTINEL:
self.tok = tok
else:
if instruction is not None:
if inspect.iscode(instruction.argval):
self.tok = ''
else:
self.tok = str(instruction.argval)
else:
raise AssertionError('Either the tok or the instruction is needed.')
self.instruction = instruction
self.priority = priority
self.end_of_line = end_of_line
self._after_tokens = set()
self._after_handler_tokens = set()
if after:
self.mark_after(after)
def mark_after(self, v):
if isinstance(v, _Token):
self._after_tokens.add(v)
elif isinstance(v, _BaseHandler):
self._after_handler_tokens.add(v)
else:
raise AssertionError('Unhandled: %s' % (v,))
def get_after_tokens(self):
ret = self._after_tokens.copy()
for handler in self._after_handler_tokens:
ret.update(handler.tokens)
return ret
def __repr__(self):
return 'Token(%s, after: %s)' % (self.tok, self.get_after_tokens())
__str__ = __repr__
class _Writer(object):
def __init__(self):
self.line_to_contents = {}
self.all_tokens = set()
def get_line(self, line):
lst = self.line_to_contents.get(line)
if lst is None:
lst = self.line_to_contents[line] = []
return lst
def indent(self, line):
self.get_line(line).append(INDENT_MARKER)
def dedent(self, line):
self.get_line(line).append(DEDENT_MARKER)
def write(self, line, token):
if token in self.all_tokens:
return
self.all_tokens.add(token)
assert isinstance(token, _Token)
lst = self.get_line(line)
lst.append(token)
class _BaseHandler(object):
def __init__(self, i_line, instruction, stack, writer, disassembler):
self.i_line = i_line
self.instruction = instruction
self.stack = stack
self.writer = writer
self.disassembler = disassembler
self.tokens = []
self._handle()
def _write_tokens(self):
for token in self.tokens:
self.writer.write(token.i_line, token)
def _handle(self):
raise NotImplementedError(self)
def __repr__(self, *args, **kwargs):
try:
return "%s line:%s" % (self.instruction, self.i_line)
except:
return object.__repr__(self)
__str__ = __repr__
_op_name_to_handler = {}
def _register(cls):
_op_name_to_handler[cls.opname] = cls
return cls
class _BasePushHandler(_BaseHandler):
def _handle(self):
self.stack.push(self)
class _BaseLoadHandler(_BasePushHandler):
def _handle(self):
_BasePushHandler._handle(self)
self.tokens = [_Token(self.i_line, self.instruction)]
@_register
class _LoadBuildClass(_BasePushHandler):
opname = "LOAD_BUILD_CLASS"
@_register
class _LoadConst(_BaseLoadHandler):
opname = "LOAD_CONST"
@_register
class _LoadName(_BaseLoadHandler):
opname = "LOAD_NAME"
@_register
class _LoadGlobal(_BaseLoadHandler):
opname = "LOAD_GLOBAL"
@_register
class _LoadFast(_BaseLoadHandler):
opname = "LOAD_FAST"
@_register
class _GetIter(_BaseHandler):
'''
Implements TOS = iter(TOS).
'''
opname = "GET_ITER"
iter_target = None
def _handle(self):
self.iter_target = self.stack.pop()
self.tokens.extend(self.iter_target.tokens)
self.stack.push(self)
@_register
class _ForIter(_BaseHandler):
'''
TOS is an iterator. Call its __next__() method. If this yields a new value, push it on the stack
    (leaving the iterator below it). If the iterator indicates it is exhausted, TOS is popped, and
the byte code counter is incremented by delta.
'''
opname = "FOR_ITER"
iter_in = None
def _handle(self):
self.iter_in = self.stack.pop()
self.stack.push(self)
def store_in_name(self, store_name):
for_token = _Token(self.i_line, None, 'for ')
self.tokens.append(for_token)
prev = for_token
t_name = _Token(store_name.i_line, store_name.instruction, after=prev)
self.tokens.append(t_name)
prev = t_name
in_token = _Token(store_name.i_line, None, ' in ', after=prev)
self.tokens.append(in_token)
prev = in_token
max_line = store_name.i_line
if self.iter_in:
for t in self.iter_in.tokens:
t.mark_after(prev)
max_line = max(max_line, t.i_line)
prev = t
self.tokens.extend(self.iter_in.tokens)
colon_token = _Token(self.i_line, None, ':', after=prev)
self.tokens.append(colon_token)
prev = for_token
self._write_tokens()
@_register
class _StoreName(_BaseHandler):
'''
Implements name = TOS. namei is the index of name in the attribute co_names of the code object.
The compiler tries to use STORE_FAST or STORE_GLOBAL if possible.
'''
opname = "STORE_NAME"
def _handle(self):
v = self.stack.pop()
if isinstance(v, _ForIter):
v.store_in_name(self)
else:
if not isinstance(v, _MakeFunction) or v.is_lambda:
line = self.i_line
for t in v.tokens:
line = min(line, t.i_line)
t_name = _Token(line, self.instruction)
t_equal = _Token(line, None, '=', after=t_name)
self.tokens.append(t_name)
self.tokens.append(t_equal)
for t in v.tokens:
t.mark_after(t_equal)
self.tokens.extend(v.tokens)
self._write_tokens()
@_register
class _ReturnValue(_BaseHandler):
"""
Returns with TOS to the caller of the function.
"""
opname = "RETURN_VALUE"
def _handle(self):
v = self.stack.pop()
return_token = _Token(self.i_line, None, 'return ', end_of_line=True)
self.tokens.append(return_token)
for token in v.tokens:
token.mark_after(return_token)
self.tokens.extend(v.tokens)
self._write_tokens()
@_register
class _CallFunction(_BaseHandler):
"""
CALL_FUNCTION(argc)
Calls a callable object with positional arguments. argc indicates the number of positional
arguments. The top of the stack contains positional arguments, with the right-most argument
on top. Below the arguments is a callable object to call. CALL_FUNCTION pops all arguments
and the callable object off the stack, calls the callable object with those arguments, and
pushes the return value returned by the callable object.
Changed in version 3.6: This opcode is used only for calls with positional arguments.
"""
opname = "CALL_FUNCTION"
def _handle(self):
args = []
for _i in range(self.instruction.argval + 1):
arg = self.stack.pop()
args.append(arg)
it = reversed(args)
name = next(it)
max_line = name.i_line
for t in name.tokens:
self.tokens.append(t)
tok_open_parens = _Token(name.i_line, None, '(', after=name)
self.tokens.append(tok_open_parens)
prev = tok_open_parens
for i, arg in enumerate(it):
for t in arg.tokens:
t.mark_after(name)
t.mark_after(prev)
max_line = max(max_line, t.i_line)
self.tokens.append(t)
prev = arg
if i > 0:
comma_token = _Token(prev.i_line, None, ',', after=prev)
self.tokens.append(comma_token)
prev = comma_token
tok_close_parens = _Token(max_line, None, ')', after=prev)
self.tokens.append(tok_close_parens)
self._write_tokens()
self.stack.push(self)
@_register
class _MakeFunctionPy3(_BaseHandler):
"""
Pushes a new function object on the stack. From bottom to top, the consumed stack must consist
of values if the argument carries a specified flag value
0x01 a tuple of default values for positional-only and positional-or-keyword parameters in positional order
0x02 a dictionary of keyword-only parameters' default values
0x04 an annotation dictionary
0x08 a tuple containing cells for free variables, making a closure
the code associated with the function (at TOS1)
the qualified name of the function (at TOS)
"""
opname = "MAKE_FUNCTION"
is_lambda = False
def _handle(self):
stack = self.stack
self.qualified_name = stack.pop()
self.code = stack.pop()
default_node = None
if self.instruction.argval & 0x01:
default_node = stack.pop()
is_lambda = self.is_lambda = '<lambda>' in [x.tok for x in self.qualified_name.tokens]
if not is_lambda:
def_token = _Token(self.i_line, None, 'def ')
self.tokens.append(def_token)
for token in self.qualified_name.tokens:
self.tokens.append(token)
if not is_lambda:
token.mark_after(def_token)
prev = token
open_parens_token = _Token(self.i_line, None, '(', after=prev)
self.tokens.append(open_parens_token)
prev = open_parens_token
code = self.code.instruction.argval
if default_node:
defaults = ([_SENTINEL] * (len(code.co_varnames) - len(default_node.instruction.argval))) + list(default_node.instruction.argval)
else:
defaults = [_SENTINEL] * len(code.co_varnames)
for i, arg in enumerate(code.co_varnames):
if i > 0:
comma_token = _Token(prev.i_line, None, ', ', after=prev)
self.tokens.append(comma_token)
prev = comma_token
arg_token = _Token(self.i_line, None, arg, after=prev)
self.tokens.append(arg_token)
default = defaults[i]
if default is not _SENTINEL:
eq_token = _Token(default_node.i_line, None, '=', after=prev)
self.tokens.append(eq_token)
prev = eq_token
default_token = _Token(default_node.i_line, None, str(default), after=prev)
self.tokens.append(default_token)
prev = default_token
tok_close_parens = _Token(prev.i_line, None, '):', after=prev)
self.tokens.append(tok_close_parens)
self._write_tokens()
stack.push(self)
self.writer.indent(prev.i_line + 1)
self.writer.dedent(max(self.disassembler.merge_code(code)))
_MakeFunction = _MakeFunctionPy3
def _print_after_info(line_contents, stream=None):
if stream is None:
stream = sys.stdout
for token in line_contents:
after_tokens = token.get_after_tokens()
if after_tokens:
s = '%s after: %s\n' % (
repr(token.tok),
('"' + '", "'.join(t.tok for t in token.get_after_tokens()) + '"'))
stream.write(s)
else:
stream.write('%s (NO REQUISITES)' % repr(token.tok))
def _compose_line_contents(line_contents, previous_line_tokens):
lst = []
handled = set()
add_to_end_of_line = []
delete_indexes = []
for i, token in enumerate(line_contents):
if token.end_of_line:
add_to_end_of_line.append(token)
delete_indexes.append(i)
for i in reversed(delete_indexes):
del line_contents[i]
del delete_indexes
while line_contents:
added = False
delete_indexes = []
for i, token in enumerate(line_contents):
after_tokens = token.get_after_tokens()
for after in after_tokens:
if after not in handled and after not in previous_line_tokens:
break
else:
added = True
previous_line_tokens.add(token)
handled.add(token)
lst.append(token.tok)
delete_indexes.append(i)
for i in reversed(delete_indexes):
del line_contents[i]
if not added:
if add_to_end_of_line:
line_contents.extend(add_to_end_of_line)
del add_to_end_of_line[:]
continue
# Something is off, let's just add as is.
for token in line_contents:
if token not in handled:
lst.append(token.tok)
stream = StringIO()
_print_after_info(line_contents, stream)
pydev_log.critical('Error. After markers are not correct:\n%s', stream.getvalue())
break
return ''.join(lst)
class _PyCodeToSource(object):
def __init__(self, co, memo=None):
if memo is None:
memo = {}
self.memo = memo
self.co = co
self.instructions = list(iter_instructions(co))
self.stack = _Stack()
self.writer = _Writer()
def _process_next(self, i_line):
instruction = self.instructions.pop(0)
handler_class = _op_name_to_handler.get(instruction.opname)
if handler_class is not None:
s = handler_class(i_line, instruction, self.stack, self.writer, self)
if DEBUG:
print(s)
else:
if DEBUG:
print("UNHANDLED", instruction)
def build_line_to_contents(self):
co = self.co
op_offset_to_line = dict(dis.findlinestarts(co))
curr_line_index = 0
instructions = self.instructions
while instructions:
instruction = instructions[0]
new_line_index = op_offset_to_line.get(instruction.offset)
            if new_line_index is not None:
                curr_line_index = new_line_index
self._process_next(curr_line_index)
return self.writer.line_to_contents
def merge_code(self, code):
if DEBUG:
print('merge code ----')
# for d in dir(code):
# if not d.startswith('_'):
# print(d, getattr(code, d))
line_to_contents = _PyCodeToSource(code, self.memo).build_line_to_contents()
lines = []
for line, contents in sorted(line_to_contents.items()):
lines.append(line)
self.writer.get_line(line).extend(contents)
if DEBUG:
print('end merge code ----')
return lines
def disassemble(self):
show_lines = False
line_to_contents = self.build_line_to_contents()
stream = StringIO()
last_line = 0
indent = ''
previous_line_tokens = set()
for i_line, contents in sorted(line_to_contents.items()):
while last_line < i_line - 1:
if show_lines:
stream.write(u"%s.\n" % (last_line + 1,))
else:
stream.write(u"\n")
last_line += 1
line_contents = []
dedents_found = 0
for part in contents:
if part is INDENT_MARKER:
if DEBUG:
print('found indent', i_line)
indent += ' '
continue
if part is DEDENT_MARKER:
if DEBUG:
print('found dedent', i_line)
dedents_found += 1
continue
line_contents.append(part)
s = indent + _compose_line_contents(line_contents, previous_line_tokens)
if show_lines:
stream.write(u"%s. %s\n" % (i_line, s))
else:
stream.write(u"%s\n" % s)
if dedents_found:
indent = indent[:-(4 * dedents_found)]
last_line = i_line
return stream.getvalue()
def code_obj_to_source(co):
"""
Converts a code object to source code to provide a suitable representation for the compiler when
the actual source code is not found.
This is a work in progress / proof of concept / not ready to be used.
"""
ret = _PyCodeToSource(co).disassemble()
if DEBUG:
print(ret)
return ret
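if __name__ == '__main__':
    # Hedged sketch: decompile a small module-level code object back into an
    # approximate, line-preserving representation. The output varies with the
    # Python version's bytecode and, as noted above, this is a proof of concept.
    _co = compile('x = 10\ny = x + 1\n', '<example>', 'exec')
    print(code_obj_to_source(_co))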
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_save_locals.py
"""
Utility for saving locals.
"""
import sys
try:
import types
frame_type = types.FrameType
except:
frame_type = type(sys._getframe())
def is_save_locals_available():
return save_locals_impl is not None
def save_locals(frame):
"""
    Copy values from the frame's locals dict into the fast-locals slots of the given frame.
Note: the 'save_locals' branch had a different approach wrapping the frame (much more code, but it gives ideas
on how to save things partially, not the 'whole' locals).
"""
if not isinstance(frame, frame_type):
# Fix exception when changing Django variable (receiving DjangoTemplateFrame)
return
if save_locals_impl is not None:
try:
save_locals_impl(frame)
except:
pass
def make_save_locals_impl():
"""
Factory for the 'save_locals_impl' method. This may seem like a complicated pattern but it is essential that the method is created at
module load time. Inner imports after module load time would cause an occasional debugger deadlock due to the importer lock and debugger
lock being taken in different order in different threads.
"""
try:
if '__pypy__' in sys.builtin_module_names:
import __pypy__ # @UnresolvedImport
save_locals = __pypy__.locals_to_fast
except:
pass
else:
if '__pypy__' in sys.builtin_module_names:
def save_locals_pypy_impl(frame):
save_locals(frame)
return save_locals_pypy_impl
try:
import ctypes
locals_to_fast = ctypes.pythonapi.PyFrame_LocalsToFast
except:
pass
else:
def save_locals_ctypes_impl(frame):
locals_to_fast(ctypes.py_object(frame), ctypes.c_int(0))
return save_locals_ctypes_impl
return None
save_locals_impl = make_save_locals_impl()
def update_globals_and_locals(updated_globals, initial_globals, frame):
# We don't have the locals and passed all in globals, so, we have to
# manually choose how to update the variables.
#
# Note that the current implementation is a bit tricky: it does work in general
# but if we do something as 'some_var = 10' and 'some_var' is already defined to have
# the value '10' in the globals, we won't actually put that value in the locals
# (which means that the frame locals won't be updated).
# Still, the approach to have a single namespace was chosen because it was the only
# one that enabled creating and using variables during the same evaluation.
assert updated_globals is not None
f_locals = None
for key, val in updated_globals.items():
if initial_globals.get(key) is not val:
if f_locals is None:
# Note: we call f_locals only once because each time
# we call it the values may be reset.
f_locals = frame.f_locals
f_locals[key] = val
if f_locals is not None:
save_locals(frame)
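if __name__ == '__main__':
    # Hedged sketch of what save_locals enables (CPython-specific; on recent
    # versions with PEP 667 write-through locals the extra call is redundant):
    def _example():
        x = 1
        frame = sys._getframe()
        frame.f_locals['x'] = 2  # alone, this may be discarded by the interpreter
        save_locals(frame)       # push the change into the fast-locals slots
        return x
    assert _example() == 2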
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_custom_frames.py
from _pydevd_bundle.pydevd_constants import get_current_thread_id, Null, ForkSafeLock
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame
from _pydev_bundle._pydev_saved_modules import thread, threading
import sys
from _pydev_bundle import pydev_log
DEBUG = False
class CustomFramesContainer:
# Actual Values initialized later on.
custom_frames_lock = None # : :type custom_frames_lock: threading.Lock
custom_frames = None
_next_frame_id = None
_py_db_command_thread_event = None
def custom_frames_container_init(): # Note: no staticmethod on jython 2.1 (so, use free-function)
CustomFramesContainer.custom_frames_lock = ForkSafeLock()
# custom_frames can only be accessed if properly locked with custom_frames_lock!
# Key is a string identifying the frame (as well as the thread it belongs to).
# Value is a CustomFrame.
#
CustomFramesContainer.custom_frames = {}
# Only to be used in this module
CustomFramesContainer._next_frame_id = 0
    # This is the event we must set to release the internal processing of events. It's set
    # later by the actual debugger, when the debugger is created.
CustomFramesContainer._py_db_command_thread_event = Null()
# Initialize it the first time (it may be reinitialized later on when dealing with a fork).
custom_frames_container_init()
class CustomFrame:
def __init__(self, name, frame, thread_id):
# 0 = string with the representation of that frame
self.name = name
# 1 = the frame to show
self.frame = frame
# 2 = an integer identifying the last time the frame was changed.
self.mod_time = 0
# 3 = the thread id of the given frame
self.thread_id = thread_id
def add_custom_frame(frame, name, thread_id):
'''
It's possible to show paused frames by adding a custom frame through this API (it's
intended to be used for coroutines, but could potentially be used for generators too).
:param frame:
The topmost frame to be shown paused when a thread with thread.ident == thread_id is paused.
:param name:
The name to be shown for the custom thread in the UI.
:param thread_id:
The thread id to which this frame is related (must match thread.ident).
:return: str
Returns the custom thread id which will be used to show the given frame paused.
'''
with CustomFramesContainer.custom_frames_lock:
curr_thread_id = get_current_thread_id(threading.current_thread())
next_id = CustomFramesContainer._next_frame_id = CustomFramesContainer._next_frame_id + 1
# Note: the frame id kept contains an id and thread information on the thread where the frame was added
# so that later on we can check if the frame is from the current thread by doing frame_id.endswith('|'+thread_id).
frame_custom_thread_id = '__frame__:%s|%s' % (next_id, curr_thread_id)
if DEBUG:
sys.stderr.write('add_custom_frame: %s (%s) %s %s\n' % (
frame_custom_thread_id, get_abs_path_real_path_and_base_from_frame(frame)[-1], frame.f_lineno, frame.f_code.co_name))
CustomFramesContainer.custom_frames[frame_custom_thread_id] = CustomFrame(name, frame, thread_id)
CustomFramesContainer._py_db_command_thread_event.set()
return frame_custom_thread_id
def update_custom_frame(frame_custom_thread_id, frame, thread_id, name=None):
with CustomFramesContainer.custom_frames_lock:
if DEBUG:
sys.stderr.write('update_custom_frame: %s\n' % frame_custom_thread_id)
try:
old = CustomFramesContainer.custom_frames[frame_custom_thread_id]
if name is not None:
old.name = name
old.mod_time += 1
old.thread_id = thread_id
except:
sys.stderr.write('Unable to get frame to replace: %s\n' % (frame_custom_thread_id,))
pydev_log.exception()
CustomFramesContainer._py_db_command_thread_event.set()
def remove_custom_frame(frame_custom_thread_id):
with CustomFramesContainer.custom_frames_lock:
if DEBUG:
sys.stderr.write('remove_custom_frame: %s\n' % frame_custom_thread_id)
CustomFramesContainer.custom_frames.pop(frame_custom_thread_id, None)
CustomFramesContainer._py_db_command_thread_event.set()
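# Hedged usage sketch (the names are illustrative; pydevd uses this API for
# coroutines): register the caller's frame as a paused custom frame for the
# current thread, then drop it when done.
def _example_custom_frame():
    thread_id = get_current_thread_id(threading.current_thread())
    custom_id = add_custom_frame(sys._getframe(), 'MyCoroutine', thread_id)
    try:
        pass  # the debugger UI would show 'MyCoroutine' as a paused thread here
    finally:
        remove_custom_frame(custom_id)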
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_safe_repr.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
# Gotten from ptvsd for supporting the format expected there.
import sys
from _pydevd_bundle.pydevd_constants import IS_PY36_OR_GREATER
import locale
from _pydev_bundle import pydev_log
class SafeRepr(object):
# Can be used to override the encoding from locale.getpreferredencoding()
locale_preferred_encoding = None
# Can be used to override the encoding used for sys.stdout.encoding
sys_stdout_encoding = None
# String types are truncated to maxstring_outer when at the outer-
# most level, and truncated to maxstring_inner characters inside
# collections.
maxstring_outer = 2 ** 16
maxstring_inner = 30
string_types = (str, bytes)
bytes = bytes
set_info = (set, '{', '}', False)
frozenset_info = (frozenset, 'frozenset({', '})', False)
int_types = (int,)
long_iter_types = (list, tuple, bytearray, range,
dict, set, frozenset)
# Collection types are recursively iterated for each limit in
# maxcollection.
maxcollection = (15, 10)
# Specifies type, prefix string, suffix string, and whether to include a
# comma if there is only one element. (Using a sequence rather than a
# mapping because we use isinstance() to determine the matching type.)
collection_types = [
(tuple, '(', ')', True),
(list, '[', ']', False),
frozenset_info,
set_info,
]
try:
from collections import deque
collection_types.append((deque, 'deque([', '])', False))
except Exception:
pass
# type, prefix string, suffix string, item prefix string,
# item key/value separator, item suffix string
dict_types = [(dict, '{', '}', '', ': ', '')]
try:
from collections import OrderedDict
dict_types.append((OrderedDict, 'OrderedDict([', '])', '(', ', ', ')'))
except Exception:
pass
# All other types are treated identically to strings, but using
# different limits.
maxother_outer = 2 ** 16
maxother_inner = 30
convert_to_hex = False
raw_value = False
def __call__(self, obj):
'''
:param object obj:
The object for which we want a representation.
:return str:
Returns bytes encoded as utf-8 on py2 and str on py3.
'''
try:
return ''.join(self._repr(obj, 0))
except Exception:
try:
return 'An exception was raised: %r' % sys.exc_info()[1]
except Exception:
return 'An exception was raised'
def _repr(self, obj, level):
'''Returns an iterable of the parts in the final repr string.'''
try:
obj_repr = type(obj).__repr__
except Exception:
obj_repr = None
def has_obj_repr(t):
r = t.__repr__
try:
return obj_repr == r
except Exception:
return obj_repr is r
for t, prefix, suffix, comma in self.collection_types:
if isinstance(obj, t) and has_obj_repr(t):
return self._repr_iter(obj, level, prefix, suffix, comma)
for t, prefix, suffix, item_prefix, item_sep, item_suffix in self.dict_types: # noqa
if isinstance(obj, t) and has_obj_repr(t):
return self._repr_dict(obj, level, prefix, suffix,
item_prefix, item_sep, item_suffix)
for t in self.string_types:
if isinstance(obj, t) and has_obj_repr(t):
return self._repr_str(obj, level)
if self._is_long_iter(obj):
return self._repr_long_iter(obj)
return self._repr_other(obj, level)
# Determines whether an iterable exceeds the limits set in
# maxlimits, and is therefore unsafe to repr().
def _is_long_iter(self, obj, level=0):
try:
# Strings have their own limits (and do not nest). Because
# they don't have __iter__ in 2.x, this check goes before
# the next one.
if isinstance(obj, self.string_types):
return len(obj) > self.maxstring_inner
# If it's not an iterable (and not a string), it's fine.
if not hasattr(obj, '__iter__'):
return False
# If it's not an instance of these collection types then it
# is fine. Note: this is a fix for
# https://github.com/Microsoft/ptvsd/issues/406
if not isinstance(obj, self.long_iter_types):
return False
# Iterable is its own iterator - this is a one-off iterable
# like generator or enumerate(). We can't really count that,
# but repr() for these should not include any elements anyway,
# so we can treat it the same as non-iterables.
if obj is iter(obj):
return False
# range reprs fine regardless of length.
if isinstance(obj, range):
return False
# numpy and scipy collections (ndarray etc) have
# self-truncating repr, so they're always safe.
try:
module = type(obj).__module__.partition('.')[0]
if module in ('numpy', 'scipy'):
return False
except Exception:
pass
# Iterables that nest too deep are considered long.
if level >= len(self.maxcollection):
return True
# It is too long if the length exceeds the limit, or any
# of its elements are long iterables.
if hasattr(obj, '__len__'):
try:
size = len(obj)
except Exception:
size = None
if size is not None and size > self.maxcollection[level]:
return True
return any((self._is_long_iter(item, level + 1) for item in obj)) # noqa
return any(i > self.maxcollection[level] or self._is_long_iter(item, level + 1) for i, item in enumerate(obj)) # noqa
except Exception:
# If anything breaks, assume the worst case.
return True
def _repr_iter(self, obj, level, prefix, suffix,
comma_after_single_element=False):
yield prefix
if level >= len(self.maxcollection):
yield '...'
else:
count = self.maxcollection[level]
yield_comma = False
for item in obj:
if yield_comma:
yield ', '
yield_comma = True
count -= 1
if count <= 0:
yield '...'
break
for p in self._repr(item, 100 if item is obj else level + 1):
yield p
else:
if comma_after_single_element:
if count == self.maxcollection[level] - 1:
yield ','
yield suffix
def _repr_long_iter(self, obj):
try:
length = hex(len(obj)) if self.convert_to_hex else len(obj)
obj_repr = '<%s, len() = %s>' % (type(obj).__name__, length)
except Exception:
try:
obj_repr = '<' + type(obj).__name__ + '>'
except Exception:
obj_repr = '<no repr available for object>'
yield obj_repr
def _repr_dict(self, obj, level, prefix, suffix,
item_prefix, item_sep, item_suffix):
if not obj:
yield prefix + suffix
return
if level >= len(self.maxcollection):
yield prefix + '...' + suffix
return
yield prefix
count = self.maxcollection[level]
yield_comma = False
if IS_PY36_OR_GREATER:
# On Python 3.6 (onwards) dictionaries now keep
# insertion order.
sorted_keys = list(obj)
else:
try:
sorted_keys = sorted(obj)
except Exception:
sorted_keys = list(obj)
for key in sorted_keys:
if yield_comma:
yield ', '
yield_comma = True
count -= 1
if count <= 0:
yield '...'
break
yield item_prefix
for p in self._repr(key, level + 1):
yield p
yield item_sep
try:
item = obj[key]
except Exception:
yield '<?>'
else:
for p in self._repr(item, 100 if item is obj else level + 1):
yield p
yield item_suffix
yield suffix
def _repr_str(self, obj, level):
try:
if self.raw_value:
# For raw value retrieval, ignore all limits.
if isinstance(obj, bytes):
yield obj.decode('latin-1')
else:
yield obj
return
limit_inner = self.maxother_inner
limit_outer = self.maxother_outer
limit = limit_inner if level > 0 else limit_outer
if len(obj) <= limit:
# Note that we check the limit before doing the repr (so, the final string
# may actually be considerably bigger on some cases, as besides
# the additional u, b, ' chars, some chars may be escaped in repr, so
# even a single char such as \U0010ffff may end up adding more
# chars than expected).
yield self._convert_to_unicode_or_bytes_repr(repr(obj))
return
# Slightly imprecise calculations - we may end up with a string that is
# up to 6 characters longer than limit. If you need precise formatting,
# you are using the wrong class.
left_count, right_count = max(1, int(2 * limit / 3)), max(1, int(limit / 3)) # noqa
# Important: only do repr after slicing to avoid duplicating a byte array that could be
# huge.
# Note: we don't deal with high surrogates here because we're not dealing with the
# repr() of a random object.
            # i.e.: A high surrogate unicode char may be split on Py2, but as we do a `repr`
# afterwards, that's ok.
# Also, we just show the unicode/string/bytes repr() directly to make clear what the
# input type was (so, on py2 a unicode would start with u' and on py3 a bytes would
# start with b').
part1 = obj[:left_count]
part1 = repr(part1)
part1 = part1[:part1.rindex("'")] # Remove the last '
part2 = obj[-right_count:]
part2 = repr(part2)
part2 = part2[part2.index("'") + 1:] # Remove the first ' (and possibly u or b).
yield part1
yield '...'
yield part2
except:
# This shouldn't really happen, but let's play it safe.
pydev_log.exception('Error getting string representation to show.')
for part in self._repr_obj(obj, level,
self.maxother_inner, self.maxother_outer):
yield part
def _repr_other(self, obj, level):
return self._repr_obj(obj, level,
self.maxother_inner, self.maxother_outer)
def _repr_obj(self, obj, level, limit_inner, limit_outer):
try:
if self.raw_value:
# For raw value retrieval, ignore all limits.
if isinstance(obj, bytes):
yield obj.decode('latin-1')
return
try:
mv = memoryview(obj)
except Exception:
yield self._convert_to_unicode_or_bytes_repr(repr(obj))
return
else:
# Map bytes to Unicode codepoints with same values.
yield mv.tobytes().decode('latin-1')
return
elif self.convert_to_hex and isinstance(obj, self.int_types):
obj_repr = hex(obj)
else:
obj_repr = repr(obj)
except Exception:
try:
obj_repr = object.__repr__(obj)
except Exception:
try:
obj_repr = '<no repr available for ' + type(obj).__name__ + '>' # noqa
except Exception:
obj_repr = '<no repr available for object>'
limit = limit_inner if level > 0 else limit_outer
if limit >= len(obj_repr):
yield self._convert_to_unicode_or_bytes_repr(obj_repr)
return
# Slightly imprecise calculations - we may end up with a string that is
# up to 3 characters longer than limit. If you need precise formatting,
# you are using the wrong class.
left_count, right_count = max(1, int(2 * limit / 3)), max(1, int(limit / 3)) # noqa
yield obj_repr[:left_count]
yield '...'
yield obj_repr[-right_count:]
def _convert_to_unicode_or_bytes_repr(self, obj_repr):
return obj_repr
def _bytes_as_unicode_if_possible(self, obj_repr):
# We try to decode with 3 possible encoding (sys.stdout.encoding,
# locale.getpreferredencoding() and 'utf-8). If no encoding can decode
# the input, we return the original bytes.
try_encodings = []
encoding = self.sys_stdout_encoding or getattr(sys.stdout, 'encoding', '')
if encoding:
try_encodings.append(encoding.lower())
preferred_encoding = self.locale_preferred_encoding or locale.getpreferredencoding()
if preferred_encoding:
preferred_encoding = preferred_encoding.lower()
if preferred_encoding not in try_encodings:
try_encodings.append(preferred_encoding)
if 'utf-8' not in try_encodings:
try_encodings.append('utf-8')
for encoding in try_encodings:
try:
return obj_repr.decode(encoding)
except UnicodeDecodeError:
pass
return obj_repr # Return the original version (in bytes)
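if __name__ == '__main__':
    # Minimal usage sketch: SafeRepr truncates instead of building huge reprs.
    # The limits involved are the class defaults documented above.
    safe_repr = SafeRepr()
    print(safe_repr(list(range(1000))))   # elements elided with '...'
    print(safe_repr('x' * 100000)[:80])   # long string shortened with '...'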
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_dont_trace.py
'''
Support for a tag that allows skipping over functions while debugging.
'''
import linecache
import re
# To suppress tracing a method, add the tag @DontTrace
# to a comment either preceding or on the same line as
# the method definition
#
# E.g.:
# #@DontTrace
# def test1():
# pass
#
# ... or ...
#
# def test2(): #@DontTrace
# pass
DONT_TRACE_TAG = '@DontTrace'
# Regular expression to match a decorator (at the beginning
# of a line).
RE_DECORATOR = re.compile(r'^\s*@')
# Mapping from code object to bool.
# If the key exists, the value is the cached result of should_trace_hook
_filename_to_ignored_lines = {}
def default_should_trace_hook(frame, absolute_filename):
'''
Return True if this frame should be traced, False if tracing should be blocked.
'''
# First, check whether this code object has a cached value
ignored_lines = _filename_to_ignored_lines.get(absolute_filename)
if ignored_lines is None:
# Now, look up that line of code and check for a @DontTrace
# preceding or on the same line as the method.
# E.g.:
# #@DontTrace
# def test():
# pass
# ... or ...
# def test(): #@DontTrace
# pass
ignored_lines = {}
lines = linecache.getlines(absolute_filename)
for i_line, line in enumerate(lines):
j = line.find('#')
if j >= 0:
comment = line[j:]
if DONT_TRACE_TAG in comment:
ignored_lines[i_line] = 1
                    # Note: when the tag is found in a comment, also mark any decorator lines above and below it.
k = i_line - 1
while k >= 0:
if RE_DECORATOR.match(lines[k]):
ignored_lines[k] = 1
k -= 1
else:
break
k = i_line + 1
                    while k < len(lines):
if RE_DECORATOR.match(lines[k]):
ignored_lines[k] = 1
k += 1
else:
break
_filename_to_ignored_lines[absolute_filename] = ignored_lines
func_line = frame.f_code.co_firstlineno - 1 # co_firstlineno is 1-based, so -1 is needed
return not (
func_line - 1 in ignored_lines or # -1 to get line before method
func_line in ignored_lines) # method line
should_trace_hook = None
def clear_trace_filter_cache():
'''
Clear the trace filter cache.
Call this after reloading.
'''
global should_trace_hook
try:
# Need to temporarily disable a hook because otherwise
# _filename_to_ignored_lines.clear() will never complete.
old_hook = should_trace_hook
should_trace_hook = None
# Clear the linecache
linecache.clearcache()
_filename_to_ignored_lines.clear()
finally:
should_trace_hook = old_hook
def trace_filter(mode):
'''
Set the trace filter mode.
mode: Whether to enable the trace hook.
True: Trace filtering on (skipping methods tagged @DontTrace)
False: Trace filtering off (trace methods tagged @DontTrace)
None/default: Toggle trace filtering.
'''
global should_trace_hook
if mode is None:
mode = should_trace_hook is None
if mode:
should_trace_hook = default_should_trace_hook
else:
should_trace_hook = None
return mode
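# Usage sketch (illustrative): enabling the filter installs the default hook so
# frames whose definitions are tagged with @DontTrace are skipped while stepping.
def _example_trace_filter():
    enabled = trace_filter(True)
    assert enabled and should_trace_hook is default_should_trace_hook
    trace_filter(False)  # back to tracing everything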
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_reload.py
"""
Based on the python xreload.
Changes
======================
1. we don't recreate the old namespace from new classes. Rather, we keep the existing namespace,
load a new version of it and update only some of the things we can inplace. That way, we don't break
things such as singletons or end up with a second representation of the same class in memory.
2. If we find it to be a __metaclass__, we try to update it as a regular class.
3. We don't remove old attributes (and leave them lying around even if they're no longer used).
4. Reload hooks were changed
These changes make it more stable, especially in the common case (where in a debug session only the
contents of a function are changed), besides providing flexibility for users that want to extend
on it.
Hooks
======================
Classes/modules can be specially crafted to work with the reload (so that it can, for instance,
update some constant which was changed).
1. To participate in the change of some attribute:
In a module:
__xreload_old_new__(namespace, name, old, new)
in a class:
@classmethod
__xreload_old_new__(cls, name, old, new)
A class or module may include a method called '__xreload_old_new__' which is called when we're
unable to reload a given attribute.
2. To do something after the whole reload is finished:
In a module:
__xreload_after_reload_update__(namespace):
In a class:
@classmethod
__xreload_after_reload_update__(cls):
A class or module may include a method called '__xreload_after_reload_update__' which is called
after the reload finishes.
Important: when providing a hook, always use the namespace or cls provided and not anything in the global
namespace, as the global namespace are only temporarily created during the reload and may not reflect the
actual application state (while the cls and namespace passed are).
Current limitations
======================
- Attributes/constants are added, but not changed (so singletons and the application state is not
broken -- use provided hooks to workaround it).
- Code using metaclasses may not always work.
- Functions and methods using decorators (other than classmethod and staticmethod) are not handled
correctly.
- Renamings are not handled correctly.
- Dependent modules are not reloaded.
- New __slots__ can't be added to existing classes.
Info
======================
Original: http://svn.python.org/projects/sandbox/trunk/xreload/xreload.py
Note: it seems https://github.com/plone/plone.reload/blob/master/plone/reload/xreload.py enhances it (to check later)
Interesting alternative: https://code.google.com/p/reimport/
Alternative to reload().
This works by executing the module in a scratch namespace, and then patching classes, methods and
functions in place. This avoids the need to patch instances. New objects are copied into the
target namespace.
"""
from _pydev_bundle.pydev_imports import execfile
from _pydevd_bundle import pydevd_dont_trace
import types
from _pydev_bundle import pydev_log
from _pydevd_bundle.pydevd_constants import get_global_debugger
NO_DEBUG = 0
LEVEL1 = 1
LEVEL2 = 2
DEBUG = NO_DEBUG
def write_err(*args):
py_db = get_global_debugger()
if py_db is not None:
new_lst = []
for a in args:
new_lst.append(str(a))
msg = ' '.join(new_lst)
s = 'code reload: %s\n' % (msg,)
cmd = py_db.cmd_factory.make_io_message(s, 2)
if py_db.writer is not None:
py_db.writer.add_command(cmd)
def notify_info0(*args):
write_err(*args)
def notify_info(*args):
if DEBUG >= LEVEL1:
write_err(*args)
def notify_info2(*args):
if DEBUG >= LEVEL2:
write_err(*args)
def notify_error(*args):
write_err(*args)
#=======================================================================================================================
# code_objects_equal
#=======================================================================================================================
def code_objects_equal(code0, code1):
for d in dir(code0):
if d.startswith('_') or 'line' in d or d in ('replace', 'co_positions', 'co_qualname'):
continue
if getattr(code0, d) != getattr(code1, d):
return False
return True
#=======================================================================================================================
# xreload
#=======================================================================================================================
def xreload(mod):
"""Reload a module in place, updating classes, methods and functions.
mod: a module object
Returns a boolean indicating whether a change was done.
"""
r = Reload(mod)
r.apply()
found_change = r.found_change
r = None
pydevd_dont_trace.clear_trace_filter_cache()
return found_change
# This isn't actually used... Initially I planned to reload variables which are immutable on the
# namespace, but this can destroy places where we're saving state, which may not be what we want,
# so, we're being conservative and giving the user hooks if he wants to do a reload.
#
# immutable_types = [int, str, float, tuple] #That should be common to all Python versions
#
# for name in 'long basestr unicode frozenset'.split():
# try:
# immutable_types.append(__builtins__[name])
# except:
# pass #Just ignore: not all python versions are created equal.
# immutable_types = tuple(immutable_types)
#=======================================================================================================================
# Reload
#=======================================================================================================================
class Reload:
def __init__(self, mod, mod_name=None, mod_filename=None):
self.mod = mod
if mod_name:
self.mod_name = mod_name
else:
self.mod_name = mod.__name__ if mod is not None else None
if mod_filename:
self.mod_filename = mod_filename
else:
self.mod_filename = mod.__file__ if mod is not None else None
self.found_change = False
def apply(self):
mod = self.mod
self._on_finish_callbacks = []
try:
# Get the module namespace (dict) early; this is part of the type check
modns = mod.__dict__
# Execute the code. We copy the module dict to a temporary; then
# clear the module dict; then execute the new code in the module
# dict; then swap things back and around. This trick (due to
# Glyph Lefkowitz) ensures that the (readonly) __globals__
# attribute of methods and functions is set to the correct dict
# object.
new_namespace = modns.copy()
new_namespace.clear()
if self.mod_filename:
new_namespace["__file__"] = self.mod_filename
try:
new_namespace["__builtins__"] = __builtins__
except NameError:
                pass  # Ok if not there.
if self.mod_name:
new_namespace["__name__"] = self.mod_name
if new_namespace["__name__"] == '__main__':
                    # We do this because usually the __main__ module starts up the program, guarded by
# the if __name__ == '__main__', but we don't want to start the program again
# on a reload.
new_namespace["__name__"] = '__main_reloaded__'
execfile(self.mod_filename, new_namespace, new_namespace)
# Now we get to the hard part
oldnames = set(modns)
newnames = set(new_namespace)
# Create new tokens (note: not deleting existing)
for name in newnames - oldnames:
notify_info0('Added:', name, 'to namespace')
self.found_change = True
modns[name] = new_namespace[name]
# Update in-place what we can
for name in oldnames & newnames:
self._update(modns, name, modns[name], new_namespace[name])
self._handle_namespace(modns)
for c in self._on_finish_callbacks:
c()
del self._on_finish_callbacks[:]
except:
pydev_log.exception()
def _handle_namespace(self, namespace, is_class_namespace=False):
on_finish = None
if is_class_namespace:
xreload_after_update = getattr(namespace, '__xreload_after_reload_update__', None)
if xreload_after_update is not None:
self.found_change = True
on_finish = lambda: xreload_after_update()
elif '__xreload_after_reload_update__' in namespace:
xreload_after_update = namespace['__xreload_after_reload_update__']
self.found_change = True
on_finish = lambda: xreload_after_update(namespace)
if on_finish is not None:
# If a client wants to know about it, give him a chance.
self._on_finish_callbacks.append(on_finish)
def _update(self, namespace, name, oldobj, newobj, is_class_namespace=False):
"""Update oldobj, if possible in place, with newobj.
If oldobj is immutable, this simply returns newobj.
Args:
oldobj: the object to be updated
newobj: the object used as the source for the update
"""
try:
notify_info2('Updating: ', oldobj)
if oldobj is newobj:
# Probably something imported
return
if type(oldobj) is not type(newobj):
# Cop-out: if the type changed, give up
if name not in ('__builtins__',):
notify_error('Type of: %s (old: %s != new: %s) changed... Skipping.' % (name, type(oldobj), type(newobj)))
return
if isinstance(newobj, types.FunctionType):
self._update_function(oldobj, newobj)
return
if isinstance(newobj, types.MethodType):
self._update_method(oldobj, newobj)
return
if isinstance(newobj, classmethod):
self._update_classmethod(oldobj, newobj)
return
if isinstance(newobj, staticmethod):
self._update_staticmethod(oldobj, newobj)
return
if hasattr(types, 'ClassType'):
classtype = (types.ClassType, type) # object is not instance of types.ClassType.
else:
classtype = type
if isinstance(newobj, classtype):
self._update_class(oldobj, newobj)
return
# New: dealing with metaclasses.
if hasattr(newobj, '__metaclass__') and hasattr(newobj, '__class__') and newobj.__metaclass__ == newobj.__class__:
self._update_class(oldobj, newobj)
return
if namespace is not None:
# Check for the `__xreload_old_new__` protocol (don't even compare things
# as even doing a comparison may break things -- see: https://github.com/microsoft/debugpy/issues/615).
xreload_old_new = None
if is_class_namespace:
xreload_old_new = getattr(namespace, '__xreload_old_new__', None)
if xreload_old_new is not None:
self.found_change = True
xreload_old_new(name, oldobj, newobj)
elif '__xreload_old_new__' in namespace:
xreload_old_new = namespace['__xreload_old_new__']
xreload_old_new(namespace, name, oldobj, newobj)
self.found_change = True
# Too much information to the user...
# else:
# notify_info0('%s NOT updated. Create __xreload_old_new__(name, old, new) for custom reload' % (name,))
except:
notify_error('Exception found when updating %s. Proceeding for other items.' % (name,))
pydev_log.exception()
# All of the following functions have the same signature as _update()
def _update_function(self, oldfunc, newfunc):
"""Update a function object."""
oldfunc.__doc__ = newfunc.__doc__
oldfunc.__dict__.update(newfunc.__dict__)
try:
newfunc.__code__
attr_name = '__code__'
except AttributeError:
newfunc.func_code
attr_name = 'func_code'
old_code = getattr(oldfunc, attr_name)
new_code = getattr(newfunc, attr_name)
if not code_objects_equal(old_code, new_code):
notify_info0('Updated function code:', oldfunc)
setattr(oldfunc, attr_name, new_code)
self.found_change = True
try:
oldfunc.__defaults__ = newfunc.__defaults__
except AttributeError:
oldfunc.func_defaults = newfunc.func_defaults
return oldfunc
def _update_method(self, oldmeth, newmeth):
"""Update a method object."""
# XXX What if im_func is not a function?
if hasattr(oldmeth, 'im_func') and hasattr(newmeth, 'im_func'):
self._update(None, None, oldmeth.im_func, newmeth.im_func)
elif hasattr(oldmeth, '__func__') and hasattr(newmeth, '__func__'):
self._update(None, None, oldmeth.__func__, newmeth.__func__)
return oldmeth
def _update_class(self, oldclass, newclass):
"""Update a class object."""
olddict = oldclass.__dict__
newdict = newclass.__dict__
oldnames = set(olddict)
newnames = set(newdict)
for name in newnames - oldnames:
setattr(oldclass, name, newdict[name])
notify_info0('Added:', name, 'to', oldclass)
self.found_change = True
# Note: not removing old things...
# for name in oldnames - newnames:
# notify_info('Removed:', name, 'from', oldclass)
# delattr(oldclass, name)
for name in (oldnames & newnames) - set(['__dict__', '__doc__']):
self._update(oldclass, name, olddict[name], newdict[name], is_class_namespace=True)
old_bases = getattr(oldclass, '__bases__', None)
new_bases = getattr(newclass, '__bases__', None)
if str(old_bases) != str(new_bases):
notify_error('Changing the hierarchy of a class is not supported. %s may be inconsistent.' % (oldclass,))
self._handle_namespace(oldclass, is_class_namespace=True)
def _update_classmethod(self, oldcm, newcm):
"""Update a classmethod update."""
# While we can't modify the classmethod object itself (it has no
# mutable attributes), we *can* extract the underlying function
# (by calling __get__(), which returns a method object) and update
# it in-place. We don't have the class available to pass to
# __get__() but any object except None will do.
self._update(None, None, oldcm.__get__(0), newcm.__get__(0))
def _update_staticmethod(self, oldsm, newsm):
"""Update a staticmethod update."""
# While we can't modify the staticmethod object itself (it has no
# mutable attributes), we *can* extract the underlying function
# (by calling __get__(), which returns it) and update it in-place.
# We don't have the class available to pass to __get__() but any
# object except None will do.
self._update(None, None, oldsm.__get__(0), newsm.__get__(0))
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_daemon_thread.py
from _pydev_bundle._pydev_saved_modules import threading
from _pydev_bundle import _pydev_saved_modules
from _pydevd_bundle.pydevd_utils import notify_about_gevent_if_needed
import weakref
from _pydevd_bundle.pydevd_constants import IS_JYTHON, IS_IRONPYTHON, \
PYDEVD_APPLY_PATCHING_TO_HIDE_PYDEVD_THREADS
from _pydev_bundle.pydev_log import exception as pydev_log_exception
import sys
from _pydev_bundle import pydev_log
import pydevd_tracing
from _pydevd_bundle.pydevd_collect_bytecode_info import iter_instructions
if IS_JYTHON:
import org.python.core as JyCore # @UnresolvedImport
class PyDBDaemonThread(threading.Thread):
def __init__(self, py_db, target_and_args=None):
'''
:param target_and_args:
tuple(func, args, kwargs) if this should be a function and args to run.
-- Note: use through run_as_pydevd_daemon_thread().
'''
threading.Thread.__init__(self)
notify_about_gevent_if_needed()
self._py_db = weakref.ref(py_db)
self._kill_received = False
mark_as_pydevd_daemon_thread(self)
self._target_and_args = target_and_args
@property
def py_db(self):
return self._py_db()
def run(self):
created_pydb_daemon = self.py_db.created_pydb_daemon_threads
created_pydb_daemon[self] = 1
try:
try:
if IS_JYTHON and not isinstance(threading.current_thread(), threading._MainThread):
                    # We shouldn't update sys.modules for the main thread, because it would lead to
                    # importing the 'threading' module a second time and creating a new main thread instance.
ss = JyCore.PySystemState()
# Note: Py.setSystemState() affects only the current thread.
JyCore.Py.setSystemState(ss)
self._stop_trace()
self._on_run()
except:
if sys is not None and pydev_log_exception is not None:
pydev_log_exception()
finally:
del created_pydb_daemon[self]
def _on_run(self):
if self._target_and_args is not None:
target, args, kwargs = self._target_and_args
target(*args, **kwargs)
else:
raise NotImplementedError('Should be reimplemented by: %s' % self.__class__)
def do_kill_pydev_thread(self):
if not self._kill_received:
pydev_log.debug('%s received kill signal', self.name)
self._kill_received = True
def _stop_trace(self):
if self.pydev_do_not_trace:
pydevd_tracing.SetTrace(None) # no debugging on this thread
def _collect_load_names(func):
found_load_names = set()
for instruction in iter_instructions(func.__code__):
if instruction.opname in ('LOAD_GLOBAL', 'LOAD_ATTR', 'LOAD_METHOD'):
found_load_names.add(instruction.argrepr)
return found_load_names
def _patch_threading_to_hide_pydevd_threads():
'''
Patches the needed functions on the `threading` module so that the pydevd threads are hidden.
Note that we patch the functions __code__ to avoid issues if some code had already imported those
variables prior to the patching.
'''
found_load_names = _collect_load_names(threading.enumerate)
# i.e.: we'll only apply the patching if the function seems to be what we expect.
new_threading_enumerate = None
if found_load_names in (
{'_active_limbo_lock', '_limbo', '_active', 'values', 'list'},
{'_active_limbo_lock', '_limbo', '_active', 'values', 'NULL + list'}
):
pydev_log.debug('Applying patching to hide pydevd threads (Py3 version).')
def new_threading_enumerate():
with _active_limbo_lock:
ret = list(_active.values()) + list(_limbo.values())
return [t for t in ret if not getattr(t, 'is_pydev_daemon_thread', False)]
elif found_load_names == set(('_active_limbo_lock', '_limbo', '_active', 'values')):
pydev_log.debug('Applying patching to hide pydevd threads (Py2 version).')
def new_threading_enumerate():
with _active_limbo_lock:
ret = _active.values() + _limbo.values()
return [t for t in ret if not getattr(t, 'is_pydev_daemon_thread', False)]
else:
pydev_log.info('Unable to hide pydevd threads. Found names in threading.enumerate: %s', found_load_names)
if new_threading_enumerate is not None:
def pydevd_saved_threading_enumerate():
with threading._active_limbo_lock:
return list(threading._active.values()) + list(threading._limbo.values())
_pydev_saved_modules.pydevd_saved_threading_enumerate = pydevd_saved_threading_enumerate
threading.enumerate.__code__ = new_threading_enumerate.__code__
# We also need to patch the active count (to match what we have in the enumerate).
def new_active_count():
# Note: as this will be executed in the `threading` module, `enumerate` will
# actually be threading.enumerate.
return len(enumerate())
threading.active_count.__code__ = new_active_count.__code__
# When shutting down, Python (on some versions) may do something as:
#
# def _pickSomeNonDaemonThread():
# for t in enumerate():
# if not t.daemon and t.is_alive():
# return t
# return None
#
# But in this particular case, we do want threads with `is_pydev_daemon_thread` to appear
# explicitly due to the pydevd `CheckAliveThread` (because we want the shutdown to wait on it).
# So, it can't rely on the `enumerate` for that anymore as it's patched to not return pydevd threads.
if hasattr(threading, '_pickSomeNonDaemonThread'):
def new_pick_some_non_daemon_thread():
with _active_limbo_lock:
# Ok for py2 and py3.
threads = list(_active.values()) + list(_limbo.values())
for t in threads:
if not t.daemon and t.is_alive():
return t
return None
threading._pickSomeNonDaemonThread.__code__ = new_pick_some_non_daemon_thread.__code__
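def _demo_code_patching():
    # Illustrative sketch (not part of the original module): swapping a function's
    # __code__ in place, the same technique used above to patch threading.enumerate
    # so that existing references to the function pick up the new behavior.
    def original():
        return 'original'
    def replacement():
        return 'patched'
    original.__code__ = replacement.__code__  # callers of `original` now run the new code
    assert original() == 'patched'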
_patched_threading_to_hide_pydevd_threads = False
def mark_as_pydevd_daemon_thread(thread):
if not IS_JYTHON and not IS_IRONPYTHON and PYDEVD_APPLY_PATCHING_TO_HIDE_PYDEVD_THREADS:
global _patched_threading_to_hide_pydevd_threads
if not _patched_threading_to_hide_pydevd_threads:
# When we mark the first thread as a pydevd daemon thread, we also change the threading
# functions to hide pydevd threads.
# Note: we don't just "hide" the pydevd threads from the threading module by not using it
# (i.e.: just using the `thread.start_new_thread` instead of `threading.Thread`)
# because there's 1 thread (the `CheckAliveThread`) which is a pydevd thread but
# isn't really a daemon thread (so, we need CPython to wait on it for shutdown,
# in which case it needs to be in `threading` and the patching would be needed anyways).
_patched_threading_to_hide_pydevd_threads = True
try:
_patch_threading_to_hide_pydevd_threads()
except:
pydev_log.exception('Error applying patching to hide pydevd threads.')
thread.pydev_do_not_trace = True
thread.is_pydev_daemon_thread = True
thread.daemon = True
def run_as_pydevd_daemon_thread(py_db, func, *args, **kwargs):
'''
Runs a function as a pydevd daemon thread (without any tracing in place).
'''
t = PyDBDaemonThread(py_db, target_and_args=(func, args, kwargs))
t.name = '%s (pydevd daemon thread)' % (func.__name__,)
t.start()
return t
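if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module): running
    # a function on a pydevd daemon thread. `_FakePyDB` is a hypothetical stand-in;
    # a real PyDB instance provides the created_pydb_daemon_threads registry itself.
    class _FakePyDB(object):
        created_pydb_daemon_threads = {}
    fake_py_db = _FakePyDB()  # keep a strong reference: PyDBDaemonThread only holds a weakref
    t = run_as_pydevd_daemon_thread(fake_py_db, print, 'running in a pydevd daemon thread')
    t.join()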
| 7,964 | Python | 40.056701 | 125 | 0.621673 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_filtering.py | import fnmatch
import glob
import os.path
import sys
from _pydev_bundle import pydev_log
import pydevd_file_utils
import json
from collections import namedtuple
from _pydev_bundle._pydev_saved_modules import threading
from pydevd_file_utils import normcase
from _pydevd_bundle.pydevd_constants import USER_CODE_BASENAMES_STARTING_WITH, \
LIBRARY_CODE_BASENAMES_STARTING_WITH, IS_PYPY, IS_WINDOWS
from _pydevd_bundle import pydevd_constants
ExcludeFilter = namedtuple('ExcludeFilter', 'name, exclude, is_path')
def _convert_to_str_and_clear_empty(roots):
new_roots = []
for root in roots:
assert isinstance(root, str), '%s not str (found: %s)' % (root, type(root))
if root:
new_roots.append(root)
return new_roots
def _check_matches(patterns, paths):
if not patterns and not paths:
# Matched to the end.
return True
if (not patterns and paths) or (patterns and not paths):
return False
pattern = normcase(patterns[0])
path = normcase(paths[0])
if not glob.has_magic(pattern):
if pattern != path:
return False
elif pattern == '**':
if len(patterns) == 1:
return True # if ** is the last one it matches anything to the right.
for i in range(len(paths)):
# Recursively check the remaining patterns as the
# current pattern could match any number of paths.
if _check_matches(patterns[1:], paths[i:]):
return True
elif not fnmatch.fnmatch(path, pattern):
# Current part doesn't match.
return False
return _check_matches(patterns[1:], paths[1:])
def glob_matches_path(path, pattern, sep=os.sep, altsep=os.altsep):
if altsep:
pattern = pattern.replace(altsep, sep)
path = path.replace(altsep, sep)
drive = ''
if len(path) > 1 and path[1] == ':':
drive, path = path[0], path[2:]
if drive and len(pattern) > 1:
if pattern[1] == ':':
if drive.lower() != pattern[0].lower():
return False
pattern = pattern[2:]
patterns = pattern.split(sep)
paths = path.split(sep)
if paths:
if paths[0] == '':
paths = paths[1:]
if patterns:
if patterns[0] == '':
patterns = patterns[1:]
return _check_matches(patterns, paths)
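if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module): how the recursive
    # '**' handling above resolves typical exclude patterns (paths are hypothetical).
    assert glob_matches_path('/project/tests/test_x.py', '**/tests/**', sep='/', altsep=None)
    assert not glob_matches_path('/project/src/app.py', '**/tests/**', sep='/', altsep=None)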
class FilesFiltering(object):
'''
    Note: calls to FilesFiltering are uncached.
The actual API used should be through PyDB.
'''
def __init__(self):
self._exclude_filters = []
self._project_roots = []
self._library_roots = []
# Filter out libraries?
self._use_libraries_filter = False
self.require_module = False # True if some exclude filter filters by the module.
self.set_use_libraries_filter(os.getenv('PYDEVD_FILTER_LIBRARIES') is not None)
project_roots = os.getenv('IDE_PROJECT_ROOTS', None)
if project_roots is not None:
project_roots = project_roots.split(os.pathsep)
else:
project_roots = []
self.set_project_roots(project_roots)
library_roots = os.getenv('LIBRARY_ROOTS', None)
if library_roots is not None:
library_roots = library_roots.split(os.pathsep)
else:
library_roots = self._get_default_library_roots()
self.set_library_roots(library_roots)
# Stepping filters.
pydevd_filters = os.getenv('PYDEVD_FILTERS', '')
        # To filter something out, it's specified as: {'**/not_my_code/**': True}
if pydevd_filters:
pydev_log.debug("PYDEVD_FILTERS %s", (pydevd_filters,))
if pydevd_filters.startswith('{'):
# dict(glob_pattern (str) -> exclude(True or False))
exclude_filters = []
for key, val in json.loads(pydevd_filters).items():
exclude_filters.append(ExcludeFilter(key, val, True))
self._exclude_filters = exclude_filters
else:
# A ';' separated list of strings with globs for the
# list of excludes.
filters = pydevd_filters.split(';')
new_filters = []
for new_filter in filters:
if new_filter.strip():
new_filters.append(ExcludeFilter(new_filter.strip(), True, True))
self._exclude_filters = new_filters
@classmethod
def _get_default_library_roots(cls):
pydev_log.debug("Collecting default library roots.")
# Provide sensible defaults if not in env vars.
import site
roots = []
try:
import sysconfig # Python 2.7 onwards only.
except ImportError:
pass
else:
for path_name in set(('stdlib', 'platstdlib', 'purelib', 'platlib')) & set(sysconfig.get_path_names()):
roots.append(sysconfig.get_path(path_name))
# Make sure we always get at least the standard library location (based on the `os` and
# `threading` modules -- it's a bit weird that it may be different on the ci, but it happens).
roots.append(os.path.dirname(os.__file__))
roots.append(os.path.dirname(threading.__file__))
if IS_PYPY:
# On PyPy 3.6 (7.3.1) it wrongly says that sysconfig.get_path('stdlib') is
# <install>/lib-pypy when the installed version is <install>/lib_pypy.
try:
import _pypy_wait
except ImportError:
pydev_log.debug("Unable to import _pypy_wait on PyPy when collecting default library roots.")
else:
pypy_lib_dir = os.path.dirname(_pypy_wait.__file__)
pydev_log.debug("Adding %s to default library roots.", pypy_lib_dir)
roots.append(pypy_lib_dir)
if hasattr(site, 'getusersitepackages'):
site_paths = site.getusersitepackages()
if isinstance(site_paths, (list, tuple)):
for site_path in site_paths:
roots.append(site_path)
else:
roots.append(site_paths)
if hasattr(site, 'getsitepackages'):
site_paths = site.getsitepackages()
if isinstance(site_paths, (list, tuple)):
for site_path in site_paths:
roots.append(site_path)
else:
roots.append(site_paths)
for path in sys.path:
if os.path.exists(path) and os.path.basename(path) in ('site-packages', 'pip-global'):
roots.append(path)
roots.extend([os.path.realpath(path) for path in roots])
return sorted(set(roots))
def _fix_roots(self, roots):
roots = _convert_to_str_and_clear_empty(roots)
new_roots = []
for root in roots:
path = self._absolute_normalized_path(root)
if pydevd_constants.IS_WINDOWS:
new_roots.append(path + '\\')
else:
new_roots.append(path + '/')
return new_roots
def _absolute_normalized_path(self, filename):
'''
Provides a version of the filename that's absolute and normalized.
'''
return normcase(pydevd_file_utils.absolute_path(filename))
def set_project_roots(self, project_roots):
self._project_roots = self._fix_roots(project_roots)
pydev_log.debug("IDE_PROJECT_ROOTS %s\n" % project_roots)
def _get_project_roots(self):
return self._project_roots
def set_library_roots(self, roots):
self._library_roots = self._fix_roots(roots)
pydev_log.debug("LIBRARY_ROOTS %s\n" % roots)
def _get_library_roots(self):
return self._library_roots
def in_project_roots(self, received_filename):
'''
Note: don't call directly. Use PyDb.in_project_scope (there's no caching here and it doesn't
handle all possibilities for knowing whether a project is actually in the scope, it
just handles the heuristics based on the absolute_normalized_filename without the actual frame).
'''
DEBUG = False
if received_filename.startswith(USER_CODE_BASENAMES_STARTING_WITH):
if DEBUG:
pydev_log.debug('In in_project_roots - user basenames - starts with %s (%s)', received_filename, USER_CODE_BASENAMES_STARTING_WITH)
return True
if received_filename.startswith(LIBRARY_CODE_BASENAMES_STARTING_WITH):
if DEBUG:
pydev_log.debug('Not in in_project_roots - library basenames - starts with %s (%s)', received_filename, LIBRARY_CODE_BASENAMES_STARTING_WITH)
return False
project_roots = self._get_project_roots() # roots are absolute/normalized.
absolute_normalized_filename = self._absolute_normalized_path(received_filename)
absolute_normalized_filename_as_dir = absolute_normalized_filename + ('\\' if IS_WINDOWS else '/')
found_in_project = []
for root in project_roots:
if root and (absolute_normalized_filename.startswith(root) or root == absolute_normalized_filename_as_dir):
if DEBUG:
pydev_log.debug('In project: %s (%s)', absolute_normalized_filename, root)
found_in_project.append(root)
found_in_library = []
library_roots = self._get_library_roots()
for root in library_roots:
if root and (absolute_normalized_filename.startswith(root) or root == absolute_normalized_filename_as_dir):
found_in_library.append(root)
if DEBUG:
pydev_log.debug('In library: %s (%s)', absolute_normalized_filename, root)
else:
if DEBUG:
pydev_log.debug('Not in library: %s (%s)', absolute_normalized_filename, root)
if not project_roots:
# If we have no project roots configured, consider it being in the project
# roots if it's not found in site-packages (because we have defaults for those
# and not the other way around).
in_project = not found_in_library
if DEBUG:
pydev_log.debug('Final in project (no project roots): %s (%s)', absolute_normalized_filename, in_project)
else:
in_project = False
if found_in_project:
if not found_in_library:
if DEBUG:
pydev_log.debug('Final in project (in_project and not found_in_library): %s (True)', absolute_normalized_filename)
in_project = True
else:
# Found in both, let's see which one has the bigger path matched.
if max(len(x) for x in found_in_project) > max(len(x) for x in found_in_library):
in_project = True
if DEBUG:
pydev_log.debug('Final in project (found in both): %s (%s)', absolute_normalized_filename, in_project)
return in_project
def use_libraries_filter(self):
'''
Should we debug only what's inside project folders?
'''
return self._use_libraries_filter
def set_use_libraries_filter(self, use):
pydev_log.debug("pydevd: Use libraries filter: %s\n" % use)
self._use_libraries_filter = use
def use_exclude_filters(self):
# Enabled if we have any filters registered.
return len(self._exclude_filters) > 0
def exclude_by_filter(self, absolute_filename, module_name):
'''
:return: True if it should be excluded, False if it should be included and None
if no rule matched the given file.
'''
for exclude_filter in self._exclude_filters: # : :type exclude_filter: ExcludeFilter
if exclude_filter.is_path:
if glob_matches_path(absolute_filename, exclude_filter.name):
return exclude_filter.exclude
else:
# Module filter.
if exclude_filter.name == module_name or module_name.startswith(exclude_filter.name + '.'):
return exclude_filter.exclude
return None
def set_exclude_filters(self, exclude_filters):
'''
:param list(ExcludeFilter) exclude_filters:
'''
self._exclude_filters = exclude_filters
self.require_module = False
for exclude_filter in exclude_filters:
if not exclude_filter.is_path:
self.require_module = True
break
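if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module): a
    # path-based and a module-based exclude filter as consumed by exclude_by_filter
    # above. The paths and module names are hypothetical.
    files_filtering = FilesFiltering()
    files_filtering.set_exclude_filters([
        ExcludeFilter('**/site-packages/**', True, True),  # exclude by path glob
        ExcludeFilter('django', True, False),  # exclude by module name prefix
    ])
    assert files_filtering.exclude_by_filter('/proj/site-packages/m.py', 'm') is True
    assert files_filtering.exclude_by_filter('/proj/app.py', 'app') is None  # no rule matched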
| 12,701 | Python | 37.259036 | 157 | 0.587985 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command.py | import json
import os
import sys
import traceback
from _pydev_bundle import pydev_log
from _pydev_bundle.pydev_log import exception as pydev_log_exception
from _pydevd_bundle import pydevd_traceproperty, pydevd_dont_trace, pydevd_utils
from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info
from _pydevd_bundle.pydevd_breakpoints import get_exception_class
from _pydevd_bundle.pydevd_comm import (
InternalEvaluateConsoleExpression, InternalConsoleGetCompletions, InternalRunCustomOperation,
internal_get_next_statement_targets, internal_get_smart_step_into_variants)
from _pydevd_bundle.pydevd_constants import NEXT_VALUE_SEPARATOR, IS_WINDOWS, NULL
from _pydevd_bundle.pydevd_comm_constants import ID_TO_MEANING, CMD_EXEC_EXPRESSION, CMD_AUTHENTICATE
from _pydevd_bundle.pydevd_api import PyDevdAPI
from io import StringIO
from _pydevd_bundle.pydevd_net_command import NetCommand
from _pydevd_bundle.pydevd_thread_lifecycle import pydevd_find_thread_by_id
import pydevd_file_utils
class _PyDevCommandProcessor(object):
def __init__(self):
self.api = PyDevdAPI()
def process_net_command(self, py_db, cmd_id, seq, text):
'''Processes a command received from the Java side
@param cmd_id: the id of the command
@param seq: the sequence of the command
@param text: the text received in the command
'''
# We can only proceed if the client is already authenticated or if it's the
# command to authenticate.
if cmd_id != CMD_AUTHENTICATE and not py_db.authentication.is_authenticated():
cmd = py_db.cmd_factory.make_error_message(seq, 'Client not authenticated.')
py_db.writer.add_command(cmd)
return
meaning = ID_TO_MEANING[str(cmd_id)]
# print('Handling %s (%s)' % (meaning, text))
method_name = meaning.lower()
        on_command = getattr(self, method_name, None)  # method_name is already lowercase
if on_command is None:
# I have no idea what this is all about
cmd = py_db.cmd_factory.make_error_message(seq, "unexpected command " + str(cmd_id))
py_db.writer.add_command(cmd)
return
lock = py_db._main_lock
if method_name == 'cmd_thread_dump_to_stderr':
# We can skip the main debugger locks for cases where we know it's not needed.
lock = NULL
with lock:
try:
cmd = on_command(py_db, cmd_id, seq, text)
if cmd is not None:
py_db.writer.add_command(cmd)
except:
if traceback is not None and sys is not None and pydev_log_exception is not None:
pydev_log_exception()
stream = StringIO()
traceback.print_exc(file=stream)
cmd = py_db.cmd_factory.make_error_message(
seq,
"Unexpected exception in process_net_command.\nInitial params: %s. Exception: %s" % (
((cmd_id, seq, text), stream.getvalue())
)
)
if cmd is not None:
py_db.writer.add_command(cmd)
def cmd_authenticate(self, py_db, cmd_id, seq, text):
access_token = text
py_db.authentication.login(access_token)
if py_db.authentication.is_authenticated():
return NetCommand(cmd_id, seq, py_db.authentication.client_access_token)
return py_db.cmd_factory.make_error_message(seq, 'Client not authenticated.')
def cmd_run(self, py_db, cmd_id, seq, text):
return self.api.run(py_db)
def cmd_list_threads(self, py_db, cmd_id, seq, text):
return self.api.list_threads(py_db, seq)
def cmd_get_completions(self, py_db, cmd_id, seq, text):
        # we received a command to compute completions
        # the text is: thread_id\tframe_id\tscope\tactivation token
thread_id, frame_id, _scope, act_tok = text.split('\t', 3)
return self.api.request_completions(py_db, seq, thread_id, frame_id, act_tok)
def cmd_get_thread_stack(self, py_db, cmd_id, seq, text):
        # Receives a thread_id and a timeout: the time we should wait to provide
        # the stack if the given thread is still not suspended.
if '\t' in text:
thread_id, timeout = text.split('\t')
timeout = float(timeout)
else:
thread_id = text
timeout = .5 # Default timeout is .5 seconds
return self.api.request_stack(py_db, seq, thread_id, fmt={}, timeout=timeout)
def cmd_set_protocol(self, py_db, cmd_id, seq, text):
return self.api.set_protocol(py_db, seq, text.strip())
def cmd_thread_suspend(self, py_db, cmd_id, seq, text):
return self.api.request_suspend_thread(py_db, text.strip())
def cmd_version(self, py_db, cmd_id, seq, text):
# Default based on server process (although ideally the IDE should
# provide it).
if IS_WINDOWS:
ide_os = 'WINDOWS'
else:
ide_os = 'UNIX'
# Breakpoints can be grouped by 'LINE' or by 'ID'.
breakpoints_by = 'LINE'
splitted = text.split('\t')
if len(splitted) == 1:
_local_version = splitted
elif len(splitted) == 2:
_local_version, ide_os = splitted
elif len(splitted) == 3:
_local_version, ide_os, breakpoints_by = splitted
version_msg = self.api.set_ide_os_and_breakpoints_by(py_db, seq, ide_os, breakpoints_by)
# Enable thread notifications after the version command is completed.
self.api.set_enable_thread_notifications(py_db, True)
return version_msg
def cmd_thread_run(self, py_db, cmd_id, seq, text):
return self.api.request_resume_thread(text.strip())
def _cmd_step(self, py_db, cmd_id, seq, text):
return self.api.request_step(py_db, text.strip(), cmd_id)
cmd_step_into = _cmd_step
cmd_step_into_my_code = _cmd_step
cmd_step_over = _cmd_step
cmd_step_over_my_code = _cmd_step
cmd_step_return = _cmd_step
cmd_step_return_my_code = _cmd_step
def _cmd_set_next(self, py_db, cmd_id, seq, text):
thread_id, line, func_name = text.split('\t', 2)
return self.api.request_set_next(py_db, seq, thread_id, cmd_id, None, line, func_name)
cmd_run_to_line = _cmd_set_next
cmd_set_next_statement = _cmd_set_next
def cmd_smart_step_into(self, py_db, cmd_id, seq, text):
thread_id, line_or_bytecode_offset, func_name = text.split('\t', 2)
if line_or_bytecode_offset.startswith('offset='):
# In this case we request the smart step into to stop given the parent frame
# and the location of the parent frame bytecode offset and not just the func_name
# (this implies that `CMD_GET_SMART_STEP_INTO_VARIANTS` was previously used
# to know what are the valid stop points).
temp = line_or_bytecode_offset[len('offset='):]
if ';' in temp:
offset, child_offset = temp.split(';')
offset = int(offset)
child_offset = int(child_offset)
else:
child_offset = -1
offset = int(temp)
return self.api.request_smart_step_into(py_db, seq, thread_id, offset, child_offset)
else:
# If the offset wasn't passed, just use the line/func_name to do the stop.
return self.api.request_smart_step_into_by_func_name(py_db, seq, thread_id, line_or_bytecode_offset, func_name)
def cmd_reload_code(self, py_db, cmd_id, seq, text):
text = text.strip()
if '\t' not in text:
module_name = text.strip()
filename = None
else:
module_name, filename = text.split('\t', 1)
self.api.request_reload_code(py_db, seq, module_name, filename)
def cmd_change_variable(self, py_db, cmd_id, seq, text):
# the text is: thread\tstackframe\tFRAME|GLOBAL\tattribute_to_change\tvalue_to_change
thread_id, frame_id, scope, attr_and_value = text.split('\t', 3)
tab_index = attr_and_value.rindex('\t')
attr = attr_and_value[0:tab_index].replace('\t', '.')
value = attr_and_value[tab_index + 1:]
self.api.request_change_variable(py_db, seq, thread_id, frame_id, scope, attr, value)
def cmd_get_variable(self, py_db, cmd_id, seq, text):
# we received some command to get a variable
# the text is: thread_id\tframe_id\tFRAME|GLOBAL\tattributes*
thread_id, frame_id, scopeattrs = text.split('\t', 2)
if scopeattrs.find('\t') != -1: # there are attributes beyond scope
scope, attrs = scopeattrs.split('\t', 1)
else:
scope, attrs = (scopeattrs, None)
self.api.request_get_variable(py_db, seq, thread_id, frame_id, scope, attrs)
def cmd_get_array(self, py_db, cmd_id, seq, text):
# Note: untested and unused in pydev
# we received some command to get an array variable
# the text is: thread_id\tframe_id\tFRAME|GLOBAL\tname\ttemp\troffs\tcoffs\trows\tcols\tformat
roffset, coffset, rows, cols, format, thread_id, frame_id, scopeattrs = text.split('\t', 7)
if scopeattrs.find('\t') != -1: # there are attributes beyond scope
scope, attrs = scopeattrs.split('\t', 1)
else:
scope, attrs = (scopeattrs, None)
self.api.request_get_array(py_db, seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs)
def cmd_show_return_values(self, py_db, cmd_id, seq, text):
show_return_values = text.split('\t')[1]
self.api.set_show_return_values(py_db, int(show_return_values) == 1)
def cmd_load_full_value(self, py_db, cmd_id, seq, text):
# Note: untested and unused in pydev
thread_id, frame_id, scopeattrs = text.split('\t', 2)
vars = scopeattrs.split(NEXT_VALUE_SEPARATOR)
self.api.request_load_full_value(py_db, seq, thread_id, frame_id, vars)
def cmd_get_description(self, py_db, cmd_id, seq, text):
# Note: untested and unused in pydev
thread_id, frame_id, expression = text.split('\t', 2)
self.api.request_get_description(py_db, seq, thread_id, frame_id, expression)
def cmd_get_frame(self, py_db, cmd_id, seq, text):
thread_id, frame_id, scope = text.split('\t', 2)
self.api.request_get_frame(py_db, seq, thread_id, frame_id)
def cmd_set_break(self, py_db, cmd_id, seq, text):
        # func name: 'None' matches anything; empty matches only the global context; a specific name matches only that method's context.
# command to add some breakpoint.
# text is filename\tline. Add to breakpoints dictionary
suspend_policy = u"NONE" # Can be 'NONE' or 'ALL'
is_logpoint = False
hit_condition = None
if py_db._set_breakpoints_with_id:
try:
try:
breakpoint_id, btype, filename, line, func_name, condition, expression, hit_condition, is_logpoint, suspend_policy = text.split(u'\t', 9)
except ValueError: # not enough values to unpack
# No suspend_policy passed (use default).
breakpoint_id, btype, filename, line, func_name, condition, expression, hit_condition, is_logpoint = text.split(u'\t', 8)
is_logpoint = is_logpoint == u'True'
except ValueError: # not enough values to unpack
breakpoint_id, btype, filename, line, func_name, condition, expression = text.split(u'\t', 6)
breakpoint_id = int(breakpoint_id)
line = int(line)
# We must restore new lines and tabs as done in
# AbstractDebugTarget.breakpointAdded
condition = condition.replace(u"@_@NEW_LINE_CHAR@_@", u'\n').\
replace(u"@_@TAB_CHAR@_@", u'\t').strip()
expression = expression.replace(u"@_@NEW_LINE_CHAR@_@", u'\n').\
replace(u"@_@TAB_CHAR@_@", u'\t').strip()
else:
# Note: this else should be removed after PyCharm migrates to setting
# breakpoints by id (and ideally also provides func_name).
btype, filename, line, func_name, suspend_policy, condition, expression = text.split(u'\t', 6)
# If we don't have an id given for each breakpoint, consider
# the id to be the line.
breakpoint_id = line = int(line)
condition = condition.replace(u"@_@NEW_LINE_CHAR@_@", u'\n'). \
replace(u"@_@TAB_CHAR@_@", u'\t').strip()
expression = expression.replace(u"@_@NEW_LINE_CHAR@_@", u'\n'). \
replace(u"@_@TAB_CHAR@_@", u'\t').strip()
if condition is not None and (len(condition) <= 0 or condition == u"None"):
condition = None
if expression is not None and (len(expression) <= 0 or expression == u"None"):
expression = None
if hit_condition is not None and (len(hit_condition) <= 0 or hit_condition == u"None"):
hit_condition = None
def on_changed_breakpoint_state(breakpoint_id, add_breakpoint_result):
error_code = add_breakpoint_result.error_code
translated_line = add_breakpoint_result.translated_line
translated_filename = add_breakpoint_result.translated_filename
msg = ''
if error_code:
if error_code == self.api.ADD_BREAKPOINT_FILE_NOT_FOUND:
msg = 'pydev debugger: Trying to add breakpoint to file that does not exist: %s (will have no effect).\n' % (translated_filename,)
elif error_code == self.api.ADD_BREAKPOINT_FILE_EXCLUDED_BY_FILTERS:
msg = 'pydev debugger: Trying to add breakpoint to file that is excluded by filters: %s (will have no effect).\n' % (translated_filename,)
elif error_code == self.api.ADD_BREAKPOINT_LAZY_VALIDATION:
msg = '' # Ignore this here (if/when loaded, it'll call on_changed_breakpoint_state again accordingly).
elif error_code == self.api.ADD_BREAKPOINT_INVALID_LINE:
msg = 'pydev debugger: Trying to add breakpoint to line (%s) that is not valid in: %s.\n' % (translated_line, translated_filename,)
else:
# Shouldn't get here.
msg = 'pydev debugger: Breakpoint not validated (reason unknown -- please report as error): %s (%s).\n' % (translated_filename, translated_line)
else:
if add_breakpoint_result.original_line != translated_line:
msg = 'pydev debugger (info): Breakpoint in line: %s moved to line: %s (in %s).\n' % (add_breakpoint_result.original_line, translated_line, translated_filename)
if msg:
py_db.writer.add_command(py_db.cmd_factory.make_warning_message(msg))
result = self.api.add_breakpoint(
py_db, self.api.filename_to_str(filename), btype, breakpoint_id, line, condition, func_name,
expression, suspend_policy, hit_condition, is_logpoint, on_changed_breakpoint_state=on_changed_breakpoint_state)
on_changed_breakpoint_state(breakpoint_id, result)
def cmd_remove_break(self, py_db, cmd_id, seq, text):
# command to remove some breakpoint
        # text is type\tfile\tid. Remove from breakpoints dictionary
breakpoint_type, filename, breakpoint_id = text.split('\t', 2)
filename = self.api.filename_to_str(filename)
try:
breakpoint_id = int(breakpoint_id)
except ValueError:
pydev_log.critical('Error removing breakpoint. Expected breakpoint_id to be an int. Found: %s', breakpoint_id)
else:
self.api.remove_breakpoint(py_db, filename, breakpoint_type, breakpoint_id)
def _cmd_exec_or_evaluate_expression(self, py_db, cmd_id, seq, text):
# command to evaluate the given expression
# text is: thread\tstackframe\tLOCAL\texpression
attr_to_set_result = ""
try:
thread_id, frame_id, scope, expression, trim, attr_to_set_result = text.split('\t', 5)
except ValueError:
thread_id, frame_id, scope, expression, trim = text.split('\t', 4)
is_exec = cmd_id == CMD_EXEC_EXPRESSION
trim_if_too_big = int(trim) == 1
self.api.request_exec_or_evaluate(
py_db, seq, thread_id, frame_id, expression, is_exec, trim_if_too_big, attr_to_set_result)
cmd_evaluate_expression = _cmd_exec_or_evaluate_expression
cmd_exec_expression = _cmd_exec_or_evaluate_expression
def cmd_console_exec(self, py_db, cmd_id, seq, text):
        # command to exec an expression in the console; if the expression is only partially valid, 'False' is returned
# text is: thread\tstackframe\tLOCAL\texpression
thread_id, frame_id, scope, expression = text.split('\t', 3)
self.api.request_console_exec(py_db, seq, thread_id, frame_id, expression)
def cmd_set_path_mapping_json(self, py_db, cmd_id, seq, text):
'''
:param text:
Json text. Something as:
{
"pathMappings": [
{
"localRoot": "c:/temp",
"remoteRoot": "/usr/temp"
}
],
"debug": true,
"force": false
}
'''
as_json = json.loads(text)
force = as_json.get('force', False)
path_mappings = []
for pathMapping in as_json.get('pathMappings', []):
localRoot = pathMapping.get('localRoot', '')
remoteRoot = pathMapping.get('remoteRoot', '')
if (localRoot != '') and (remoteRoot != ''):
path_mappings.append((localRoot, remoteRoot))
if bool(path_mappings) or force:
pydevd_file_utils.setup_client_server_paths(path_mappings)
debug = as_json.get('debug', False)
if debug or force:
pydevd_file_utils.DEBUG_CLIENT_SERVER_TRANSLATION = debug
def cmd_set_py_exception_json(self, py_db, cmd_id, seq, text):
# This API is optional and works 'in bulk' -- it's possible
# to get finer-grained control with CMD_ADD_EXCEPTION_BREAK/CMD_REMOVE_EXCEPTION_BREAK
# which allows setting caught/uncaught per exception, although global settings such as:
# - skip_on_exceptions_thrown_in_same_context
# - ignore_exceptions_thrown_in_lines_with_ignore_exception
# must still be set through this API (before anything else as this clears all existing
# exception breakpoints).
try:
py_db.break_on_uncaught_exceptions = {}
py_db.break_on_caught_exceptions = {}
py_db.break_on_user_uncaught_exceptions = {}
as_json = json.loads(text)
break_on_uncaught = as_json.get('break_on_uncaught', False)
break_on_caught = as_json.get('break_on_caught', False)
break_on_user_caught = as_json.get('break_on_user_caught', False)
py_db.skip_on_exceptions_thrown_in_same_context = as_json.get('skip_on_exceptions_thrown_in_same_context', False)
py_db.ignore_exceptions_thrown_in_lines_with_ignore_exception = as_json.get('ignore_exceptions_thrown_in_lines_with_ignore_exception', False)
ignore_libraries = as_json.get('ignore_libraries', False)
exception_types = as_json.get('exception_types', [])
for exception_type in exception_types:
if not exception_type:
continue
py_db.add_break_on_exception(
exception_type,
condition=None,
expression=None,
notify_on_handled_exceptions=break_on_caught,
notify_on_unhandled_exceptions=break_on_uncaught,
notify_on_user_unhandled_exceptions=break_on_user_caught,
notify_on_first_raise_only=True,
ignore_libraries=ignore_libraries,
)
py_db.on_breakpoints_changed()
except:
pydev_log.exception("Error when setting exception list. Received: %s", text)
def cmd_set_py_exception(self, py_db, cmd_id, seq, text):
# DEPRECATED. Use cmd_set_py_exception_json instead.
try:
splitted = text.split(';')
py_db.break_on_uncaught_exceptions = {}
py_db.break_on_caught_exceptions = {}
py_db.break_on_user_uncaught_exceptions = {}
if len(splitted) >= 5:
if splitted[0] == 'true':
break_on_uncaught = True
else:
break_on_uncaught = False
if splitted[1] == 'true':
break_on_caught = True
else:
break_on_caught = False
if splitted[2] == 'true':
py_db.skip_on_exceptions_thrown_in_same_context = True
else:
py_db.skip_on_exceptions_thrown_in_same_context = False
if splitted[3] == 'true':
py_db.ignore_exceptions_thrown_in_lines_with_ignore_exception = True
else:
py_db.ignore_exceptions_thrown_in_lines_with_ignore_exception = False
if splitted[4] == 'true':
ignore_libraries = True
else:
ignore_libraries = False
for exception_type in splitted[5:]:
exception_type = exception_type.strip()
if not exception_type:
continue
py_db.add_break_on_exception(
exception_type,
condition=None,
expression=None,
notify_on_handled_exceptions=break_on_caught,
notify_on_unhandled_exceptions=break_on_uncaught,
notify_on_user_unhandled_exceptions=False, # TODO (not currently supported in this API).
notify_on_first_raise_only=True,
ignore_libraries=ignore_libraries,
)
else:
pydev_log.exception("Expected to have at least 5 ';' separated items. Received: %s", text)
except:
pydev_log.exception("Error when setting exception list. Received: %s", text)
def _load_source(self, py_db, cmd_id, seq, text):
filename = text
filename = self.api.filename_to_str(filename)
self.api.request_load_source(py_db, seq, filename)
cmd_load_source = _load_source
cmd_get_file_contents = _load_source
def cmd_load_source_from_frame_id(self, py_db, cmd_id, seq, text):
frame_id = text
self.api.request_load_source_from_frame_id(py_db, seq, frame_id)
def cmd_set_property_trace(self, py_db, cmd_id, seq, text):
# Command which receives whether to trace property getter/setter/deleter
        # text is feature_state(true/false);disable_getter;disable_setter;disable_deleter
if text:
splitted = text.split(';')
            if len(splitted) >= 4:  # feature_state plus the three getter/setter/deleter flags
if not py_db.disable_property_trace and splitted[0] == 'true':
# Replacing property by custom property only when the debugger starts
pydevd_traceproperty.replace_builtin_property()
py_db.disable_property_trace = True
# Enable/Disable tracing of the property getter
if splitted[1] == 'true':
py_db.disable_property_getter_trace = True
else:
py_db.disable_property_getter_trace = False
# Enable/Disable tracing of the property setter
if splitted[2] == 'true':
py_db.disable_property_setter_trace = True
else:
py_db.disable_property_setter_trace = False
# Enable/Disable tracing of the property deleter
if splitted[3] == 'true':
py_db.disable_property_deleter_trace = True
else:
py_db.disable_property_deleter_trace = False
def cmd_add_exception_break(self, py_db, cmd_id, seq, text):
# Note that this message has some idiosyncrasies...
#
# notify_on_handled_exceptions can be 0, 1 or 2
# 0 means we should not stop on handled exceptions.
# 1 means we should stop on handled exceptions showing it on all frames where the exception passes.
# 2 means we should stop on handled exceptions but we should only notify about it once.
#
# To ignore_libraries properly, besides setting ignore_libraries to 1, the IDE_PROJECT_ROOTS environment
# variable must be set (so, we'll ignore anything not below IDE_PROJECT_ROOTS) -- this is not ideal as
# the environment variable may not be properly set if it didn't start from the debugger (we should
# create a custom message for that).
#
# There are 2 global settings which can only be set in CMD_SET_PY_EXCEPTION. Namely:
#
# py_db.skip_on_exceptions_thrown_in_same_context
# - If True, we should only show the exception in a caller, not where it was first raised.
#
# py_db.ignore_exceptions_thrown_in_lines_with_ignore_exception
# - If True exceptions thrown in lines with '@IgnoreException' will not be shown.
condition = ""
expression = ""
if text.find('\t') != -1:
try:
exception, condition, expression, notify_on_handled_exceptions, notify_on_unhandled_exceptions, ignore_libraries = text.split('\t', 5)
except:
exception, notify_on_handled_exceptions, notify_on_unhandled_exceptions, ignore_libraries = text.split('\t', 3)
else:
exception, notify_on_handled_exceptions, notify_on_unhandled_exceptions, ignore_libraries = text, 0, 0, 0
condition = condition.replace("@_@NEW_LINE_CHAR@_@", '\n').replace("@_@TAB_CHAR@_@", '\t').strip()
if condition is not None and (len(condition) == 0 or condition == "None"):
condition = None
expression = expression.replace("@_@NEW_LINE_CHAR@_@", '\n').replace("@_@TAB_CHAR@_@", '\t').strip()
if expression is not None and (len(expression) == 0 or expression == "None"):
expression = None
if exception.find('-') != -1:
breakpoint_type, exception = exception.split('-')
else:
breakpoint_type = 'python'
if breakpoint_type == 'python':
self.api.add_python_exception_breakpoint(
py_db, exception, condition, expression,
notify_on_handled_exceptions=int(notify_on_handled_exceptions) > 0,
notify_on_unhandled_exceptions=int(notify_on_unhandled_exceptions) == 1,
notify_on_user_unhandled_exceptions=0, # TODO (not currently supported in this API).
notify_on_first_raise_only=int(notify_on_handled_exceptions) == 2,
ignore_libraries=int(ignore_libraries) > 0,
)
else:
self.api.add_plugins_exception_breakpoint(py_db, breakpoint_type, exception)
def cmd_remove_exception_break(self, py_db, cmd_id, seq, text):
exception = text
if exception.find('-') != -1:
exception_type, exception = exception.split('-')
else:
exception_type = 'python'
if exception_type == 'python':
self.api.remove_python_exception_breakpoint(py_db, exception)
else:
self.api.remove_plugins_exception_breakpoint(py_db, exception_type, exception)
def cmd_add_django_exception_break(self, py_db, cmd_id, seq, text):
self.api.add_plugins_exception_breakpoint(py_db, breakpoint_type='django', exception=text)
def cmd_remove_django_exception_break(self, py_db, cmd_id, seq, text):
self.api.remove_plugins_exception_breakpoint(py_db, exception_type='django', exception=text)
def cmd_evaluate_console_expression(self, py_db, cmd_id, seq, text):
# Command which takes care for the debug console communication
if text != "":
thread_id, frame_id, console_command = text.split('\t', 2)
console_command, line = console_command.split('\t')
if console_command == 'EVALUATE':
int_cmd = InternalEvaluateConsoleExpression(
seq, thread_id, frame_id, line, buffer_output=True)
elif console_command == 'EVALUATE_UNBUFFERED':
int_cmd = InternalEvaluateConsoleExpression(
seq, thread_id, frame_id, line, buffer_output=False)
elif console_command == 'GET_COMPLETIONS':
int_cmd = InternalConsoleGetCompletions(seq, thread_id, frame_id, line)
else:
raise ValueError('Unrecognized command: %s' % (console_command,))
py_db.post_internal_command(int_cmd, thread_id)
def cmd_run_custom_operation(self, py_db, cmd_id, seq, text):
# Command which runs a custom operation
if text != "":
try:
location, custom = text.split('||', 1)
except:
sys.stderr.write('Custom operation now needs a || separator. Found: %s\n' % (text,))
raise
thread_id, frame_id, scopeattrs = location.split('\t', 2)
if scopeattrs.find('\t') != -1: # there are attributes beyond scope
scope, attrs = scopeattrs.split('\t', 1)
else:
scope, attrs = (scopeattrs, None)
# : style: EXECFILE or EXEC
# : encoded_code_or_file: file to execute or code
# : fname: name of function to be executed in the resulting namespace
style, encoded_code_or_file, fnname = custom.split('\t', 3)
int_cmd = InternalRunCustomOperation(seq, thread_id, frame_id, scope, attrs,
style, encoded_code_or_file, fnname)
py_db.post_internal_command(int_cmd, thread_id)
def cmd_ignore_thrown_exception_at(self, py_db, cmd_id, seq, text):
if text:
replace = 'REPLACE:' # Not all 3.x versions support u'REPLACE:', so, doing workaround.
if text.startswith(replace):
                text = text[len(replace):]
py_db.filename_to_lines_where_exceptions_are_ignored.clear()
if text:
for line in text.split('||'): # Can be bulk-created (one in each line)
original_filename, line_number = line.split('|')
original_filename = self.api.filename_to_server(original_filename)
canonical_normalized_filename = pydevd_file_utils.canonical_normalized_path(original_filename)
absolute_filename = pydevd_file_utils.absolute_path(original_filename)
if os.path.exists(absolute_filename):
lines_ignored = py_db.filename_to_lines_where_exceptions_are_ignored.get(canonical_normalized_filename)
if lines_ignored is None:
lines_ignored = py_db.filename_to_lines_where_exceptions_are_ignored[canonical_normalized_filename] = {}
lines_ignored[int(line_number)] = 1
else:
sys.stderr.write('pydev debugger: warning: trying to ignore exception thrown'\
' on file that does not exist: %s (will have no effect)\n' % (absolute_filename,))
def cmd_enable_dont_trace(self, py_db, cmd_id, seq, text):
if text:
true_str = 'true' # Not all 3.x versions support u'str', so, doing workaround.
mode = text.strip() == true_str
pydevd_dont_trace.trace_filter(mode)
def cmd_redirect_output(self, py_db, cmd_id, seq, text):
if text:
py_db.enable_output_redirection('STDOUT' in text, 'STDERR' in text)
def cmd_get_next_statement_targets(self, py_db, cmd_id, seq, text):
thread_id, frame_id = text.split('\t', 1)
py_db.post_method_as_internal_command(
thread_id, internal_get_next_statement_targets, seq, thread_id, frame_id)
def cmd_get_smart_step_into_variants(self, py_db, cmd_id, seq, text):
thread_id, frame_id, start_line, end_line = text.split('\t', 3)
py_db.post_method_as_internal_command(
thread_id, internal_get_smart_step_into_variants, seq, thread_id, frame_id, start_line, end_line, set_additional_thread_info=set_additional_thread_info)
def cmd_set_project_roots(self, py_db, cmd_id, seq, text):
self.api.set_project_roots(py_db, text.split(u'\t'))
def cmd_thread_dump_to_stderr(self, py_db, cmd_id, seq, text):
pydevd_utils.dump_threads()
def cmd_stop_on_start(self, py_db, cmd_id, seq, text):
if text.strip() in ('True', 'true', '1'):
self.api.stop_on_entry()
def cmd_pydevd_json_config(self, py_db, cmd_id, seq, text):
# Expected to receive a json string as:
# {
# 'skip_suspend_on_breakpoint_exception': [<exception names where we should suspend>],
# 'skip_print_breakpoint_exception': [<exception names where we should print>],
# 'multi_threads_single_notification': bool,
# }
msg = json.loads(text.strip())
if 'skip_suspend_on_breakpoint_exception' in msg:
py_db.skip_suspend_on_breakpoint_exception = tuple(
get_exception_class(x) for x in msg['skip_suspend_on_breakpoint_exception'])
if 'skip_print_breakpoint_exception' in msg:
py_db.skip_print_breakpoint_exception = tuple(
get_exception_class(x) for x in msg['skip_print_breakpoint_exception'])
if 'multi_threads_single_notification' in msg:
py_db.multi_threads_single_notification = msg['multi_threads_single_notification']
def cmd_get_exception_details(self, py_db, cmd_id, seq, text):
thread_id = text
t = pydevd_find_thread_by_id(thread_id)
frame = None
if t and not getattr(t, 'pydev_do_not_trace', None):
additional_info = set_additional_thread_info(t)
frame = additional_info.get_topmost_frame(t)
try:
return py_db.cmd_factory.make_get_exception_details_message(py_db, seq, thread_id, frame)
finally:
frame = None
t = None
process_net_command = _PyDevCommandProcessor().process_net_command
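if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): the tab-separated wire
    # format parsed by handlers such as cmd_get_completions above (values are
    # hypothetical).
    text = '12\t34\tLOCAL\tsome_object.attr'
    thread_id, frame_id, _scope, act_tok = text.split('\t', 3)
    assert (thread_id, frame_id, act_tok) == ('12', '34', 'some_object.attr')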
| 35,106 | Python | 45.315303 | 180 | 0.59326 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_net_command_factory_xml.py | import json
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_bundle._pydev_saved_modules import thread
from _pydevd_bundle import pydevd_xml, pydevd_frame_utils, pydevd_constants, pydevd_utils
from _pydevd_bundle.pydevd_comm_constants import (
CMD_THREAD_CREATE, CMD_THREAD_KILL, CMD_THREAD_SUSPEND, CMD_THREAD_RUN, CMD_GET_VARIABLE,
CMD_EVALUATE_EXPRESSION, CMD_GET_FRAME, CMD_WRITE_TO_CONSOLE, CMD_GET_COMPLETIONS,
CMD_LOAD_SOURCE, CMD_SET_NEXT_STATEMENT, CMD_EXIT, CMD_GET_FILE_CONTENTS,
CMD_EVALUATE_CONSOLE_EXPRESSION, CMD_RUN_CUSTOM_OPERATION,
CMD_GET_BREAKPOINT_EXCEPTION, CMD_SEND_CURR_EXCEPTION_TRACE,
CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED, CMD_SHOW_CONSOLE, CMD_GET_ARRAY,
CMD_INPUT_REQUESTED, CMD_GET_DESCRIPTION, CMD_PROCESS_CREATED,
CMD_SHOW_CYTHON_WARNING, CMD_LOAD_FULL_VALUE, CMD_GET_THREAD_STACK,
CMD_GET_EXCEPTION_DETAILS, CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION,
CMD_THREAD_RESUME_SINGLE_NOTIFICATION,
CMD_GET_NEXT_STATEMENT_TARGETS, CMD_VERSION,
CMD_RETURN, CMD_SET_PROTOCOL, CMD_ERROR, MAX_IO_MSG_SIZE, VERSION_STRING,
CMD_RELOAD_CODE, CMD_LOAD_SOURCE_FROM_FRAME_ID)
from _pydevd_bundle.pydevd_constants import (DebugInfoHolder, get_thread_id,
get_global_debugger, GetGlobalDebugger, set_global_debugger) # Keep for backward compatibility @UnusedImport
from _pydevd_bundle.pydevd_net_command import NetCommand, NULL_NET_COMMAND, NULL_EXIT_COMMAND
from _pydevd_bundle.pydevd_utils import quote_smart as quote, get_non_pydevd_threads
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame
import pydevd_file_utils
from pydevd_tracing import get_exception_traceback_str
from _pydev_bundle._pydev_completer import completions_to_xml
from _pydev_bundle import pydev_log
from _pydevd_bundle.pydevd_frame_utils import FramesList
from io import StringIO
#=======================================================================================================================
# NetCommandFactory
#=======================================================================================================================
class NetCommandFactory(object):
def __init__(self):
self._additional_thread_id_to_thread_name = {}
def _thread_to_xml(self, thread):
""" thread information as XML """
name = pydevd_xml.make_valid_xml_value(thread.name)
cmd_text = '<thread name="%s" id="%s" />' % (quote(name), get_thread_id(thread))
return cmd_text
def make_error_message(self, seq, text):
cmd = NetCommand(CMD_ERROR, seq, text)
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
pydev_log.error("Error: %s" % (text,))
return cmd
def make_protocol_set_message(self, seq):
return NetCommand(CMD_SET_PROTOCOL, seq, '')
def make_thread_created_message(self, thread):
cmdText = "<xml>" + self._thread_to_xml(thread) + "</xml>"
return NetCommand(CMD_THREAD_CREATE, 0, cmdText)
def make_process_created_message(self):
cmdText = '<process/>'
return NetCommand(CMD_PROCESS_CREATED, 0, cmdText)
def make_process_about_to_be_replaced_message(self):
return NULL_NET_COMMAND
def make_show_cython_warning_message(self):
try:
return NetCommand(CMD_SHOW_CYTHON_WARNING, 0, '')
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_custom_frame_created_message(self, frame_id, frame_description):
self._additional_thread_id_to_thread_name[frame_id] = frame_description
frame_description = pydevd_xml.make_valid_xml_value(frame_description)
return NetCommand(CMD_THREAD_CREATE, 0, '<xml><thread name="%s" id="%s"/></xml>' % (frame_description, frame_id))
def make_list_threads_message(self, py_db, seq):
""" returns thread listing as XML """
try:
threads = get_non_pydevd_threads()
cmd_text = ["<xml>"]
append = cmd_text.append
for thread in threads:
if is_thread_alive(thread):
append(self._thread_to_xml(thread))
for thread_id, thread_name in list(self._additional_thread_id_to_thread_name.items()):
name = pydevd_xml.make_valid_xml_value(thread_name)
append('<thread name="%s" id="%s" />' % (quote(name), thread_id))
append("</xml>")
return NetCommand(CMD_RETURN, seq, ''.join(cmd_text))
except:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_thread_stack_message(self, py_db, seq, thread_id, topmost_frame, fmt, must_be_suspended=False, start_frame=0, levels=0):
"""
Returns thread stack as XML.
:param must_be_suspended: If True and the thread is not suspended, returns None.
"""
try:
# If frame is None, the return is an empty frame list.
cmd_text = ['<xml><thread id="%s">' % (thread_id,)]
if topmost_frame is not None:
try:
# : :type suspended_frames_manager: SuspendedFramesManager
suspended_frames_manager = py_db.suspended_frames_manager
frames_list = suspended_frames_manager.get_frames_list(thread_id)
if frames_list is None:
# Could not find stack of suspended frame...
if must_be_suspended:
return None
else:
frames_list = pydevd_frame_utils.create_frames_list_from_frame(topmost_frame)
cmd_text.append(self.make_thread_stack_str(py_db, frames_list))
finally:
topmost_frame = None
cmd_text.append('</thread></xml>')
return NetCommand(CMD_GET_THREAD_STACK, seq, ''.join(cmd_text))
except:
return self.make_error_message(seq, get_exception_traceback_str())
def make_variable_changed_message(self, seq, payload):
# notify debugger that value was changed successfully
return NetCommand(CMD_RETURN, seq, payload)
def make_warning_message(self, msg):
return self.make_io_message(msg, 2)
def make_io_message(self, msg, ctx):
'''
@param msg: the message to pass to the debug server
        @param ctx: 1 for stdout, 2 for stderr
'''
try:
msg = pydevd_constants.as_str(msg)
if len(msg) > MAX_IO_MSG_SIZE:
msg = msg[0:MAX_IO_MSG_SIZE]
msg += '...'
msg = pydevd_xml.make_valid_xml_value(quote(msg, '/>_= '))
return NetCommand(str(CMD_WRITE_TO_CONSOLE), 0, '<xml><io s="%s" ctx="%s"/></xml>' % (msg, ctx))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_version_message(self, seq):
try:
return NetCommand(CMD_VERSION, seq, VERSION_STRING)
except:
return self.make_error_message(seq, get_exception_traceback_str())
def make_thread_killed_message(self, tid):
self._additional_thread_id_to_thread_name.pop(tid, None)
try:
return NetCommand(CMD_THREAD_KILL, 0, str(tid))
except:
return self.make_error_message(0, get_exception_traceback_str())
def _iter_visible_frames_info(self, py_db, frames_list):
assert frames_list.__class__ == FramesList
for frame in frames_list:
show_as_current_frame = frame is frames_list.current_frame
if frame.f_code is None:
pydev_log.info('Frame without f_code: %s', frame)
continue # IronPython sometimes does not have it!
method_name = frame.f_code.co_name # method name (if in method) or ? if global
if method_name is None:
pydev_log.info('Frame without co_name: %s', frame)
continue # IronPython sometimes does not have it!
abs_path_real_path_and_base = get_abs_path_real_path_and_base_from_frame(frame)
if py_db.get_file_type(frame, abs_path_real_path_and_base) == py_db.PYDEV_FILE:
# Skip pydevd files.
frame = frame.f_back
continue
frame_id = id(frame)
lineno = frames_list.frame_id_to_lineno.get(frame_id, frame.f_lineno)
filename_in_utf8, lineno, changed = py_db.source_mapping.map_to_client(abs_path_real_path_and_base[0], lineno)
new_filename_in_utf8, applied_mapping = pydevd_file_utils.map_file_to_client(filename_in_utf8)
applied_mapping = applied_mapping or changed
yield frame_id, frame, method_name, abs_path_real_path_and_base[0], new_filename_in_utf8, lineno, applied_mapping, show_as_current_frame
def make_thread_stack_str(self, py_db, frames_list):
assert frames_list.__class__ == FramesList
make_valid_xml_value = pydevd_xml.make_valid_xml_value
cmd_text_list = []
append = cmd_text_list.append
try:
for frame_id, frame, method_name, _original_filename, filename_in_utf8, lineno, _applied_mapping, _show_as_current_frame in self._iter_visible_frames_info(
py_db, frames_list
):
# print("file is ", filename_in_utf8)
# print("line is ", lineno)
# Note: variables are all gotten 'on-demand'.
                append('<frame id="%s" name="%s" ' % (frame_id, make_valid_xml_value(method_name)))
append('file="%s" line="%s">' % (quote(make_valid_xml_value(filename_in_utf8), '/>_= \t'), lineno))
append("</frame>")
except:
pydev_log.exception()
return ''.join(cmd_text_list)
def make_thread_suspend_str(
self,
py_db,
thread_id,
frames_list,
stop_reason=None,
message=None,
suspend_type="trace",
):
"""
:return tuple(str,str):
Returns tuple(thread_suspended_str, thread_stack_str).
i.e.:
(
'''
<xml>
<thread id="id" stop_reason="reason">
<frame id="id" name="functionName " file="file" line="line">
</frame>
</thread>
</xml>
'''
,
'''
<frame id="id" name="functionName " file="file" line="line">
</frame>
'''
)
"""
assert frames_list.__class__ == FramesList
make_valid_xml_value = pydevd_xml.make_valid_xml_value
cmd_text_list = []
append = cmd_text_list.append
cmd_text_list.append('<xml>')
if message:
message = make_valid_xml_value(message)
append('<thread id="%s"' % (thread_id,))
if stop_reason is not None:
append(' stop_reason="%s"' % (stop_reason,))
if message is not None:
append(' message="%s"' % (message,))
if suspend_type is not None:
append(' suspend_type="%s"' % (suspend_type,))
append('>')
thread_stack_str = self.make_thread_stack_str(py_db, frames_list)
append(thread_stack_str)
append("</thread></xml>")
return ''.join(cmd_text_list), thread_stack_str
def make_thread_suspend_message(self, py_db, thread_id, frames_list, stop_reason, message, suspend_type):
try:
thread_suspend_str, thread_stack_str = self.make_thread_suspend_str(
py_db, thread_id, frames_list, stop_reason, message, suspend_type)
cmd = NetCommand(CMD_THREAD_SUSPEND, 0, thread_suspend_str)
cmd.thread_stack_str = thread_stack_str
cmd.thread_suspend_str = thread_suspend_str
return cmd
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_thread_suspend_single_notification(self, py_db, thread_id, stop_reason):
try:
return NetCommand(CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION, 0, json.dumps(
{'thread_id': thread_id, 'stop_reason':stop_reason}))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_thread_resume_single_notification(self, thread_id):
try:
return NetCommand(CMD_THREAD_RESUME_SINGLE_NOTIFICATION, 0, json.dumps(
{'thread_id': thread_id}))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_thread_run_message(self, thread_id, reason):
try:
return NetCommand(CMD_THREAD_RUN, 0, "%s\t%s" % (thread_id, reason))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_get_variable_message(self, seq, payload):
try:
return NetCommand(CMD_GET_VARIABLE, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_array_message(self, seq, payload):
try:
return NetCommand(CMD_GET_ARRAY, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_description_message(self, seq, payload):
try:
return NetCommand(CMD_GET_DESCRIPTION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_frame_message(self, seq, payload):
try:
return NetCommand(CMD_GET_FRAME, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_evaluate_expression_message(self, seq, payload):
try:
return NetCommand(CMD_EVALUATE_EXPRESSION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_completions_message(self, seq, completions, qualifier, start):
try:
payload = completions_to_xml(completions)
return NetCommand(CMD_GET_COMPLETIONS, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_file_contents(self, seq, payload):
try:
return NetCommand(CMD_GET_FILE_CONTENTS, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_reloaded_code_message(self, seq, reloaded_ok):
try:
return NetCommand(CMD_RELOAD_CODE, seq, '<xml><reloaded ok="%s"></reloaded></xml>' % reloaded_ok)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_breakpoint_exception_message(self, seq, payload):
try:
return NetCommand(CMD_GET_BREAKPOINT_EXCEPTION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def _make_send_curr_exception_trace_str(self, py_db, thread_id, exc_type, exc_desc, trace_obj):
frames_list = pydevd_frame_utils.create_frames_list_from_traceback(trace_obj, None, exc_type, exc_desc)
exc_type = pydevd_xml.make_valid_xml_value(str(exc_type)).replace('\t', ' ') or 'exception: type unknown'
exc_desc = pydevd_xml.make_valid_xml_value(str(exc_desc)).replace('\t', ' ') or 'exception: no description'
thread_suspend_str, thread_stack_str = self.make_thread_suspend_str(
py_db, thread_id, frames_list, CMD_SEND_CURR_EXCEPTION_TRACE, '')
return exc_type, exc_desc, thread_suspend_str, thread_stack_str
def make_send_curr_exception_trace_message(self, py_db, seq, thread_id, curr_frame_id, exc_type, exc_desc, trace_obj):
try:
exc_type, exc_desc, thread_suspend_str, _thread_stack_str = self._make_send_curr_exception_trace_str(
py_db, thread_id, exc_type, exc_desc, trace_obj)
payload = str(curr_frame_id) + '\t' + exc_type + "\t" + exc_desc + "\t" + thread_suspend_str
return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_exception_details_message(self, py_db, seq, thread_id, topmost_frame):
"""Returns exception details as XML """
try:
# If the debugger is not suspended, just return the thread and its id.
cmd_text = ['<xml><thread id="%s" ' % (thread_id,)]
if topmost_frame is not None:
try:
frame = topmost_frame
topmost_frame = None
while frame is not None:
if frame.f_code.co_name == 'do_wait_suspend' and frame.f_code.co_filename.endswith('pydevd.py'):
arg = frame.f_locals.get('arg', None)
if arg is not None:
exc_type, exc_desc, _thread_suspend_str, thread_stack_str = self._make_send_curr_exception_trace_str(
py_db, thread_id, *arg)
cmd_text.append('exc_type="%s" ' % (exc_type,))
cmd_text.append('exc_desc="%s" ' % (exc_desc,))
cmd_text.append('>')
cmd_text.append(thread_stack_str)
break
frame = frame.f_back
else:
cmd_text.append('>')
finally:
frame = None
cmd_text.append('</thread></xml>')
return NetCommand(CMD_GET_EXCEPTION_DETAILS, seq, ''.join(cmd_text))
except:
return self.make_error_message(seq, get_exception_traceback_str())
def make_send_curr_exception_trace_proceeded_message(self, seq, thread_id):
try:
return NetCommand(CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED, 0, str(thread_id))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_send_console_message(self, seq, payload):
try:
return NetCommand(CMD_EVALUATE_CONSOLE_EXPRESSION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_custom_operation_message(self, seq, payload):
try:
return NetCommand(CMD_RUN_CUSTOM_OPERATION, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_load_source_message(self, seq, source):
return NetCommand(CMD_LOAD_SOURCE, seq, source)
def make_load_source_from_frame_id_message(self, seq, source):
return NetCommand(CMD_LOAD_SOURCE_FROM_FRAME_ID, seq, source)
def make_show_console_message(self, py_db, thread_id, frame):
try:
frames_list = pydevd_frame_utils.create_frames_list_from_frame(frame)
thread_suspended_str, _thread_stack_str = self.make_thread_suspend_str(
py_db, thread_id, frames_list, CMD_SHOW_CONSOLE, '')
return NetCommand(CMD_SHOW_CONSOLE, 0, thread_suspended_str)
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_input_requested_message(self, started):
try:
return NetCommand(CMD_INPUT_REQUESTED, 0, str(started))
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_set_next_stmnt_status_message(self, seq, is_success, exception_msg):
try:
message = str(is_success) + '\t' + exception_msg
return NetCommand(CMD_SET_NEXT_STATEMENT, int(seq), message)
except:
return self.make_error_message(0, get_exception_traceback_str())
def make_load_full_value_message(self, seq, payload):
try:
return NetCommand(CMD_LOAD_FULL_VALUE, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_get_next_statement_targets_message(self, seq, payload):
try:
return NetCommand(CMD_GET_NEXT_STATEMENT_TARGETS, seq, payload)
except Exception:
return self.make_error_message(seq, get_exception_traceback_str())
def make_skipped_step_in_because_of_filters(self, py_db, frame):
return NULL_NET_COMMAND # Not a part of the xml protocol
def make_evaluation_timeout_msg(self, py_db, expression, thread):
msg = '''pydevd: Evaluating: %s did not finish after %.2f seconds.
This may mean a number of things:
- This evaluation is really slow and this is expected.
In this case it's possible to silence this error by raising the timeout, setting the
PYDEVD_WARN_EVALUATION_TIMEOUT environment variable to a bigger value.
- The evaluation may need other threads running while it's running:
In this case, you may need to manually let other paused threads continue.
Alternatively, it's also possible to skip breaking on a particular thread by setting a
`pydev_do_not_trace = True` attribute in the related threading.Thread instance
(if some thread should always be running and no breakpoints are expected to be hit in it).
- The evaluation is deadlocked:
In this case you may set the PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT
environment variable to true so that a thread dump is shown along with this message and
optionally, set the PYDEVD_INTERRUPT_THREAD_TIMEOUT to some value so that the debugger
tries to interrupt the evaluation (if possible) when this happens.
''' % (expression, pydevd_constants.PYDEVD_WARN_EVALUATION_TIMEOUT)
if pydevd_constants.PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT:
stream = StringIO()
pydevd_utils.dump_threads(stream, show_pydevd_threads=False)
msg += '\n\n%s\n' % stream.getvalue()
return self.make_warning_message(msg)
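    # Example (illustrative; assumes, as the message above suggests, that these
    # settings are plain environment variables read by pydevd_constants at import
    # time, so they must be set before pydevd is imported):
    #
    #   import os
    #   os.environ['PYDEVD_WARN_EVALUATION_TIMEOUT'] = '30'  # warn after 30s
    #   os.environ['PYDEVD_THREAD_DUMP_ON_WARN_EVALUATION_TIMEOUT'] = 'true'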
def make_exit_command(self, py_db):
return NULL_EXIT_COMMAND
| 22,531 | Python | 44.611336 | 167 | 0.602503 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_comm_constants.py | CMD_RUN = 101
CMD_LIST_THREADS = 102
CMD_THREAD_CREATE = 103
CMD_THREAD_KILL = 104
CMD_THREAD_SUSPEND = 105
CMD_THREAD_RUN = 106
CMD_STEP_INTO = 107
CMD_STEP_OVER = 108
CMD_STEP_RETURN = 109
CMD_GET_VARIABLE = 110
CMD_SET_BREAK = 111
CMD_REMOVE_BREAK = 112
CMD_EVALUATE_EXPRESSION = 113
CMD_GET_FRAME = 114
CMD_EXEC_EXPRESSION = 115
CMD_WRITE_TO_CONSOLE = 116
CMD_CHANGE_VARIABLE = 117
CMD_RUN_TO_LINE = 118
CMD_RELOAD_CODE = 119
CMD_GET_COMPLETIONS = 120
# Note: renumbered (conflicted on merge)
CMD_CONSOLE_EXEC = 121
CMD_ADD_EXCEPTION_BREAK = 122
CMD_REMOVE_EXCEPTION_BREAK = 123
CMD_LOAD_SOURCE = 124
CMD_ADD_DJANGO_EXCEPTION_BREAK = 125
CMD_REMOVE_DJANGO_EXCEPTION_BREAK = 126
CMD_SET_NEXT_STATEMENT = 127
CMD_SMART_STEP_INTO = 128
CMD_EXIT = 129
CMD_SIGNATURE_CALL_TRACE = 130
CMD_SET_PY_EXCEPTION = 131
CMD_GET_FILE_CONTENTS = 132
CMD_SET_PROPERTY_TRACE = 133
# Pydev debug console commands
CMD_EVALUATE_CONSOLE_EXPRESSION = 134
CMD_RUN_CUSTOM_OPERATION = 135
CMD_GET_BREAKPOINT_EXCEPTION = 136
CMD_STEP_CAUGHT_EXCEPTION = 137
CMD_SEND_CURR_EXCEPTION_TRACE = 138
CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED = 139
CMD_IGNORE_THROWN_EXCEPTION_AT = 140
CMD_ENABLE_DONT_TRACE = 141
CMD_SHOW_CONSOLE = 142
CMD_GET_ARRAY = 143
CMD_STEP_INTO_MY_CODE = 144
CMD_GET_CONCURRENCY_EVENT = 145
CMD_SHOW_RETURN_VALUES = 146
CMD_INPUT_REQUESTED = 147
CMD_GET_DESCRIPTION = 148
CMD_PROCESS_CREATED = 149
CMD_SHOW_CYTHON_WARNING = 150
CMD_LOAD_FULL_VALUE = 151
CMD_GET_THREAD_STACK = 152
# This is mostly for unit-tests to diagnose errors on ci.
CMD_THREAD_DUMP_TO_STDERR = 153
# Sent from the client to signal that we should stop when we start executing user code.
CMD_STOP_ON_START = 154
# When the debugger is stopped in an exception, this command will provide the details of the current exception (in the current thread).
CMD_GET_EXCEPTION_DETAILS = 155
# Allows configuring pydevd settings (can be called multiple times and only keys
# available in the json will be configured -- keys not passed will not change the
# previous configuration).
CMD_PYDEVD_JSON_CONFIG = 156
CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION = 157
CMD_THREAD_RESUME_SINGLE_NOTIFICATION = 158
CMD_STEP_OVER_MY_CODE = 159
CMD_STEP_RETURN_MY_CODE = 160
CMD_SET_PY_EXCEPTION_JSON = 161
CMD_SET_PATH_MAPPING_JSON = 162
CMD_GET_SMART_STEP_INTO_VARIANTS = 163 # XXX: PyCharm has 160 for this (we're currently incompatible anyways).
CMD_REDIRECT_OUTPUT = 200
CMD_GET_NEXT_STATEMENT_TARGETS = 201
CMD_SET_PROJECT_ROOTS = 202
CMD_MODULE_EVENT = 203
CMD_PROCESS_EVENT = 204
CMD_AUTHENTICATE = 205
CMD_STEP_INTO_COROUTINE = 206
CMD_LOAD_SOURCE_FROM_FRAME_ID = 207
CMD_SET_FUNCTION_BREAK = 208
CMD_VERSION = 501
CMD_RETURN = 502
CMD_SET_PROTOCOL = 503
CMD_ERROR = 901
# This number can be changed if there's a need to do so.
# If the IO payload is too big we won't send all of it (sending everything could make the debugger too unresponsive).
MAX_IO_MSG_SIZE = 10000
VERSION_STRING = "@@BUILD_NUMBER@@"
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
file_system_encoding = getfilesystemencoding()
filesystem_encoding_is_utf8 = file_system_encoding.lower() in ('utf-8', 'utf_8', 'utf8')
ID_TO_MEANING = {
'101': 'CMD_RUN',
'102': 'CMD_LIST_THREADS',
'103': 'CMD_THREAD_CREATE',
'104': 'CMD_THREAD_KILL',
'105': 'CMD_THREAD_SUSPEND',
'106': 'CMD_THREAD_RUN',
'107': 'CMD_STEP_INTO',
'108': 'CMD_STEP_OVER',
'109': 'CMD_STEP_RETURN',
'110': 'CMD_GET_VARIABLE',
'111': 'CMD_SET_BREAK',
'112': 'CMD_REMOVE_BREAK',
'113': 'CMD_EVALUATE_EXPRESSION',
'114': 'CMD_GET_FRAME',
'115': 'CMD_EXEC_EXPRESSION',
'116': 'CMD_WRITE_TO_CONSOLE',
'117': 'CMD_CHANGE_VARIABLE',
'118': 'CMD_RUN_TO_LINE',
'119': 'CMD_RELOAD_CODE',
'120': 'CMD_GET_COMPLETIONS',
'121': 'CMD_CONSOLE_EXEC',
'122': 'CMD_ADD_EXCEPTION_BREAK',
'123': 'CMD_REMOVE_EXCEPTION_BREAK',
'124': 'CMD_LOAD_SOURCE',
'125': 'CMD_ADD_DJANGO_EXCEPTION_BREAK',
'126': 'CMD_REMOVE_DJANGO_EXCEPTION_BREAK',
'127': 'CMD_SET_NEXT_STATEMENT',
'128': 'CMD_SMART_STEP_INTO',
'129': 'CMD_EXIT',
'130': 'CMD_SIGNATURE_CALL_TRACE',
'131': 'CMD_SET_PY_EXCEPTION',
'132': 'CMD_GET_FILE_CONTENTS',
'133': 'CMD_SET_PROPERTY_TRACE',
'134': 'CMD_EVALUATE_CONSOLE_EXPRESSION',
'135': 'CMD_RUN_CUSTOM_OPERATION',
'136': 'CMD_GET_BREAKPOINT_EXCEPTION',
'137': 'CMD_STEP_CAUGHT_EXCEPTION',
'138': 'CMD_SEND_CURR_EXCEPTION_TRACE',
'139': 'CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED',
'140': 'CMD_IGNORE_THROWN_EXCEPTION_AT',
'141': 'CMD_ENABLE_DONT_TRACE',
'142': 'CMD_SHOW_CONSOLE',
'143': 'CMD_GET_ARRAY',
'144': 'CMD_STEP_INTO_MY_CODE',
'145': 'CMD_GET_CONCURRENCY_EVENT',
'146': 'CMD_SHOW_RETURN_VALUES',
'147': 'CMD_INPUT_REQUESTED',
'148': 'CMD_GET_DESCRIPTION',
'149': 'CMD_PROCESS_CREATED', # Note: this is actually a notification of a sub-process created.
'150': 'CMD_SHOW_CYTHON_WARNING',
'151': 'CMD_LOAD_FULL_VALUE',
'152': 'CMD_GET_THREAD_STACK',
'153': 'CMD_THREAD_DUMP_TO_STDERR',
'154': 'CMD_STOP_ON_START',
'155': 'CMD_GET_EXCEPTION_DETAILS',
'156': 'CMD_PYDEVD_JSON_CONFIG',
'157': 'CMD_THREAD_SUSPEND_SINGLE_NOTIFICATION',
'158': 'CMD_THREAD_RESUME_SINGLE_NOTIFICATION',
'159': 'CMD_STEP_OVER_MY_CODE',
'160': 'CMD_STEP_RETURN_MY_CODE',
'161': 'CMD_SET_PY_EXCEPTION_JSON',
'162': 'CMD_SET_PATH_MAPPING_JSON',
'163': 'CMD_GET_SMART_STEP_INTO_VARIANTS',
'200': 'CMD_REDIRECT_OUTPUT',
'201': 'CMD_GET_NEXT_STATEMENT_TARGETS',
'202': 'CMD_SET_PROJECT_ROOTS',
'203': 'CMD_MODULE_EVENT',
'204': 'CMD_PROCESS_EVENT', # DAP process event.
'205': 'CMD_AUTHENTICATE',
'206': 'CMD_STEP_INTO_COROUTINE',
    '207': 'CMD_LOAD_SOURCE_FROM_FRAME_ID',
    '208': 'CMD_SET_FUNCTION_BREAK',
'501': 'CMD_VERSION',
'502': 'CMD_RETURN',
'503': 'CMD_SET_PROTOCOL',
'901': 'CMD_ERROR',
}
def constant_to_str(constant):
s = ID_TO_MEANING.get(str(constant))
if not s:
s = '<Unknown: %s>' % (constant,)
return s
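# Illustrative usage of the helper above:
#   constant_to_str(101)  # -> 'CMD_RUN'
#   constant_to_str(999)  # -> '<Unknown: 999>'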
| 6,084 | Python | 28.114832 | 135 | 0.674556 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_bytecode_utils.py | """
Bytecode analysis utils. Originally added for use in smart step into.
Note: not importable from Python 2.
"""
from _pydev_bundle import pydev_log
from types import CodeType
from _pydevd_frame_eval.vendored.bytecode.instr import _Variable
from _pydevd_frame_eval.vendored import bytecode
from _pydevd_frame_eval.vendored.bytecode import cfg as bytecode_cfg
import dis
import opcode as _opcode
from _pydevd_bundle.pydevd_constants import KeyifyList, DebugInfoHolder, IS_PY311_OR_GREATER
from bisect import bisect
from collections import deque
# When True, raises errors on unknown bytecodes; when False, ignores them as if they didn't change the stack.
STRICT_MODE = False
DEBUG = False
_BINARY_OPS = set([opname for opname in dis.opname if opname.startswith('BINARY_')])
_BINARY_OP_MAP = {
'BINARY_POWER': '__pow__',
'BINARY_MULTIPLY': '__mul__',
'BINARY_MATRIX_MULTIPLY': '__matmul__',
'BINARY_FLOOR_DIVIDE': '__floordiv__',
'BINARY_TRUE_DIVIDE': '__div__',
'BINARY_MODULO': '__mod__',
'BINARY_ADD': '__add__',
'BINARY_SUBTRACT': '__sub__',
'BINARY_LSHIFT': '__lshift__',
'BINARY_RSHIFT': '__rshift__',
'BINARY_AND': '__and__',
'BINARY_OR': '__or__',
'BINARY_XOR': '__xor__',
'BINARY_SUBSCR': '__getitem__',
'BINARY_DIVIDE': '__div__'
}
_COMP_OP_MAP = {
'<': '__lt__',
'<=': '__le__',
'==': '__eq__',
'!=': '__ne__',
'>': '__gt__',
'>=': '__ge__',
'in': '__contains__',
'not in': '__contains__',
}
class Target(object):
__slots__ = ['arg', 'lineno', 'offset', 'children_targets']
def __init__(self, arg, lineno, offset, children_targets=()):
self.arg = arg
self.lineno = lineno
self.offset = offset
self.children_targets = children_targets
def __repr__(self):
ret = []
for s in self.__slots__:
ret.append('%s: %s' % (s, getattr(self, s)))
return 'Target(%s)' % ', '.join(ret)
__str__ = __repr__
class _TargetIdHashable(object):
def __init__(self, target):
self.target = target
def __eq__(self, other):
if not hasattr(other, 'target'):
return
return other.target is self.target
def __ne__(self, other):
return not self == other
def __hash__(self):
return id(self.target)
class _StackInterpreter(object):
'''
Good reference: https://github.com/python/cpython/blob/fcb55c0037baab6f98f91ee38ce84b6f874f034a/Python/ceval.c
'''
def __init__(self, bytecode):
self.bytecode = bytecode
self._stack = deque()
self.function_calls = []
self.load_attrs = {}
self.func = set()
self.func_name_id_to_code_object = {}
def __str__(self):
return 'Stack:\nFunction calls:\n%s\nLoad attrs:\n%s\n' % (self.function_calls, list(self.load_attrs.values()))
def _getname(self, instr):
if instr.opcode in _opcode.hascompare:
cmp_op = dis.cmp_op[instr.arg]
if cmp_op not in ('exception match', 'BAD'):
return _COMP_OP_MAP.get(cmp_op, cmp_op)
return instr.arg
def _getcallname(self, instr):
if instr.name == 'BINARY_SUBSCR':
return '__getitem__().__call__'
if instr.name == 'CALL_FUNCTION':
# Note: previously a '__call__().__call__' was returned, but this was a bit weird
# and on Python 3.9 this construct could appear for some internal things where
# it wouldn't be expected.
            # Note: it'd correspond to what we'd get in func()().
return None
if instr.name == 'MAKE_FUNCTION':
return '__func__().__call__'
if instr.name == 'LOAD_ASSERTION_ERROR':
return 'AssertionError'
name = self._getname(instr)
if isinstance(name, CodeType):
name = name.co_qualname # Note: only available for Python 3.11
if isinstance(name, _Variable):
name = name.name
if not isinstance(name, str):
return None
if name.endswith('>'): # xxx.<listcomp>, xxx.<lambda>, ...
return name.split('.')[-1]
return name
def _no_stack_change(self, instr):
pass # Can be aliased when the instruction does nothing.
def on_LOAD_GLOBAL(self, instr):
self._stack.append(instr)
def on_POP_TOP(self, instr):
try:
self._stack.pop()
except IndexError:
pass # Ok (in the end of blocks)
def on_LOAD_ATTR(self, instr):
self.on_POP_TOP(instr) # replaces the current top
self._stack.append(instr)
self.load_attrs[_TargetIdHashable(instr)] = Target(self._getname(instr), instr.lineno, instr.offset)
on_LOOKUP_METHOD = on_LOAD_ATTR # Improvement in PyPy
def on_LOAD_CONST(self, instr):
self._stack.append(instr)
on_LOAD_DEREF = on_LOAD_CONST
on_LOAD_NAME = on_LOAD_CONST
on_LOAD_CLOSURE = on_LOAD_CONST
on_LOAD_CLASSDEREF = on_LOAD_CONST
# Although it actually changes the stack, it's inconsequential for us as a function call can't
# really be found there.
on_IMPORT_NAME = _no_stack_change
on_IMPORT_FROM = _no_stack_change
on_IMPORT_STAR = _no_stack_change
on_SETUP_ANNOTATIONS = _no_stack_change
def on_STORE_FAST(self, instr):
try:
self._stack.pop()
except IndexError:
pass # Ok, we may have a block just with the store
# Note: it stores in the locals and doesn't put anything in the stack.
on_STORE_GLOBAL = on_STORE_FAST
on_STORE_DEREF = on_STORE_FAST
on_STORE_ATTR = on_STORE_FAST
on_STORE_NAME = on_STORE_FAST
on_DELETE_NAME = on_POP_TOP
on_DELETE_ATTR = on_POP_TOP
on_DELETE_GLOBAL = on_POP_TOP
on_DELETE_FAST = on_POP_TOP
on_DELETE_DEREF = on_POP_TOP
on_DICT_UPDATE = on_POP_TOP
on_SET_UPDATE = on_POP_TOP
on_GEN_START = on_POP_TOP
def on_NOP(self, instr):
pass
def _handle_call_from_instr(self, func_name_instr, func_call_instr):
self.load_attrs.pop(_TargetIdHashable(func_name_instr), None)
call_name = self._getcallname(func_name_instr)
target = None
if not call_name:
pass # Ignore if we can't identify a name
elif call_name in ('<listcomp>', '<genexpr>', '<setcomp>', '<dictcomp>'):
code_obj = self.func_name_id_to_code_object[_TargetIdHashable(func_name_instr)]
if code_obj is not None:
children_targets = _get_smart_step_into_targets(code_obj)
if children_targets:
# i.e.: we have targets inside of a <listcomp> or <genexpr>.
# Note that to actually match this in the debugger we need to do matches on 2 frames,
# the one with the <listcomp> and then the actual target inside the <listcomp>.
target = Target(call_name, func_name_instr.lineno, func_call_instr.offset, children_targets)
self.function_calls.append(
target)
else:
# Ok, regular call
target = Target(call_name, func_name_instr.lineno, func_call_instr.offset)
self.function_calls.append(target)
if DEBUG and target is not None:
print('Created target', target)
self._stack.append(func_call_instr) # Keep the func call as the result
def on_COMPARE_OP(self, instr):
try:
_right = self._stack.pop()
except IndexError:
return
try:
_left = self._stack.pop()
except IndexError:
return
cmp_op = dis.cmp_op[instr.arg]
if cmp_op not in ('exception match', 'BAD'):
self.function_calls.append(Target(self._getname(instr), instr.lineno, instr.offset))
self._stack.append(instr)
def on_IS_OP(self, instr):
try:
self._stack.pop()
except IndexError:
return
try:
self._stack.pop()
except IndexError:
return
def on_BINARY_SUBSCR(self, instr):
try:
_sub = self._stack.pop()
except IndexError:
return
try:
_container = self._stack.pop()
except IndexError:
return
self.function_calls.append(Target(_BINARY_OP_MAP[instr.name], instr.lineno, instr.offset))
self._stack.append(instr)
on_BINARY_MATRIX_MULTIPLY = on_BINARY_SUBSCR
on_BINARY_POWER = on_BINARY_SUBSCR
on_BINARY_MULTIPLY = on_BINARY_SUBSCR
on_BINARY_FLOOR_DIVIDE = on_BINARY_SUBSCR
on_BINARY_TRUE_DIVIDE = on_BINARY_SUBSCR
on_BINARY_MODULO = on_BINARY_SUBSCR
on_BINARY_ADD = on_BINARY_SUBSCR
on_BINARY_SUBTRACT = on_BINARY_SUBSCR
on_BINARY_LSHIFT = on_BINARY_SUBSCR
on_BINARY_RSHIFT = on_BINARY_SUBSCR
on_BINARY_AND = on_BINARY_SUBSCR
on_BINARY_OR = on_BINARY_SUBSCR
on_BINARY_XOR = on_BINARY_SUBSCR
def on_LOAD_METHOD(self, instr):
self.on_POP_TOP(instr) # Remove the previous as we're loading something from it.
self._stack.append(instr)
def on_MAKE_FUNCTION(self, instr):
if not IS_PY311_OR_GREATER:
            # Before 3.11 the qualified name is pushed on the stack along with the code object.
qualname = self._stack.pop()
code_obj_instr = self._stack.pop()
else:
# In 3.11 the code object has a co_qualname which we can use.
qualname = code_obj_instr = self._stack.pop()
arg = instr.arg
if arg & 0x08:
_func_closure = self._stack.pop()
if arg & 0x04:
_func_annotations = self._stack.pop()
if arg & 0x02:
_func_kwdefaults = self._stack.pop()
if arg & 0x01:
_func_defaults = self._stack.pop()
call_name = self._getcallname(qualname)
if call_name in ('<genexpr>', '<listcomp>', '<setcomp>', '<dictcomp>'):
if isinstance(code_obj_instr.arg, CodeType):
self.func_name_id_to_code_object[_TargetIdHashable(qualname)] = code_obj_instr.arg
self._stack.append(qualname)
def on_LOAD_FAST(self, instr):
self._stack.append(instr)
def on_LOAD_ASSERTION_ERROR(self, instr):
self._stack.append(instr)
on_LOAD_BUILD_CLASS = on_LOAD_FAST
def on_CALL_METHOD(self, instr):
# pop the actual args
for _ in range(instr.arg):
self._stack.pop()
func_name_instr = self._stack.pop()
self._handle_call_from_instr(func_name_instr, instr)
def on_PUSH_NULL(self, instr):
self._stack.append(instr)
def on_CALL_FUNCTION(self, instr):
arg = instr.arg
argc = arg & 0xff # positional args
argc += ((arg >> 8) * 2) # keyword args
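        # Note on the decoding above (legacy CALL_FUNCTION oparg encoding): the low
        # byte is the positional arg count and the second byte is the keyword arg
        # count, with each keyword arg occupying two stack slots (name + value),
        # hence the * 2. On newer Python versions the arg is just the positional
        # count, so for the common case the same decoding still works (high byte 0).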
# pop the actual args
for _ in range(argc):
try:
self._stack.pop()
except IndexError:
return
try:
func_name_instr = self._stack.pop()
except IndexError:
return
self._handle_call_from_instr(func_name_instr, instr)
def on_CALL_FUNCTION_KW(self, instr):
# names of kw args
_names_of_kw_args = self._stack.pop()
# pop the actual args
arg = instr.arg
argc = arg & 0xff # positional args
argc += ((arg >> 8) * 2) # keyword args
for _ in range(argc):
self._stack.pop()
func_name_instr = self._stack.pop()
self._handle_call_from_instr(func_name_instr, instr)
def on_CALL_FUNCTION_VAR(self, instr):
# var name
_var_arg = self._stack.pop()
# pop the actual args
arg = instr.arg
argc = arg & 0xff # positional args
argc += ((arg >> 8) * 2) # keyword args
for _ in range(argc):
self._stack.pop()
func_name_instr = self._stack.pop()
self._handle_call_from_instr(func_name_instr, instr)
def on_CALL_FUNCTION_VAR_KW(self, instr):
# names of kw args
_names_of_kw_args = self._stack.pop()
arg = instr.arg
argc = arg & 0xff # positional args
argc += ((arg >> 8) * 2) # keyword args
# also pop **kwargs
self._stack.pop()
# pop the actual args
for _ in range(argc):
self._stack.pop()
func_name_instr = self._stack.pop()
self._handle_call_from_instr(func_name_instr, instr)
def on_CALL_FUNCTION_EX(self, instr):
if instr.arg & 0x01:
_kwargs = self._stack.pop()
_callargs = self._stack.pop()
func_name_instr = self._stack.pop()
self._handle_call_from_instr(func_name_instr, instr)
on_YIELD_VALUE = _no_stack_change
on_GET_AITER = _no_stack_change
on_GET_ANEXT = _no_stack_change
on_END_ASYNC_FOR = _no_stack_change
on_BEFORE_ASYNC_WITH = _no_stack_change
on_SETUP_ASYNC_WITH = _no_stack_change
on_YIELD_FROM = _no_stack_change
on_SETUP_LOOP = _no_stack_change
on_FOR_ITER = _no_stack_change
on_BREAK_LOOP = _no_stack_change
on_JUMP_ABSOLUTE = _no_stack_change
on_RERAISE = _no_stack_change
on_LIST_TO_TUPLE = _no_stack_change
on_CALL_FINALLY = _no_stack_change
on_POP_FINALLY = _no_stack_change
def on_JUMP_IF_FALSE_OR_POP(self, instr):
try:
self._stack.pop()
except IndexError:
return
on_JUMP_IF_TRUE_OR_POP = on_JUMP_IF_FALSE_OR_POP
def on_JUMP_IF_NOT_EXC_MATCH(self, instr):
try:
self._stack.pop()
except IndexError:
return
try:
self._stack.pop()
except IndexError:
return
def on_ROT_TWO(self, instr):
try:
p0 = self._stack.pop()
except IndexError:
return
try:
p1 = self._stack.pop()
except:
self._stack.append(p0)
return
self._stack.append(p0)
self._stack.append(p1)
def on_ROT_THREE(self, instr):
try:
p0 = self._stack.pop()
except IndexError:
return
try:
p1 = self._stack.pop()
except:
self._stack.append(p0)
return
try:
p2 = self._stack.pop()
except:
self._stack.append(p0)
self._stack.append(p1)
return
self._stack.append(p0)
self._stack.append(p1)
self._stack.append(p2)
def on_ROT_FOUR(self, instr):
try:
p0 = self._stack.pop()
except IndexError:
return
try:
p1 = self._stack.pop()
except:
self._stack.append(p0)
return
try:
p2 = self._stack.pop()
except:
self._stack.append(p0)
self._stack.append(p1)
return
try:
p3 = self._stack.pop()
except:
self._stack.append(p0)
self._stack.append(p1)
self._stack.append(p2)
return
self._stack.append(p0)
self._stack.append(p1)
self._stack.append(p2)
self._stack.append(p3)
def on_BUILD_LIST_FROM_ARG(self, instr):
self._stack.append(instr)
def on_BUILD_MAP(self, instr):
for _i in range(instr.arg):
self._stack.pop()
self._stack.pop()
self._stack.append(instr)
def on_BUILD_CONST_KEY_MAP(self, instr):
self.on_POP_TOP(instr) # keys
for _i in range(instr.arg):
self.on_POP_TOP(instr) # value
self._stack.append(instr)
on_RETURN_VALUE = on_POP_TOP
on_POP_JUMP_IF_FALSE = on_POP_TOP
on_POP_JUMP_IF_TRUE = on_POP_TOP
on_DICT_MERGE = on_POP_TOP
on_LIST_APPEND = on_POP_TOP
on_SET_ADD = on_POP_TOP
on_LIST_EXTEND = on_POP_TOP
on_UNPACK_EX = on_POP_TOP
# ok: doesn't change the stack (converts top to getiter(top))
on_GET_ITER = _no_stack_change
on_GET_AWAITABLE = _no_stack_change
on_GET_YIELD_FROM_ITER = _no_stack_change
    # Note: RETURN_GENERATOR/RESUME are treated as not changing the tracked stack
    # (the generator pushed by RETURN_GENERATOR is irrelevant for finding call targets).
    on_RETURN_GENERATOR = _no_stack_change
    on_RESUME = _no_stack_change
def on_MAP_ADD(self, instr):
self.on_POP_TOP(instr)
self.on_POP_TOP(instr)
def on_UNPACK_SEQUENCE(self, instr):
self._stack.pop()
for _i in range(instr.arg):
self._stack.append(instr)
def on_BUILD_LIST(self, instr):
for _i in range(instr.arg):
self.on_POP_TOP(instr)
self._stack.append(instr)
on_BUILD_TUPLE = on_BUILD_LIST
on_BUILD_STRING = on_BUILD_LIST
on_BUILD_TUPLE_UNPACK_WITH_CALL = on_BUILD_LIST
on_BUILD_TUPLE_UNPACK = on_BUILD_LIST
on_BUILD_LIST_UNPACK = on_BUILD_LIST
on_BUILD_MAP_UNPACK_WITH_CALL = on_BUILD_LIST
on_BUILD_MAP_UNPACK = on_BUILD_LIST
on_BUILD_SET = on_BUILD_LIST
on_BUILD_SET_UNPACK = on_BUILD_LIST
on_SETUP_FINALLY = _no_stack_change
on_BEGIN_FINALLY = _no_stack_change
on_END_FINALLY = _no_stack_change
def on_RAISE_VARARGS(self, instr):
for _i in range(instr.arg):
self.on_POP_TOP(instr)
on_POP_BLOCK = _no_stack_change
on_JUMP_FORWARD = _no_stack_change
on_POP_EXCEPT = _no_stack_change
on_SETUP_EXCEPT = _no_stack_change
on_WITH_EXCEPT_START = _no_stack_change
on_SETUP_WITH = _no_stack_change
on_WITH_CLEANUP_START = _no_stack_change
on_WITH_CLEANUP_FINISH = _no_stack_change
on_FORMAT_VALUE = _no_stack_change
on_EXTENDED_ARG = _no_stack_change
def on_INPLACE_ADD(self, instr):
        # This actually pops 2 values and leaves the result on the stack.
        # In `a += 1` it pops `a` and `1` and leaves the resulting value
        # for a load. In our case, let's just pop the `1` and leave the `a`
        # instead of leaving the INPLACE_ADD bytecode.
try:
self._stack.pop()
except IndexError:
pass
on_INPLACE_POWER = on_INPLACE_ADD
on_INPLACE_MULTIPLY = on_INPLACE_ADD
on_INPLACE_MATRIX_MULTIPLY = on_INPLACE_ADD
on_INPLACE_TRUE_DIVIDE = on_INPLACE_ADD
on_INPLACE_FLOOR_DIVIDE = on_INPLACE_ADD
on_INPLACE_MODULO = on_INPLACE_ADD
on_INPLACE_SUBTRACT = on_INPLACE_ADD
on_INPLACE_RSHIFT = on_INPLACE_ADD
on_INPLACE_LSHIFT = on_INPLACE_ADD
on_INPLACE_AND = on_INPLACE_ADD
on_INPLACE_OR = on_INPLACE_ADD
on_INPLACE_XOR = on_INPLACE_ADD
def on_DUP_TOP(self, instr):
try:
i = self._stack[-1]
except IndexError:
# ok (in the start of block)
self._stack.append(instr)
else:
self._stack.append(i)
def on_DUP_TOP_TWO(self, instr):
if len(self._stack) == 0:
self._stack.append(instr)
return
if len(self._stack) == 1:
i = self._stack[-1]
self._stack.append(i)
self._stack.append(instr)
return
i = self._stack[-1]
j = self._stack[-2]
self._stack.append(j)
self._stack.append(i)
def on_BUILD_SLICE(self, instr):
for _ in range(instr.arg):
try:
self._stack.pop()
except IndexError:
pass
self._stack.append(instr)
def on_STORE_SUBSCR(self, instr):
try:
self._stack.pop()
self._stack.pop()
self._stack.pop()
except IndexError:
pass
def on_DELETE_SUBSCR(self, instr):
try:
self._stack.pop()
self._stack.pop()
except IndexError:
pass
    # Note: on Python 3 this is only found in interactive mode, to print the results of
    # some evaluation.
on_PRINT_EXPR = on_POP_TOP
on_UNARY_POSITIVE = _no_stack_change
on_UNARY_NEGATIVE = _no_stack_change
on_UNARY_NOT = _no_stack_change
on_UNARY_INVERT = _no_stack_change
on_CACHE = _no_stack_change
on_PRECALL = _no_stack_change
def _get_smart_step_into_targets(code):
'''
:return list(Target)
'''
b = bytecode.Bytecode.from_code(code)
cfg = bytecode_cfg.ControlFlowGraph.from_bytecode(b)
ret = []
for block in cfg:
if DEBUG:
print('\nStart block----')
stack = _StackInterpreter(block)
for instr in block:
try:
func_name = 'on_%s' % (instr.name,)
func = getattr(stack, func_name, None)
if DEBUG:
if instr.name != 'CACHE': # Filter the ones we don't want to see.
print('\nWill handle: ', instr, '>>', stack._getname(instr), '<<')
print('Current stack:')
for entry in stack._stack:
print(' arg:', stack._getname(entry), '(', entry, ')')
if func is None:
if STRICT_MODE:
raise AssertionError('%s not found.' % (func_name,))
else:
continue
func(instr)
except:
if STRICT_MODE:
raise # Error in strict mode.
else:
# In non-strict mode, log it (if in verbose mode) and keep on going.
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 2:
pydev_log.exception('Exception computing step into targets (handled).')
ret.extend(stack.function_calls)
        # No longer considering attr loads as calls (while in theory something like
        # `some.attr` can turn out to be a property which could be stepped into, it's
        # not that common in practice and can be surprising for users, so, smart step
        # into no longer steps into properties).
# ret.extend(stack.load_attrs.values())
return ret
# Note that the offset is unique within the frame (so, we can use it as the target id).
# Also, as the offset is the instruction offset within the frame, it's possible to
# inspect the parent frame's frame.f_lasti to know where we actually are (as the
# caller name may not always match the new frame name).
class Variant(object):
__slots__ = ['name', 'is_visited', 'line', 'offset', 'call_order', 'children_variants', 'parent']
def __init__(self, name, is_visited, line, offset, call_order, children_variants=None):
self.name = name
self.is_visited = is_visited
self.line = line
self.offset = offset
self.call_order = call_order
self.children_variants = children_variants
self.parent = None
if children_variants:
for variant in children_variants:
variant.parent = self
def __repr__(self):
ret = []
for s in self.__slots__:
if s == 'parent':
try:
parent = self.parent
except AttributeError:
ret.append('%s: <not set>' % (s,))
else:
if parent is None:
ret.append('parent: None')
else:
ret.append('parent: %s (%s)' % (parent.name, parent.offset))
continue
if s == 'children_variants':
ret.append('children_variants: %s' % (len(self.children_variants) if self.children_variants else 0))
continue
try:
ret.append('%s: %s' % (s, getattr(self, s)))
except AttributeError:
ret.append('%s: <not set>' % (s,))
return 'Variant(%s)' % ', '.join(ret)
__str__ = __repr__
def _convert_target_to_variant(target, start_line, end_line, call_order_cache, lasti, base):
name = target.arg
if not isinstance(name, str):
return
if target.lineno > end_line:
return
if target.lineno < start_line:
return
call_order = call_order_cache.get(name, 0) + 1
call_order_cache[name] = call_order
is_visited = target.offset <= lasti
children_targets = target.children_targets
children_variants = None
if children_targets:
children_variants = [
_convert_target_to_variant(child, start_line, end_line, call_order_cache, lasti, base)
for child in target.children_targets]
return Variant(name, is_visited, target.lineno - base, target.offset, call_order, children_variants)
def calculate_smart_step_into_variants(frame, start_line, end_line, base=0):
"""
Calculate smart step into variants for the given line range.
:param frame:
:type frame: :py:class:`types.FrameType`
:param start_line:
:param end_line:
    :return: A list of the call variants found, from the first to the last.
:note: it's guaranteed that the offsets appear in order.
:raise: :py:class:`RuntimeError` if failed to parse the bytecode or if dis cannot be used.
"""
variants = []
code = frame.f_code
lasti = frame.f_lasti
call_order_cache = {}
if DEBUG:
print('dis.dis:')
if IS_PY311_OR_GREATER:
dis.dis(code, show_caches=False)
else:
dis.dis(code)
for target in _get_smart_step_into_targets(code):
variant = _convert_target_to_variant(target, start_line, end_line, call_order_cache, lasti, base)
if variant is None:
continue
variants.append(variant)
return variants
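# Illustrative usage sketch (hypothetical frame; in practice the debugger passes
# the frame paused at a breakpoint together with the statement's line range):
#
#   import sys
#   frame = sys._getframe()
#   variants = calculate_smart_step_into_variants(frame, frame.f_lineno, frame.f_lineno)
#   for variant in variants:
#       print(variant.name, variant.offset, variant.is_visited)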
def get_smart_step_into_variant_from_frame_offset(frame_f_lasti, variants):
"""
Given the frame.f_lasti, return the related `Variant`.
:note: if the offset is found before any variant available or no variants are
available, None is returned.
:rtype: Variant|NoneType
"""
if not variants:
return None
i = bisect(KeyifyList(variants, lambda entry:entry.offset), frame_f_lasti)
if i == 0:
return None
else:
return variants[i - 1]
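# Sketch of the bisect logic above with plain offsets (illustrative values):
#   variant offsets: [2, 10, 24], frame_f_lasti == 12
#   bisect([2, 10, 24], 12) == 2  ->  variants[1] (offset 10) is returned.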
| 26,277 | Python | 30.135071 | 119 | 0.568101 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_io.py | from _pydevd_bundle.pydevd_constants import ForkSafeLock, get_global_debugger
import os
import sys
from contextlib import contextmanager
class IORedirector:
'''
This class works to wrap a stream (stdout/stderr) with an additional redirect.
'''
def __init__(self, original, new_redirect, wrap_buffer=False):
'''
:param stream original:
The stream to be wrapped (usually stdout/stderr, but could be None).
:param stream new_redirect:
Usually IOBuf (below).
:param bool wrap_buffer:
            Whether to create a buffer attribute (needed to mimic Python 3
            stdout/stderr, which have a buffer to write binary data).
'''
self._lock = ForkSafeLock(rlock=True)
self._writing = False
self._redirect_to = (original, new_redirect)
if wrap_buffer and hasattr(original, 'buffer'):
self.buffer = IORedirector(original.buffer, new_redirect.buffer, False)
def write(self, s):
# Note that writing to the original stream may fail for some reasons
# (such as trying to write something that's not a string or having it closed).
with self._lock:
if self._writing:
return
self._writing = True
try:
for r in self._redirect_to:
if hasattr(r, 'write'):
r.write(s)
finally:
self._writing = False
def isatty(self):
for r in self._redirect_to:
if hasattr(r, 'isatty'):
return r.isatty()
return False
def flush(self):
for r in self._redirect_to:
if hasattr(r, 'flush'):
r.flush()
def __getattr__(self, name):
for r in self._redirect_to:
if hasattr(r, name):
return getattr(r, name)
raise AttributeError(name)
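# Minimal usage sketch (illustrative; this mirrors what start_redirect below does
# when keep_original_redirection=True):
#
#   buf = IOBuf()
#   sys.stdout = IORedirector(sys.stdout, buf, wrap_buffer=hasattr(sys.stdout, 'buffer'))
#   print('hello')  # written to the real stdout *and* captured in buf
#   captured = buf.getvalue()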
class RedirectToPyDBIoMessages(object):
def __init__(self, out_ctx, wrap_stream, wrap_buffer, on_write=None):
'''
:param out_ctx:
1=stdout and 2=stderr
:param wrap_stream:
Either sys.stdout or sys.stderr.
:param bool wrap_buffer:
If True the buffer attribute (which wraps writing bytes) should be
wrapped.
:param callable(str) on_write:
May be a custom callable to be called when to write something.
If not passed the default implementation will create an io message
and send it through the debugger.
'''
encoding = getattr(wrap_stream, 'encoding', None)
if not encoding:
encoding = os.environ.get('PYTHONIOENCODING', 'utf-8')
self.encoding = encoding
self._out_ctx = out_ctx
if wrap_buffer:
self.buffer = RedirectToPyDBIoMessages(out_ctx, wrap_stream, wrap_buffer=False, on_write=on_write)
self._on_write = on_write
def get_pydb(self):
# Note: separate method for mocking on tests.
return get_global_debugger()
def flush(self):
pass # no-op here
def write(self, s):
if self._on_write is not None:
self._on_write(s)
return
if s:
# Need s in str
if isinstance(s, bytes):
s = s.decode(self.encoding, errors='replace')
py_db = self.get_pydb()
if py_db is not None:
# Note that the actual message contents will be a xml with utf-8, although
# the entry is str on py3 and bytes on py2.
cmd = py_db.cmd_factory.make_io_message(s, self._out_ctx)
if py_db.writer is not None:
py_db.writer.add_command(cmd)
class IOBuf:
    '''This class works as a replacement for stdout and stderr.
    It is a buffer; when its contents are requested, it erases what
    it has gathered so far, so the next call will not return the same contents again.
'''
def __init__(self):
        self.buflist = []
        self.encoding = os.environ.get('PYTHONIOENCODING', 'utf-8')
def getvalue(self):
b = self.buflist
self.buflist = [] # clear it
return ''.join(b) # bytes on py2, str on py3.
def write(self, s):
if isinstance(s, bytes):
s = s.decode(self.encoding, errors='replace')
self.buflist.append(s)
def isatty(self):
return False
def flush(self):
pass
def empty(self):
return len(self.buflist) == 0
class _RedirectInfo(object):
def __init__(self, original, redirect_to):
self.original = original
self.redirect_to = redirect_to
class _RedirectionsHolder:
_lock = ForkSafeLock(rlock=True)
_stack_stdout = []
_stack_stderr = []
_pydevd_stdout_redirect_ = None
_pydevd_stderr_redirect_ = None
def start_redirect(keep_original_redirection=False, std='stdout', redirect_to=None):
'''
@param std: 'stdout', 'stderr', or 'both'
'''
with _RedirectionsHolder._lock:
if redirect_to is None:
redirect_to = IOBuf()
if std == 'both':
config_stds = ['stdout', 'stderr']
else:
config_stds = [std]
for std in config_stds:
original = getattr(sys, std)
stack = getattr(_RedirectionsHolder, '_stack_%s' % std)
if keep_original_redirection:
wrap_buffer = True if hasattr(redirect_to, 'buffer') else False
new_std_instance = IORedirector(getattr(sys, std), redirect_to, wrap_buffer=wrap_buffer)
setattr(sys, std, new_std_instance)
else:
new_std_instance = redirect_to
setattr(sys, std, redirect_to)
stack.append(_RedirectInfo(original, new_std_instance))
return redirect_to
def end_redirect(std='stdout'):
with _RedirectionsHolder._lock:
if std == 'both':
config_stds = ['stdout', 'stderr']
else:
config_stds = [std]
for std in config_stds:
stack = getattr(_RedirectionsHolder, '_stack_%s' % std)
redirect_info = stack.pop()
setattr(sys, std, redirect_info.original)
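# Illustrative usage of the pair above: capture stdout for a block of code.
#
#   buf = start_redirect(keep_original_redirection=True, std='stdout')
#   try:
#       print('captured')  # goes to the original stdout and into buf
#   finally:
#       end_redirect('stdout')
#   assert 'captured' in buf.getvalue()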
def redirect_stream_to_pydb_io_messages(std):
'''
:param std:
'stdout' or 'stderr'
'''
with _RedirectionsHolder._lock:
redirect_to_name = '_pydevd_%s_redirect_' % (std,)
if getattr(_RedirectionsHolder, redirect_to_name) is None:
wrap_buffer = True
original = getattr(sys, std)
redirect_to = RedirectToPyDBIoMessages(1 if std == 'stdout' else 2, original, wrap_buffer)
start_redirect(keep_original_redirection=True, std=std, redirect_to=redirect_to)
stack = getattr(_RedirectionsHolder, '_stack_%s' % std)
setattr(_RedirectionsHolder, redirect_to_name, stack[-1])
return True
return False
def stop_redirect_stream_to_pydb_io_messages(std):
'''
:param std:
'stdout' or 'stderr'
'''
with _RedirectionsHolder._lock:
redirect_to_name = '_pydevd_%s_redirect_' % (std,)
redirect_info = getattr(_RedirectionsHolder, redirect_to_name)
if redirect_info is not None: # :type redirect_info: _RedirectInfo
setattr(_RedirectionsHolder, redirect_to_name, None)
stack = getattr(_RedirectionsHolder, '_stack_%s' % std)
prev_info = stack.pop()
curr = getattr(sys, std)
if curr is redirect_info.redirect_to:
setattr(sys, std, redirect_info.original)
@contextmanager
def redirect_stream_to_pydb_io_messages_context():
with _RedirectionsHolder._lock:
redirecting = []
for std in ('stdout', 'stderr'):
if redirect_stream_to_pydb_io_messages(std):
redirecting.append(std)
try:
yield
finally:
for std in redirecting:
stop_redirect_stream_to_pydb_io_messages(std)
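# Illustrative usage of the context manager above:
#
#   with redirect_stream_to_pydb_io_messages_context():
#       print('forwarded to the client as a debugger io message')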
| 8,117 | Python | 30.343629 | 110 | 0.575459 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_gevent_integration.py | import pydevd_tracing
import greenlet
import gevent
from _pydev_bundle._pydev_saved_modules import threading
from _pydevd_bundle.pydevd_custom_frames import add_custom_frame, update_custom_frame, remove_custom_frame
from _pydevd_bundle.pydevd_constants import GEVENT_SHOW_PAUSED_GREENLETS, get_global_debugger, \
thread_get_ident
from _pydev_bundle import pydev_log
from pydevd_file_utils import basename
_saved_greenlets_to_custom_frame_thread_id = {}
if GEVENT_SHOW_PAUSED_GREENLETS:
def _get_paused_name(py_db, g):
frame = g.gr_frame
use_frame = frame
# i.e.: Show in the description of the greenlet the last user-code found.
while use_frame is not None:
if py_db.apply_files_filter(use_frame, use_frame.f_code.co_filename, True):
frame = use_frame
use_frame = use_frame.f_back
else:
break
if use_frame is None:
use_frame = frame
return '%s: %s - %s' % (type(g).__name__, use_frame.f_code.co_name, basename(use_frame.f_code.co_filename))
def greenlet_events(event, args):
if event in ('switch', 'throw'):
py_db = get_global_debugger()
origin, target = args
if not origin.dead and origin.gr_frame is not None:
frame_custom_thread_id = _saved_greenlets_to_custom_frame_thread_id.get(origin)
if frame_custom_thread_id is None:
_saved_greenlets_to_custom_frame_thread_id[origin] = add_custom_frame(
origin.gr_frame, _get_paused_name(py_db, origin), thread_get_ident())
else:
update_custom_frame(
frame_custom_thread_id, origin.gr_frame, _get_paused_name(py_db, origin), thread_get_ident())
else:
frame_custom_thread_id = _saved_greenlets_to_custom_frame_thread_id.pop(origin, None)
if frame_custom_thread_id is not None:
remove_custom_frame(frame_custom_thread_id)
# This one will be resumed, so, remove custom frame from it.
frame_custom_thread_id = _saved_greenlets_to_custom_frame_thread_id.pop(target, None)
if frame_custom_thread_id is not None:
remove_custom_frame(frame_custom_thread_id)
# The tracing needs to be reapplied for each greenlet as gevent
# clears the tracing set through sys.settrace for each greenlet.
pydevd_tracing.reapply_settrace()
else:
# i.e.: no logic related to showing paused greenlets is needed.
def greenlet_events(event, args):
pydevd_tracing.reapply_settrace()
def enable_gevent_integration():
# References:
# https://greenlet.readthedocs.io/en/latest/api.html#greenlet.settrace
# https://greenlet.readthedocs.io/en/latest/tracing.html
# Note: gevent.version_info is WRONG (gevent.__version__ must be used).
try:
if tuple(int(x) for x in gevent.__version__.split('.')[:2]) <= (20, 0):
if not GEVENT_SHOW_PAUSED_GREENLETS:
return
if not hasattr(greenlet, 'settrace'):
                # In older greenlet versions settrace was optional; without it
                # GEVENT_SHOW_PAUSED_GREENLETS cannot work.
pydev_log.debug('greenlet.settrace not available. GEVENT_SHOW_PAUSED_GREENLETS will have no effect.')
return
try:
greenlet.settrace(greenlet_events)
except:
pydev_log.exception('Error with greenlet.settrace.')
except:
pydev_log.exception('Error setting up gevent %s.', gevent.__version__)
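# Illustrative sketch of the greenlet tracing hook used above: greenlet.settrace
# installs a global callback invoked as callback(event, (origin, target)) on
# greenlet switches (tracer/old names below are hypothetical):
#
#   import greenlet
#
#   def tracer(event, args):
#       if event in ('switch', 'throw'):
#           origin, target = args
#           ...  # e.g.: reapply sys.settrace for the target greenlet
#
#   old = greenlet.settrace(tracer)  # returns the previously set tracer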
def log_gevent_debug_info():
pydev_log.debug('Greenlet version: %s', greenlet.__version__)
pydev_log.debug('Gevent version: %s', gevent.__version__)
pydev_log.debug('Gevent install location: %s', gevent.__file__)
| 3,896 | Python | 40.457446 | 117 | 0.622433 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_process_net_command_json.py | import itertools
import json
import linecache
import os
import platform
import sys
from functools import partial
import pydevd_file_utils
from _pydev_bundle import pydev_log
from _pydevd_bundle._debug_adapter import pydevd_base_schema, pydevd_schema
from _pydevd_bundle._debug_adapter.pydevd_schema import (
CompletionsResponseBody, EvaluateResponseBody, ExceptionOptions,
GotoTargetsResponseBody, ModulesResponseBody, ProcessEventBody,
ProcessEvent, Scope, ScopesResponseBody, SetExpressionResponseBody,
SetVariableResponseBody, SourceBreakpoint, SourceResponseBody,
VariablesResponseBody, SetBreakpointsResponseBody, Response,
Capabilities, PydevdAuthorizeRequest, Request,
StepInTargetsResponseBody, SetFunctionBreakpointsResponseBody, BreakpointEvent,
BreakpointEventBody)
from _pydevd_bundle.pydevd_api import PyDevdAPI
from _pydevd_bundle.pydevd_breakpoints import get_exception_class, FunctionBreakpoint
from _pydevd_bundle.pydevd_comm_constants import (
CMD_PROCESS_EVENT, CMD_RETURN, CMD_SET_NEXT_STATEMENT, CMD_STEP_INTO,
CMD_STEP_INTO_MY_CODE, CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE, file_system_encoding,
CMD_STEP_RETURN_MY_CODE, CMD_STEP_RETURN)
from _pydevd_bundle.pydevd_filtering import ExcludeFilter
from _pydevd_bundle.pydevd_json_debug_options import _extract_debug_options, DebugOptions
from _pydevd_bundle.pydevd_net_command import NetCommand
from _pydevd_bundle.pydevd_utils import convert_dap_log_message_to_expression, ScopeRequest
from _pydevd_bundle.pydevd_constants import (PY_IMPL_NAME, DebugInfoHolder, PY_VERSION_STR,
PY_IMPL_VERSION_STR, IS_64BIT_PROCESS)
from _pydevd_bundle.pydevd_trace_dispatch import USING_CYTHON
from _pydevd_frame_eval.pydevd_frame_eval_main import USING_FRAME_EVAL
from _pydevd_bundle.pydevd_comm import internal_get_step_in_targets_json
from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info
from _pydevd_bundle.pydevd_thread_lifecycle import pydevd_find_thread_by_id
def _convert_rules_to_exclude_filters(rules, on_error):
exclude_filters = []
if not isinstance(rules, list):
on_error('Invalid "rules" (expected list of dicts). Found: %s' % (rules,))
else:
directory_exclude_filters = []
module_exclude_filters = []
glob_exclude_filters = []
for rule in rules:
if not isinstance(rule, dict):
on_error('Invalid "rules" (expected list of dicts). Found: %s' % (rules,))
continue
include = rule.get('include')
if include is None:
on_error('Invalid "rule" (expected dict with "include"). Found: %s' % (rule,))
continue
path = rule.get('path')
module = rule.get('module')
if path is None and module is None:
on_error('Invalid "rule" (expected dict with "path" or "module"). Found: %s' % (rule,))
continue
if path is not None:
glob_pattern = path
if '*' not in path and '?' not in path:
if os.path.isdir(glob_pattern):
# If a directory was specified, add a '/**'
# to be consistent with the glob pattern required
# by pydevd.
if not glob_pattern.endswith('/') and not glob_pattern.endswith('\\'):
glob_pattern += '/'
glob_pattern += '**'
directory_exclude_filters.append(ExcludeFilter(glob_pattern, not include, True))
else:
glob_exclude_filters.append(ExcludeFilter(glob_pattern, not include, True))
elif module is not None:
module_exclude_filters.append(ExcludeFilter(module, not include, False))
else:
on_error('Internal error: expected path or module to be specified.')
# Note that we have to sort the directory/module exclude filters so that the biggest
# paths match first.
# i.e.: if we have:
# /sub1/sub2/sub3
# a rule with /sub1/sub2 would match before a rule only with /sub1.
directory_exclude_filters = sorted(directory_exclude_filters, key=lambda exclude_filter:-len(exclude_filter.name))
module_exclude_filters = sorted(module_exclude_filters, key=lambda exclude_filter:-len(exclude_filter.name))
exclude_filters = directory_exclude_filters + glob_exclude_filters + module_exclude_filters
return exclude_filters
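# Illustrative input/output for the conversion above (values are hypothetical):
#
#   rules = [
#       {'path': '/proj/generated/**', 'include': False},
#       {'module': 'django', 'include': False},
#   ]
#   filters = _convert_rules_to_exclude_filters(rules, on_error=pydev_log.critical)
#   # -> [ExcludeFilter('/proj/generated/**', True, True),
#   #     ExcludeFilter('django', True, False)]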
class IDMap(object):
def __init__(self):
self._value_to_key = {}
self._key_to_value = {}
self._next_id = partial(next, itertools.count(0))
def obtain_value(self, key):
return self._key_to_value[key]
def obtain_key(self, value):
try:
key = self._value_to_key[value]
except KeyError:
key = self._next_id()
self._key_to_value[key] = value
self._value_to_key[value] = key
return key
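# Illustrative behavior of IDMap (keys are small sequential ints, stable per value):
#
#   id_map = IDMap()
#   key = id_map.obtain_key(('goto', 'target'))   # first call -> 0
#   id_map.obtain_key(('goto', 'target')) == key  # same value -> same key
#   id_map.obtain_value(key) == ('goto', 'target')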
class PyDevJsonCommandProcessor(object):
def __init__(self, from_json):
self.from_json = from_json
self.api = PyDevdAPI()
self._options = DebugOptions()
self._next_breakpoint_id = partial(next, itertools.count(0))
self._goto_targets_map = IDMap()
self._launch_or_attach_request_done = False
def process_net_command_json(self, py_db, json_contents, send_response=True):
'''
Processes a debug adapter protocol json command.
'''
DEBUG = False
try:
if isinstance(json_contents, bytes):
json_contents = json_contents.decode('utf-8')
request = self.from_json(json_contents, update_ids_from_dap=True)
except Exception as e:
try:
loaded_json = json.loads(json_contents)
request = Request(loaded_json.get('command', '<unknown>'), loaded_json['seq'])
except:
# There's not much we can do in this case...
pydev_log.exception('Error loading json: %s', json_contents)
return
error_msg = str(e)
if error_msg.startswith("'") and error_msg.endswith("'"):
error_msg = error_msg[1:-1]
# This means a failure processing the request (but we were able to load the seq,
# so, answer with a failure response).
def on_request(py_db, request):
error_response = {
'type': 'response',
'request_seq': request.seq,
'success': False,
'command': request.command,
'message': error_msg,
}
return NetCommand(CMD_RETURN, 0, error_response, is_json=True)
else:
if DebugInfoHolder.DEBUG_RECORD_SOCKET_READS and DebugInfoHolder.DEBUG_TRACE_LEVEL >= 1:
pydev_log.info('Process %s: %s\n' % (
request.__class__.__name__, json.dumps(request.to_dict(update_ids_to_dap=True), indent=4, sort_keys=True),))
assert request.type == 'request'
method_name = 'on_%s_request' % (request.command.lower(),)
on_request = getattr(self, method_name, None)
if on_request is None:
print('Unhandled: %s not available in PyDevJsonCommandProcessor.\n' % (method_name,))
return
if DEBUG:
print('Handled in pydevd: %s (in PyDevJsonCommandProcessor).\n' % (method_name,))
with py_db._main_lock:
if request.__class__ == PydevdAuthorizeRequest:
authorize_request = request # : :type authorize_request: PydevdAuthorizeRequest
access_token = authorize_request.arguments.debugServerAccessToken
py_db.authentication.login(access_token)
if not py_db.authentication.is_authenticated():
response = Response(
request.seq, success=False, command=request.command, message='Client not authenticated.', body={})
cmd = NetCommand(CMD_RETURN, 0, response, is_json=True)
py_db.writer.add_command(cmd)
return
cmd = on_request(py_db, request)
if cmd is not None and send_response:
py_db.writer.add_command(cmd)
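    # Dispatch sketch (illustrative): a DAP request such as
    #   {"type": "request", "seq": 5, "command": "threads"}
    # is routed by the 'on_%s_request' name lookup above to
    # self.on_threads_request(py_db, request).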
def on_pydevdauthorize_request(self, py_db, request):
client_access_token = py_db.authentication.client_access_token
body = {'clientAccessToken': None}
if client_access_token:
body['clientAccessToken'] = client_access_token
response = pydevd_base_schema.build_response(request, kwargs={'body': body})
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def on_initialize_request(self, py_db, request):
body = Capabilities(
# Supported.
supportsConfigurationDoneRequest=True,
supportsConditionalBreakpoints=True,
supportsHitConditionalBreakpoints=True,
supportsEvaluateForHovers=True,
supportsSetVariable=True,
supportsGotoTargetsRequest=True,
supportsCompletionsRequest=True,
supportsModulesRequest=True,
supportsExceptionOptions=True,
supportsValueFormattingOptions=True,
supportsExceptionInfoRequest=True,
supportTerminateDebuggee=True,
supportsDelayedStackTraceLoading=True,
supportsLogPoints=True,
supportsSetExpression=True,
supportsTerminateRequest=True,
supportsClipboardContext=True,
supportsFunctionBreakpoints=True,
exceptionBreakpointFilters=[
{'filter': 'raised', 'label': 'Raised Exceptions', 'default': False},
{'filter': 'uncaught', 'label': 'Uncaught Exceptions', 'default': True},
{"filter": "userUnhandled", "label": "User Uncaught Exceptions", "default": False},
],
# Not supported.
supportsStepBack=False,
supportsRestartFrame=False,
supportsStepInTargetsRequest=True,
supportsRestartRequest=False,
supportsLoadedSourcesRequest=False,
supportsTerminateThreadsRequest=False,
supportsDataBreakpoints=False,
supportsReadMemoryRequest=False,
supportsDisassembleRequest=False,
additionalModuleColumns=[],
completionTriggerCharacters=[],
supportedChecksumAlgorithms=[],
).to_dict()
# Non-standard capabilities/info below.
body['supportsDebuggerProperties'] = True
body['pydevd'] = pydevd_info = {}
pydevd_info['processId'] = os.getpid()
self.api.notify_initialize(py_db)
response = pydevd_base_schema.build_response(request, kwargs={'body': body})
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def on_configurationdone_request(self, py_db, request):
'''
:param ConfigurationDoneRequest request:
'''
if not self._launch_or_attach_request_done:
pydev_log.critical('Missing launch request or attach request before configuration done request.')
self.api.run(py_db)
self.api.notify_configuration_done(py_db)
configuration_done_response = pydevd_base_schema.build_response(request)
return NetCommand(CMD_RETURN, 0, configuration_done_response, is_json=True)
def on_threads_request(self, py_db, request):
'''
:param ThreadsRequest request:
'''
return self.api.list_threads(py_db, request.seq)
def on_terminate_request(self, py_db, request):
'''
:param TerminateRequest request:
'''
self._request_terminate_process(py_db)
response = pydevd_base_schema.build_response(request)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def _request_terminate_process(self, py_db):
self.api.request_terminate_process(py_db)
def on_completions_request(self, py_db, request):
'''
:param CompletionsRequest request:
'''
arguments = request.arguments # : :type arguments: CompletionsArguments
seq = request.seq
text = arguments.text
frame_id = arguments.frameId
thread_id = py_db.suspended_frames_manager.get_thread_id_for_variable_reference(
frame_id)
if thread_id is None:
body = CompletionsResponseBody([])
variables_response = pydevd_base_schema.build_response(
request,
kwargs={
'body': body,
'success': False,
'message': 'Thread to get completions seems to have resumed already.'
})
return NetCommand(CMD_RETURN, 0, variables_response, is_json=True)
# Note: line and column are 1-based (convert to 0-based for pydevd).
column = arguments.column - 1
if arguments.line is None:
# line is optional
line = -1
else:
line = arguments.line - 1
self.api.request_completions(py_db, seq, thread_id, frame_id, text, line=line, column=column)
def _resolve_remote_root(self, local_root, remote_root):
if remote_root == '.':
cwd = os.getcwd()
append_pathsep = local_root.endswith('\\') or local_root.endswith('/')
return cwd + (os.path.sep if append_pathsep else '')
return remote_root
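    # e.g. (illustrative): _resolve_remote_root('c:\\proj\\', '.') returns the
    # current working directory with a trailing path separator appended.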
def _set_debug_options(self, py_db, args, start_reason):
rules = args.get('rules')
stepping_resumes_all_threads = args.get('steppingResumesAllThreads', True)
self.api.set_stepping_resumes_all_threads(py_db, stepping_resumes_all_threads)
terminate_child_processes = args.get('terminateChildProcesses', True)
self.api.set_terminate_child_processes(py_db, terminate_child_processes)
variable_presentation = args.get('variablePresentation', None)
if isinstance(variable_presentation, dict):
def get_variable_presentation(setting, default):
value = variable_presentation.get(setting, default)
if value not in ('group', 'inline', 'hide'):
pydev_log.info(
'The value set for "%s" (%s) in the variablePresentation is not valid. Valid values are: "group", "inline", "hide"' % (
setting, value,))
value = default
return value
default = get_variable_presentation('all', 'group')
special_presentation = get_variable_presentation('special', default)
function_presentation = get_variable_presentation('function', default)
class_presentation = get_variable_presentation('class', default)
protected_presentation = get_variable_presentation('protected', default)
self.api.set_variable_presentation(py_db, self.api.VariablePresentation(
special_presentation,
function_presentation,
class_presentation,
protected_presentation
))
exclude_filters = []
if rules is not None:
exclude_filters = _convert_rules_to_exclude_filters(
rules, lambda msg: self.api.send_error_message(py_db, msg))
self.api.set_exclude_filters(py_db, exclude_filters)
debug_options = _extract_debug_options(
args.get('options'),
args.get('debugOptions'),
)
self._options.update_fom_debug_options(debug_options)
self._options.update_from_args(args)
self.api.set_use_libraries_filter(py_db, self._options.just_my_code)
path_mappings = []
for pathMapping in args.get('pathMappings', []):
localRoot = pathMapping.get('localRoot', '')
remoteRoot = pathMapping.get('remoteRoot', '')
remoteRoot = self._resolve_remote_root(localRoot, remoteRoot)
if (localRoot != '') and (remoteRoot != ''):
path_mappings.append((localRoot, remoteRoot))
if bool(path_mappings):
pydevd_file_utils.setup_client_server_paths(path_mappings)
resolve_symlinks = args.get('resolveSymlinks', None)
if resolve_symlinks is not None:
pydevd_file_utils.set_resolve_symlinks(resolve_symlinks)
redirecting = args.get("isOutputRedirected")
if self._options.redirect_output:
py_db.enable_output_redirection(True, True)
redirecting = True
else:
py_db.enable_output_redirection(False, False)
py_db.is_output_redirected = redirecting
self.api.set_show_return_values(py_db, self._options.show_return_value)
if not self._options.break_system_exit_zero:
ignore_system_exit_codes = [0, None]
if self._options.django_debug or self._options.flask_debug:
ignore_system_exit_codes += [3]
self.api.set_ignore_system_exit_codes(py_db, ignore_system_exit_codes)
auto_reload = args.get('autoReload', {})
if not isinstance(auto_reload, dict):
pydev_log.info('Expected autoReload to be a dict. Received: %s' % (auto_reload,))
auto_reload = {}
enable_auto_reload = auto_reload.get('enable', False)
watch_dirs = auto_reload.get('watchDirectories')
if not watch_dirs:
watch_dirs = []
        # Note: by default this is no longer done because in some cases there are entries in the PYTHONPATH
        # such as the home directory or /python/x64, where the site packages are in /python/x64/libs, so,
        # we only watch the current working directory as well as the executed script.
# check = getattr(sys, 'path', [])[:]
# # By default only watch directories that are in the project roots /
# # program dir (if available), sys.argv[0], as well as the current dir (we don't want to
# # listen to the whole site-packages by default as it can be huge).
# watch_dirs = [pydevd_file_utils.absolute_path(w) for w in check]
# watch_dirs = [w for w in watch_dirs if py_db.in_project_roots_filename_uncached(w) and os.path.isdir(w)]
program = args.get('program')
if program:
if os.path.isdir(program):
watch_dirs.append(program)
else:
watch_dirs.append(os.path.dirname(program))
watch_dirs.append(os.path.abspath('.'))
argv = getattr(sys, 'argv', [])
if argv:
f = argv[0]
if f: # argv[0] could be None (https://github.com/microsoft/debugpy/issues/987)
if os.path.isdir(f):
watch_dirs.append(f)
else:
watch_dirs.append(os.path.dirname(f))
if not isinstance(watch_dirs, (list, set, tuple)):
watch_dirs = (watch_dirs,)
new_watch_dirs = set()
for w in watch_dirs:
try:
new_watch_dirs.add(pydevd_file_utils.get_path_with_real_case(pydevd_file_utils.absolute_path(w)))
except Exception:
pydev_log.exception('Error adding watch dir: %s', w)
watch_dirs = new_watch_dirs
poll_target_time = auto_reload.get('pollingInterval', 1)
exclude_patterns = auto_reload.get('exclude', ('**/.git/**', '**/__pycache__/**', '**/node_modules/**', '**/.metadata/**', '**/site-packages/**'))
include_patterns = auto_reload.get('include', ('**/*.py', '**/*.pyw'))
self.api.setup_auto_reload_watcher(
py_db, enable_auto_reload, watch_dirs, poll_target_time, exclude_patterns, include_patterns)
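        # Illustrative launch-config fragment handled by the block above (key names
        # follow the args lookups in this method; values are hypothetical):
        #   "autoReload": {"enable": true,
        #                  "watchDirectories": ["/proj/src"],
        #                  "pollingInterval": 1,
        #                  "include": ["**/*.py"]}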
if self._options.stop_on_entry and start_reason == 'launch':
self.api.stop_on_entry()
self.api.set_gui_event_loop(py_db, self._options.gui_event_loop)
def _send_process_event(self, py_db, start_method):
argv = getattr(sys, 'argv', [])
if len(argv) > 0:
name = argv[0]
else:
name = ''
if isinstance(name, bytes):
name = name.decode(file_system_encoding, 'replace')
name = name.encode('utf-8')
body = ProcessEventBody(
name=name,
systemProcessId=os.getpid(),
isLocalProcess=True,
startMethod=start_method,
)
event = ProcessEvent(body)
py_db.writer.add_command(NetCommand(CMD_PROCESS_EVENT, 0, event, is_json=True))
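        # The resulting DAP event looks like (illustrative values):
        #   {'event': 'process', 'body': {'name': 'app.py', 'systemProcessId': 1234,
        #                                 'isLocalProcess': True, 'startMethod': 'launch'}}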
def _handle_launch_or_attach_request(self, py_db, request, start_reason):
self._send_process_event(py_db, start_reason)
self._launch_or_attach_request_done = True
self.api.set_enable_thread_notifications(py_db, True)
self._set_debug_options(py_db, request.arguments.kwargs, start_reason=start_reason)
response = pydevd_base_schema.build_response(request)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def on_launch_request(self, py_db, request):
'''
:param LaunchRequest request:
'''
return self._handle_launch_or_attach_request(py_db, request, start_reason='launch')
def on_attach_request(self, py_db, request):
'''
:param AttachRequest request:
'''
return self._handle_launch_or_attach_request(py_db, request, start_reason='attach')
def on_pause_request(self, py_db, request):
'''
:param PauseRequest request:
'''
arguments = request.arguments # : :type arguments: PauseArguments
thread_id = arguments.threadId
self.api.request_suspend_thread(py_db, thread_id=thread_id)
response = pydevd_base_schema.build_response(request)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def on_continue_request(self, py_db, request):
'''
:param ContinueRequest request:
'''
arguments = request.arguments # : :type arguments: ContinueArguments
thread_id = arguments.threadId
def on_resumed():
body = {'allThreadsContinued': thread_id == '*'}
response = pydevd_base_schema.build_response(request, kwargs={'body': body})
cmd = NetCommand(CMD_RETURN, 0, response, is_json=True)
py_db.writer.add_command(cmd)
# Only send resumed notification when it has actually resumed!
# (otherwise the user could send a continue, receive the notification and then
# request a new pause which would be paused without sending any notification as
# it didn't really run in the first place).
py_db.threads_suspended_single_notification.add_on_resumed_callback(on_resumed)
self.api.request_resume_thread(thread_id)
def on_next_request(self, py_db, request):
'''
:param NextRequest request:
'''
arguments = request.arguments # : :type arguments: NextArguments
thread_id = arguments.threadId
if py_db.get_use_libraries_filter():
step_cmd_id = CMD_STEP_OVER_MY_CODE
else:
step_cmd_id = CMD_STEP_OVER
self.api.request_step(py_db, thread_id, step_cmd_id)
response = pydevd_base_schema.build_response(request)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def on_stepin_request(self, py_db, request):
'''
:param StepInRequest request:
'''
arguments = request.arguments # : :type arguments: StepInArguments
thread_id = arguments.threadId
target_id = arguments.targetId
if target_id is not None:
thread = pydevd_find_thread_by_id(thread_id)
info = set_additional_thread_info(thread)
target_id_to_smart_step_into_variant = info.target_id_to_smart_step_into_variant
if not target_id_to_smart_step_into_variant:
variables_response = pydevd_base_schema.build_response(
request,
kwargs={
'success': False,
'message': 'Unable to step into target (no targets are saved in the thread info).'
})
return NetCommand(CMD_RETURN, 0, variables_response, is_json=True)
variant = target_id_to_smart_step_into_variant.get(target_id)
if variant is not None:
parent = variant.parent
if parent is not None:
self.api.request_smart_step_into(py_db, request.seq, thread_id, parent.offset, variant.offset)
else:
self.api.request_smart_step_into(py_db, request.seq, thread_id, variant.offset, -1)
else:
variables_response = pydevd_base_schema.build_response(
request,
kwargs={
'success': False,
'message': 'Unable to find step into target %s. Available targets: %s' % (
target_id, target_id_to_smart_step_into_variant)
})
return NetCommand(CMD_RETURN, 0, variables_response, is_json=True)
else:
if py_db.get_use_libraries_filter():
step_cmd_id = CMD_STEP_INTO_MY_CODE
else:
step_cmd_id = CMD_STEP_INTO
self.api.request_step(py_db, thread_id, step_cmd_id)
response = pydevd_base_schema.build_response(request)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def on_stepintargets_request(self, py_db, request):
'''
:param StepInTargetsRequest request:
'''
frame_id = request.arguments.frameId
thread_id = py_db.suspended_frames_manager.get_thread_id_for_variable_reference(
frame_id)
if thread_id is None:
body = StepInTargetsResponseBody([])
variables_response = pydevd_base_schema.build_response(
request,
kwargs={
'body': body,
'success': False,
'message': 'Unable to get thread_id from frame_id (thread to get step in targets seems to have resumed already).'
})
return NetCommand(CMD_RETURN, 0, variables_response, is_json=True)
py_db.post_method_as_internal_command(
thread_id, internal_get_step_in_targets_json, request.seq, thread_id, frame_id, request, set_additional_thread_info)
def on_stepout_request(self, py_db, request):
'''
:param StepOutRequest request:
'''
arguments = request.arguments # : :type arguments: StepOutArguments
thread_id = arguments.threadId
if py_db.get_use_libraries_filter():
step_cmd_id = CMD_STEP_RETURN_MY_CODE
else:
step_cmd_id = CMD_STEP_RETURN
self.api.request_step(py_db, thread_id, step_cmd_id)
response = pydevd_base_schema.build_response(request)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def _get_hit_condition_expression(self, hit_condition):
        '''The following hit condition values are supported:
        * x or == x: break when the breakpoint is hit x times
        * >= x: break when the breakpoint is hit at least x times
        * % x: break when the breakpoint is hit a multiple of x times
        Returns an expression such as '@HIT@ == x' where @HIT@ will be replaced by the number of hits
'''
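        # Worked examples (illustrative inputs), per the docstring above:
        #   '5'    -> '@HIT@ == 5'
        #   '% 3'  -> '@HIT@ % 3 == 0'
        #   '>= 2' -> '@HIT@ >= 2'
        # Anything unrecognized is returned unchanged.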
if not hit_condition:
return None
expr = hit_condition.strip()
try:
int(expr)
return '@HIT@ == {}'.format(expr)
except ValueError:
pass
if expr.startswith('%'):
return '@HIT@ {} == 0'.format(expr)
if expr.startswith('==') or \
expr.startswith('>') or \
expr.startswith('<'):
return '@HIT@ {}'.format(expr)
return hit_condition
def on_disconnect_request(self, py_db, request):
'''
:param DisconnectRequest request:
'''
if request.arguments.terminateDebuggee:
self._request_terminate_process(py_db)
response = pydevd_base_schema.build_response(request)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
self._launch_or_attach_request_done = False
py_db.enable_output_redirection(False, False)
self.api.request_disconnect(py_db, resume_threads=True)
response = pydevd_base_schema.build_response(request)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def _verify_launch_or_attach_done(self, request):
if not self._launch_or_attach_request_done:
# Note that to validate the breakpoints we need the launch request to be done already
# (otherwise the filters wouldn't be set for the breakpoint validation).
if request.command == 'setFunctionBreakpoints':
body = SetFunctionBreakpointsResponseBody([])
else:
body = SetBreakpointsResponseBody([])
response = pydevd_base_schema.build_response(
request,
kwargs={
'body': body,
'success': False,
'message': 'Breakpoints may only be set after the launch request is received.'
})
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def on_setfunctionbreakpoints_request(self, py_db, request):
'''
:param SetFunctionBreakpointsRequest request:
'''
response = self._verify_launch_or_attach_done(request)
if response is not None:
return response
arguments = request.arguments # : :type arguments: SetFunctionBreakpointsArguments
function_breakpoints = []
suspend_policy = 'ALL'
# Not currently covered by the DAP.
is_logpoint = False
expression = None
breakpoints_set = []
for bp in arguments.breakpoints:
hit_condition = self._get_hit_condition_expression(bp.get('hitCondition'))
condition = bp.get('condition')
function_breakpoints.append(
FunctionBreakpoint(bp['name'], condition, expression, suspend_policy, hit_condition, is_logpoint))
# Note: always succeeds.
breakpoints_set.append(pydevd_schema.Breakpoint(
verified=True, id=self._next_breakpoint_id()).to_dict())
self.api.set_function_breakpoints(py_db, function_breakpoints)
body = {'breakpoints': breakpoints_set}
set_breakpoints_response = pydevd_base_schema.build_response(request, kwargs={'body': body})
return NetCommand(CMD_RETURN, 0, set_breakpoints_response, is_json=True)
def on_setbreakpoints_request(self, py_db, request):
'''
:param SetBreakpointsRequest request:
'''
response = self._verify_launch_or_attach_done(request)
if response is not None:
return response
arguments = request.arguments # : :type arguments: SetBreakpointsArguments
        # TODO: Path is optional here; it could be a source reference instead.
filename = self.api.filename_to_str(arguments.source.path)
func_name = 'None'
self.api.remove_all_breakpoints(py_db, filename)
btype = 'python-line'
suspend_policy = 'ALL'
if not filename.lower().endswith('.py'): # Note: check based on original file, not mapping.
if self._options.django_debug:
btype = 'django-line'
elif self._options.flask_debug:
btype = 'jinja2-line'
breakpoints_set = []
for source_breakpoint in arguments.breakpoints:
source_breakpoint = SourceBreakpoint(**source_breakpoint)
line = source_breakpoint.line
condition = source_breakpoint.condition
breakpoint_id = self._next_breakpoint_id()
hit_condition = self._get_hit_condition_expression(source_breakpoint.hitCondition)
log_message = source_breakpoint.logMessage
if not log_message:
is_logpoint = None
expression = None
else:
is_logpoint = True
expression = convert_dap_log_message_to_expression(log_message)
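                # E.g. (illustrative): a logMessage such as 'x is {x}' becomes a Python
                # expression which, when the line is hit, prints the interpolated message
                # instead of suspending the thread.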
on_changed_breakpoint_state = partial(self._on_changed_breakpoint_state, py_db, arguments.source)
result = self.api.add_breakpoint(
py_db, filename, btype, breakpoint_id, line, condition, func_name, expression,
suspend_policy, hit_condition, is_logpoint, adjust_line=True, on_changed_breakpoint_state=on_changed_breakpoint_state)
bp = self._create_breakpoint_from_add_breakpoint_result(py_db, arguments.source, breakpoint_id, result)
breakpoints_set.append(bp)
body = {'breakpoints': breakpoints_set}
set_breakpoints_response = pydevd_base_schema.build_response(request, kwargs={'body': body})
return NetCommand(CMD_RETURN, 0, set_breakpoints_response, is_json=True)
def _on_changed_breakpoint_state(self, py_db, source, breakpoint_id, result):
bp = self._create_breakpoint_from_add_breakpoint_result(py_db, source, breakpoint_id, result)
body = BreakpointEventBody(
reason='changed',
breakpoint=bp,
)
event = BreakpointEvent(body)
event_id = 0 # Actually ignored in this case
py_db.writer.add_command(NetCommand(event_id, 0, event, is_json=True))
def _create_breakpoint_from_add_breakpoint_result(self, py_db, source, breakpoint_id, result):
error_code = result.error_code
if error_code:
if error_code == self.api.ADD_BREAKPOINT_FILE_NOT_FOUND:
error_msg = 'Breakpoint in file that does not exist.'
elif error_code == self.api.ADD_BREAKPOINT_FILE_EXCLUDED_BY_FILTERS:
error_msg = 'Breakpoint in file excluded by filters.'
if py_db.get_use_libraries_filter():
                    error_msg += ('\nNote: may be excluded because of "justMyCode" option (default == true). '
                                  'Try setting "justMyCode": false in the debug configuration (e.g., launch.json).\n')
elif error_code == self.api.ADD_BREAKPOINT_LAZY_VALIDATION:
error_msg = 'Waiting for code to be loaded to verify breakpoint.'
elif error_code == self.api.ADD_BREAKPOINT_INVALID_LINE:
error_msg = 'Breakpoint added to invalid line.'
else:
# Shouldn't get here.
error_msg = 'Breakpoint not validated (reason unknown -- please report as bug).'
return pydevd_schema.Breakpoint(
verified=False, id=breakpoint_id, line=result.translated_line, message=error_msg, source=source).to_dict()
else:
return pydevd_schema.Breakpoint(
verified=True, id=breakpoint_id, line=result.translated_line, source=source).to_dict()
def on_setexceptionbreakpoints_request(self, py_db, request):
'''
:param SetExceptionBreakpointsRequest request:
'''
# : :type arguments: SetExceptionBreakpointsArguments
arguments = request.arguments
filters = arguments.filters
exception_options = arguments.exceptionOptions
self.api.remove_all_exception_breakpoints(py_db)
# Can't set these in the DAP.
condition = None
expression = None
notify_on_first_raise_only = False
ignore_libraries = 1 if py_db.get_use_libraries_filter() else 0
if exception_options:
break_raised = False
break_uncaught = False
for option in exception_options:
option = ExceptionOptions(**option)
if not option.path:
continue
# never: never breaks
#
# always: always breaks
#
# unhandled: breaks when exception unhandled
#
# userUnhandled: breaks if the exception is not handled by user code
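                # Illustrative option (hypothetical payload):
                #   {'path': [{'names': ['Python Exceptions']}, {'names': ['ValueError']}],
                #    'breakMode': 'always'}
                # -> notify whenever a ValueError is raised, even if handled.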
notify_on_handled_exceptions = 1 if option.breakMode == 'always' else 0
notify_on_unhandled_exceptions = 1 if option.breakMode == 'unhandled' else 0
notify_on_user_unhandled_exceptions = 1 if option.breakMode == 'userUnhandled' else 0
exception_paths = option.path
break_raised |= notify_on_handled_exceptions
break_uncaught |= notify_on_unhandled_exceptions
exception_names = []
if len(exception_paths) == 0:
continue
elif len(exception_paths) == 1:
if 'Python Exceptions' in exception_paths[0]['names']:
exception_names = ['BaseException']
else:
path_iterator = iter(exception_paths)
if 'Python Exceptions' in next(path_iterator)['names']:
for path in path_iterator:
for ex_name in path['names']:
exception_names.append(ex_name)
for exception_name in exception_names:
self.api.add_python_exception_breakpoint(
py_db,
exception_name,
condition,
expression,
notify_on_handled_exceptions,
notify_on_unhandled_exceptions,
notify_on_user_unhandled_exceptions,
notify_on_first_raise_only,
ignore_libraries
)
else:
break_raised = 'raised' in filters
break_uncaught = 'uncaught' in filters
break_user = 'userUnhandled' in filters
if break_raised or break_uncaught or break_user:
notify_on_handled_exceptions = 1 if break_raised else 0
notify_on_unhandled_exceptions = 1 if break_uncaught else 0
notify_on_user_unhandled_exceptions = 1 if break_user else 0
exception = 'BaseException'
self.api.add_python_exception_breakpoint(
py_db,
exception,
condition,
expression,
notify_on_handled_exceptions,
notify_on_unhandled_exceptions,
notify_on_user_unhandled_exceptions,
notify_on_first_raise_only,
ignore_libraries
)
if break_raised:
btype = None
if self._options.django_debug:
btype = 'django'
elif self._options.flask_debug:
btype = 'jinja2'
if btype:
self.api.add_plugins_exception_breakpoint(
py_db, btype, 'BaseException') # Note: Exception name could be anything here.
# Note: no body required on success.
set_breakpoints_response = pydevd_base_schema.build_response(request)
return NetCommand(CMD_RETURN, 0, set_breakpoints_response, is_json=True)
def on_stacktrace_request(self, py_db, request):
'''
:param StackTraceRequest request:
'''
# : :type stack_trace_arguments: StackTraceArguments
stack_trace_arguments = request.arguments
thread_id = stack_trace_arguments.threadId
if stack_trace_arguments.startFrame:
start_frame = int(stack_trace_arguments.startFrame)
else:
start_frame = 0
if stack_trace_arguments.levels:
levels = int(stack_trace_arguments.levels)
else:
levels = 0
fmt = stack_trace_arguments.format
if hasattr(fmt, 'to_dict'):
fmt = fmt.to_dict()
self.api.request_stack(py_db, request.seq, thread_id, fmt=fmt, start_frame=start_frame, levels=levels)
def on_exceptioninfo_request(self, py_db, request):
'''
:param ExceptionInfoRequest request:
'''
        # : :type exception_info_arguments: ExceptionInfoArguments
        exception_info_arguments = request.arguments
        thread_id = exception_info_arguments.threadId
max_frames = self._options.max_exception_stack_frames
self.api.request_exception_info_json(py_db, request, thread_id, max_frames)
def on_scopes_request(self, py_db, request):
'''
Scopes are the top-level items which appear for a frame (so, we receive the frame id
and provide the scopes it has).
:param ScopesRequest request:
'''
frame_id = request.arguments.frameId
variables_reference = frame_id
scopes = [
Scope('Locals', ScopeRequest(int(variables_reference), 'locals'), False, presentationHint='locals'),
Scope('Globals', ScopeRequest(int(variables_reference), 'globals'), False),
]
body = ScopesResponseBody(scopes)
scopes_response = pydevd_base_schema.build_response(request, kwargs={'body': body})
return NetCommand(CMD_RETURN, 0, scopes_response, is_json=True)
def on_evaluate_request(self, py_db, request):
'''
:param EvaluateRequest request:
'''
# : :type arguments: EvaluateArguments
arguments = request.arguments
if arguments.frameId is None:
self.api.request_exec_or_evaluate_json(py_db, request, thread_id='*')
else:
thread_id = py_db.suspended_frames_manager.get_thread_id_for_variable_reference(
arguments.frameId)
if thread_id is not None:
self.api.request_exec_or_evaluate_json(
py_db, request, thread_id)
else:
body = EvaluateResponseBody('', 0)
response = pydevd_base_schema.build_response(
request,
kwargs={
'body': body,
'success': False,
'message': 'Unable to find thread for evaluation.'
})
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def on_setexpression_request(self, py_db, request):
# : :type arguments: SetExpressionArguments
arguments = request.arguments
thread_id = py_db.suspended_frames_manager.get_thread_id_for_variable_reference(
arguments.frameId)
if thread_id is not None:
self.api.request_set_expression_json(py_db, request, thread_id)
else:
body = SetExpressionResponseBody('')
response = pydevd_base_schema.build_response(
request,
kwargs={
'body': body,
'success': False,
'message': 'Unable to find thread to set expression.'
})
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def on_variables_request(self, py_db, request):
'''
Variables can be asked whenever some place returned a variables reference (so, it
can be a scope gotten from on_scopes_request, the result of some evaluation, etc.).
Note that in the DAP the variables reference requires a unique int... the way this works for
pydevd is that an instance is generated for that specific variable reference and we use its
id(instance) to identify it to make sure all items are unique (and the actual {id->instance}
is added to a dict which is only valid while the thread is suspended and later cleared when
the related thread resumes execution).
see: SuspendedFramesManager
:param VariablesRequest request:
'''
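        # E.g. (illustrative): a 'Locals' scope returned from on_scopes_request carries a
        # variablesReference; passing that reference here enumerates the frame's locals.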
arguments = request.arguments # : :type arguments: VariablesArguments
variables_reference = arguments.variablesReference
if isinstance(variables_reference, ScopeRequest):
variables_reference = variables_reference.variable_reference
thread_id = py_db.suspended_frames_manager.get_thread_id_for_variable_reference(
variables_reference)
if thread_id is not None:
self.api.request_get_variable_json(py_db, request, thread_id)
else:
variables = []
body = VariablesResponseBody(variables)
variables_response = pydevd_base_schema.build_response(request, kwargs={
'body': body,
'success': False,
'message': 'Unable to find thread to evaluate variable reference.'
})
return NetCommand(CMD_RETURN, 0, variables_response, is_json=True)
def on_setvariable_request(self, py_db, request):
arguments = request.arguments # : :type arguments: SetVariableArguments
variables_reference = arguments.variablesReference
if isinstance(variables_reference, ScopeRequest):
variables_reference = variables_reference.variable_reference
if arguments.name.startswith('(return) '):
response = pydevd_base_schema.build_response(
request,
kwargs={
'body': SetVariableResponseBody(''),
'success': False,
'message': 'Cannot change return value'
})
return NetCommand(CMD_RETURN, 0, response, is_json=True)
thread_id = py_db.suspended_frames_manager.get_thread_id_for_variable_reference(
variables_reference)
if thread_id is not None:
self.api.request_change_variable_json(py_db, request, thread_id)
else:
response = pydevd_base_schema.build_response(
request,
kwargs={
'body': SetVariableResponseBody(''),
'success': False,
'message': 'Unable to find thread to evaluate variable reference.'
})
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def on_modules_request(self, py_db, request):
modules_manager = py_db.cmd_factory.modules_manager # : :type modules_manager: ModulesManager
modules_info = modules_manager.get_modules_info()
body = ModulesResponseBody(modules_info)
variables_response = pydevd_base_schema.build_response(request, kwargs={'body': body})
return NetCommand(CMD_RETURN, 0, variables_response, is_json=True)
def on_source_request(self, py_db, request):
'''
:param SourceRequest request:
'''
source_reference = request.arguments.sourceReference
server_filename = None
content = None
if source_reference != 0:
server_filename = pydevd_file_utils.get_server_filename_from_source_reference(source_reference)
if not server_filename:
server_filename = pydevd_file_utils.get_source_reference_filename_from_linecache(source_reference)
if server_filename:
# Try direct file access first - it's much faster when available.
try:
with open(server_filename, 'r') as stream:
content = stream.read()
except:
pass
if content is None:
# File might not exist at all, or we might not have a permission to read it,
# but it might also be inside a zipfile, or an IPython cell. In this case,
# linecache might still be able to retrieve the source.
lines = (linecache.getline(server_filename, i) for i in itertools.count(1))
lines = itertools.takewhile(bool, lines) # empty lines are '\n', EOF is ''
# If we didn't get at least one line back, reset it to None so that it's
# reported as error below, and not as an empty file.
content = ''.join(lines) or None
if content is None:
frame_id = pydevd_file_utils.get_frame_id_from_source_reference(source_reference)
pydev_log.debug('Found frame id: %s for source reference: %s', frame_id, source_reference)
if frame_id is not None:
try:
content = self.api.get_decompiled_source_from_frame_id(py_db, frame_id)
except Exception:
pydev_log.exception('Error getting source for frame id: %s', frame_id)
content = None
body = SourceResponseBody(content or '')
response_args = {'body': body}
if content is None:
if source_reference == 0:
message = 'Source unavailable'
elif server_filename:
message = 'Unable to retrieve source for %s' % (server_filename,)
else:
message = 'Invalid sourceReference %d' % (source_reference,)
response_args.update({'success': False, 'message': message})
response = pydevd_base_schema.build_response(request, kwargs=response_args)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def on_gototargets_request(self, py_db, request):
path = request.arguments.source.path
line = request.arguments.line
target_id = self._goto_targets_map.obtain_key((path, line))
target = {
'id': target_id,
'label': '%s:%s' % (path, line),
'line': line
}
body = GotoTargetsResponseBody(targets=[target])
response_args = {'body': body}
response = pydevd_base_schema.build_response(request, kwargs=response_args)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def on_goto_request(self, py_db, request):
target_id = int(request.arguments.targetId)
thread_id = request.arguments.threadId
try:
path, line = self._goto_targets_map.obtain_value(target_id)
except KeyError:
response = pydevd_base_schema.build_response(
request,
kwargs={
'body': {},
'success': False,
'message': 'Unknown goto target id: %d' % (target_id,),
})
return NetCommand(CMD_RETURN, 0, response, is_json=True)
self.api.request_set_next(py_db, request.seq, thread_id, CMD_SET_NEXT_STATEMENT, path, line, '*')
# See 'NetCommandFactoryJson.make_set_next_stmnt_status_message' for response
return None
def on_setdebuggerproperty_request(self, py_db, request):
args = request.arguments # : :type args: SetDebuggerPropertyArguments
if args.ideOS is not None:
self.api.set_ide_os(args.ideOS)
if args.dontTraceStartPatterns is not None and args.dontTraceEndPatterns is not None:
start_patterns = tuple(args.dontTraceStartPatterns)
end_patterns = tuple(args.dontTraceEndPatterns)
self.api.set_dont_trace_start_end_patterns(py_db, start_patterns, end_patterns)
if args.skipSuspendOnBreakpointException is not None:
py_db.skip_suspend_on_breakpoint_exception = tuple(
get_exception_class(x) for x in args.skipSuspendOnBreakpointException)
if args.skipPrintBreakpointException is not None:
py_db.skip_print_breakpoint_exception = tuple(
get_exception_class(x) for x in args.skipPrintBreakpointException)
if args.multiThreadsSingleNotification is not None:
py_db.multi_threads_single_notification = args.multiThreadsSingleNotification
# TODO: Support other common settings. Note that not all of these might be relevant to python.
# JustMyCodeStepping: 0 or 1
# AllowOutOfProcessSymbols: 0 or 1
# DisableJITOptimization: 0 or 1
# InterpreterOptions: 0 or 1
# StopOnExceptionCrossingManagedBoundary: 0 or 1
# WarnIfNoUserCodeOnLaunch: 0 or 1
        # EnableStepFiltering: true or false
response = pydevd_base_schema.build_response(request, kwargs={'body': {}})
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def on_pydevdsysteminfo_request(self, py_db, request):
try:
pid = os.getpid()
except AttributeError:
pid = None
        # It's possible to have the ppid reported from args. In this case, use that instead of the
        # real ppid (although we're using `ppid`, what we actually want is the `launcher_pid`;
        # so, if a python process is launched from another python process, consider that process the
        # parent and not any intermediary stubs).
ppid = py_db.get_arg_ppid() or self.api.get_ppid()
try:
impl_desc = platform.python_implementation()
except AttributeError:
impl_desc = PY_IMPL_NAME
py_info = pydevd_schema.PydevdPythonInfo(
version=PY_VERSION_STR,
implementation=pydevd_schema.PydevdPythonImplementationInfo(
name=PY_IMPL_NAME,
version=PY_IMPL_VERSION_STR,
description=impl_desc,
)
)
platform_info = pydevd_schema.PydevdPlatformInfo(name=sys.platform)
process_info = pydevd_schema.PydevdProcessInfo(
pid=pid,
ppid=ppid,
executable=sys.executable,
bitness=64 if IS_64BIT_PROCESS else 32,
)
pydevd_info = pydevd_schema.PydevdInfo(
usingCython=USING_CYTHON,
usingFrameEval=USING_FRAME_EVAL,
)
body = {
'python': py_info,
'platform': platform_info,
'process': process_info,
'pydevd': pydevd_info,
}
response = pydevd_base_schema.build_response(request, kwargs={'body': body})
return NetCommand(CMD_RETURN, 0, response, is_json=True)
def on_setpydevdsourcemap_request(self, py_db, request):
args = request.arguments # : :type args: SetPydevdSourceMapArguments
SourceMappingEntry = self.api.SourceMappingEntry
path = args.source.path
source_maps = args.pydevdSourceMaps
# : :type source_map: PydevdSourceMap
new_mappings = [
SourceMappingEntry(
source_map['line'],
source_map['endLine'],
source_map['runtimeLine'],
self.api.filename_to_str(source_map['runtimeSource']['path'])
) for source_map in source_maps
]
error_msg = self.api.set_source_mapping(py_db, path, new_mappings)
if error_msg:
response = pydevd_base_schema.build_response(
request,
kwargs={
'body': {},
'success': False,
'message': error_msg,
})
return NetCommand(CMD_RETURN, 0, response, is_json=True)
response = pydevd_base_schema.build_response(request)
return NetCommand(CMD_RETURN, 0, response, is_json=True)
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_json_debug_options.py
import json
import urllib.parse as urllib_parse
class DebugOptions(object):
__slots__ = [
'just_my_code',
'redirect_output',
'show_return_value',
'break_system_exit_zero',
'django_debug',
'flask_debug',
'stop_on_entry',
'max_exception_stack_frames',
'gui_event_loop',
]
def __init__(self):
self.just_my_code = True
self.redirect_output = False
self.show_return_value = False
self.break_system_exit_zero = False
self.django_debug = False
self.flask_debug = False
self.stop_on_entry = False
self.max_exception_stack_frames = 0
self.gui_event_loop = 'matplotlib'
def to_json(self):
dct = {}
for s in self.__slots__:
dct[s] = getattr(self, s)
return json.dumps(dct)
def update_fom_debug_options(self, debug_options):
if 'DEBUG_STDLIB' in debug_options:
self.just_my_code = not debug_options.get('DEBUG_STDLIB')
if 'REDIRECT_OUTPUT' in debug_options:
self.redirect_output = debug_options.get('REDIRECT_OUTPUT')
if 'SHOW_RETURN_VALUE' in debug_options:
self.show_return_value = debug_options.get('SHOW_RETURN_VALUE')
if 'BREAK_SYSTEMEXIT_ZERO' in debug_options:
self.break_system_exit_zero = debug_options.get('BREAK_SYSTEMEXIT_ZERO')
if 'DJANGO_DEBUG' in debug_options:
self.django_debug = debug_options.get('DJANGO_DEBUG')
if 'FLASK_DEBUG' in debug_options:
self.flask_debug = debug_options.get('FLASK_DEBUG')
if 'STOP_ON_ENTRY' in debug_options:
self.stop_on_entry = debug_options.get('STOP_ON_ENTRY')
        # Note: max_exception_stack_frames cannot be set by debug options.
def update_from_args(self, args):
if 'justMyCode' in args:
self.just_my_code = bool_parser(args['justMyCode'])
else:
            # justMyCode not provided: fall back to the deprecated 'debugStdLib' flag.
if 'debugStdLib' in args:
self.just_my_code = not bool_parser(args['debugStdLib'])
if 'redirectOutput' in args:
self.redirect_output = bool_parser(args['redirectOutput'])
if 'showReturnValue' in args:
self.show_return_value = bool_parser(args['showReturnValue'])
if 'breakOnSystemExitZero' in args:
self.break_system_exit_zero = bool_parser(args['breakOnSystemExitZero'])
if 'django' in args:
self.django_debug = bool_parser(args['django'])
if 'flask' in args:
self.flask_debug = bool_parser(args['flask'])
if 'jinja' in args:
self.flask_debug = bool_parser(args['jinja'])
if 'stopOnEntry' in args:
self.stop_on_entry = bool_parser(args['stopOnEntry'])
self.max_exception_stack_frames = int_parser(args.get('maxExceptionStackFrames', 0))
if 'guiEventLoop' in args:
self.gui_event_loop = str(args['guiEventLoop'])
def int_parser(s, default_value=0):
try:
return int(s)
except Exception:
return default_value
def bool_parser(s):
return s in ("True", "true", "1", True, 1)
def unquote(s):
return None if s is None else urllib_parse.unquote(s)
DEBUG_OPTIONS_PARSER = {
'WAIT_ON_ABNORMAL_EXIT': bool_parser,
'WAIT_ON_NORMAL_EXIT': bool_parser,
'BREAK_SYSTEMEXIT_ZERO': bool_parser,
'REDIRECT_OUTPUT': bool_parser,
'DJANGO_DEBUG': bool_parser,
'FLASK_DEBUG': bool_parser,
'FIX_FILE_PATH_CASE': bool_parser,
'CLIENT_OS_TYPE': unquote,
'DEBUG_STDLIB': bool_parser,
'STOP_ON_ENTRY': bool_parser,
'SHOW_RETURN_VALUE': bool_parser,
'MULTIPROCESS': bool_parser,
}
DEBUG_OPTIONS_BY_FLAG = {
'RedirectOutput': 'REDIRECT_OUTPUT=True',
'WaitOnNormalExit': 'WAIT_ON_NORMAL_EXIT=True',
'WaitOnAbnormalExit': 'WAIT_ON_ABNORMAL_EXIT=True',
'BreakOnSystemExitZero': 'BREAK_SYSTEMEXIT_ZERO=True',
'Django': 'DJANGO_DEBUG=True',
'Flask': 'FLASK_DEBUG=True',
'Jinja': 'FLASK_DEBUG=True',
'FixFilePathCase': 'FIX_FILE_PATH_CASE=True',
'DebugStdLib': 'DEBUG_STDLIB=True',
'WindowsClient': 'CLIENT_OS_TYPE=WINDOWS',
'UnixClient': 'CLIENT_OS_TYPE=UNIX',
'StopOnEntry': 'STOP_ON_ENTRY=True',
'ShowReturnValue': 'SHOW_RETURN_VALUE=True',
'Multiprocess': 'MULTIPROCESS=True',
}
def _build_debug_options(flags):
"""Build string representation of debug options from the launch config."""
return ';'.join(DEBUG_OPTIONS_BY_FLAG[flag]
for flag in flags or []
if flag in DEBUG_OPTIONS_BY_FLAG)
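# E.g. (illustrative): _build_debug_options(['RedirectOutput', 'Django'])
# -> 'REDIRECT_OUTPUT=True;DJANGO_DEBUG=True' (flags without a known mapping are skipped).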
def _parse_debug_options(opts):
"""Debug options are semicolon separated key=value pairs
"""
options = {}
if not opts:
return options
for opt in opts.split(';'):
try:
key, value = opt.split('=')
except ValueError:
continue
try:
options[key] = DEBUG_OPTIONS_PARSER[key](value)
except KeyError:
continue
return options
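# E.g. (illustrative): _parse_debug_options('REDIRECT_OUTPUT=True;DJANGO_DEBUG=True')
# -> {'REDIRECT_OUTPUT': True, 'DJANGO_DEBUG': True}; malformed or unknown pairs are dropped.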
def _extract_debug_options(opts, flags=None):
"""Return the debug options encoded in the given value.
"opts" is a semicolon-separated string of "key=value" pairs.
"flags" is a list of strings.
If flags is provided then it is used as a fallback.
The values come from the launch config:
{
type:'python',
request:'launch'|'attach',
name:'friendly name for debug config',
debugOptions:[
'RedirectOutput', 'Django'
],
options:'REDIRECT_OUTPUT=True;DJANGO_DEBUG=True'
}
Further information can be found here:
https://code.visualstudio.com/docs/editor/debugging#_launchjson-attributes
"""
if not opts:
opts = _build_debug_options(flags)
return _parse_debug_options(opts)
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_additional_thread_info_regular.py
from _pydevd_bundle.pydevd_constants import (STATE_RUN, PYTHON_SUSPEND, SUPPORT_GEVENT, ForkSafeLock,
_current_frames)
from _pydev_bundle import pydev_log
# IFDEF CYTHON
# pydev_log.debug("Using Cython speedups")
# ELSE
from _pydevd_bundle.pydevd_frame import PyDBFrame
# ENDIF
version = 11
#=======================================================================================================================
# PyDBAdditionalThreadInfo
#=======================================================================================================================
# IFDEF CYTHON
# cdef class PyDBAdditionalThreadInfo:
# ELSE
class PyDBAdditionalThreadInfo(object):
# ENDIF
# Note: the params in cython are declared in pydevd_cython.pxd.
# IFDEF CYTHON
# ELSE
__slots__ = [
'pydev_state',
'pydev_step_stop',
'pydev_original_step_cmd',
'pydev_step_cmd',
'pydev_notify_kill',
'pydev_django_resolve_frame',
'pydev_call_from_jinja2',
'pydev_call_inside_jinja2',
'is_tracing',
'conditional_breakpoint_exception',
'pydev_message',
'suspend_type',
'pydev_next_line',
'pydev_func_name',
'suspended_at_unhandled',
'trace_suspend_type',
'top_level_thread_tracer_no_back_frames',
'top_level_thread_tracer_unhandled',
'thread_tracer',
'step_in_initial_location',
# Used for CMD_SMART_STEP_INTO (to know which smart step into variant to use)
'pydev_smart_parent_offset',
'pydev_smart_child_offset',
# Used for CMD_SMART_STEP_INTO (list[_pydevd_bundle.pydevd_bytecode_utils.Variant])
# Filled when the cmd_get_smart_step_into_variants is requested (so, this is a copy
# of the last request for a given thread and pydev_smart_parent_offset/pydev_smart_child_offset relies on it).
'pydev_smart_step_into_variants',
'target_id_to_smart_step_into_variant',
'pydev_use_scoped_step_frame',
]
# ENDIF
def __init__(self):
self.pydev_state = STATE_RUN # STATE_RUN or STATE_SUSPEND
self.pydev_step_stop = None
# Note: we have `pydev_original_step_cmd` and `pydev_step_cmd` because the original is to
# say the action that started it and the other is to say what's the current tracing behavior
# (because it's possible that we start with a step over but may have to switch to a
        # different step strategy -- for instance, if a step over is done and we return from the
        # current method, the strategy is changed to a step in).
        self.pydev_original_step_cmd = -1  # Something like CMD_STEP_INTO, CMD_STEP_OVER, etc.
        self.pydev_step_cmd = -1  # Something like CMD_STEP_INTO, CMD_STEP_OVER, etc.
self.pydev_notify_kill = False
self.pydev_django_resolve_frame = False
self.pydev_call_from_jinja2 = None
self.pydev_call_inside_jinja2 = None
self.is_tracing = 0
self.conditional_breakpoint_exception = None
self.pydev_message = ''
self.suspend_type = PYTHON_SUSPEND
self.pydev_next_line = -1
self.pydev_func_name = '.invalid.' # Must match the type in cython
self.suspended_at_unhandled = False
self.trace_suspend_type = 'trace' # 'trace' or 'frame_eval'
self.top_level_thread_tracer_no_back_frames = []
self.top_level_thread_tracer_unhandled = None
self.thread_tracer = None
self.step_in_initial_location = None
self.pydev_smart_parent_offset = -1
self.pydev_smart_child_offset = -1
self.pydev_smart_step_into_variants = ()
self.target_id_to_smart_step_into_variant = {}
# Flag to indicate ipython use-case where each line will be executed as a call/line/return
        # in a new frame, but in practice we want to consider each new frame as if it was all
# part of the same frame.
#
# In practice this means that a step over shouldn't revert to a step in and we need some
# special logic to know when we should stop in a step over as we need to consider 2
# different frames as being equal if they're logically the continuation of a frame
# being executed by ipython line by line.
#
# See: https://github.com/microsoft/debugpy/issues/869#issuecomment-1132141003
self.pydev_use_scoped_step_frame = False
def get_topmost_frame(self, thread):
'''
Gets the topmost frame for the given thread. Note that it may be None
and callers should remove the reference to the frame as soon as possible
to avoid disturbing user code.
'''
# sys._current_frames(): dictionary with thread id -> topmost frame
current_frames = _current_frames()
topmost_frame = current_frames.get(thread.ident)
if topmost_frame is None:
# Note: this is expected for dummy threads (so, getting the topmost frame should be
# treated as optional).
pydev_log.info(
'Unable to get topmost frame for thread: %s, thread.ident: %s, id(thread): %s\nCurrent frames: %s.\n'
'GEVENT_SUPPORT: %s',
thread,
thread.ident,
id(thread),
current_frames,
SUPPORT_GEVENT,
)
return topmost_frame
def __str__(self):
return 'State:%s Stop:%s Cmd: %s Kill:%s' % (
self.pydev_state, self.pydev_step_stop, self.pydev_step_cmd, self.pydev_notify_kill)
_set_additional_thread_info_lock = ForkSafeLock()
def set_additional_thread_info(thread):
try:
additional_info = thread.additional_info
if additional_info is None:
raise AttributeError()
except:
with _set_additional_thread_info_lock:
# If it's not there, set it within a lock to avoid any racing
# conditions.
additional_info = getattr(thread, 'additional_info', None)
if additional_info is None:
additional_info = PyDBAdditionalThreadInfo()
thread.additional_info = additional_info
return additional_info
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_additional_thread_info.py
# Defines which version of the PyDBAdditionalThreadInfo we'll use.
from _pydevd_bundle.pydevd_constants import ENV_FALSE_LOWER_VALUES, USE_CYTHON_FLAG, \
ENV_TRUE_LOWER_VALUES
if USE_CYTHON_FLAG in ENV_TRUE_LOWER_VALUES:
# We must import the cython version if forcing cython
from _pydevd_bundle.pydevd_cython_wrapper import PyDBAdditionalThreadInfo, set_additional_thread_info, _set_additional_thread_info_lock # @UnusedImport
elif USE_CYTHON_FLAG in ENV_FALSE_LOWER_VALUES:
# Use the regular version if not forcing cython
from _pydevd_bundle.pydevd_additional_thread_info_regular import PyDBAdditionalThreadInfo, set_additional_thread_info, _set_additional_thread_info_lock # @UnusedImport @Reimport
else:
# Regular: use fallback if not found (message is already given elsewhere).
try:
from _pydevd_bundle.pydevd_cython_wrapper import PyDBAdditionalThreadInfo, set_additional_thread_info, _set_additional_thread_info_lock
except ImportError:
from _pydevd_bundle.pydevd_additional_thread_info_regular import PyDBAdditionalThreadInfo, set_additional_thread_info, _set_additional_thread_info_lock # @UnusedImport
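# E.g. (assuming USE_CYTHON_FLAG carries the lower-cased value of the PYDEVD_USE_CYTHON
# environment variable): 'true' forces the Cython version (an ImportError then propagates),
# 'false' forces the pure-Python version, and anything else tries Cython first with a
# silent fallback to the pure-Python implementation.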
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_frame.py
import linecache
import os.path
import re
from _pydev_bundle import pydev_log
from _pydevd_bundle import pydevd_dont_trace
from _pydevd_bundle.pydevd_constants import (RETURN_VALUES_DICT, NO_FTRACE,
EXCEPTION_TYPE_HANDLED, EXCEPTION_TYPE_USER_UNHANDLED, PYDEVD_IPYTHON_CONTEXT)
from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame, just_raised, remove_exception_from_frame, ignore_exception_trace
from _pydevd_bundle.pydevd_utils import get_clsname_for_code
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame
from _pydevd_bundle.pydevd_comm_constants import constant_to_str, CMD_SET_FUNCTION_BREAK
try:
from _pydevd_bundle.pydevd_bytecode_utils import get_smart_step_into_variant_from_frame_offset
except ImportError:
def get_smart_step_into_variant_from_frame_offset(*args, **kwargs):
return None
# IFDEF CYTHON
# cython_inline_constant: CMD_STEP_INTO = 107
# cython_inline_constant: CMD_STEP_INTO_MY_CODE = 144
# cython_inline_constant: CMD_STEP_RETURN = 109
# cython_inline_constant: CMD_STEP_RETURN_MY_CODE = 160
# cython_inline_constant: CMD_STEP_OVER = 108
# cython_inline_constant: CMD_STEP_OVER_MY_CODE = 159
# cython_inline_constant: CMD_STEP_CAUGHT_EXCEPTION = 137
# cython_inline_constant: CMD_SET_BREAK = 111
# cython_inline_constant: CMD_SMART_STEP_INTO = 128
# cython_inline_constant: CMD_STEP_INTO_COROUTINE = 206
# cython_inline_constant: STATE_RUN = 1
# cython_inline_constant: STATE_SUSPEND = 2
# ELSE
# Note: those are now inlined on cython.
CMD_STEP_INTO = 107
CMD_STEP_INTO_MY_CODE = 144
CMD_STEP_RETURN = 109
CMD_STEP_RETURN_MY_CODE = 160
CMD_STEP_OVER = 108
CMD_STEP_OVER_MY_CODE = 159
CMD_STEP_CAUGHT_EXCEPTION = 137
CMD_SET_BREAK = 111
CMD_SMART_STEP_INTO = 128
CMD_STEP_INTO_COROUTINE = 206
STATE_RUN = 1
STATE_SUSPEND = 2
# ENDIF
basename = os.path.basename
IGNORE_EXCEPTION_TAG = re.compile('[^#]*#.*@IgnoreException')
DEBUG_START = ('pydevd.py', 'run')
DEBUG_START_PY3K = ('_pydev_execfile.py', 'execfile')
TRACE_PROPERTY = 'pydevd_traceproperty.py'
import dis
try:
StopAsyncIteration
except NameError:
StopAsyncIteration = StopIteration
# IFDEF CYTHON
# cdef is_unhandled_exception(container_obj, py_db, frame, int last_raise_line, set raise_lines):
# ELSE
def is_unhandled_exception(container_obj, py_db, frame, last_raise_line, raise_lines):
# ENDIF
if frame.f_lineno in raise_lines:
return True
else:
try_except_infos = container_obj.try_except_infos
if try_except_infos is None:
container_obj.try_except_infos = try_except_infos = py_db.collect_try_except_info(frame.f_code)
if not try_except_infos:
# Consider the last exception as unhandled because there's no try..except in it.
return True
else:
# Now, consider only the try..except for the raise
valid_try_except_infos = []
for try_except_info in try_except_infos:
if try_except_info.is_line_in_try_block(last_raise_line):
valid_try_except_infos.append(try_except_info)
if not valid_try_except_infos:
return True
else:
# Note: check all, not only the "valid" ones to cover the case
# in "tests_python.test_tracing_on_top_level.raise_unhandled10"
# where one try..except is inside the other with only a raise
# and it's gotten in the except line.
for try_except_info in try_except_infos:
if try_except_info.is_line_in_except_block(frame.f_lineno):
if (
frame.f_lineno == try_except_info.except_line or
frame.f_lineno in try_except_info.raise_lines_in_except
):
# In a raise inside a try..except block or some except which doesn't
# match the raised exception.
return True
return False
# IFDEF CYTHON
# cdef class _TryExceptContainerObj:
# cdef public list try_except_infos;
# def __init__(self):
# self.try_except_infos = None
# ELSE
class _TryExceptContainerObj(object):
'''
    A dumb container object just to contain the try..except info when needed. Meant to be
    persistent among multiple PyDBFrames for the same code object.
'''
try_except_infos = None
# ENDIF
#=======================================================================================================================
# PyDBFrame
#=======================================================================================================================
# IFDEF CYTHON
# cdef class PyDBFrame:
# ELSE
class PyDBFrame:
    '''This handles the tracing for a given frame, so the trace_dispatch
is used initially when we enter into a new context ('call') and then
is reused for the entire context.
'''
# ENDIF
# Note: class (and not instance) attributes.
# Same thing in the main debugger but only considering the file contents, while the one in the main debugger
# considers the user input (so, the actual result must be a join of both).
filename_to_lines_where_exceptions_are_ignored = {}
filename_to_stat_info = {}
# IFDEF CYTHON
# cdef tuple _args
# cdef int should_skip
# cdef object exc_info
# def __init__(self, tuple args):
# self._args = args # In the cython version we don't need to pass the frame
# self.should_skip = -1 # On cythonized version, put in instance.
# self.exc_info = ()
# ELSE
should_skip = -1 # Default value in class (put in instance on set).
exc_info = () # Default value in class (put in instance on set).
def __init__(self, args):
# args = main_debugger, abs_path_canonical_path_and_base, base, info, t, frame
# yeap, much faster than putting in self and then getting it from self later on
self._args = args
# ENDIF
def set_suspend(self, *args, **kwargs):
self._args[0].set_suspend(*args, **kwargs)
def do_wait_suspend(self, *args, **kwargs):
self._args[0].do_wait_suspend(*args, **kwargs)
# IFDEF CYTHON
# def trace_exception(self, frame, str event, arg):
# cdef bint should_stop;
# cdef tuple exc_info;
# ELSE
def trace_exception(self, frame, event, arg):
# ENDIF
if event == 'exception':
should_stop, frame = self._should_stop_on_exception(frame, event, arg)
if should_stop:
if self._handle_exception(frame, event, arg, EXCEPTION_TYPE_HANDLED):
return self.trace_dispatch
elif event == 'return':
exc_info = self.exc_info
if exc_info and arg is None:
frame_skips_cache, frame_cache_key = self._args[4], self._args[5]
custom_key = (frame_cache_key, 'try_exc_info')
container_obj = frame_skips_cache.get(custom_key)
if container_obj is None:
container_obj = frame_skips_cache[custom_key] = _TryExceptContainerObj()
if is_unhandled_exception(container_obj, self._args[0], frame, exc_info[1], exc_info[2]) and \
self.handle_user_exception(frame):
return self.trace_dispatch
return self.trace_exception
# IFDEF CYTHON
# cdef _should_stop_on_exception(self, frame, str event, arg):
# cdef PyDBAdditionalThreadInfo info;
# cdef bint should_stop;
# cdef bint was_just_raised;
# cdef list check_excs;
# ELSE
def _should_stop_on_exception(self, frame, event, arg):
# ENDIF
# main_debugger, _filename, info, _thread = self._args
main_debugger = self._args[0]
info = self._args[2]
should_stop = False
# STATE_SUSPEND = 2
if info.pydev_state != 2: # and breakpoint is not None:
exception, value, trace = arg
if trace is not None and hasattr(trace, 'tb_next'):
                # On Jython, trace is None on the first event and it may not have a tb_next.
should_stop = False
exception_breakpoint = None
try:
if main_debugger.plugin is not None:
result = main_debugger.plugin.exception_break(main_debugger, self, frame, self._args, arg)
if result:
should_stop, frame = result
except:
pydev_log.exception()
if not should_stop:
# Apply checks that don't need the exception breakpoint (where we shouldn't ever stop).
if exception == SystemExit and main_debugger.ignore_system_exit_code(value):
pass
elif exception in (GeneratorExit, StopIteration, StopAsyncIteration):
# These exceptions are control-flow related (they work as a generator
# pause), so, we shouldn't stop on them.
pass
elif ignore_exception_trace(trace):
pass
else:
was_just_raised = trace.tb_next is None
# It was not handled by any plugin, lets check exception breakpoints.
check_excs = []
# Note: check user unhandled before regular exceptions.
exc_break_user = main_debugger.get_exception_breakpoint(
exception, main_debugger.break_on_user_uncaught_exceptions)
if exc_break_user is not None:
check_excs.append((exc_break_user, True))
exc_break_caught = main_debugger.get_exception_breakpoint(
exception, main_debugger.break_on_caught_exceptions)
if exc_break_caught is not None:
check_excs.append((exc_break_caught, False))
for exc_break, is_user_uncaught in check_excs:
# Initially mark that it should stop and then go into exclusions.
should_stop = True
if main_debugger.exclude_exception_by_filter(exc_break, trace):
pydev_log.debug("Ignore exception %s in library %s -- (%s)" % (exception, frame.f_code.co_filename, frame.f_code.co_name))
should_stop = False
elif exc_break.condition is not None and \
not main_debugger.handle_breakpoint_condition(info, exc_break, frame):
should_stop = False
elif is_user_uncaught:
# Note: we don't stop here, we just collect the exc_info to use later on...
should_stop = False
if not main_debugger.apply_files_filter(frame, frame.f_code.co_filename, True) \
and (frame.f_back is None or main_debugger.apply_files_filter(frame.f_back, frame.f_back.f_code.co_filename, True)):
# User uncaught means that we're currently in user code but the code
# up the stack is library code.
exc_info = self.exc_info
if not exc_info:
exc_info = (arg, frame.f_lineno, set([frame.f_lineno]))
else:
lines = exc_info[2]
lines.add(frame.f_lineno)
exc_info = (arg, frame.f_lineno, lines)
self.exc_info = exc_info
else:
# I.e.: these are only checked if we're not dealing with user uncaught exceptions.
if exc_break.notify_on_first_raise_only and main_debugger.skip_on_exceptions_thrown_in_same_context \
and not was_just_raised and not just_raised(trace.tb_next):
# In this case we never stop if it was just raised, so, to know if it was the first we
# need to check if we're in the 2nd method.
should_stop = False # I.e.: we stop only when we're at the caller of a method that throws an exception
elif exc_break.notify_on_first_raise_only and not main_debugger.skip_on_exceptions_thrown_in_same_context \
and not was_just_raised:
should_stop = False # I.e.: we stop only when it was just raised
elif was_just_raised and main_debugger.skip_on_exceptions_thrown_in_same_context:
# Option: Don't break if an exception is caught in the same function from which it is thrown
should_stop = False
if should_stop:
exception_breakpoint = exc_break
try:
info.pydev_message = exc_break.qname
except:
info.pydev_message = exc_break.qname.encode('utf-8')
break
if should_stop:
# Always add exception to frame (must remove later after we proceed).
add_exception_to_frame(frame, (exception, value, trace))
if exception_breakpoint is not None and exception_breakpoint.expression is not None:
main_debugger.handle_breakpoint_expression(exception_breakpoint, info, frame)
return should_stop, frame
def handle_user_exception(self, frame):
exc_info = self.exc_info
if exc_info:
return self._handle_exception(frame, 'exception', exc_info[0], EXCEPTION_TYPE_USER_UNHANDLED)
return False
# IFDEF CYTHON
# cdef _handle_exception(self, frame, str event, arg, str exception_type):
# cdef bint stopped;
# cdef tuple abs_real_path_and_base;
# cdef str absolute_filename;
# cdef str canonical_normalized_filename;
# cdef dict filename_to_lines_where_exceptions_are_ignored;
# cdef dict lines_ignored;
# cdef dict frame_id_to_frame;
# cdef dict merged;
# cdef object trace_obj;
# cdef object main_debugger;
# ELSE
def _handle_exception(self, frame, event, arg, exception_type):
# ENDIF
stopped = False
try:
# print('_handle_exception', frame.f_lineno, frame.f_code.co_name)
# We have 3 things in arg: exception type, description, traceback object
trace_obj = arg[2]
main_debugger = self._args[0]
initial_trace_obj = trace_obj
if trace_obj.tb_next is None and trace_obj.tb_frame is frame:
# I.e.: tb_next should be only None in the context it was thrown (trace_obj.tb_frame is frame is just a double check).
pass
else:
# Get the trace_obj from where the exception was raised...
while trace_obj.tb_next is not None:
trace_obj = trace_obj.tb_next
if main_debugger.ignore_exceptions_thrown_in_lines_with_ignore_exception:
for check_trace_obj in (initial_trace_obj, trace_obj):
abs_real_path_and_base = get_abs_path_real_path_and_base_from_frame(check_trace_obj.tb_frame)
absolute_filename = abs_real_path_and_base[0]
canonical_normalized_filename = abs_real_path_and_base[1]
filename_to_lines_where_exceptions_are_ignored = self.filename_to_lines_where_exceptions_are_ignored
lines_ignored = filename_to_lines_where_exceptions_are_ignored.get(canonical_normalized_filename)
if lines_ignored is None:
lines_ignored = filename_to_lines_where_exceptions_are_ignored[canonical_normalized_filename] = {}
try:
curr_stat = os.stat(absolute_filename)
curr_stat = (curr_stat.st_size, curr_stat.st_mtime)
except:
curr_stat = None
last_stat = self.filename_to_stat_info.get(absolute_filename)
if last_stat != curr_stat:
self.filename_to_stat_info[absolute_filename] = curr_stat
lines_ignored.clear()
try:
linecache.checkcache(absolute_filename)
except:
pydev_log.exception('Error in linecache.checkcache(%r)', absolute_filename)
from_user_input = main_debugger.filename_to_lines_where_exceptions_are_ignored.get(canonical_normalized_filename)
if from_user_input:
merged = {}
merged.update(lines_ignored)
# Override what we have with the related entries that the user entered
merged.update(from_user_input)
else:
merged = lines_ignored
exc_lineno = check_trace_obj.tb_lineno
# print ('lines ignored', lines_ignored)
# print ('user input', from_user_input)
# print ('merged', merged, 'curr', exc_lineno)
if exc_lineno not in merged: # Note: check on merged but update lines_ignored.
try:
line = linecache.getline(absolute_filename, exc_lineno, check_trace_obj.tb_frame.f_globals)
except:
pydev_log.exception('Error in linecache.getline(%r, %s, f_globals)', absolute_filename, exc_lineno)
line = ''
if IGNORE_EXCEPTION_TAG.match(line) is not None:
lines_ignored[exc_lineno] = 1
return False
else:
# Put in the cache saying not to ignore
lines_ignored[exc_lineno] = 0
else:
# Ok, dict has it already cached, so, let's check it...
if merged.get(exc_lineno, 0):
return False
thread = self._args[3]
try:
frame_id_to_frame = {}
frame_id_to_frame[id(frame)] = frame
f = trace_obj.tb_frame
while f is not None:
frame_id_to_frame[id(f)] = f
f = f.f_back
f = None
stopped = True
main_debugger.send_caught_exception_stack(thread, arg, id(frame))
try:
self.set_suspend(thread, CMD_STEP_CAUGHT_EXCEPTION)
self.do_wait_suspend(thread, frame, event, arg, exception_type=exception_type)
finally:
main_debugger.send_caught_exception_stack_proceeded(thread)
except:
pydev_log.exception()
main_debugger.set_trace_for_frame_and_parents(frame)
finally:
# Make sure the user cannot see the '__exception__' we added after we leave the suspend state.
remove_exception_from_frame(frame)
# Clear some local variables...
frame = None
trace_obj = None
initial_trace_obj = None
check_trace_obj = None
f = None
frame_id_to_frame = None
main_debugger = None
thread = None
return stopped
# IFDEF CYTHON
# cdef get_func_name(self, frame):
# cdef str func_name
# ELSE
def get_func_name(self, frame):
# ENDIF
code_obj = frame.f_code
func_name = code_obj.co_name
try:
cls_name = get_clsname_for_code(code_obj, frame)
if cls_name is not None:
return "%s.%s" % (cls_name, func_name)
else:
return func_name
except:
pydev_log.exception()
return func_name
# IFDEF CYTHON
# cdef _show_return_values(self, frame, arg):
# ELSE
def _show_return_values(self, frame, arg):
# ENDIF
try:
try:
f_locals_back = getattr(frame.f_back, "f_locals", None)
if f_locals_back is not None:
return_values_dict = f_locals_back.get(RETURN_VALUES_DICT, None)
if return_values_dict is None:
return_values_dict = {}
f_locals_back[RETURN_VALUES_DICT] = return_values_dict
name = self.get_func_name(frame)
return_values_dict[name] = arg
except:
pydev_log.exception()
finally:
f_locals_back = None
# IFDEF CYTHON
# cdef _remove_return_values(self, main_debugger, frame):
# ELSE
def _remove_return_values(self, main_debugger, frame):
# ENDIF
try:
try:
# Showing return values was turned off, we should remove them from locals dict.
# The values can be in the current frame or in the back one
frame.f_locals.pop(RETURN_VALUES_DICT, None)
f_locals_back = getattr(frame.f_back, "f_locals", None)
if f_locals_back is not None:
f_locals_back.pop(RETURN_VALUES_DICT, None)
except:
pydev_log.exception()
finally:
f_locals_back = None
# IFDEF CYTHON
# cdef _get_unfiltered_back_frame(self, main_debugger, frame):
# ELSE
def _get_unfiltered_back_frame(self, main_debugger, frame):
# ENDIF
f = frame.f_back
while f is not None:
if not main_debugger.is_files_filter_enabled:
return f
else:
if main_debugger.apply_files_filter(f, f.f_code.co_filename, False):
f = f.f_back
else:
return f
return f
# IFDEF CYTHON
# cdef _is_same_frame(self, target_frame, current_frame):
# cdef PyDBAdditionalThreadInfo info;
# ELSE
def _is_same_frame(self, target_frame, current_frame):
# ENDIF
if target_frame is current_frame:
return True
info = self._args[2]
if info.pydev_use_scoped_step_frame:
# If using scoped step we don't check the target, we just need to check
# if the current matches the same heuristic where the target was defined.
if target_frame is not None and current_frame is not None:
if target_frame.f_code.co_filename == current_frame.f_code.co_filename:
# The co_name may be different (it may include the line number), but
# the filename must still be the same.
f = current_frame.f_back
if f is not None and f.f_code.co_name == PYDEVD_IPYTHON_CONTEXT[1]:
f = f.f_back
if f is not None and f.f_code.co_name == PYDEVD_IPYTHON_CONTEXT[2]:
return True
return False
# IFDEF CYTHON
# cpdef trace_dispatch(self, frame, str event, arg):
# cdef tuple abs_path_canonical_path_and_base;
# cdef bint is_exception_event;
# cdef bint has_exception_breakpoints;
# cdef bint can_skip;
# cdef bint stop;
# cdef PyDBAdditionalThreadInfo info;
# cdef int step_cmd;
# cdef int line;
# cdef bint is_line;
# cdef bint is_call;
# cdef bint is_return;
# cdef bint should_stop;
# cdef dict breakpoints_for_file;
# cdef dict stop_info;
# cdef str curr_func_name;
# cdef bint exist_result;
# cdef dict frame_skips_cache;
# cdef object frame_cache_key;
# cdef tuple line_cache_key;
# cdef int breakpoints_in_line_cache;
# cdef int breakpoints_in_frame_cache;
# cdef bint has_breakpoint_in_frame;
# cdef bint is_coroutine_or_generator;
# cdef int bp_line;
# cdef object bp;
# cdef int pydev_smart_parent_offset
# cdef int pydev_smart_child_offset
# cdef tuple pydev_smart_step_into_variants
# ELSE
def trace_dispatch(self, frame, event, arg):
# ENDIF
# Note: this is a big function because most of the logic related to hitting a breakpoint and
# stepping is contained in it. Ideally this could be split among multiple functions, but the
# problem in this case is that in pure-python function calls are expensive and even more so
# when tracing is on (because each function call will get an additional tracing call). We
# try to address this by using the info.is_tracing for the fastest possible return, but the
        # cost is still high (maybe we could use code generation in the future and
        # better split the code according to what each part does).
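        #
        # A refresher on CPython's sys.settrace contract (a hedged sketch, not
        # pydevd code): the function returned for a 'call' event becomes the
        # local trace function for that frame; returning None drops tracing for
        # it. That's why this method returns self.trace_dispatch to keep tracing
        # a frame and None (or NO_FTRACE for non-call events) to stop, e.g.:
        #
        #     import sys
        #
        #     def tracer(frame, event, arg):
        #         print(event, frame.f_code.co_name, frame.f_lineno)
        #         return tracer  # keep tracing this frame; None would stop it
        #
        #     sys.settrace(tracer)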
# DEBUG = '_debugger_case_generator.py' in frame.f_code.co_filename
main_debugger, abs_path_canonical_path_and_base, info, thread, frame_skips_cache, frame_cache_key = self._args
# if DEBUG: print('frame trace_dispatch %s %s %s %s %s %s, stop: %s' % (frame.f_lineno, frame.f_code.co_name, frame.f_code.co_filename, event, constant_to_str(info.pydev_step_cmd), arg, info.pydev_step_stop))
try:
info.is_tracing += 1
# TODO: This shouldn't be needed. The fact that frame.f_lineno
# is None seems like a bug in Python 3.11.
# Reported in: https://github.com/python/cpython/issues/94485
            line = frame.f_lineno or 0  # Workaround for the case where frame.f_lineno is None.
line_cache_key = (frame_cache_key, line)
if main_debugger.pydb_disposed:
return None if event == 'call' else NO_FTRACE
plugin_manager = main_debugger.plugin
has_exception_breakpoints = (
main_debugger.break_on_caught_exceptions
or main_debugger.break_on_user_uncaught_exceptions
or main_debugger.has_plugin_exception_breaks)
stop_frame = info.pydev_step_stop
step_cmd = info.pydev_step_cmd
function_breakpoint_on_call_event = None
            if frame.f_code.co_flags & 0xa0:  # 0xa0 == CO_GENERATOR (0x20) | CO_COROUTINE (0x80)
# Dealing with coroutines and generators:
# When in a coroutine we change the perceived event to the debugger because
# a call, StopIteration exception and return are usually just pausing/unpausing it.
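                #
                # Illustrative flag check (hedged; the values match CPython's
                # inspect module, where CO_GENERATOR == 0x20 and
                # CO_COROUTINE == 0x80):
                #
                #     import inspect
                #     is_gen_or_coro = bool(
                #         frame.f_code.co_flags
                #         & (inspect.CO_GENERATOR | inspect.CO_COROUTINE))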
if event == 'line':
is_line = True
is_call = False
is_return = False
is_exception_event = False
elif event == 'return':
is_line = False
is_call = False
is_return = True
is_exception_event = False
returns_cache_key = (frame_cache_key, 'returns')
return_lines = frame_skips_cache.get(returns_cache_key)
if return_lines is None:
# Note: we're collecting the return lines by inspecting the bytecode as
# there are multiple returns and multiple stop iterations when awaiting and
# it doesn't give any clear indication when a coroutine or generator is
# finishing or just pausing.
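                        #
                        # A hedged stdlib sketch of the same idea (opcode names and
                        # Instruction attributes vary across CPython versions;
                        # collect_return_info is pydevd's own cached implementation):
                        #
                        #     import dis
                        #     lines, cur = set(), None
                        #     for instr in dis.get_instructions(frame.f_code):
                        #         if instr.starts_line is not None:
                        #             cur = instr.starts_line
                        #         if instr.opname == 'RETURN_VALUE':
                        #             lines.add(cur)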
return_lines = set()
for x in main_debugger.collect_return_info(frame.f_code):
# Note: cython does not support closures in cpdefs (so we can't use
# a list comprehension).
return_lines.add(x.return_line)
frame_skips_cache[returns_cache_key] = return_lines
if line not in return_lines:
# Not really a return (coroutine/generator paused).
return self.trace_dispatch
else:
if self.exc_info:
self.handle_user_exception(frame)
return self.trace_dispatch
# Tricky handling: usually when we're on a frame which is about to exit
# we set the step mode to step into, but in this case we'd end up in the
# asyncio internal machinery, which is not what we want, so, we just
# ask the stop frame to be a level up.
#
# Note that there's an issue here which we may want to fix in the future: if
# the back frame is a frame which is filtered, we won't stop properly.
# Solving this may not be trivial as we'd need to put a scope in the step
# in, but we may have to do it anyways to have a step in which doesn't end
                        # up in asyncio.
#
# Note2: we don't revert to a step in if we're doing scoped stepping
# (because on scoped stepping we're always receiving a call/line/return
# event for each line in ipython, so, we can't revert to step in on return
# as the return shouldn't mean that we've actually completed executing a
# frame in this case).
if stop_frame is frame and not info.pydev_use_scoped_step_frame:
if step_cmd in (CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE, CMD_STEP_INTO, CMD_STEP_INTO_MY_CODE):
f = self._get_unfiltered_back_frame(main_debugger, frame)
if f is not None:
info.pydev_step_cmd = CMD_STEP_INTO_COROUTINE
info.pydev_step_stop = f
else:
if step_cmd == CMD_STEP_OVER:
info.pydev_step_cmd = CMD_STEP_INTO
info.pydev_step_stop = None
elif step_cmd == CMD_STEP_OVER_MY_CODE:
info.pydev_step_cmd = CMD_STEP_INTO_MY_CODE
info.pydev_step_stop = None
elif step_cmd == CMD_STEP_INTO_COROUTINE:
# We're exiting this one, so, mark the new coroutine context.
f = self._get_unfiltered_back_frame(main_debugger, frame)
if f is not None:
info.pydev_step_stop = f
else:
info.pydev_step_cmd = CMD_STEP_INTO
info.pydev_step_stop = None
elif event == 'exception':
breakpoints_for_file = None
if has_exception_breakpoints:
should_stop, frame = self._should_stop_on_exception(frame, event, arg)
if should_stop:
if self._handle_exception(frame, event, arg, EXCEPTION_TYPE_HANDLED):
return self.trace_dispatch
return self.trace_dispatch
else:
# event == 'call' or event == 'c_XXX'
return self.trace_dispatch
else: # Not coroutine nor generator
if event == 'line':
is_line = True
is_call = False
is_return = False
is_exception_event = False
elif event == 'return':
is_line = False
is_return = True
is_call = False
is_exception_event = False
# If we are in single step mode and something causes us to exit the current frame, we need to make sure we break
# eventually. Force the step mode to step into and the step stop frame to None.
# I.e.: F6 in the end of a function should stop in the next possible position (instead of forcing the user
# to make a step in or step over at that location).
# Note: this is especially troublesome when we're skipping code with the
# @DontTrace comment.
if (
stop_frame is frame and
not info.pydev_use_scoped_step_frame and is_return and
step_cmd in (CMD_STEP_OVER, CMD_STEP_RETURN, CMD_STEP_OVER_MY_CODE, CMD_STEP_RETURN_MY_CODE, CMD_SMART_STEP_INTO)
):
if step_cmd in (CMD_STEP_OVER, CMD_STEP_RETURN, CMD_SMART_STEP_INTO):
info.pydev_step_cmd = CMD_STEP_INTO
else:
info.pydev_step_cmd = CMD_STEP_INTO_MY_CODE
info.pydev_step_stop = None
if self.exc_info:
if self.handle_user_exception(frame):
return self.trace_dispatch
elif event == 'call':
is_line = False
is_call = True
is_return = False
is_exception_event = False
if frame.f_code.co_firstlineno == frame.f_lineno: # Check line to deal with async/await.
function_breakpoint_on_call_event = main_debugger.function_breakpoint_name_to_breakpoint.get(frame.f_code.co_name)
elif event == 'exception':
is_exception_event = True
breakpoints_for_file = None
if has_exception_breakpoints:
should_stop, frame = self._should_stop_on_exception(frame, event, arg)
if should_stop:
if self._handle_exception(frame, event, arg, EXCEPTION_TYPE_HANDLED):
return self.trace_dispatch
is_line = False
is_return = False
is_call = False
else:
# Unexpected: just keep the same trace func (i.e.: event == 'c_XXX').
return self.trace_dispatch
if not is_exception_event:
breakpoints_for_file = main_debugger.breakpoints.get(abs_path_canonical_path_and_base[1])
can_skip = False
if info.pydev_state == 1: # STATE_RUN = 1
# we can skip if:
# - we have no stop marked
# - we should make a step return/step over and we're not in the current frame
# - we're stepping into a coroutine context and we're not in that context
if step_cmd == -1:
can_skip = True
elif step_cmd in (CMD_STEP_OVER, CMD_STEP_RETURN, CMD_STEP_OVER_MY_CODE, CMD_STEP_RETURN_MY_CODE) and not self._is_same_frame(stop_frame, frame):
can_skip = True
elif step_cmd == CMD_SMART_STEP_INTO and (
stop_frame is not None and
stop_frame is not frame and
stop_frame is not frame.f_back and
(frame.f_back is None or stop_frame is not frame.f_back.f_back)):
can_skip = True
elif step_cmd == CMD_STEP_INTO_MY_CODE:
if (
main_debugger.apply_files_filter(frame, frame.f_code.co_filename, True)
and (frame.f_back is None or main_debugger.apply_files_filter(frame.f_back, frame.f_back.f_code.co_filename, True))
):
can_skip = True
elif step_cmd == CMD_STEP_INTO_COROUTINE:
f = frame
while f is not None:
if self._is_same_frame(stop_frame, f):
break
f = f.f_back
else:
can_skip = True
if can_skip:
if plugin_manager is not None and (
main_debugger.has_plugin_line_breaks or main_debugger.has_plugin_exception_breaks):
can_skip = plugin_manager.can_skip(main_debugger, frame)
if can_skip and main_debugger.show_return_values and info.pydev_step_cmd in (CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE) and self._is_same_frame(stop_frame, frame.f_back):
# trace function for showing return values after step over
can_skip = False
# Let's check to see if we are in a function that has a breakpoint. If we don't have a breakpoint,
# we will return nothing for the next trace
# also, after we hit a breakpoint and go to some other debugging state, we have to force the set trace anyway,
# so, that's why the additional checks are there.
if function_breakpoint_on_call_event:
pass # Do nothing here (just keep on going as we can't skip it).
elif not breakpoints_for_file:
if can_skip:
if has_exception_breakpoints:
return self.trace_exception
else:
return None if is_call else NO_FTRACE
else:
# When cached, 0 means we don't have a breakpoint and 1 means we have.
if can_skip:
breakpoints_in_line_cache = frame_skips_cache.get(line_cache_key, -1)
if breakpoints_in_line_cache == 0:
return self.trace_dispatch
breakpoints_in_frame_cache = frame_skips_cache.get(frame_cache_key, -1)
if breakpoints_in_frame_cache != -1:
# Gotten from cache.
has_breakpoint_in_frame = breakpoints_in_frame_cache == 1
else:
has_breakpoint_in_frame = False
try:
func_lines = set()
for offset_and_lineno in dis.findlinestarts(frame.f_code):
func_lines.add(offset_and_lineno[1])
except:
# This is a fallback for implementations where we can't get the function
# lines -- i.e.: jython (in this case clients need to provide the function
# name to decide on the skip or we won't be able to skip the function
# completely).
# Checks the breakpoint to see if there is a context match in some function.
curr_func_name = frame.f_code.co_name
# global context is set with an empty name
if curr_func_name in ('?', '<module>', '<lambda>'):
curr_func_name = ''
for bp in breakpoints_for_file.values():
# will match either global or some function
if bp.func_name in ('None', curr_func_name):
has_breakpoint_in_frame = True
break
else:
for bp_line in breakpoints_for_file: # iterate on keys
if bp_line in func_lines:
has_breakpoint_in_frame = True
break
# Cache the value (1 or 0 or -1 for default because of cython).
if has_breakpoint_in_frame:
frame_skips_cache[frame_cache_key] = 1
else:
frame_skips_cache[frame_cache_key] = 0
if can_skip and not has_breakpoint_in_frame:
if has_exception_breakpoints:
return self.trace_exception
else:
return None if is_call else NO_FTRACE
# We may have hit a breakpoint or we are already in step mode. Either way, let's check what we should do in this frame
# if DEBUG: print('NOT skipped: %s %s %s %s' % (frame.f_lineno, frame.f_code.co_name, event, frame.__class__.__name__))
try:
flag = False
# return is not taken into account for breakpoint hit because we'd have a double-hit in this case
# (one for the line and the other for the return).
stop_info = {}
breakpoint = None
exist_result = False
stop = False
stop_reason = CMD_SET_BREAK
bp_type = None
if function_breakpoint_on_call_event:
breakpoint = function_breakpoint_on_call_event
stop = True
new_frame = frame
stop_reason = CMD_SET_FUNCTION_BREAK
elif is_line and info.pydev_state != STATE_SUSPEND and breakpoints_for_file is not None and line in breakpoints_for_file:
breakpoint = breakpoints_for_file[line]
new_frame = frame
stop = True
if step_cmd in (CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE) and (self._is_same_frame(stop_frame, frame) and is_line):
stop = False # we don't stop on breakpoint if we have to stop by step-over (it will be processed later)
elif plugin_manager is not None and main_debugger.has_plugin_line_breaks:
result = plugin_manager.get_breakpoint(main_debugger, self, frame, event, self._args)
if result:
exist_result = True
flag, breakpoint, new_frame, bp_type = result
if breakpoint:
# ok, hit breakpoint, now, we have to discover if it is a conditional breakpoint
# lets do the conditional stuff here
if breakpoint.expression is not None:
main_debugger.handle_breakpoint_expression(breakpoint, info, new_frame)
if breakpoint.is_logpoint and info.pydev_message is not None and len(info.pydev_message) > 0:
cmd = main_debugger.cmd_factory.make_io_message(info.pydev_message + os.linesep, '1')
main_debugger.writer.add_command(cmd)
if stop or exist_result:
eval_result = False
if breakpoint.has_condition:
eval_result = main_debugger.handle_breakpoint_condition(info, breakpoint, new_frame)
if breakpoint.has_condition:
if not eval_result:
stop = False
elif breakpoint.is_logpoint:
stop = False
if is_call and (frame.f_code.co_name in ('<lambda>', '<module>') or (line == 1 and frame.f_code.co_name.startswith('<cell'))):
# If we find a call for a module, it means that the module is being imported/executed for the
                    # first time. In this case we have to ignore this hit as it may later be duplicated by a
# line event at the same place (so, if there's a module with a print() in the first line
# the user will hit that line twice, which is not what we want).
#
# For lambda, as it only has a single statement, it's not interesting to trace
# its call and later its line event as they're usually in the same line.
#
# For ipython, <cell xxx> may be executed having each line compiled as a new
# module, so it's the same case as <module>.
return self.trace_dispatch
if main_debugger.show_return_values:
if is_return and (
(info.pydev_step_cmd in (CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE, CMD_SMART_STEP_INTO) and (self._is_same_frame(stop_frame, frame.f_back))) or
(info.pydev_step_cmd in (CMD_STEP_RETURN, CMD_STEP_RETURN_MY_CODE) and (self._is_same_frame(stop_frame, frame))) or
(info.pydev_step_cmd in (CMD_STEP_INTO, CMD_STEP_INTO_COROUTINE)) or
(
info.pydev_step_cmd == CMD_STEP_INTO_MY_CODE
and frame.f_back is not None
and not main_debugger.apply_files_filter(frame.f_back, frame.f_back.f_code.co_filename, True)
)
):
self._show_return_values(frame, arg)
elif main_debugger.remove_return_values_flag:
try:
self._remove_return_values(main_debugger, frame)
finally:
main_debugger.remove_return_values_flag = False
if stop:
self.set_suspend(
thread,
stop_reason,
suspend_other_threads=breakpoint and breakpoint.suspend_policy == "ALL",
)
elif flag and plugin_manager is not None:
result = plugin_manager.suspend(main_debugger, thread, frame, bp_type)
if result:
frame = result
# if thread has a suspend flag, we suspend with a busy wait
if info.pydev_state == STATE_SUSPEND:
self.do_wait_suspend(thread, frame, event, arg)
return self.trace_dispatch
else:
if not breakpoint and is_line:
# No stop from anyone and no breakpoint found in line (cache that).
frame_skips_cache[line_cache_key] = 0
except:
pydev_log.exception()
raise
            # Step handling: we stop when we hit the right frame.
try:
should_skip = 0
if pydevd_dont_trace.should_trace_hook is not None:
if self.should_skip == -1:
# I.e.: cache the result on self.should_skip (no need to evaluate the same frame multiple times).
                        # Note that on a code reload we won't re-evaluate this because, in practice,
                        # the frame.f_code which is handled by this frame is read-only, so, we can cache it safely.
if not pydevd_dont_trace.should_trace_hook(frame, abs_path_canonical_path_and_base[0]):
# -1, 0, 1 to be Cython-friendly
should_skip = self.should_skip = 1
else:
should_skip = self.should_skip = 0
else:
should_skip = self.should_skip
plugin_stop = False
if should_skip:
stop = False
elif step_cmd in (CMD_STEP_INTO, CMD_STEP_INTO_MY_CODE, CMD_STEP_INTO_COROUTINE):
force_check_project_scope = step_cmd == CMD_STEP_INTO_MY_CODE
if is_line:
if not info.pydev_use_scoped_step_frame:
if force_check_project_scope or main_debugger.is_files_filter_enabled:
stop = not main_debugger.apply_files_filter(frame, frame.f_code.co_filename, force_check_project_scope)
else:
stop = True
else:
# We can only stop inside the ipython call.
filename = frame.f_code.co_filename
if filename.endswith('.pyc'):
filename = filename[:-1]
if not filename.endswith(PYDEVD_IPYTHON_CONTEXT[0]):
f = frame.f_back
while f is not None:
if f.f_code.co_name == PYDEVD_IPYTHON_CONTEXT[1]:
f2 = f.f_back
if f2 is not None and f2.f_code.co_name == PYDEVD_IPYTHON_CONTEXT[2]:
pydev_log.debug('Stop inside ipython call')
stop = True
break
f = f.f_back
del f
if not stop:
# In scoped mode if step in didn't work in this context it won't work
# afterwards anyways.
return None if is_call else NO_FTRACE
elif is_return and frame.f_back is not None and not info.pydev_use_scoped_step_frame:
if main_debugger.get_file_type(frame.f_back) == main_debugger.PYDEV_FILE:
stop = False
else:
if force_check_project_scope or main_debugger.is_files_filter_enabled:
stop = not main_debugger.apply_files_filter(frame.f_back, frame.f_back.f_code.co_filename, force_check_project_scope)
if stop:
# Prevent stopping in a return to the same location we were initially
# (i.e.: double-stop at the same place due to some filtering).
if info.step_in_initial_location == (frame.f_back, frame.f_back.f_lineno):
stop = False
else:
stop = True
else:
stop = False
if stop:
if step_cmd == CMD_STEP_INTO_COROUTINE:
# i.e.: Check if we're stepping into the proper context.
f = frame
while f is not None:
if self._is_same_frame(stop_frame, f):
break
f = f.f_back
else:
stop = False
if plugin_manager is not None:
result = plugin_manager.cmd_step_into(main_debugger, frame, event, self._args, stop_info, stop)
if result:
stop, plugin_stop = result
elif step_cmd in (CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE):
# Note: when dealing with a step over my code it's the same as a step over (the
# difference is that when we return from a frame in one we go to regular step
# into and in the other we go to a step into my code).
stop = self._is_same_frame(stop_frame, frame) and is_line
# Note: don't stop on a return for step over, only for line events
# i.e.: don't stop in: (stop_frame is frame.f_back and is_return) as we'd stop twice in that line.
if plugin_manager is not None:
result = plugin_manager.cmd_step_over(main_debugger, frame, event, self._args, stop_info, stop)
if result:
stop, plugin_stop = result
elif step_cmd == CMD_SMART_STEP_INTO:
stop = False
back = frame.f_back
if self._is_same_frame(stop_frame, frame) and is_return:
# We're exiting the smart step into initial frame (so, we probably didn't find our target).
stop = True
elif self._is_same_frame(stop_frame, back) and is_line:
if info.pydev_smart_child_offset != -1:
# i.e.: in this case, we're not interested in the pause in the parent, rather
# we're interested in the pause in the child (when the parent is at the proper place).
stop = False
else:
pydev_smart_parent_offset = info.pydev_smart_parent_offset
pydev_smart_step_into_variants = info.pydev_smart_step_into_variants
if pydev_smart_parent_offset >= 0 and pydev_smart_step_into_variants:
# Preferred mode (when the smart step into variants are available
# and the offset is set).
stop = get_smart_step_into_variant_from_frame_offset(back.f_lasti, pydev_smart_step_into_variants) is \
get_smart_step_into_variant_from_frame_offset(pydev_smart_parent_offset, pydev_smart_step_into_variants)
else:
# Only the name/line is available, so, check that.
curr_func_name = frame.f_code.co_name
# global context is set with an empty name
if curr_func_name in ('?', '<module>') or curr_func_name is None:
curr_func_name = ''
if curr_func_name == info.pydev_func_name and stop_frame.f_lineno == info.pydev_next_line:
stop = True
if not stop:
# In smart step into, if we didn't hit it in this frame once, that'll
# not be the case next time either, so, disable tracing for this frame.
return None if is_call else NO_FTRACE
elif back is not None and self._is_same_frame(stop_frame, back.f_back) and is_line:
# Ok, we have to track 2 stops at this point, the parent and the child offset.
# This happens when handling a step into which targets a function inside a list comprehension
# or generator (in which case an intermediary frame is created due to an internal function call).
pydev_smart_parent_offset = info.pydev_smart_parent_offset
pydev_smart_child_offset = info.pydev_smart_child_offset
# print('matched back frame', pydev_smart_parent_offset, pydev_smart_child_offset)
# print('parent f_lasti', back.f_back.f_lasti)
# print('child f_lasti', back.f_lasti)
stop = False
                    if pydev_smart_parent_offset >= 0 and pydev_smart_child_offset >= 0:
pydev_smart_step_into_variants = info.pydev_smart_step_into_variants
if pydev_smart_parent_offset >= 0 and pydev_smart_step_into_variants:
# Note that we don't really check the parent offset, only the offset of
# the child (because this is a generator, the parent may have moved forward
# already -- and that's ok, so, we just check that the parent frame
# matches in this case).
smart_step_into_variant = get_smart_step_into_variant_from_frame_offset(pydev_smart_parent_offset, pydev_smart_step_into_variants)
# print('matched parent offset', pydev_smart_parent_offset)
# Ok, now, check the child variant
children_variants = smart_step_into_variant.children_variants
stop = children_variants and (
get_smart_step_into_variant_from_frame_offset(back.f_lasti, children_variants) is \
get_smart_step_into_variant_from_frame_offset(pydev_smart_child_offset, children_variants)
)
# print('stop at child', stop)
if not stop:
# In smart step into, if we didn't hit it in this frame once, that'll
# not be the case next time either, so, disable tracing for this frame.
return None if is_call else NO_FTRACE
elif step_cmd in (CMD_STEP_RETURN, CMD_STEP_RETURN_MY_CODE):
stop = is_return and self._is_same_frame(stop_frame, frame)
else:
stop = False
if stop and step_cmd != -1 and is_return and hasattr(frame, "f_back"):
f_code = getattr(frame.f_back, 'f_code', None)
if f_code is not None:
if main_debugger.get_file_type(frame.f_back) == main_debugger.PYDEV_FILE:
stop = False
if plugin_stop:
stopped_on_plugin = plugin_manager.stop(main_debugger, frame, event, self._args, stop_info, arg, step_cmd)
elif stop:
if is_line:
self.set_suspend(thread, step_cmd, original_step_cmd=info.pydev_original_step_cmd)
self.do_wait_suspend(thread, frame, event, arg)
elif is_return: # return event
back = frame.f_back
if back is not None:
# When we get to the pydevd run function, the debugging has actually finished for the main thread
# (note that it can still go on for other threads, but for this one, we just make it finish)
# So, just setting it to None should be OK
back_absolute_filename, _, base = get_abs_path_real_path_and_base_from_frame(back)
if (base, back.f_code.co_name) in (DEBUG_START, DEBUG_START_PY3K):
back = None
elif base == TRACE_PROPERTY:
                            # We don't want to trace the return event of pydevd_traceproperty (custom property for debugging):
                            # if we're in a return, we want it to appear to the user in the previous frame!
return None if is_call else NO_FTRACE
elif pydevd_dont_trace.should_trace_hook is not None:
if not pydevd_dont_trace.should_trace_hook(back, back_absolute_filename):
# In this case, we'll have to skip the previous one because it shouldn't be traced.
# Also, we have to reset the tracing, because if the parent's parent (or some
# other parent) has to be traced and it's not currently, we wouldn't stop where
# we should anymore (so, a step in/over/return may not stop anywhere if no parent is traced).
# Related test: _debugger_case17a.py
main_debugger.set_trace_for_frame_and_parents(back)
return None if is_call else NO_FTRACE
if back is not None:
# if we're in a return, we want it to appear to the user in the previous frame!
self.set_suspend(thread, step_cmd, original_step_cmd=info.pydev_original_step_cmd)
self.do_wait_suspend(thread, back, event, arg)
else:
# in jython we may not have a back frame
info.pydev_step_stop = None
info.pydev_original_step_cmd = -1
info.pydev_step_cmd = -1
info.pydev_state = STATE_RUN
except KeyboardInterrupt:
raise
except:
try:
pydev_log.exception()
info.pydev_original_step_cmd = -1
info.pydev_step_cmd = -1
info.pydev_step_stop = None
except:
return None if is_call else NO_FTRACE
# if we are quitting, let's stop the tracing
if main_debugger.quitting:
return None if is_call else NO_FTRACE
return self.trace_dispatch
finally:
info.is_tracing -= 1
# end trace_dispatch
| 63,187 | Python | 49.958064 | 216 | 0.507051 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_thread_lifecycle.py | from _pydevd_bundle import pydevd_utils
from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info
from _pydevd_bundle.pydevd_comm_constants import CMD_STEP_INTO, CMD_THREAD_SUSPEND
from _pydevd_bundle.pydevd_constants import PYTHON_SUSPEND, STATE_SUSPEND, get_thread_id, STATE_RUN
from _pydev_bundle._pydev_saved_modules import threading
from _pydev_bundle import pydev_log
def pydevd_find_thread_by_id(thread_id):
try:
threads = threading.enumerate()
for i in threads:
tid = get_thread_id(i)
if thread_id == tid or thread_id.endswith('|' + tid):
return i
# This can happen when a request comes for a thread which was previously removed.
pydev_log.info("Could not find thread %s.", thread_id)
pydev_log.info("Available: %s.", ([get_thread_id(t) for t in threads],))
except:
pydev_log.exception()
return None
def mark_thread_suspended(thread, stop_reason, original_step_cmd=-1):
info = set_additional_thread_info(thread)
info.suspend_type = PYTHON_SUSPEND
if original_step_cmd != -1:
stop_reason = original_step_cmd
thread.stop_reason = stop_reason
# Note: don't set the 'pydev_original_step_cmd' here if unset.
if info.pydev_step_cmd == -1:
# If the step command is not specified, set it to step into
# to make sure it'll break as soon as possible.
info.pydev_step_cmd = CMD_STEP_INTO
info.pydev_step_stop = None
# Mark as suspended as the last thing.
info.pydev_state = STATE_SUSPEND
return info
def internal_run_thread(thread, set_additional_thread_info):
info = set_additional_thread_info(thread)
info.pydev_original_step_cmd = -1
info.pydev_step_cmd = -1
info.pydev_step_stop = None
info.pydev_state = STATE_RUN
def resume_threads(thread_id, except_thread=None):
pydev_log.info('Resuming threads: %s (except thread: %s)', thread_id, except_thread)
threads = []
if thread_id == '*':
threads = pydevd_utils.get_non_pydevd_threads()
elif thread_id.startswith('__frame__:'):
pydev_log.critical("Can't make tasklet run: %s", thread_id)
else:
threads = [pydevd_find_thread_by_id(thread_id)]
for t in threads:
if t is None or t is except_thread:
pydev_log.info('Skipped resuming thread: %s', t)
continue
internal_run_thread(t, set_additional_thread_info=set_additional_thread_info)
def suspend_all_threads(py_db, except_thread):
'''
    Suspend all threads except the one passed as a parameter.
    :param except_thread: the thread which should not be suspended.
'''
pydev_log.info('Suspending all threads except: %s', except_thread)
all_threads = pydevd_utils.get_non_pydevd_threads()
for t in all_threads:
if getattr(t, 'pydev_do_not_trace', None):
pass # skip some other threads, i.e. ipython history saving thread from debug console
else:
if t is except_thread:
continue
info = mark_thread_suspended(t, CMD_THREAD_SUSPEND)
frame = info.get_topmost_frame(t)
# Reset the tracing as in this case as it could've set scopes to be untraced.
if frame is not None:
try:
py_db.set_trace_for_frame_and_parents(frame)
finally:
frame = None
| 3,408 | Python | 34.14433 | 99 | 0.639378 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_breakpoints.py | from _pydev_bundle import pydev_log
from _pydevd_bundle import pydevd_import_class
from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame
from _pydev_bundle._pydev_saved_modules import threading
class ExceptionBreakpoint(object):
def __init__(
self,
qname,
condition,
expression,
notify_on_handled_exceptions,
notify_on_unhandled_exceptions,
notify_on_user_unhandled_exceptions,
notify_on_first_raise_only,
ignore_libraries
):
exctype = get_exception_class(qname)
self.qname = qname
if exctype is not None:
self.name = exctype.__name__
else:
self.name = None
self.condition = condition
self.expression = expression
self.notify_on_unhandled_exceptions = notify_on_unhandled_exceptions
self.notify_on_handled_exceptions = notify_on_handled_exceptions
self.notify_on_first_raise_only = notify_on_first_raise_only
self.notify_on_user_unhandled_exceptions = notify_on_user_unhandled_exceptions
self.ignore_libraries = ignore_libraries
self.type = exctype
def __str__(self):
return self.qname
@property
def has_condition(self):
return self.condition is not None
def handle_hit_condition(self, frame):
return False
class LineBreakpoint(object):
def __init__(self, breakpoint_id, line, condition, func_name, expression, suspend_policy="NONE", hit_condition=None, is_logpoint=False):
self.breakpoint_id = breakpoint_id
self.line = line
self.condition = condition
self.func_name = func_name
self.expression = expression
self.suspend_policy = suspend_policy
self.hit_condition = hit_condition
self._hit_count = 0
self._hit_condition_lock = threading.Lock()
self.is_logpoint = is_logpoint
@property
def has_condition(self):
return bool(self.condition) or bool(self.hit_condition)
def handle_hit_condition(self, frame):
if not self.hit_condition:
return False
ret = False
with self._hit_condition_lock:
self._hit_count += 1
expr = self.hit_condition.replace('@HIT@', str(self._hit_count))
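            # e.g. (illustrative): a hit_condition of '@HIT@ % 5 == 0' becomes
            # '5 % 5 == 0' on the fifth hit, so the breakpoint only stops on
            # every 5th pass over this line.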
try:
ret = bool(eval(expr, frame.f_globals, frame.f_locals))
except Exception:
ret = False
return ret
class FunctionBreakpoint(object):
def __init__(self, func_name, condition, expression, suspend_policy="NONE", hit_condition=None, is_logpoint=False):
self.condition = condition
self.func_name = func_name
self.expression = expression
self.suspend_policy = suspend_policy
self.hit_condition = hit_condition
self._hit_count = 0
self._hit_condition_lock = threading.Lock()
self.is_logpoint = is_logpoint
@property
def has_condition(self):
return bool(self.condition) or bool(self.hit_condition)
def handle_hit_condition(self, frame):
if not self.hit_condition:
return False
ret = False
with self._hit_condition_lock:
self._hit_count += 1
expr = self.hit_condition.replace('@HIT@', str(self._hit_count))
try:
ret = bool(eval(expr, frame.f_globals, frame.f_locals))
except Exception:
ret = False
return ret
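# Matching note for get_exception_breakpoint below (illustrative): with
# breakpoints registered for both `Exception` and `ValueError`, a raised
# ValueError first tries an exact qualified-name match and otherwise picks the
# most derived registered base class -- here the `ValueError` entry rather than
# the `Exception` one.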
def get_exception_breakpoint(exctype, exceptions):
if not exctype:
exception_full_qname = None
else:
exception_full_qname = str(exctype.__module__) + '.' + exctype.__name__
exc = None
if exceptions is not None:
try:
return exceptions[exception_full_qname]
except KeyError:
for exception_breakpoint in exceptions.values():
if exception_breakpoint.type is not None and issubclass(exctype, exception_breakpoint.type):
if exc is None or issubclass(exception_breakpoint.type, exc.type):
exc = exception_breakpoint
return exc
def stop_on_unhandled_exception(py_db, thread, additional_info, arg):
exctype, value, tb = arg
break_on_uncaught_exceptions = py_db.break_on_uncaught_exceptions
if break_on_uncaught_exceptions:
exception_breakpoint = py_db.get_exception_breakpoint(exctype, break_on_uncaught_exceptions)
else:
exception_breakpoint = None
if not exception_breakpoint:
return
if tb is None: # sometimes it can be None, e.g. with GTK
return
if exctype is KeyboardInterrupt:
return
if exctype is SystemExit and py_db.ignore_system_exit_code(value):
return
frames = []
user_frame = None
while tb is not None:
if not py_db.exclude_exception_by_filter(exception_breakpoint, tb):
user_frame = tb.tb_frame
frames.append(tb.tb_frame)
tb = tb.tb_next
if user_frame is None:
return
frames_byid = dict([(id(frame), frame) for frame in frames])
add_exception_to_frame(user_frame, arg)
if exception_breakpoint.condition is not None:
eval_result = py_db.handle_breakpoint_condition(additional_info, exception_breakpoint, user_frame)
if not eval_result:
return
if exception_breakpoint.expression is not None:
py_db.handle_breakpoint_expression(exception_breakpoint, additional_info, user_frame)
try:
additional_info.pydev_message = exception_breakpoint.qname
except:
additional_info.pydev_message = exception_breakpoint.qname.encode('utf-8')
pydev_log.debug('Handling post-mortem stop on exception breakpoint %s' % (exception_breakpoint.qname,))
py_db.do_stop_on_unhandled_exception(thread, user_frame, frames_byid, arg)
def get_exception_class(kls):
try:
return eval(kls)
except:
return pydevd_import_class.import_name(kls)
| 6,010 | Python | 31.491892 | 140 | 0.635275 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_trace_dispatch.py | # Defines which version of the trace_dispatch we'll use.
# Should give warning only here if cython is not available but supported.
import os
from _pydevd_bundle.pydevd_constants import USE_CYTHON_FLAG, ENV_TRUE_LOWER_VALUES, \
ENV_FALSE_LOWER_VALUES
from _pydev_bundle import pydev_log
dirname = os.path.dirname(os.path.dirname(__file__))
USING_CYTHON = False
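# Illustrative (hedged): the choice below can be forced through the environment
# variable read into USE_CYTHON_FLAG, e.g.:
#
#     PYDEVD_USE_CYTHON=YES python myscript.py   # require the cython speedups
#     PYDEVD_USE_CYTHON=NO python myscript.py    # force the pure-python version
#
# Any other value (or leaving it unset) falls back to the auto-detection below.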
def delete_old_compiled_extensions():
import _pydevd_bundle
cython_extensions_dir = os.path.dirname(os.path.dirname(_pydevd_bundle.__file__))
_pydevd_bundle_ext_dir = os.path.dirname(_pydevd_bundle.__file__)
_pydevd_frame_eval_ext_dir = os.path.join(cython_extensions_dir, '_pydevd_frame_eval_ext')
try:
import shutil
for file in os.listdir(_pydevd_bundle_ext_dir):
if file.startswith("pydevd") and file.endswith(".so"):
os.remove(os.path.join(_pydevd_bundle_ext_dir, file))
for file in os.listdir(_pydevd_frame_eval_ext_dir):
if file.startswith("pydevd") and file.endswith(".so"):
os.remove(os.path.join(_pydevd_frame_eval_ext_dir, file))
build_dir = os.path.join(cython_extensions_dir, "build")
if os.path.exists(build_dir):
shutil.rmtree(os.path.join(cython_extensions_dir, "build"))
except OSError:
pydev_log.error_once("warning: failed to delete old cython speedups. Please delete all *.so files from the directories "
"\"%s\" and \"%s\"" % (_pydevd_bundle_ext_dir, _pydevd_frame_eval_ext_dir))
if USE_CYTHON_FLAG in ENV_TRUE_LOWER_VALUES:
# We must import the cython version if forcing cython
from _pydevd_bundle.pydevd_cython_wrapper import trace_dispatch, global_cache_skips, global_cache_frame_skips, fix_top_level_trace_and_get_trace_func
USING_CYTHON = True
elif USE_CYTHON_FLAG in ENV_FALSE_LOWER_VALUES:
# Use the regular version if not forcing cython
from _pydevd_bundle.pydevd_trace_dispatch_regular import trace_dispatch, global_cache_skips, global_cache_frame_skips, fix_top_level_trace_and_get_trace_func # @UnusedImport
else:
# Regular: use fallback if not found and give message to user
try:
from _pydevd_bundle.pydevd_cython_wrapper import trace_dispatch, global_cache_skips, global_cache_frame_skips, fix_top_level_trace_and_get_trace_func
# This version number is always available
from _pydevd_bundle.pydevd_additional_thread_info_regular import version as regular_version
# This version number from the already compiled cython extension
from _pydevd_bundle.pydevd_cython_wrapper import version as cython_version
if cython_version != regular_version:
# delete_old_compiled_extensions() -- would be ok in dev mode but we don't want to erase
# files from other python versions on release, so, just raise import error here.
raise ImportError('Cython version of speedups does not match.')
else:
USING_CYTHON = True
except ImportError:
from _pydevd_bundle.pydevd_trace_dispatch_regular import trace_dispatch, global_cache_skips, global_cache_frame_skips, fix_top_level_trace_and_get_trace_func # @UnusedImport
pydev_log.show_compile_cython_command_line()
| 3,265 | Python | 50.841269 | 182 | 0.69709 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_import_class.py | #Note: code gotten from _pydev_imports_tipper.
import sys
def _imp(name, log=None):
try:
return __import__(name)
except:
if '.' in name:
sub = name[0:name.rfind('.')]
if log is not None:
log.add_content('Unable to import', name, 'trying with', sub)
log.add_exception()
return _imp(sub, log)
else:
s = 'Unable to import module: %s - sys.path: %s' % (str(name), sys.path)
if log is not None:
log.add_content(s)
log.add_exception()
raise ImportError(s)
IS_IPY = False
if sys.platform == 'cli':
IS_IPY = True
_old_imp = _imp
def _imp(name, log=None):
        # We must add a reference in clr for .NET.
import clr #@UnresolvedImport
initial_name = name
while '.' in name:
try:
clr.AddReference(name)
break #If it worked, that's OK.
except:
name = name[0:name.rfind('.')]
else:
try:
clr.AddReference(name)
except:
pass #That's OK (not dot net module).
return _old_imp(initial_name, log)
def import_name(name, log=None):
mod = _imp(name, log)
components = name.split('.')
old_comp = None
for comp in components[1:]:
try:
#this happens in the following case:
#we have mx.DateTime.mxDateTime.mxDateTime.pyd
#but after importing it, mx.DateTime.mxDateTime shadows access to mxDateTime.pyd
mod = getattr(mod, comp)
except AttributeError:
if old_comp != comp:
raise
old_comp = comp
return mod
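# Illustrative usage (hedged): import_name('xml.dom.minidom') imports the
# top-level package and then walks the attributes, returning the minidom module
# itself rather than the top-level 'xml' package that a bare __import__ would
# return.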
| 1,838 | Python | 25.652174 | 92 | 0.490751 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_trace_api.py | def add_line_breakpoint(plugin, pydb, type, canonical_normalized_filename, breakpoint_id, line, condition, expression, func_name, hit_condition=None, is_logpoint=False, add_breakpoint_result=None, on_changed_breakpoint_state=None):
return None
def after_breakpoints_consolidated(py_db, canonical_normalized_filename, id_to_pybreakpoint, file_to_line_to_breakpoints):
return None
def add_exception_breakpoint(plugin, pydb, type, exception):
return False
def remove_exception_breakpoint(plugin, pydb, type, exception):
return False
def remove_all_exception_breakpoints(plugin, pydb):
return False
def get_breakpoints(plugin, pydb):
return None
def can_skip(plugin, pydb, frame):
return True
def has_exception_breaks(plugin):
return False
def has_line_breaks(plugin):
return False
def cmd_step_into(plugin, pydb, frame, event, args, stop_info, stop):
return False
def cmd_step_over(plugin, pydb, frame, event, args, stop_info, stop):
return False
def stop(plugin, pydb, frame, event, args, stop_info, arg, step_cmd):
return False
def get_breakpoint(plugin, pydb, pydb_frame, frame, event, args):
return None
def suspend(plugin, pydb, thread, frame):
return None
def exception_break(plugin, pydb, pydb_frame, frame, args, arg):
return None
def change_variable(plugin, frame, attr, expression):
return False
| 1,397 | Python | 21.190476 | 231 | 0.727989 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_comm.py | ''' pydevd - a debugging daemon
This is the daemon you launch for python remote debugging.
Protocol:
each command has a format:
id\tsequence-num\ttext
id: protocol command number
sequence-num: each request has a sequence number. Sequence numbers
originating at the debugger are odd, sequence numbers originating
at the daemon are even. Every response uses the same sequence number
as the request.
    payload: it is protocol dependent. When the response is a complex structure,
    it is returned as XML. Each attribute value is urlencoded, and the whole
    payload is then urlencoded again to prevent stray characters from corrupting
    the protocol/xml encodings
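    An illustrative (hypothetical) exchange: the client sends '501\t1\t1.0'
    (VERSION, odd sequence number, version payload) and the daemon replies
    '502\t1\t<version string>' (RETURN with the same sequence number).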
Commands:
NUMBER NAME FROM* ARGUMENTS RESPONSE NOTE
100 series: program execution
101 RUN JAVA - -
102 LIST_THREADS JAVA RETURN with XML listing of all threads
103 THREAD_CREATE PYDB - XML with thread information
104 THREAD_KILL JAVA id (or * to exit) kills the thread
                                              PYDB      id                          notifies JAVA that thread was killed
105 THREAD_SUSPEND JAVA XML of the stack, suspends the thread
reason for suspension
PYDB id notifies JAVA that thread was suspended
106 CMD_THREAD_RUN JAVA id resume the thread
PYDB id \t reason notifies JAVA that thread was resumed
107 STEP_INTO JAVA thread_id
108 STEP_OVER JAVA thread_id
109 STEP_RETURN JAVA thread_id
110 GET_VARIABLE JAVA thread_id \t frame_id \t GET_VARIABLE with XML of var content
FRAME|GLOBAL \t attributes*
111 SET_BREAK JAVA file/line of the breakpoint
112 REMOVE_BREAK JAVA file/line of the return
113 CMD_EVALUATE_EXPRESSION JAVA expression result of evaluating the expression
114 CMD_GET_FRAME JAVA request for frame contents
115 CMD_EXEC_EXPRESSION JAVA
116 CMD_WRITE_TO_CONSOLE PYDB
117 CMD_CHANGE_VARIABLE
118 CMD_RUN_TO_LINE
119 CMD_RELOAD_CODE
120 CMD_GET_COMPLETIONS JAVA
200 CMD_REDIRECT_OUTPUT JAVA streams to redirect as string -
'STDOUT' (redirect only STDOUT)
'STDERR' (redirect only STDERR)
'STDOUT STDERR' (redirect both streams)
500 series diagnostics/ok
501 VERSION either Version string (1.0) Currently just used at startup
502 RETURN either Depends on caller -
900 series: errors
901 ERROR either - This is reserved for unexpected errors.
* JAVA - remote debugger, the java end
* PYDB - pydevd, the python end
'''
import linecache
import os
from _pydev_bundle.pydev_imports import _queue
from _pydev_bundle._pydev_saved_modules import time
from _pydev_bundle._pydev_saved_modules import threading
from _pydev_bundle._pydev_saved_modules import socket as socket_module
from _pydevd_bundle.pydevd_constants import (DebugInfoHolder, IS_WINDOWS, IS_JYTHON,
IS_PY36_OR_GREATER, STATE_RUN, ASYNC_EVAL_TIMEOUT_SEC,
get_global_debugger, GetGlobalDebugger, set_global_debugger, # Keep for backward compatibility @UnusedImport
silence_warnings_decorator, filter_all_warnings)
from _pydev_bundle.pydev_override import overrides
import weakref
from _pydev_bundle._pydev_completer import extract_token_and_qualifier
from _pydevd_bundle._debug_adapter.pydevd_schema import VariablesResponseBody, \
SetVariableResponseBody, StepInTarget, StepInTargetsResponseBody
from _pydevd_bundle._debug_adapter import pydevd_base_schema, pydevd_schema
from _pydevd_bundle.pydevd_net_command import NetCommand
from _pydevd_bundle.pydevd_xml import ExceptionOnEvaluate
from _pydevd_bundle.pydevd_constants import ForkSafeLock, NULL
from _pydevd_bundle.pydevd_daemon_thread import PyDBDaemonThread
from _pydevd_bundle.pydevd_thread_lifecycle import pydevd_find_thread_by_id, resume_threads
from _pydevd_bundle.pydevd_dont_trace_files import PYDEV_FILE
import dis
from _pydevd_bundle.pydevd_frame_utils import create_frames_list_from_exception_cause
import pydevd_file_utils
import itertools
from urllib.parse import quote_plus, unquote_plus
import pydevconsole
from _pydevd_bundle import pydevd_vars, pydevd_io, pydevd_reload
from _pydevd_bundle import pydevd_bytecode_utils
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle import pydevd_vm_type
import sys
import traceback
from _pydevd_bundle.pydevd_utils import quote_smart as quote, compare_object_attrs_key, \
notify_about_gevent_if_needed, isinstance_checked, ScopeRequest, getattr_checked, Timer
from _pydev_bundle import pydev_log, fsnotify
from _pydev_bundle.pydev_log import exception as pydev_log_exception
from _pydev_bundle import _pydev_completer
from pydevd_tracing import get_exception_traceback_str
from _pydevd_bundle import pydevd_console
from _pydev_bundle.pydev_monkey import disable_trace_thread_modules, enable_trace_thread_modules
from io import StringIO
# CMD_XXX constants imported for backward compatibility
from _pydevd_bundle.pydevd_comm_constants import * # @UnusedWildImport
# Socket import aliases:
AF_INET, SOCK_STREAM, SHUT_WR, SOL_SOCKET, SO_REUSEADDR, IPPROTO_TCP, socket = (
socket_module.AF_INET,
socket_module.SOCK_STREAM,
socket_module.SHUT_WR,
socket_module.SOL_SOCKET,
socket_module.SO_REUSEADDR,
socket_module.IPPROTO_TCP,
socket_module.socket,
)
if IS_WINDOWS and not IS_JYTHON:
SO_EXCLUSIVEADDRUSE = socket_module.SO_EXCLUSIVEADDRUSE
class ReaderThread(PyDBDaemonThread):
''' reader thread reads and dispatches commands in an infinite loop '''
def __init__(self, sock, py_db, PyDevJsonCommandProcessor, process_net_command, terminate_on_socket_close=True):
assert sock is not None
PyDBDaemonThread.__init__(self, py_db)
self.__terminate_on_socket_close = terminate_on_socket_close
self.sock = sock
self._buffer = b''
self.name = "pydevd.Reader"
self.process_net_command = process_net_command
self.process_net_command_json = PyDevJsonCommandProcessor(self._from_json).process_net_command_json
def _from_json(self, json_msg, update_ids_from_dap=False):
return pydevd_base_schema.from_json(json_msg, update_ids_from_dap, on_dict_loaded=self._on_dict_loaded)
def _on_dict_loaded(self, dct):
for listener in self.py_db.dap_messages_listeners:
listener.after_receive(dct)
@overrides(PyDBDaemonThread.do_kill_pydev_thread)
def do_kill_pydev_thread(self):
PyDBDaemonThread.do_kill_pydev_thread(self)
# Note that we no longer shutdown the reader, just the writer. The idea is that we shutdown
# the writer to send that the communication has finished, then, the client will shutdown its
# own writer when it receives an empty read, at which point this reader will also shutdown.
# That way, we can *almost* guarantee that all messages have been properly sent -- it's not
# completely guaranteed because it's possible that the process exits before the whole
# message was sent as having this thread alive won't stop the process from exiting -- we
# have a timeout when exiting the process waiting for this thread to finish -- see:
# PyDB.dispose_and_kill_all_pydevd_threads()).
# try:
# self.sock.shutdown(SHUT_RD)
# except:
# pass
# try:
# self.sock.close()
# except:
# pass
def _read(self, size):
while True:
buffer_len = len(self._buffer)
if buffer_len == size:
ret = self._buffer
self._buffer = b''
return ret
if buffer_len > size:
ret = self._buffer[:size]
self._buffer = self._buffer[size:]
return ret
try:
r = self.sock.recv(max(size - buffer_len, 1024))
except OSError:
return b''
if not r:
return b''
self._buffer += r
def _read_line(self):
while True:
i = self._buffer.find(b'\n')
if i != -1:
i += 1 # Add the newline to the return
ret = self._buffer[:i]
self._buffer = self._buffer[i:]
return ret
else:
try:
r = self.sock.recv(1024)
except OSError:
return b''
if not r:
return b''
self._buffer += r
@overrides(PyDBDaemonThread._on_run)
def _on_run(self):
try:
content_len = -1
while True:
# i.e.: even if we received a kill, we should only exit the ReaderThread when the
# client itself closes the connection (although on kill received we stop actually
# processing anything read).
try:
notify_about_gevent_if_needed()
line = self._read_line()
if len(line) == 0:
pydev_log.debug('ReaderThread: empty contents received (len(line) == 0).')
self._terminate_on_socket_close()
return # Finished communication.
if self._kill_received:
continue
if line.startswith(b'Content-Length:'):
content_len = int(line.strip().split(b':', 1)[1])
continue
if content_len != -1:
# If we previously received a content length, read until a '\r\n'.
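                        # Illustrative DAP-style framing (hedged example payload):
                        #
                        #     Content-Length: <N>\r\n
                        #     \r\n
                        #     {"seq": 1, "type": "request", "command": "..."}  # N bytes of JSON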
if line == b'\r\n':
json_contents = self._read(content_len)
content_len = -1
if len(json_contents) == 0:
pydev_log.debug('ReaderThread: empty contents received (len(json_contents) == 0).')
self._terminate_on_socket_close()
return # Finished communication.
if self._kill_received:
continue
# We just received a json message, let's process it.
self.process_net_command_json(self.py_db, json_contents)
continue
else:
# No content len, regular line-based protocol message (remove trailing new-line).
if line.endswith(b'\n\n'):
line = line[:-2]
elif line.endswith(b'\n'):
line = line[:-1]
elif line.endswith(b'\r'):
line = line[:-1]
except:
if not self._kill_received:
pydev_log_exception()
self._terminate_on_socket_close()
return # Finished communication.
# Note: the java backend is always expected to pass utf-8 encoded strings. We now work with str
# internally and thus, we may need to convert to the actual encoding where needed (i.e.: filenames
# on python 2 may need to be converted to the filesystem encoding).
if hasattr(line, 'decode'):
line = line.decode('utf-8')
if DebugInfoHolder.DEBUG_RECORD_SOCKET_READS:
pydev_log.critical(u'debugger: received >>%s<<\n' % (line,))
args = line.split(u'\t', 2)
try:
cmd_id = int(args[0])
pydev_log.debug('Received command: %s %s\n' % (ID_TO_MEANING.get(str(cmd_id), '???'), line,))
self.process_command(cmd_id, int(args[1]), args[2])
except:
if sys is not None and pydev_log_exception is not None: # Could happen at interpreter shutdown
pydev_log_exception("Can't process net command: %s.", line)
except:
if not self._kill_received:
if sys is not None and pydev_log_exception is not None: # Could happen at interpreter shutdown
pydev_log_exception()
self._terminate_on_socket_close()
finally:
pydev_log.debug('ReaderThread: exit')
def _terminate_on_socket_close(self):
if self.__terminate_on_socket_close:
self.py_db.dispose_and_kill_all_pydevd_threads()
def process_command(self, cmd_id, seq, text):
self.process_net_command(self.py_db, cmd_id, seq, text)
class FSNotifyThread(PyDBDaemonThread):
def __init__(self, py_db, api, watch_dirs):
PyDBDaemonThread.__init__(self, py_db)
self.api = api
self.name = "pydevd.FSNotifyThread"
self.watcher = fsnotify.Watcher()
self.watch_dirs = watch_dirs
@overrides(PyDBDaemonThread._on_run)
def _on_run(self):
try:
pydev_log.info('Watching directories for code reload:\n---\n%s\n---' % ('\n'.join(sorted(self.watch_dirs))))
# i.e.: The first call to set_tracked_paths will do a full scan, so, do it in the thread
# too (after everything is configured).
self.watcher.set_tracked_paths(self.watch_dirs)
while not self._kill_received:
for change_enum, change_path in self.watcher.iter_changes():
# We're only interested in modified events
if change_enum == fsnotify.Change.modified:
pydev_log.info('Modified: %s', change_path)
self.api.request_reload_code(self.py_db, -1, None, change_path)
else:
pydev_log.info('Ignored (add or remove) change in: %s', change_path)
except:
pydev_log.exception('Error when waiting for filesystem changes in FSNotifyThread.')
@overrides(PyDBDaemonThread.do_kill_pydev_thread)
def do_kill_pydev_thread(self):
self.watcher.dispose()
PyDBDaemonThread.do_kill_pydev_thread(self)
class WriterThread(PyDBDaemonThread):
''' writer thread writes out the commands in an infinite loop '''
def __init__(self, sock, py_db, terminate_on_socket_close=True):
PyDBDaemonThread.__init__(self, py_db)
self.sock = sock
self.__terminate_on_socket_close = terminate_on_socket_close
self.name = "pydevd.Writer"
self._cmd_queue = _queue.Queue()
if pydevd_vm_type.get_vm_type() == 'python':
self.timeout = 0
else:
self.timeout = 0.1
def add_command(self, cmd):
''' cmd is NetCommand '''
if not self._kill_received: # we don't take new data after everybody die
self._cmd_queue.put(cmd, False)
@overrides(PyDBDaemonThread._on_run)
def _on_run(self):
''' just loop and write responses '''
try:
while True:
try:
try:
cmd = self._cmd_queue.get(True, 0.1)
except _queue.Empty:
if self._kill_received:
pydev_log.debug('WriterThread: kill_received (sock.shutdown(SHUT_WR))')
try:
self.sock.shutdown(SHUT_WR)
except:
pass
# Note: don't close the socket, just send the shutdown,
# then, when no data is received on the reader, it can close
# the socket.
# See: https://blog.netherlabs.nl/articles/2009/01/18/the-ultimate-so_linger-page-or-why-is-my-tcp-not-reliable
# try:
# self.sock.close()
# except:
# pass
return # break if queue is empty and _kill_received
else:
continue
except:
# pydev_log.info('Finishing debug communication...(1)')
# when liberating the thread here, we could have errors because we were shutting down
# but the thread was still not liberated
return
if cmd.as_dict is not None:
for listener in self.py_db.dap_messages_listeners:
listener.before_send(cmd.as_dict)
notify_about_gevent_if_needed()
cmd.send(self.sock)
if cmd.id == CMD_EXIT:
pydev_log.debug('WriterThread: CMD_EXIT received')
break
if time is None:
break # interpreter shutdown
time.sleep(self.timeout)
except Exception:
if self.__terminate_on_socket_close:
self.py_db.dispose_and_kill_all_pydevd_threads()
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 0:
pydev_log_exception()
finally:
pydev_log.debug('WriterThread: exit')
def empty(self):
return self._cmd_queue.empty()
@overrides(PyDBDaemonThread.do_kill_pydev_thread)
def do_kill_pydev_thread(self):
if not self._kill_received:
# Add command before setting the kill flag (otherwise the command may not be added).
exit_cmd = self.py_db.cmd_factory.make_exit_command(self.py_db)
self.add_command(exit_cmd)
PyDBDaemonThread.do_kill_pydev_thread(self)
def create_server_socket(host, port):
try:
server = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)
if IS_WINDOWS and not IS_JYTHON:
server.setsockopt(SOL_SOCKET, SO_EXCLUSIVEADDRUSE, 1)
else:
server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
server.bind((host, port))
server.settimeout(None)
except Exception:
server.close()
raise
return server
def start_server(port):
''' binds to a port, waits for the debugger to connect '''
s = create_server_socket(host='', port=port)
try:
s.listen(1)
new_socket, _addr = s.accept()
pydev_log.info("Connection accepted")
        # Closing the server socket is not strictly required, but we no longer need it.
s.close()
return new_socket
except:
pydev_log.exception("Could not bind to port: %s\n", port)
raise
def start_client(host, port):
''' connects to a host/port '''
pydev_log.info("Connecting to %s:%s", host, port)
s = socket(AF_INET, SOCK_STREAM)
# Set TCP keepalive on an open socket.
    # It activates after 1 second (TCP_KEEPIDLE) of idleness,
    # then sends a keepalive ping once every 3 seconds (TCP_KEEPINTVL),
    # and closes the connection after 5 failed pings (TCP_KEEPCNT), i.e., after roughly 15 seconds.
try:
s.setsockopt(SOL_SOCKET, socket_module.SO_KEEPALIVE, 1)
except (AttributeError, OSError):
pass # May not be available everywhere.
try:
s.setsockopt(socket_module.IPPROTO_TCP, socket_module.TCP_KEEPIDLE, 1)
except (AttributeError, OSError):
pass # May not be available everywhere.
try:
s.setsockopt(socket_module.IPPROTO_TCP, socket_module.TCP_KEEPINTVL, 3)
except (AttributeError, OSError):
pass # May not be available everywhere.
try:
s.setsockopt(socket_module.IPPROTO_TCP, socket_module.TCP_KEEPCNT, 5)
except (AttributeError, OSError):
pass # May not be available everywhere.
try:
# 10 seconds default timeout
timeout = int(os.environ.get('PYDEVD_CONNECT_TIMEOUT', 10))
s.settimeout(timeout)
s.connect((host, port))
s.settimeout(None) # no timeout after connected
pydev_log.info("Connected.")
return s
except:
pydev_log.exception("Could not connect to %s: %s", host, port)
raise
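# Illustrative usage (hedged; 5678 is just an example port):
#
#     sock = start_client('127.0.0.1', 5678)   # or: sock = start_server(5678)
#     # The connected socket is then handed to ReaderThread/WriterThread.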
INTERNAL_TERMINATE_THREAD = 1
INTERNAL_SUSPEND_THREAD = 2
class InternalThreadCommand(object):
''' internal commands are generated/executed by the debugger.
The reason for their existence is that some commands have to be executed
    on specific threads. These are the InternalThreadCommands that get
    posted to PyDB.
'''
def __init__(self, thread_id, method=None, *args, **kwargs):
self.thread_id = thread_id
self.method = method
self.args = args
self.kwargs = kwargs
def can_be_executed_by(self, thread_id):
'''By default, it must be in the same thread to be executed
'''
return self.thread_id == thread_id or self.thread_id.endswith('|' + thread_id)
def do_it(self, dbg):
try:
if self.method is not None:
self.method(dbg, *self.args, **self.kwargs)
else:
raise NotImplementedError("you have to override do_it")
finally:
self.args = None
self.kwargs = None
def __str__(self):
return 'InternalThreadCommands(%s, %s, %s)' % (self.method, self.args, self.kwargs)
__repr__ = __str__
class InternalThreadCommandForAnyThread(InternalThreadCommand):
def __init__(self, thread_id, method=None, *args, **kwargs):
assert thread_id == '*'
InternalThreadCommand.__init__(self, thread_id, method, *args, **kwargs)
self.executed = False
self.lock = ForkSafeLock()
def can_be_executed_by(self, thread_id):
return True # Can be executed by any thread.
def do_it(self, dbg):
with self.lock:
if self.executed:
return
self.executed = True
InternalThreadCommand.do_it(self, dbg)
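# Illustrative (hedged) use: instances are queued via py_db.post_internal_command
# and executed later by a matching thread; do_it() then calls method(dbg, *args):
#
#     py_db.post_internal_command(
#         InternalThreadCommandForAnyThread(
#             '*', internal_reload_code, seq, module_name, filename),
#         '*')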
def _send_io_message(py_db, s):
cmd = py_db.cmd_factory.make_io_message(s, 2)
if py_db.writer is not None:
py_db.writer.add_command(cmd)
def internal_reload_code(dbg, seq, module_name, filename):
try:
found_module_to_reload = False
if module_name is not None:
if module_name not in sys.modules:
if '.' in module_name:
new_module_name = module_name.split('.')[-1]
if new_module_name in sys.modules:
module_name = new_module_name
modules_to_reload = {}
module = sys.modules.get(module_name)
if module is not None:
modules_to_reload[id(module)] = (module, module_name)
if filename:
filename = pydevd_file_utils.normcase(filename)
for module_name, module in sys.modules.copy().items():
f = getattr_checked(module, '__file__')
if f is not None:
if f.endswith(('.pyc', '.pyo')):
f = f[:-1]
if pydevd_file_utils.normcase(f) == filename:
modules_to_reload[id(module)] = (module, module_name)
if not modules_to_reload:
if filename and module_name:
_send_io_message(dbg, 'code reload: Unable to find module %s to reload for path: %s\n' % (module_name, filename))
elif filename:
_send_io_message(dbg, 'code reload: Unable to find module to reload for path: %s\n' % (filename,))
elif module_name:
_send_io_message(dbg, 'code reload: Unable to find module to reload: %s\n' % (module_name,))
else:
# Too much info...
# _send_io_message(dbg, 'code reload: This usually means you are trying to reload the __main__ module (which cannot be reloaded).\n')
for module, module_name in modules_to_reload.values():
_send_io_message(dbg, 'code reload: Start reloading module: "' + module_name + '" ... \n')
found_module_to_reload = True
if pydevd_reload.xreload(module):
_send_io_message(dbg, 'code reload: reload finished\n')
else:
_send_io_message(dbg, 'code reload: reload finished without applying any change\n')
cmd = dbg.cmd_factory.make_reloaded_code_message(seq, found_module_to_reload)
dbg.writer.add_command(cmd)
except:
pydev_log.exception('Error reloading code')
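# Illustrative sketch (not part of pydevd): the __file__ matching above trims the
# trailing 'c'/'o' of compiled extensions so that a module loaded from 'mod.pyc'
# still matches a reload request for 'mod.py'. The helper name is hypothetical.
def _module_file_for_reload_sketch(f):
    if f.endswith(('.pyc', '.pyo')):
        f = f[:-1]  # 'mod.pyc' -> 'mod.py'
    return f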
class InternalGetThreadStack(InternalThreadCommand):
'''
    This command will either wait for a given thread to be paused to get its stack or will provide
    it anyway after a timeout (in which case the stack is still collected, but local variables
    won't be available and it won't be possible to interact with the frame, as it isn't actually
    stopped at a breakpoint).
'''
def __init__(self, seq, thread_id, py_db, set_additional_thread_info, fmt, timeout=.5, start_frame=0, levels=0):
InternalThreadCommand.__init__(self, thread_id)
self._py_db = weakref.ref(py_db)
self._timeout = time.time() + timeout
self.seq = seq
self._cmd = None
self._fmt = fmt
self._start_frame = start_frame
self._levels = levels
# Note: receives set_additional_thread_info to avoid a circular import
# in this module.
self._set_additional_thread_info = set_additional_thread_info
@overrides(InternalThreadCommand.can_be_executed_by)
def can_be_executed_by(self, _thread_id):
timed_out = time.time() >= self._timeout
py_db = self._py_db()
t = pydevd_find_thread_by_id(self.thread_id)
frame = None
if t and not getattr(t, 'pydev_do_not_trace', None):
additional_info = self._set_additional_thread_info(t)
frame = additional_info.get_topmost_frame(t)
try:
self._cmd = py_db.cmd_factory.make_get_thread_stack_message(
py_db, self.seq, self.thread_id, frame, self._fmt, must_be_suspended=not timed_out, start_frame=self._start_frame, levels=self._levels)
finally:
frame = None
t = None
return self._cmd is not None or timed_out
@overrides(InternalThreadCommand.do_it)
def do_it(self, dbg):
if self._cmd is not None:
dbg.writer.add_command(self._cmd)
self._cmd = None
def internal_step_in_thread(py_db, thread_id, cmd_id, set_additional_thread_info):
thread_to_step = pydevd_find_thread_by_id(thread_id)
if thread_to_step:
info = set_additional_thread_info(thread_to_step)
info.pydev_original_step_cmd = cmd_id
info.pydev_step_cmd = cmd_id
info.pydev_step_stop = None
info.pydev_state = STATE_RUN
if py_db.stepping_resumes_all_threads:
resume_threads('*', except_thread=thread_to_step)
def internal_smart_step_into(py_db, thread_id, offset, child_offset, set_additional_thread_info):
thread_to_step = pydevd_find_thread_by_id(thread_id)
if thread_to_step:
info = set_additional_thread_info(thread_to_step)
info.pydev_original_step_cmd = CMD_SMART_STEP_INTO
info.pydev_step_cmd = CMD_SMART_STEP_INTO
info.pydev_step_stop = None
info.pydev_smart_parent_offset = int(offset)
info.pydev_smart_child_offset = int(child_offset)
info.pydev_state = STATE_RUN
if py_db.stepping_resumes_all_threads:
resume_threads('*', except_thread=thread_to_step)
class InternalSetNextStatementThread(InternalThreadCommand):
def __init__(self, thread_id, cmd_id, line, func_name, seq=0):
'''
cmd_id may actually be one of:
CMD_RUN_TO_LINE
CMD_SET_NEXT_STATEMENT
CMD_SMART_STEP_INTO
'''
self.thread_id = thread_id
self.cmd_id = cmd_id
self.line = line
self.seq = seq
self.func_name = func_name
def do_it(self, dbg):
t = pydevd_find_thread_by_id(self.thread_id)
if t:
info = t.additional_info
info.pydev_original_step_cmd = self.cmd_id
info.pydev_step_cmd = self.cmd_id
info.pydev_step_stop = None
info.pydev_next_line = int(self.line)
info.pydev_func_name = self.func_name
info.pydev_message = str(self.seq)
info.pydev_smart_parent_offset = -1
info.pydev_smart_child_offset = -1
info.pydev_state = STATE_RUN
@silence_warnings_decorator
def internal_get_variable_json(py_db, request):
'''
:param VariablesRequest request:
'''
arguments = request.arguments # : :type arguments: VariablesArguments
variables_reference = arguments.variablesReference
scope = None
if isinstance_checked(variables_reference, ScopeRequest):
scope = variables_reference
variables_reference = variables_reference.variable_reference
fmt = arguments.format
if hasattr(fmt, 'to_dict'):
fmt = fmt.to_dict()
variables = []
try:
try:
variable = py_db.suspended_frames_manager.get_variable(variables_reference)
except KeyError:
pass
else:
for child_var in variable.get_children_variables(fmt=fmt, scope=scope):
variables.append(child_var.get_var_data(fmt=fmt))
except:
try:
            exc_type, exc_value, tb = sys.exc_info()
            err = ''.join(traceback.format_exception(exc_type, exc_value, tb))
variables = [{
'name': '<error>',
'value': err,
'type': '<error>',
'variablesReference': 0
}]
except:
err = '<Internal error - unable to get traceback when getting variables>'
pydev_log.exception(err)
variables = []
body = VariablesResponseBody(variables)
variables_response = pydevd_base_schema.build_response(request, kwargs={'body':body})
py_db.writer.add_command(NetCommand(CMD_RETURN, 0, variables_response, is_json=True))
class InternalGetVariable(InternalThreadCommand):
''' gets the value of a variable '''
def __init__(self, seq, thread_id, frame_id, scope, attrs):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.attributes = attrs
@silence_warnings_decorator
def do_it(self, dbg):
        ''' Resolves the requested compound variable and sends its children back as XML. '''
try:
xml = StringIO()
xml.write("<xml>")
type_name, val_dict = pydevd_vars.resolve_compound_variable_fields(
dbg, self.thread_id, self.frame_id, self.scope, self.attributes)
if val_dict is None:
val_dict = {}
# assume properly ordered if resolver returns 'OrderedDict'
# check type as string to support OrderedDict backport for older Python
keys = list(val_dict)
if not (type_name == "OrderedDict" or val_dict.__class__.__name__ == "OrderedDict" or IS_PY36_OR_GREATER):
keys = sorted(keys, key=compare_object_attrs_key)
timer = Timer()
for k in keys:
val = val_dict[k]
evaluate_full_value = pydevd_xml.should_evaluate_full_value(val)
xml.write(pydevd_xml.var_to_xml(val, k, evaluate_full_value=evaluate_full_value))
timer.report_if_compute_repr_attr_slow(self.attributes, k, type(val))
xml.write("</xml>")
cmd = dbg.cmd_factory.make_get_variable_message(self.sequence, xml.getvalue())
xml.close()
dbg.writer.add_command(cmd)
except Exception:
cmd = dbg.cmd_factory.make_error_message(
self.sequence, "Error resolving variables %s" % (get_exception_traceback_str(),))
dbg.writer.add_command(cmd)
class InternalGetArray(InternalThreadCommand):
def __init__(self, seq, roffset, coffset, rows, cols, format, thread_id, frame_id, scope, attrs):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.name = attrs.split("\t")[-1]
self.attrs = attrs
self.roffset = int(roffset)
self.coffset = int(coffset)
self.rows = int(rows)
self.cols = int(cols)
self.format = format
def do_it(self, dbg):
try:
frame = dbg.find_frame(self.thread_id, self.frame_id)
var = pydevd_vars.eval_in_context(self.name, frame.f_globals, frame.f_locals, py_db=dbg)
xml = pydevd_vars.table_like_struct_to_xml(var, self.name, self.roffset, self.coffset, self.rows, self.cols, self.format)
cmd = dbg.cmd_factory.make_get_array_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except:
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error resolving array: " + get_exception_traceback_str())
dbg.writer.add_command(cmd)
def internal_change_variable(dbg, seq, thread_id, frame_id, scope, attr, value):
''' Changes the value of a variable '''
try:
frame = dbg.find_frame(thread_id, frame_id)
if frame is not None:
result = pydevd_vars.change_attr_expression(frame, attr, value, dbg)
else:
result = None
xml = "<xml>"
xml += pydevd_xml.var_to_xml(result, "")
xml += "</xml>"
cmd = dbg.cmd_factory.make_variable_changed_message(seq, xml)
dbg.writer.add_command(cmd)
except Exception:
cmd = dbg.cmd_factory.make_error_message(seq, "Error changing variable attr:%s expression:%s traceback:%s" % (attr, value, get_exception_traceback_str()))
dbg.writer.add_command(cmd)
def internal_change_variable_json(py_db, request):
'''
    The pydevd_vars.change_attr_expression(thread_id, frame_id, attr, value, dbg) can only
    deal with changes at the frame level, so, changing the contents of something
    in a different scope is currently not supported.
:param SetVariableRequest request:
'''
# : :type arguments: SetVariableArguments
arguments = request.arguments
variables_reference = arguments.variablesReference
scope = None
if isinstance_checked(variables_reference, ScopeRequest):
scope = variables_reference
variables_reference = variables_reference.variable_reference
fmt = arguments.format
if hasattr(fmt, 'to_dict'):
fmt = fmt.to_dict()
try:
variable = py_db.suspended_frames_manager.get_variable(variables_reference)
except KeyError:
variable = None
if variable is None:
_write_variable_response(
py_db, request, value='', success=False, message='Unable to find variable container to change: %s.' % (variables_reference,))
return
child_var = variable.change_variable(arguments.name, arguments.value, py_db, fmt=fmt)
if child_var is None:
_write_variable_response(
py_db, request, value='', success=False, message='Unable to change: %s.' % (arguments.name,))
return
var_data = child_var.get_var_data(fmt=fmt)
body = SetVariableResponseBody(
value=var_data['value'],
type=var_data['type'],
variablesReference=var_data.get('variablesReference'),
namedVariables=var_data.get('namedVariables'),
indexedVariables=var_data.get('indexedVariables'),
)
variables_response = pydevd_base_schema.build_response(request, kwargs={'body':body})
py_db.writer.add_command(NetCommand(CMD_RETURN, 0, variables_response, is_json=True))
def _write_variable_response(py_db, request, value, success, message):
    body = SetVariableResponseBody(value)
    variables_response = pydevd_base_schema.build_response(
        request,
        kwargs={
            'body': body,
            'success': success,
            'message': message
        })
cmd = NetCommand(CMD_RETURN, 0, variables_response, is_json=True)
py_db.writer.add_command(cmd)
@silence_warnings_decorator
def internal_get_frame(dbg, seq, thread_id, frame_id):
    ''' Fetches the variables of the given frame and sends them back as XML. '''
try:
frame = dbg.find_frame(thread_id, frame_id)
if frame is not None:
hidden_ns = pydevconsole.get_ipython_hidden_vars()
xml = "<xml>"
xml += pydevd_xml.frame_vars_to_xml(frame.f_locals, hidden_ns)
del frame
xml += "</xml>"
cmd = dbg.cmd_factory.make_get_frame_message(seq, xml)
dbg.writer.add_command(cmd)
else:
# pydevd_vars.dump_frames(thread_id)
# don't print this error: frame not found: means that the client is not synchronized (but that's ok)
cmd = dbg.cmd_factory.make_error_message(seq, "Frame not found: %s from thread: %s" % (frame_id, thread_id))
dbg.writer.add_command(cmd)
except:
cmd = dbg.cmd_factory.make_error_message(seq, "Error resolving frame: %s from thread: %s" % (frame_id, thread_id))
dbg.writer.add_command(cmd)
def internal_get_smart_step_into_variants(dbg, seq, thread_id, frame_id, start_line, end_line, set_additional_thread_info):
try:
thread = pydevd_find_thread_by_id(thread_id)
frame = dbg.find_frame(thread_id, frame_id)
if thread is None or frame is None:
cmd = dbg.cmd_factory.make_error_message(seq, "Frame not found: %s from thread: %s" % (frame_id, thread_id))
dbg.writer.add_command(cmd)
return
if pydevd_bytecode_utils is None:
variants = []
else:
variants = pydevd_bytecode_utils.calculate_smart_step_into_variants(frame, int(start_line), int(end_line))
info = set_additional_thread_info(thread)
# Store the last request (may be used afterwards when stepping).
info.pydev_smart_step_into_variants = tuple(variants)
xml = "<xml>"
for variant in variants:
if variant.children_variants:
for child_variant in variant.children_variants:
# If there are child variants, the current one is just an intermediary, so,
# just create variants for the child (notifying properly about the parent too).
xml += '<variant name="%s" isVisited="%s" line="%s" offset="%s" childOffset="%s" callOrder="%s"/>' % (
quote(child_variant.name),
str(child_variant.is_visited).lower(),
child_variant.line,
variant.offset,
child_variant.offset,
child_variant.call_order,
)
else:
xml += '<variant name="%s" isVisited="%s" line="%s" offset="%s" childOffset="-1" callOrder="%s"/>' % (
quote(variant.name),
str(variant.is_visited).lower(),
variant.line,
variant.offset,
variant.call_order,
)
xml += "</xml>"
cmd = NetCommand(CMD_GET_SMART_STEP_INTO_VARIANTS, seq, xml)
dbg.writer.add_command(cmd)
except:
# Error is expected (if `dis` module cannot be used -- i.e.: Jython).
pydev_log.exception('Error calculating Smart Step Into Variants.')
cmd = dbg.cmd_factory.make_error_message(
seq, "Error getting smart step into variants for frame: %s from thread: %s"
% (frame_id, thread_id))
dbg.writer.add_command(cmd)
def internal_get_step_in_targets_json(dbg, seq, thread_id, frame_id, request, set_additional_thread_info):
try:
thread = pydevd_find_thread_by_id(thread_id)
frame = dbg.find_frame(thread_id, frame_id)
if thread is None or frame is None:
body = StepInTargetsResponseBody([])
variables_response = pydevd_base_schema.build_response(
request,
kwargs={
'body': body,
'success': False,
'message': 'Thread to get step in targets seems to have resumed already.'
})
cmd = NetCommand(CMD_RETURN, 0, variables_response, is_json=True)
dbg.writer.add_command(cmd)
return
start_line = 0
end_line = 99999999
if pydevd_bytecode_utils is None:
variants = []
else:
variants = pydevd_bytecode_utils.calculate_smart_step_into_variants(frame, start_line, end_line)
info = set_additional_thread_info(thread)
targets = []
counter = itertools.count(0)
target_id_to_variant = {}
for variant in variants:
if not variant.is_visited:
if variant.children_variants:
for child_variant in variant.children_variants:
target_id = next(counter)
if child_variant.call_order > 1:
targets.append(StepInTarget(id=target_id, label='%s (call %s)' % (child_variant.name, child_variant.call_order),))
else:
targets.append(StepInTarget(id=target_id, label=child_variant.name))
target_id_to_variant[target_id] = child_variant
if len(targets) >= 15: # Show at most 15 targets.
break
else:
target_id = next(counter)
if variant.call_order > 1:
targets.append(StepInTarget(id=target_id, label='%s (call %s)' % (variant.name, variant.call_order),))
else:
targets.append(StepInTarget(id=target_id, label=variant.name))
target_id_to_variant[target_id] = variant
if len(targets) >= 15: # Show at most 15 targets.
break
# Store the last request (may be used afterwards when stepping).
info.pydev_smart_step_into_variants = tuple(variants)
info.target_id_to_smart_step_into_variant = target_id_to_variant
body = StepInTargetsResponseBody(targets=targets)
response = pydevd_base_schema.build_response(request, kwargs={'body': body})
cmd = NetCommand(CMD_RETURN, 0, response, is_json=True)
dbg.writer.add_command(cmd)
except Exception as e:
# Error is expected (if `dis` module cannot be used -- i.e.: Jython).
pydev_log.exception('Error calculating Smart Step Into Variants.')
body = StepInTargetsResponseBody([])
variables_response = pydevd_base_schema.build_response(
request,
kwargs={
'body': body,
'success': False,
'message': str(e)
})
cmd = NetCommand(CMD_RETURN, 0, variables_response, is_json=True)
dbg.writer.add_command(cmd)
def internal_get_next_statement_targets(dbg, seq, thread_id, frame_id):
''' gets the valid line numbers for use with set next statement '''
try:
frame = dbg.find_frame(thread_id, frame_id)
if frame is not None:
code = frame.f_code
xml = "<xml>"
try:
linestarts = dis.findlinestarts(code)
except:
                # i.e.: Jython doesn't provide co_lnotab, so we can only offer the current line.
xml += "<line>%d</line>" % (frame.f_lineno,)
else:
for _, line in linestarts:
xml += "<line>%d</line>" % (line,)
del frame
xml += "</xml>"
cmd = dbg.cmd_factory.make_get_next_statement_targets_message(seq, xml)
dbg.writer.add_command(cmd)
else:
cmd = dbg.cmd_factory.make_error_message(seq, "Frame not found: %s from thread: %s" % (frame_id, thread_id))
dbg.writer.add_command(cmd)
except:
cmd = dbg.cmd_factory.make_error_message(seq, "Error resolving frame: %s from thread: %s" % (frame_id, thread_id))
dbg.writer.add_command(cmd)
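# Illustrative sketch (not part of pydevd): dis.findlinestarts() yields
# (bytecode_offset, line_number) pairs for a code object, which is what the
# <line> entries above are built from. The helper name is hypothetical, e.g.:
# _valid_line_targets_sketch(compile('x = 1\ny = 2', '<sketch>', 'exec')) -> [1, 2]
def _valid_line_targets_sketch(code_obj):
    import dis
    return sorted({line for _offset, line in dis.findlinestarts(code_obj)})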
def _evaluate_response(py_db, request, result, error_message=''):
is_error = isinstance(result, ExceptionOnEvaluate)
if is_error:
result = result.result
if not error_message:
body = pydevd_schema.EvaluateResponseBody(result=result, variablesReference=0)
variables_response = pydevd_base_schema.build_response(request, kwargs={'body':body})
py_db.writer.add_command(NetCommand(CMD_RETURN, 0, variables_response, is_json=True))
else:
body = pydevd_schema.EvaluateResponseBody(result=result, variablesReference=0)
variables_response = pydevd_base_schema.build_response(request, kwargs={
'body':body, 'success':False, 'message': error_message})
py_db.writer.add_command(NetCommand(CMD_RETURN, 0, variables_response, is_json=True))
_global_frame = None
def internal_evaluate_expression_json(py_db, request, thread_id):
'''
:param EvaluateRequest request:
'''
global _global_frame
# : :type arguments: EvaluateArguments
arguments = request.arguments
expression = arguments.expression
frame_id = arguments.frameId
context = arguments.context
fmt = arguments.format
if hasattr(fmt, 'to_dict'):
fmt = fmt.to_dict()
ctx = NULL
if context == 'repl':
if not py_db.is_output_redirected:
ctx = pydevd_io.redirect_stream_to_pydb_io_messages_context()
else:
# If we're not in a repl (watch, hover, ...) don't show warnings.
ctx = filter_all_warnings()
with ctx:
try_exec = False
if frame_id is None:
if _global_frame is None:
# Lazily create a frame to be used for evaluation with no frame id.
def __create_frame():
yield sys._getframe()
_global_frame = next(__create_frame())
frame = _global_frame
try_exec = True # Always exec in this case
eval_result = None
else:
frame = py_db.find_frame(thread_id, frame_id)
eval_result = pydevd_vars.evaluate_expression(py_db, frame, expression, is_exec=False)
is_error = isinstance_checked(eval_result, ExceptionOnEvaluate)
if is_error:
if context == 'hover': # In a hover it doesn't make sense to do an exec.
_evaluate_response(py_db, request, result='', error_message='Exception occurred during evaluation.')
return
elif context == 'watch':
# If it's a watch, don't show it as an exception object, rather, format
# it and show it as a string (with success=False).
msg = '%s: %s' % (
eval_result.result.__class__.__name__, eval_result.result,)
_evaluate_response(py_db, request, result=msg, error_message=msg)
return
else:
# We only try the exec if the failure we had was due to not being able
# to evaluate the expression.
try:
pydevd_vars.compile_as_eval(expression)
except Exception:
try_exec = context == 'repl'
else:
try_exec = False
if context == 'repl':
# In the repl we should show the exception to the user.
_evaluate_response_return_exception(py_db, request, eval_result.etype, eval_result.result, eval_result.tb)
return
if try_exec:
try:
pydevd_vars.evaluate_expression(py_db, frame, expression, is_exec=True)
except (Exception, KeyboardInterrupt):
_evaluate_response_return_exception(py_db, request, *sys.exc_info())
return
# No result on exec.
_evaluate_response(py_db, request, result='')
return
# Ok, we have the result (could be an error), let's put it into the saved variables.
frame_tracker = py_db.suspended_frames_manager.get_frame_tracker(thread_id)
if frame_tracker is None:
# This is not really expected.
_evaluate_response(py_db, request, result='', error_message='Thread id: %s is not current thread id.' % (thread_id,))
return
safe_repr_custom_attrs = {}
if context == 'clipboard':
safe_repr_custom_attrs = dict(
maxstring_outer=2 ** 64,
maxstring_inner=2 ** 64,
maxother_outer=2 ** 64,
maxother_inner=2 ** 64,
)
if context == 'repl' and eval_result is None:
# We don't want "None" to appear when typing in the repl.
body = pydevd_schema.EvaluateResponseBody(
result=None,
variablesReference=0,
)
else:
variable = frame_tracker.obtain_as_variable(expression, eval_result, frame=frame)
var_data = variable.get_var_data(fmt=fmt, **safe_repr_custom_attrs)
body = pydevd_schema.EvaluateResponseBody(
result=var_data['value'],
variablesReference=var_data.get('variablesReference', 0),
type=var_data.get('type'),
presentationHint=var_data.get('presentationHint'),
namedVariables=var_data.get('namedVariables'),
indexedVariables=var_data.get('indexedVariables'),
)
variables_response = pydevd_base_schema.build_response(request, kwargs={'body':body})
py_db.writer.add_command(NetCommand(CMD_RETURN, 0, variables_response, is_json=True))
def _evaluate_response_return_exception(py_db, request, exc_type, exc, initial_tb):
try:
tb = initial_tb
# Show the traceback without pydevd frames.
temp_tb = tb
while temp_tb:
if py_db.get_file_type(temp_tb.tb_frame) == PYDEV_FILE:
tb = temp_tb.tb_next
temp_tb = temp_tb.tb_next
if tb is None:
tb = initial_tb
err = ''.join(traceback.format_exception(exc_type, exc, tb))
# Make sure we don't keep references to them.
exc = None
exc_type = None
tb = None
temp_tb = None
initial_tb = None
except:
err = '<Internal error - unable to get traceback when evaluating expression>'
pydev_log.exception(err)
    # Currently there is an issue in VSC: when success=false is returned for an
    # eval request in the repl context, the error response is not shown in the
    # debug console. So, return the error message in the result as well.
_evaluate_response(py_db, request, result=err, error_message=err)
@silence_warnings_decorator
def internal_evaluate_expression(dbg, seq, thread_id, frame_id, expression, is_exec, trim_if_too_big, attr_to_set_result):
    ''' Evaluates (or execs) an expression in the given frame and sends the result back. '''
try:
frame = dbg.find_frame(thread_id, frame_id)
if frame is not None:
result = pydevd_vars.evaluate_expression(dbg, frame, expression, is_exec)
if attr_to_set_result != "":
pydevd_vars.change_attr_expression(frame, attr_to_set_result, expression, dbg, result)
else:
result = None
xml = "<xml>"
xml += pydevd_xml.var_to_xml(result, expression, trim_if_too_big)
xml += "</xml>"
cmd = dbg.cmd_factory.make_evaluate_expression_message(seq, xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(seq, "Error evaluating expression " + exc)
dbg.writer.add_command(cmd)
def _set_expression_response(py_db, request, result, error_message):
body = pydevd_schema.SetExpressionResponseBody(result='', variablesReference=0)
variables_response = pydevd_base_schema.build_response(request, kwargs={
'body':body, 'success':False, 'message': error_message})
py_db.writer.add_command(NetCommand(CMD_RETURN, 0, variables_response, is_json=True))
def internal_set_expression_json(py_db, request, thread_id):
# : :type arguments: SetExpressionArguments
arguments = request.arguments
expression = arguments.expression
frame_id = arguments.frameId
value = arguments.value
fmt = arguments.format
if hasattr(fmt, 'to_dict'):
fmt = fmt.to_dict()
frame = py_db.find_frame(thread_id, frame_id)
exec_code = '%s = (%s)' % (expression, value)
result = pydevd_vars.evaluate_expression(py_db, frame, exec_code, is_exec=True)
is_error = isinstance(result, ExceptionOnEvaluate)
if is_error:
_set_expression_response(py_db, request, result, error_message='Error executing: %s' % (exec_code,))
return
# Ok, we have the result (could be an error), let's put it into the saved variables.
frame_tracker = py_db.suspended_frames_manager.get_frame_tracker(thread_id)
if frame_tracker is None:
# This is not really expected.
_set_expression_response(py_db, request, result, error_message='Thread id: %s is not current thread id.' % (thread_id,))
return
# Now that the exec is done, get the actual value changed to return.
result = pydevd_vars.evaluate_expression(py_db, frame, expression, is_exec=False)
variable = frame_tracker.obtain_as_variable(expression, result, frame=frame)
var_data = variable.get_var_data(fmt=fmt)
body = pydevd_schema.SetExpressionResponseBody(
value=var_data['value'],
variablesReference=var_data.get('variablesReference', 0),
type=var_data.get('type'),
presentationHint=var_data.get('presentationHint'),
namedVariables=var_data.get('namedVariables'),
indexedVariables=var_data.get('indexedVariables'),
)
variables_response = pydevd_base_schema.build_response(request, kwargs={'body':body})
py_db.writer.add_command(NetCommand(CMD_RETURN, 0, variables_response, is_json=True))
def internal_get_completions(dbg, seq, thread_id, frame_id, act_tok, line=-1, column=-1):
'''
Note that if the column is >= 0, the act_tok is considered text and the actual
activation token/qualifier is computed in this command.
'''
try:
remove_path = None
try:
qualifier = u''
if column >= 0:
token_and_qualifier = extract_token_and_qualifier(act_tok, line, column)
act_tok = token_and_qualifier[0]
if act_tok:
act_tok += u'.'
qualifier = token_and_qualifier[1]
frame = dbg.find_frame(thread_id, frame_id)
if frame is not None:
completions = _pydev_completer.generate_completions(frame, act_tok)
# Note that qualifier and start are only actually valid for the
# Debug Adapter Protocol (for the line-based protocol, the IDE
# is required to filter the completions returned).
cmd = dbg.cmd_factory.make_get_completions_message(
seq, completions, qualifier, start=column - len(qualifier))
dbg.writer.add_command(cmd)
else:
cmd = dbg.cmd_factory.make_error_message(seq, "internal_get_completions: Frame not found: %s from thread: %s" % (frame_id, thread_id))
dbg.writer.add_command(cmd)
finally:
if remove_path is not None:
sys.path.remove(remove_path)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(seq, "Error evaluating expression " + exc)
dbg.writer.add_command(cmd)
def internal_get_description(dbg, seq, thread_id, frame_id, expression):
''' Fetch the variable description stub from the debug console
'''
try:
frame = dbg.find_frame(thread_id, frame_id)
description = pydevd_console.get_description(frame, thread_id, frame_id, expression)
description = pydevd_xml.make_valid_xml_value(quote(description, '/>_= \t'))
description_xml = '<xml><var name="" type="" value="%s"/></xml>' % description
cmd = dbg.cmd_factory.make_get_description_message(seq, description_xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(seq, "Error in fetching description" + exc)
dbg.writer.add_command(cmd)
def build_exception_info_response(dbg, thread_id, request_seq, set_additional_thread_info, iter_visible_frames_info, max_frames):
'''
:return ExceptionInfoResponse
'''
thread = pydevd_find_thread_by_id(thread_id)
additional_info = set_additional_thread_info(thread)
topmost_frame = additional_info.get_topmost_frame(thread)
current_paused_frame_name = ''
source_path = '' # This is an extra bit of data used by Visual Studio
stack_str_lst = []
name = None
description = None
if topmost_frame is not None:
try:
try:
frames_list = dbg.suspended_frames_manager.get_frames_list(thread_id)
memo = set()
while frames_list is not None and len(frames_list):
frames = []
frame = None
if not name:
exc_type = frames_list.exc_type
if exc_type is not None:
try:
name = exc_type.__qualname__
except:
try:
name = exc_type.__name__
except:
try:
name = str(exc_type)
except:
pass
if not description:
exc_desc = frames_list.exc_desc
if exc_desc is not None:
try:
description = str(exc_desc)
except:
pass
for frame_id, frame, method_name, original_filename, filename_in_utf8, lineno, _applied_mapping, show_as_current_frame in \
iter_visible_frames_info(dbg, frames_list):
line_text = linecache.getline(original_filename, lineno)
# Never filter out plugin frames!
if not getattr(frame, 'IS_PLUGIN_FRAME', False):
if dbg.is_files_filter_enabled and dbg.apply_files_filter(frame, original_filename, False):
continue
if show_as_current_frame:
current_paused_frame_name = method_name
method_name += ' (Current frame)'
frames.append((filename_in_utf8, lineno, method_name, line_text))
if not source_path and frames:
source_path = frames[0][0]
stack_str = ''.join(traceback.format_list(frames[-max_frames:]))
stack_str += frames_list.exc_context_msg
stack_str_lst.append(stack_str)
frames_list = create_frames_list_from_exception_cause(
frames_list.trace_obj, None, frames_list.exc_type, frames_list.exc_desc, memo)
if frames_list is None or not frames_list:
break
except:
pydev_log.exception('Error on build_exception_info_response.')
finally:
topmost_frame = None
full_stack_str = ''.join(reversed(stack_str_lst))
if not name:
name = 'exception: type unknown'
if not description:
description = 'exception: no description'
if current_paused_frame_name:
name += ' (note: full exception trace is shown but execution is paused at: %s)' % (current_paused_frame_name,)
if thread.stop_reason == CMD_STEP_CAUGHT_EXCEPTION:
break_mode = pydevd_schema.ExceptionBreakMode.ALWAYS
else:
break_mode = pydevd_schema.ExceptionBreakMode.UNHANDLED
response = pydevd_schema.ExceptionInfoResponse(
request_seq=request_seq,
success=True,
command='exceptionInfo',
body=pydevd_schema.ExceptionInfoResponseBody(
exceptionId=name,
description=description,
breakMode=break_mode,
details=pydevd_schema.ExceptionDetails(
message=description,
typeName=name,
stackTrace=full_stack_str,
source=source_path,
# Note: ExceptionDetails actually accepts an 'innerException', but
# when passing it, VSCode is not showing the stack trace at all.
)
)
)
return response
def internal_get_exception_details_json(dbg, request, thread_id, max_frames, set_additional_thread_info=None, iter_visible_frames_info=None):
''' Fetch exception details
'''
try:
response = build_exception_info_response(dbg, thread_id, request.seq, set_additional_thread_info, iter_visible_frames_info, max_frames)
except:
exc = get_exception_traceback_str()
response = pydevd_base_schema.build_response(request, kwargs={
'success': False,
'message': exc,
'body':{}
})
dbg.writer.add_command(NetCommand(CMD_RETURN, 0, response, is_json=True))
class InternalGetBreakpointException(InternalThreadCommand):
''' Send details of exception raised while evaluating conditional breakpoint '''
def __init__(self, thread_id, exc_type, stacktrace):
self.sequence = 0
self.thread_id = thread_id
self.stacktrace = stacktrace
self.exc_type = exc_type
def do_it(self, dbg):
try:
callstack = "<xml>"
makeValid = pydevd_xml.make_valid_xml_value
for filename, line, methodname, methodobj in self.stacktrace:
if not filesystem_encoding_is_utf8 and hasattr(filename, "decode"):
# filename is a byte string encoded using the file system encoding
# convert it to utf8
filename = filename.decode(file_system_encoding).encode("utf-8")
callstack += '<frame thread_id = "%s" file="%s" line="%s" name="%s" obj="%s" />' \
% (self.thread_id, makeValid(filename), line, makeValid(methodname), makeValid(methodobj))
callstack += "</xml>"
cmd = dbg.cmd_factory.make_send_breakpoint_exception_message(self.sequence, self.exc_type + "\t" + callstack)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Exception: " + exc)
dbg.writer.add_command(cmd)
class InternalSendCurrExceptionTrace(InternalThreadCommand):
''' Send details of the exception that was caught and where we've broken in.
'''
def __init__(self, thread_id, arg, curr_frame_id):
'''
:param arg: exception type, description, traceback object
'''
self.sequence = 0
self.thread_id = thread_id
self.curr_frame_id = curr_frame_id
self.arg = arg
def do_it(self, dbg):
try:
cmd = dbg.cmd_factory.make_send_curr_exception_trace_message(dbg, self.sequence, self.thread_id, self.curr_frame_id, *self.arg)
del self.arg
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace: " + exc)
dbg.writer.add_command(cmd)
class InternalSendCurrExceptionTraceProceeded(InternalThreadCommand):
    ''' Notify the client that handling of the current exception trace has proceeded.
    '''
def __init__(self, thread_id):
self.sequence = 0
self.thread_id = thread_id
def do_it(self, dbg):
try:
cmd = dbg.cmd_factory.make_send_curr_exception_trace_proceeded_message(self.sequence, self.thread_id)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error Sending Current Exception Trace Proceeded: " + exc)
dbg.writer.add_command(cmd)
class InternalEvaluateConsoleExpression(InternalThreadCommand):
''' Execute the given command in the debug console '''
def __init__(self, seq, thread_id, frame_id, line, buffer_output=True):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.line = line
self.buffer_output = buffer_output
def do_it(self, dbg):
''' Create an XML for console output, error and more (true/false)
<xml>
<output message=output_message></output>
<error message=error_message></error>
<more>true/false</more>
</xml>
'''
try:
frame = dbg.find_frame(self.thread_id, self.frame_id)
if frame is not None:
console_message = pydevd_console.execute_console_command(
frame, self.thread_id, self.frame_id, self.line, self.buffer_output)
cmd = dbg.cmd_factory.make_send_console_message(self.sequence, console_message.to_xml())
else:
from _pydevd_bundle.pydevd_console import ConsoleMessage
console_message = ConsoleMessage()
console_message.add_console_message(
pydevd_console.CONSOLE_ERROR,
"Select the valid frame in the debug view (thread: %s, frame: %s invalid)" % (self.thread_id, self.frame_id),
)
cmd = dbg.cmd_factory.make_error_message(self.sequence, console_message.to_xml())
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating expression " + exc)
dbg.writer.add_command(cmd)
class InternalRunCustomOperation(InternalThreadCommand):
''' Run a custom command on an expression
'''
def __init__(self, seq, thread_id, frame_id, scope, attrs, style, encoded_code_or_file, fnname):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.scope = scope
self.attrs = attrs
self.style = style
self.code_or_file = unquote_plus(encoded_code_or_file)
self.fnname = fnname
def do_it(self, dbg):
try:
res = pydevd_vars.custom_operation(dbg, self.thread_id, self.frame_id, self.scope, self.attrs,
self.style, self.code_or_file, self.fnname)
resEncoded = quote_plus(res)
cmd = dbg.cmd_factory.make_custom_operation_message(self.sequence, resEncoded)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in running custom operation" + exc)
dbg.writer.add_command(cmd)
class InternalConsoleGetCompletions(InternalThreadCommand):
''' Fetch the completions in the debug console
'''
def __init__(self, seq, thread_id, frame_id, act_tok):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.act_tok = act_tok
def do_it(self, dbg):
''' Get completions and write back to the client
'''
try:
frame = dbg.find_frame(self.thread_id, self.frame_id)
completions_xml = pydevd_console.get_completions(frame, self.act_tok)
cmd = dbg.cmd_factory.make_send_console_message(self.sequence, completions_xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error in fetching completions" + exc)
dbg.writer.add_command(cmd)
class InternalConsoleExec(InternalThreadCommand):
    ''' Executes an expression in the console context. '''
def __init__(self, seq, thread_id, frame_id, expression):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.expression = expression
def do_it(self, dbg):
        ''' Executes the console expression and sends the result back as XML. '''
try:
try:
# don't trace new threads created by console command
disable_trace_thread_modules()
result = pydevconsole.console_exec(self.thread_id, self.frame_id, self.expression, dbg)
xml = "<xml>"
xml += pydevd_xml.var_to_xml(result, "")
xml += "</xml>"
cmd = dbg.cmd_factory.make_evaluate_expression_message(self.sequence, xml)
dbg.writer.add_command(cmd)
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating console expression " + exc)
dbg.writer.add_command(cmd)
finally:
enable_trace_thread_modules()
sys.stderr.flush()
sys.stdout.flush()
class InternalLoadFullValue(InternalThreadCommand):
'''
Loads values asynchronously
'''
def __init__(self, seq, thread_id, frame_id, vars):
self.sequence = seq
self.thread_id = thread_id
self.frame_id = frame_id
self.vars = vars
@silence_warnings_decorator
def do_it(self, dbg):
'''Starts a thread that will load values asynchronously'''
try:
var_objects = []
for variable in self.vars:
variable = variable.strip()
if len(variable) > 0:
if '\t' in variable: # there are attributes beyond scope
scope, attrs = variable.split('\t', 1)
name = attrs[0]
else:
scope, attrs = (variable, None)
name = scope
var_obj = pydevd_vars.getVariable(dbg, self.thread_id, self.frame_id, scope, attrs)
var_objects.append((var_obj, name))
t = GetValueAsyncThreadDebug(dbg, dbg, self.sequence, var_objects)
t.start()
except:
exc = get_exception_traceback_str()
sys.stderr.write('%s\n' % (exc,))
cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating variable %s " % exc)
dbg.writer.add_command(cmd)
class AbstractGetValueAsyncThread(PyDBDaemonThread):
'''
    Abstract class for a thread which evaluates values for async variables.
'''
def __init__(self, py_db, frame_accessor, seq, var_objects):
PyDBDaemonThread.__init__(self, py_db)
self.frame_accessor = frame_accessor
self.seq = seq
self.var_objs = var_objects
self.cancel_event = threading.Event()
def send_result(self, xml):
raise NotImplementedError()
@overrides(PyDBDaemonThread._on_run)
def _on_run(self):
start = time.time()
xml = StringIO()
xml.write("<xml>")
for (var_obj, name) in self.var_objs:
current_time = time.time()
if current_time - start > ASYNC_EVAL_TIMEOUT_SEC or self.cancel_event.is_set():
break
xml.write(pydevd_xml.var_to_xml(var_obj, name, evaluate_full_value=True))
xml.write("</xml>")
self.send_result(xml)
xml.close()
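# Illustrative sketch (not part of pydevd): _on_run() above relies on cooperative
# cancellation -- the worker checks a deadline and a threading.Event between
# items instead of being interrupted. The same pattern in isolation (names are
# hypothetical):
def _cooperative_cancel_sketch(items, handle, timeout_sec, cancel_event):
    import time
    start = time.time()
    for item in items:
        if time.time() - start > timeout_sec or cancel_event.is_set():
            break  # Deadline passed or cancelled: stop between items.
        handle(item)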
class GetValueAsyncThreadDebug(AbstractGetValueAsyncThread):
'''
    A thread for evaluating async values, which returns the result to the debugger:
    it creates the message and sends it via the writer thread.
'''
def send_result(self, xml):
if self.frame_accessor is not None:
cmd = self.frame_accessor.cmd_factory.make_load_full_value_message(self.seq, xml.getvalue())
self.frame_accessor.writer.add_command(cmd)
class GetValueAsyncThreadConsole(AbstractGetValueAsyncThread):
'''
    A thread for evaluating async values, which returns the result to the console:
    it sends the result directly to the console's server.
'''
def send_result(self, xml):
if self.frame_accessor is not None:
self.frame_accessor.ReturnFullValue(self.seq, xml.getvalue())
| 74,656 | Python | 39.952825 | 162 | 0.580208 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_source_mapping.py | import bisect
from _pydevd_bundle.pydevd_constants import NULL, KeyifyList
import pydevd_file_utils
class SourceMappingEntry(object):
__slots__ = ['source_filename', 'line', 'end_line', 'runtime_line', 'runtime_source']
def __init__(self, line, end_line, runtime_line, runtime_source):
assert isinstance(runtime_source, str)
self.line = int(line)
self.end_line = int(end_line)
self.runtime_line = int(runtime_line)
        self.runtime_source = runtime_source  # Something like <ipython-cell-xxx>
# Should be set after translated to server (absolute_source_filename).
# This is what's sent to the client afterwards (so, its case should not be normalized).
self.source_filename = None
def contains_line(self, i):
return self.line <= i <= self.end_line
def contains_runtime_line(self, i):
        line_count = self.end_line - self.line  # Number of lines spanned by this entry.
        runtime_end_line = self.runtime_line + line_count
return self.runtime_line <= i <= runtime_end_line
def __str__(self):
return 'SourceMappingEntry(%s)' % (
', '.join('%s=%r' % (attr, getattr(self, attr)) for attr in self.__slots__))
__repr__ = __str__
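# Illustrative sketch (not part of pydevd): an entry mapping lines 10..20 of a
# client file onto a runtime source starting at runtime line 1. The offset of a
# line within the entry is preserved on both sides:
def _source_mapping_entry_sketch():
    entry = SourceMappingEntry(line=10, end_line=20, runtime_line=1, runtime_source='<cell1>')
    assert entry.contains_line(12)
    # Client line 12 is 2 lines into the entry, so it lands on runtime line 3.
    assert entry.runtime_line + (12 - entry.line) == 3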
class SourceMapping(object):
def __init__(self, on_source_mapping_changed=NULL):
self._mappings_to_server = {} # dict(normalized(file.py) to [SourceMappingEntry])
self._mappings_to_client = {} # dict(<cell> to File.py)
self._cache = {}
self._on_source_mapping_changed = on_source_mapping_changed
def set_source_mapping(self, absolute_filename, mapping):
'''
        :param str absolute_filename:
            The filename for the source mapping.
:param list(SourceMappingEntry) mapping:
A list with the source mapping entries to be applied to the given filename.
:return str:
An error message if it was not possible to set the mapping or an empty string if
everything is ok.
'''
# Let's first validate if it's ok to apply that mapping.
# File mappings must be 1:N, not M:N (i.e.: if there's a mapping from file1.py to <cell1>,
# there can be no other mapping from any other file to <cell1>).
# This is a limitation to make it easier to remove existing breakpoints when new breakpoints are
# set to a file (so, any file matching that breakpoint can be removed instead of needing to check
# which lines are corresponding to that file).
for map_entry in mapping:
existing_source_filename = self._mappings_to_client.get(map_entry.runtime_source)
if existing_source_filename and existing_source_filename != absolute_filename:
return 'Cannot apply mapping from %s to %s (it conflicts with mapping: %s to %s)' % (
absolute_filename, map_entry.runtime_source, existing_source_filename, map_entry.runtime_source)
try:
absolute_normalized_filename = pydevd_file_utils.normcase(absolute_filename)
current_mapping = self._mappings_to_server.get(absolute_normalized_filename, [])
for map_entry in current_mapping:
del self._mappings_to_client[map_entry.runtime_source]
self._mappings_to_server[absolute_normalized_filename] = sorted(mapping, key=lambda entry:entry.line)
for map_entry in mapping:
self._mappings_to_client[map_entry.runtime_source] = absolute_filename
finally:
self._cache.clear()
self._on_source_mapping_changed()
return ''
def map_to_client(self, runtime_source_filename, lineno):
key = (lineno, 'client', runtime_source_filename)
try:
return self._cache[key]
except KeyError:
for _, mapping in list(self._mappings_to_server.items()):
for map_entry in mapping:
if map_entry.runtime_source == runtime_source_filename: # <cell1>
if map_entry.contains_runtime_line(lineno): # matches line range
self._cache[key] = (map_entry.source_filename, map_entry.line + (lineno - map_entry.runtime_line), True)
return self._cache[key]
self._cache[key] = (runtime_source_filename, lineno, False) # Mark that no translation happened in the cache.
return self._cache[key]
def has_mapping_entry(self, runtime_source_filename):
'''
:param runtime_source_filename:
            Something like <ipython-cell-xxx>
'''
# Note that we're not interested in the line here, just on knowing if a given filename
# (from the server) has a mapping for it.
key = ('has_entry', runtime_source_filename)
try:
return self._cache[key]
except KeyError:
for _absolute_normalized_filename, mapping in list(self._mappings_to_server.items()):
for map_entry in mapping:
if map_entry.runtime_source == runtime_source_filename:
self._cache[key] = True
return self._cache[key]
self._cache[key] = False
return self._cache[key]
def map_to_server(self, absolute_filename, lineno):
'''
        Convert something like 'file1.py' at line 10 to '<ipython-cell-xxx>' at line 2.
Note that the name should be already normalized at this point.
'''
absolute_normalized_filename = pydevd_file_utils.normcase(absolute_filename)
changed = False
mappings = self._mappings_to_server.get(absolute_normalized_filename)
if mappings:
i = bisect.bisect(KeyifyList(mappings, lambda entry:entry.line), lineno)
if i >= len(mappings):
i -= 1
if i == 0:
entry = mappings[i]
else:
entry = mappings[i - 1]
if not entry.contains_line(lineno):
entry = mappings[i]
if not entry.contains_line(lineno):
entry = None
if entry is not None:
lineno = entry.runtime_line + (lineno - entry.line)
absolute_filename = entry.runtime_source
changed = True
return absolute_filename, lineno, changed
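# Illustrative sketch (not part of pydevd): round-tripping a line through the
# mapping. The path below is hypothetical; set_source_mapping() returns an empty
# string on success, as documented above.
def _source_mapping_usage_sketch():
    mapping = SourceMapping()
    entry = SourceMappingEntry(line=1, end_line=5, runtime_line=10, runtime_source='<cell1>')
    entry.source_filename = '/tmp/file1.py'
    assert mapping.set_source_mapping('/tmp/file1.py', [entry]) == ''
    # file1.py line 3 maps to <cell1> line 12 ...
    assert mapping.map_to_server('/tmp/file1.py', 3) == ('<cell1>', 12, True)
    # ... and back again.
    assert mapping.map_to_client('<cell1>', 12) == ('/tmp/file1.py', 3, True)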
| 6,428 | Python | 40.746753 | 132 | 0.602209 |
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevconsole_code.py | """
A copy of the code module in the standard library with some changes to work with
async evaluation.
Utilities needed to emulate Python's interactive interpreter.
"""
# Inspired by similar code by Jeff Epler and Fredrik Lundh.
import sys
import traceback
import inspect
# START --------------------------- from codeop import CommandCompiler, compile_command
# START --------------------------- from codeop import CommandCompiler, compile_command
# START --------------------------- from codeop import CommandCompiler, compile_command
# START --------------------------- from codeop import CommandCompiler, compile_command
# START --------------------------- from codeop import CommandCompiler, compile_command
r"""Utilities to compile possibly incomplete Python source code.
This module provides two interfaces, broadly similar to the builtin
function compile(), which take program text, a filename and a 'mode'
and:
- Return code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
Approach:
First, check if the source consists entirely of blank lines and
comments; if so, replace it with 'pass', because the built-in
parser doesn't always do the right thing for these.
Compile three times: as is, with \n, and with \n\n appended. If it
compiles as is, it's complete. If it compiles with one \n appended,
we expect more. If it doesn't compile either way, we compare the
error we get when compiling with \n or \n\n appended. If the errors
are the same, the code is broken. But if the errors are different, we
expect more. Not intuitive; not even guaranteed to hold in future
releases; but this matches the compiler's behavior from Python 1.4
through 2.2, at least.
Caveat:
It is possible (but not likely) that the parser stops parsing with a
successful outcome before reaching the end of the source; in this
case, trailing symbols may be ignored instead of causing an error.
For example, a backslash followed by two newlines may be followed by
arbitrary garbage. This will be fixed once the API for the parser is
better.
The two interfaces are:
compile_command(source, filename, symbol):
Compiles a single command in the manner described above.
CommandCompiler():
Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force.
The module also provides another class:
Compile():
Instances of this class act like the built-in function compile,
but with 'memory' in the sense described above.
"""
import __future__
_features = [getattr(__future__, fname)
for fname in __future__.all_feature_names]
__all__ = ["compile_command", "Compile", "CommandCompiler"]
PyCF_DONT_IMPLY_DEDENT = 0x200 # Matches pythonrun.h
def _maybe_compile(compiler, source, filename, symbol):
# Check for source consisting of only blank lines and comments
for line in source.split("\n"):
line = line.strip()
if line and line[0] != '#':
break # Leave it alone
else:
if symbol != "eval":
source = "pass" # Replace it with a 'pass' statement
err = err1 = err2 = None
code = code1 = code2 = None
try:
code = compiler(source, filename, symbol)
    except SyntaxError:
pass
try:
code1 = compiler(source + "\n", filename, symbol)
except SyntaxError as e:
err1 = e
try:
code2 = compiler(source + "\n\n", filename, symbol)
except SyntaxError as e:
err2 = e
try:
if code:
return code
if not code1 and repr(err1) == repr(err2):
raise err1
finally:
err1 = err2 = None
def _compile(source, filename, symbol):
return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT)
def compile_command(source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read; default
"<input>"
symbol -- optional grammar start symbol; "single" (default) or "eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
return _maybe_compile(_compile, source, filename, symbol)
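# Illustrative sketch (not part of the original module): the three outcomes
# documented above, in order -- complete, incomplete, and malformed input.
def _compile_command_sketch():
    assert compile_command('x = 1') is not None  # Complete: a code object.
    assert compile_command('if True:') is None   # Incomplete: caller reads more.
    try:
        compile_command('x = !')                 # Malformed: raises SyntaxError.
    except SyntaxError:
        pass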
class Compile:
"""Instances of this class behave much like the built-in compile
function, but if one is used to compile text containing a future
statement, it "remembers" and compiles all subsequent program texts
with the statement in force."""
def __init__(self):
self.flags = PyCF_DONT_IMPLY_DEDENT
try:
from ast import PyCF_ALLOW_TOP_LEVEL_AWAIT
self.flags |= PyCF_ALLOW_TOP_LEVEL_AWAIT
except:
pass
def __call__(self, source, filename, symbol):
codeob = compile(source, filename, symbol, self.flags, 1)
for feature in _features:
if codeob.co_flags & feature.compiler_flag:
self.flags |= feature.compiler_flag
return codeob
class CommandCompiler:
"""Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force."""
def __init__(self,):
self.compiler = Compile()
def __call__(self, source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read;
default "<input>"
symbol -- optional grammar start symbol; "single" (default) or
"eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
return _maybe_compile(self.compiler, source, filename, symbol)
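# Illustrative sketch (not part of the original module): the 'memory' described
# above -- once a __future__ statement is compiled, its flag sticks for later
# compilations through the same CommandCompiler instance.
def _command_compiler_sketch():
    compiler = CommandCompiler()
    compiler('from __future__ import annotations')
    assert compiler.compiler.flags & __future__.annotations.compiler_flag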
# END --------------------------- from codeop import CommandCompiler, compile_command
# END --------------------------- from codeop import CommandCompiler, compile_command
# END --------------------------- from codeop import CommandCompiler, compile_command
# END --------------------------- from codeop import CommandCompiler, compile_command
# END --------------------------- from codeop import CommandCompiler, compile_command
__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact",
"compile_command"]
from _pydev_bundle._pydev_saved_modules import threading
class _EvalAwaitInNewEventLoop(threading.Thread):
def __init__(self, compiled, updated_globals, updated_locals):
threading.Thread.__init__(self)
self.daemon = True
self._compiled = compiled
self._updated_globals = updated_globals
self._updated_locals = updated_locals
# Output
self.evaluated_value = None
self.exc = None
async def _async_func(self):
        # eval() takes globals first, then locals.
        return await eval(self._compiled, self._updated_globals, self._updated_locals)
def run(self):
try:
import asyncio
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.evaluated_value = asyncio.run(self._async_func())
except:
self.exc = sys.exc_info()
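# Illustrative sketch (not part of the original module): evaluating code that
# yields an awaitable on a helper thread with its own event loop, the same way
# runcode() below uses _EvalAwaitInNewEventLoop for coroutine code objects.
def _eval_await_sketch():
    compiled = compile("__import__('asyncio').sleep(0, result=42)", '<sketch>', 'eval')
    t = _EvalAwaitInNewEventLoop(compiled, {}, {})
    t.start()
    t.join()
    assert t.exc is None and t.evaluated_value == 42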
class InteractiveInterpreter:
"""Base class for InteractiveConsole.
This class deals with parsing and interpreter state (the user's
namespace); it doesn't deal with input buffering or prompting or
input file naming (the filename is always passed in explicitly).
"""
def __init__(self, locals=None):
"""Constructor.
The optional 'locals' argument specifies the dictionary in
which code will be executed; it defaults to a newly created
dictionary with key "__name__" set to "__console__" and key
"__doc__" set to None.
"""
if locals is None:
locals = {"__name__": "__console__", "__doc__": None}
self.locals = locals
self.compile = CommandCompiler()
def runsource(self, source, filename="<input>", symbol="single"):
"""Compile and run some source in the interpreter.
Arguments are as for compile_command().
One of several things can happen:
1) The input is incorrect; compile_command() raised an
exception (SyntaxError or OverflowError). A syntax traceback
will be printed by calling the showsyntaxerror() method.
2) The input is incomplete, and more input is required;
compile_command() returned None. Nothing happens.
3) The input is complete; compile_command() returned a code
object. The code is executed by calling self.runcode() (which
also handles run-time exceptions, except for SystemExit).
The return value is True in case 2, False in the other cases (unless
an exception is raised). The return value can be used to
decide whether to use sys.ps1 or sys.ps2 to prompt the next
line.
"""
try:
code = self.compile(source, filename, symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
self.showsyntaxerror(filename)
return False
if code is None:
# Case 2
return True
# Case 3
self.runcode(code)
return False
def runcode(self, code):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to
display a traceback. All exceptions are caught except
SystemExit, which is reraised.
A note about KeyboardInterrupt: this exception may occur
elsewhere in this code, and may not always be caught. The
caller should be prepared to deal with it.
"""
try:
is_async = False
if hasattr(inspect, 'CO_COROUTINE'):
is_async = inspect.CO_COROUTINE & code.co_flags == inspect.CO_COROUTINE
if is_async:
t = _EvalAwaitInNewEventLoop(code, self.locals, None)
t.start()
t.join()
if t.exc:
raise t.exc[1].with_traceback(t.exc[2])
else:
exec(code, self.locals)
except SystemExit:
raise
except:
self.showtraceback()
def showsyntaxerror(self, filename=None):
"""Display the syntax error that just occurred.
This doesn't display a stack trace because there isn't one.
If a filename is given, it is stuffed in the exception instead
of what was there before (because Python's parser always uses
"<string>" when reading from a string).
The output is written by self.write(), below.
"""
type, value, tb = sys.exc_info()
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
if filename and type is SyntaxError:
# Work hard to stuff the correct filename in the exception
try:
msg, (dummy_filename, lineno, offset, line) = value.args
except ValueError:
# Not the format we expect; leave it alone
pass
else:
# Stuff in the right filename
value = SyntaxError(msg, (filename, lineno, offset, line))
sys.last_value = value
if sys.excepthook is sys.__excepthook__:
lines = traceback.format_exception_only(type, value)
self.write(''.join(lines))
else:
# If someone has set sys.excepthook, we let that take precedence
# over self.write
sys.excepthook(type, value, tb)
def showtraceback(self):
"""Display the exception that just occurred.
We remove the first stack item because it is our own code.
The output is written by self.write(), below.
"""
sys.last_type, sys.last_value, last_tb = ei = sys.exc_info()
sys.last_traceback = last_tb
try:
lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next)
if sys.excepthook is sys.__excepthook__:
self.write(''.join(lines))
else:
# If someone has set sys.excepthook, we let that take precedence
# over self.write
sys.excepthook(ei[0], ei[1], last_tb)
finally:
last_tb = ei = None
def write(self, data):
"""Write a string.
The base implementation writes to sys.stderr; a subclass may
replace this with a different implementation.
"""
sys.stderr.write(data)
class InteractiveConsole(InteractiveInterpreter):
"""Closely emulate the behavior of the interactive Python interpreter.
This class builds on InteractiveInterpreter and adds prompting
using the familiar sys.ps1 and sys.ps2, and input buffering.
"""
def __init__(self, locals=None, filename="<console>"):
"""Constructor.
The optional locals argument will be passed to the
InteractiveInterpreter base class.
The optional filename argument should specify the (file)name
of the input stream; it will show up in tracebacks.
"""
InteractiveInterpreter.__init__(self, locals)
self.filename = filename
self.resetbuffer()
def resetbuffer(self):
"""Reset the input buffer."""
self.buffer = []
def interact(self, banner=None, exitmsg=None):
"""Closely emulate the interactive Python console.
The optional banner argument specifies the banner to print
before the first interaction; by default it prints a banner
similar to the one printed by the real Python interpreter,
followed by the current class name in parentheses (so as not
to confuse this with the real interpreter -- since it's so
close!).
The optional exitmsg argument specifies the exit message
printed when exiting. Pass the empty string to suppress
printing an exit message. If exitmsg is not given or None,
a default message is printed.
"""
try:
sys.ps1
except AttributeError:
sys.ps1 = ">>> "
try:
sys.ps2
except AttributeError:
sys.ps2 = "... "
cprt = 'Type "help", "copyright", "credits" or "license" for more information.'
if banner is None:
self.write("Python %s on %s\n%s\n(%s)\n" %
(sys.version, sys.platform, cprt,
self.__class__.__name__))
elif banner:
self.write("%s\n" % str(banner))
more = 0
while 1:
try:
if more:
prompt = sys.ps2
else:
prompt = sys.ps1
try:
line = self.raw_input(prompt)
except EOFError:
self.write("\n")
break
else:
more = self.push(line)
except KeyboardInterrupt:
self.write("\nKeyboardInterrupt\n")
self.resetbuffer()
more = 0
if exitmsg is None:
self.write('now exiting %s...\n' % self.__class__.__name__)
elif exitmsg != '':
self.write('%s\n' % exitmsg)
def push(self, line):
"""Push a line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is appended to a buffer and the
interpreter's runsource() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is 1 if more input is required, 0 if the line was dealt
with in some way (this is the same as runsource()).
"""
self.buffer.append(line)
source = "\n".join(self.buffer)
more = self.runsource(source, self.filename)
if not more:
self.resetbuffer()
return more
def raw_input(self, prompt=""):
"""Write a prompt and read a line.
The returned line does not include the trailing newline.
When the user enters the EOF key sequence, EOFError is raised.
The base implementation uses the built-in function
input(); a subclass may replace this with a different
implementation.
"""
return input(prompt)
def interact(banner=None, readfunc=None, local=None, exitmsg=None):
"""Closely emulate the interactive Python interpreter.
This is a backwards compatible interface to the InteractiveConsole
class. When readfunc is not specified, it attempts to import the
readline module to enable GNU readline if it is available.
Arguments (all optional, all default to None):
banner -- passed to InteractiveConsole.interact()
readfunc -- if not None, replaces InteractiveConsole.raw_input()
local -- passed to InteractiveInterpreter.__init__()
exitmsg -- passed to InteractiveConsole.interact()
"""
console = InteractiveConsole(local)
if readfunc is not None:
console.raw_input = readfunc
else:
try:
import readline
except ImportError:
pass
console.interact(banner, exitmsg)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-q', action='store_true',
help="don't print version and copyright messages")
args = parser.parse_args()
if args.q or sys.flags.quiet:
banner = ''
else:
banner = None
interact(banner)
| 19,014 | Python | 33.261261 | 87 | 0.622068 |
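A minimal usage sketch of the push()/runsource() contract documented above, written against the stdlib code module (which this vendored console mirrors); push() returns True while more input is required, which is what selects sys.ps1 versus sys.ps2:
from code import InteractiveConsole
console = InteractiveConsole()
assert console.push("def f():") is True           # incomplete: next prompt would be sys.ps2
assert console.push("    return 41 + 1") is True  # still inside the block
assert console.push("") is False                  # blank line closes the block and runs it
console.push("print(f())")                        # complete statement: prints 42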
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_concurrency_analyser/pydevd_concurrency_logger.py | import time
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
from _pydev_bundle._pydev_saved_modules import threading
from _pydevd_bundle import pydevd_xml
from _pydevd_bundle.pydevd_constants import GlobalDebuggerHolder
from _pydevd_bundle.pydevd_constants import get_thread_id
from _pydevd_bundle.pydevd_net_command import NetCommand
from _pydevd_bundle.pydevd_concurrency_analyser.pydevd_thread_wrappers import ObjectWrapper, wrap_attr
import pydevd_file_utils
from _pydev_bundle import pydev_log
import sys
file_system_encoding = getfilesystemencoding()
from urllib.parse import quote
threadingCurrentThread = threading.current_thread
DONT_TRACE_THREADING = ['threading.py', 'pydevd.py']
INNER_METHODS = ['_stop']
INNER_FILES = ['threading.py']
THREAD_METHODS = ['start', '_stop', 'join']
LOCK_METHODS = ['__init__', 'acquire', 'release', '__enter__', '__exit__']
QUEUE_METHODS = ['put', 'get']
# return time since epoch in microseconds
cur_time = lambda: int(round(time.time() * 1000000))
def get_text_list_for_frame(frame):
# partial copy-paste from make_thread_suspend_str
curFrame = frame
cmdTextList = []
try:
while curFrame:
# print cmdText
myId = str(id(curFrame))
# print "id is ", myId
if curFrame.f_code is None:
break # Iron Python sometimes does not have it!
myName = curFrame.f_code.co_name # method name (if in method) or ? if global
if myName is None:
break # Iron Python sometimes does not have it!
# print "name is ", myName
absolute_filename = pydevd_file_utils.get_abs_path_real_path_and_base_from_frame(curFrame)[0]
my_file, _applied_mapping = pydevd_file_utils.map_file_to_client(absolute_filename)
# print "file is ", my_file
# my_file = inspect.getsourcefile(curFrame) or inspect.getfile(frame)
myLine = str(curFrame.f_lineno)
# print "line is ", myLine
# the variables are all gotten 'on-demand'
# variables = pydevd_xml.frame_vars_to_xml(curFrame.f_locals)
variables = ''
            cmdTextList.append('<frame id="%s" name="%s" ' % (myId, pydevd_xml.make_valid_xml_value(myName)))
cmdTextList.append('file="%s" line="%s">' % (quote(my_file, '/>_= \t'), myLine))
cmdTextList.append(variables)
cmdTextList.append("</frame>")
curFrame = curFrame.f_back
except:
pydev_log.exception()
return cmdTextList
def send_concurrency_message(event_class, time, name, thread_id, type, event, file, line, frame, lock_id=0, parent=None):
dbg = GlobalDebuggerHolder.global_dbg
if dbg is None:
return
cmdTextList = ['<xml>']
cmdTextList.append('<' + event_class)
cmdTextList.append(' time="%s"' % pydevd_xml.make_valid_xml_value(str(time)))
cmdTextList.append(' name="%s"' % pydevd_xml.make_valid_xml_value(name))
cmdTextList.append(' thread_id="%s"' % pydevd_xml.make_valid_xml_value(thread_id))
cmdTextList.append(' type="%s"' % pydevd_xml.make_valid_xml_value(type))
if type == "lock":
cmdTextList.append(' lock_id="%s"' % pydevd_xml.make_valid_xml_value(str(lock_id)))
if parent is not None:
cmdTextList.append(' parent="%s"' % pydevd_xml.make_valid_xml_value(parent))
cmdTextList.append(' event="%s"' % pydevd_xml.make_valid_xml_value(event))
cmdTextList.append(' file="%s"' % pydevd_xml.make_valid_xml_value(file))
cmdTextList.append(' line="%s"' % pydevd_xml.make_valid_xml_value(str(line)))
cmdTextList.append('></' + event_class + '>')
cmdTextList += get_text_list_for_frame(frame)
cmdTextList.append('</xml>')
text = ''.join(cmdTextList)
if dbg.writer is not None:
dbg.writer.add_command(NetCommand(145, 0, text))
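# Illustrative shape of the message assembled above (all values made up for the sketch):
#   <xml><threading_event time="1200" name="Thread-1" thread_id="pid_42_id_1" type="lock"
#       lock_id="4408" event="acquire_begin" file="app.py" line="10"></threading_event>
#   <frame id="4409" name="run" file="app.py" line="10"></frame></xml>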
def log_new_thread(global_debugger, t):
event_time = cur_time() - global_debugger.thread_analyser.start_time
send_concurrency_message("threading_event", event_time, t.name, get_thread_id(t), "thread",
"start", "code_name", 0, None, parent=get_thread_id(t))
class ThreadingLogger:
def __init__(self):
self.start_time = cur_time()
def set_start_time(self, time):
self.start_time = time
def log_event(self, frame):
write_log = False
self_obj = None
if "self" in frame.f_locals:
self_obj = frame.f_locals["self"]
if isinstance(self_obj, threading.Thread) or self_obj.__class__ == ObjectWrapper:
write_log = True
if hasattr(frame, "f_back") and frame.f_back is not None:
back = frame.f_back
if hasattr(back, "f_back") and back.f_back is not None:
back = back.f_back
if "self" in back.f_locals:
if isinstance(back.f_locals["self"], threading.Thread):
write_log = True
try:
if write_log:
t = threadingCurrentThread()
back = frame.f_back
if not back:
return
name, _, back_base = pydevd_file_utils.get_abs_path_real_path_and_base_from_frame(back)
event_time = cur_time() - self.start_time
method_name = frame.f_code.co_name
if isinstance(self_obj, threading.Thread):
if not hasattr(self_obj, "_pydev_run_patched"):
wrap_attr(self_obj, "run")
if (method_name in THREAD_METHODS) and (back_base not in DONT_TRACE_THREADING or \
(method_name in INNER_METHODS and back_base in INNER_FILES)):
thread_id = get_thread_id(self_obj)
name = self_obj.getName()
real_method = frame.f_code.co_name
parent = None
if real_method == "_stop":
if back_base in INNER_FILES and \
back.f_code.co_name == "_wait_for_tstate_lock":
back = back.f_back.f_back
real_method = "stop"
if hasattr(self_obj, "_pydev_join_called"):
parent = get_thread_id(t)
elif real_method == "join":
# join called in the current thread, not in self object
if not self_obj.is_alive():
return
thread_id = get_thread_id(t)
name = t.name
self_obj._pydev_join_called = True
if real_method == "start":
parent = get_thread_id(t)
send_concurrency_message("threading_event", event_time, name, thread_id, "thread",
real_method, back.f_code.co_filename, back.f_lineno, back, parent=parent)
# print(event_time, self_obj.getName(), thread_id, "thread",
# real_method, back.f_code.co_filename, back.f_lineno)
if method_name == "pydev_after_run_call":
if hasattr(frame, "f_back") and frame.f_back is not None:
back = frame.f_back
if hasattr(back, "f_back") and back.f_back is not None:
back = back.f_back
if "self" in back.f_locals:
if isinstance(back.f_locals["self"], threading.Thread):
my_self_obj = frame.f_back.f_back.f_locals["self"]
my_back = frame.f_back.f_back
my_thread_id = get_thread_id(my_self_obj)
                                    send_message = True
                                    if hasattr(my_self_obj, "_pydev_join_called"):
                                        send_message = False
                                        # we can't detect stop after join in Python 2 yet
                                    if send_message:
send_concurrency_message("threading_event", event_time, "Thread", my_thread_id, "thread",
"stop", my_back.f_code.co_filename, my_back.f_lineno, my_back, parent=None)
if self_obj.__class__ == ObjectWrapper:
if back_base in DONT_TRACE_THREADING:
# do not trace methods called from threading
return
back_back_base = pydevd_file_utils.get_abs_path_real_path_and_base_from_frame(back.f_back)[2]
back = back.f_back
if back_back_base in DONT_TRACE_THREADING:
                        # back_back_base is the file where the method was called from
return
if method_name == "__init__":
send_concurrency_message("threading_event", event_time, t.name, get_thread_id(t), "lock",
method_name, back.f_code.co_filename, back.f_lineno, back, lock_id=str(id(frame.f_locals["self"])))
if "attr" in frame.f_locals and \
(frame.f_locals["attr"] in LOCK_METHODS or
frame.f_locals["attr"] in QUEUE_METHODS):
real_method = frame.f_locals["attr"]
if method_name == "call_begin":
real_method += "_begin"
elif method_name == "call_end":
real_method += "_end"
else:
return
if real_method == "release_end":
# do not log release end. Maybe use it later
return
send_concurrency_message("threading_event", event_time, t.name, get_thread_id(t), "lock",
real_method, back.f_code.co_filename, back.f_lineno, back, lock_id=str(id(self_obj)))
if real_method in ("put_end", "get_end"):
# fake release for queue, cause we don't call it directly
send_concurrency_message("threading_event", event_time, t.name, get_thread_id(t), "lock",
"release", back.f_code.co_filename, back.f_lineno, back, lock_id=str(id(self_obj)))
# print(event_time, t.name, get_thread_id(t), "lock",
# real_method, back.f_code.co_filename, back.f_lineno)
except Exception:
pydev_log.exception()
class NameManager:
def __init__(self, name_prefix):
self.tasks = {}
self.last = 0
self.prefix = name_prefix
def get(self, id):
if id not in self.tasks:
self.last += 1
self.tasks[id] = self.prefix + "-" + str(self.last)
return self.tasks[id]
class AsyncioLogger:
def __init__(self):
self.task_mgr = NameManager("Task")
self.coro_mgr = NameManager("Coro")
self.start_time = cur_time()
def get_task_id(self, frame):
asyncio = sys.modules.get('asyncio')
if asyncio is None:
# If asyncio was not imported, there's nothing to be done
# (also fixes issue where multiprocessing is imported due
# to asyncio).
return None
while frame is not None:
if "self" in frame.f_locals:
self_obj = frame.f_locals["self"]
if isinstance(self_obj, asyncio.Task):
method_name = frame.f_code.co_name
if method_name == "_step":
return id(self_obj)
frame = frame.f_back
return None
def log_event(self, frame):
event_time = cur_time() - self.start_time
# Debug loop iterations
# if isinstance(self_obj, asyncio.base_events.BaseEventLoop):
# if method_name == "_run_once":
# print("Loop iteration")
if not hasattr(frame, "f_back") or frame.f_back is None:
return
asyncio = sys.modules.get('asyncio')
if asyncio is None:
# If asyncio was not imported, there's nothing to be done
# (also fixes issue where multiprocessing is imported due
# to asyncio).
return
back = frame.f_back
if "self" in frame.f_locals:
self_obj = frame.f_locals["self"]
if isinstance(self_obj, asyncio.Task):
method_name = frame.f_code.co_name
if method_name == "set_result":
task_id = id(self_obj)
task_name = self.task_mgr.get(str(task_id))
send_concurrency_message("asyncio_event", event_time, task_name, task_name, "thread", "stop", frame.f_code.co_filename,
frame.f_lineno, frame)
method_name = back.f_code.co_name
if method_name == "__init__":
task_id = id(self_obj)
task_name = self.task_mgr.get(str(task_id))
send_concurrency_message("asyncio_event", event_time, task_name, task_name, "thread", "start", frame.f_code.co_filename,
frame.f_lineno, frame)
method_name = frame.f_code.co_name
if isinstance(self_obj, asyncio.Lock):
if method_name in ("acquire", "release"):
task_id = self.get_task_id(frame)
task_name = self.task_mgr.get(str(task_id))
if method_name == "acquire":
if not self_obj._waiters and not self_obj.locked():
send_concurrency_message("asyncio_event", event_time, task_name, task_name, "lock",
method_name + "_begin", frame.f_code.co_filename, frame.f_lineno, frame, lock_id=str(id(self_obj)))
if self_obj.locked():
method_name += "_begin"
else:
method_name += "_end"
elif method_name == "release":
method_name += "_end"
send_concurrency_message("asyncio_event", event_time, task_name, task_name, "lock",
method_name, frame.f_code.co_filename, frame.f_lineno, frame, lock_id=str(id(self_obj)))
if isinstance(self_obj, asyncio.Queue):
if method_name in ("put", "get", "_put", "_get"):
task_id = self.get_task_id(frame)
task_name = self.task_mgr.get(str(task_id))
if method_name == "put":
send_concurrency_message("asyncio_event", event_time, task_name, task_name, "lock",
"acquire_begin", frame.f_code.co_filename, frame.f_lineno, frame, lock_id=str(id(self_obj)))
elif method_name == "_put":
send_concurrency_message("asyncio_event", event_time, task_name, task_name, "lock",
"acquire_end", frame.f_code.co_filename, frame.f_lineno, frame, lock_id=str(id(self_obj)))
send_concurrency_message("asyncio_event", event_time, task_name, task_name, "lock",
"release", frame.f_code.co_filename, frame.f_lineno, frame, lock_id=str(id(self_obj)))
elif method_name == "get":
back = frame.f_back
if back.f_code.co_name != "send":
send_concurrency_message("asyncio_event", event_time, task_name, task_name, "lock",
"acquire_begin", frame.f_code.co_filename, frame.f_lineno, frame, lock_id=str(id(self_obj)))
else:
send_concurrency_message("asyncio_event", event_time, task_name, task_name, "lock",
"acquire_end", frame.f_code.co_filename, frame.f_lineno, frame, lock_id=str(id(self_obj)))
send_concurrency_message("asyncio_event", event_time, task_name, task_name, "lock",
"release", frame.f_code.co_filename, frame.f_lineno, frame, lock_id=str(id(self_obj)))
| 16,764 | Python | 47.314121 | 140 | 0.519924 |
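A small sketch of the NameManager helper above (assuming it is imported from pydevd_concurrency_logger); each distinct id receives a stable, sequential name:
mgr = NameManager("Task")
print(mgr.get("0x7f01"))  # -> "Task-1"
print(mgr.get("0x7f02"))  # -> "Task-2"
print(mgr.get("0x7f01"))  # -> "Task-1" again: the mapping is remembered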
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/pydevd_concurrency_analyser/pydevd_thread_wrappers.py | from _pydev_bundle._pydev_saved_modules import threading
def wrapper(fun):
def pydev_after_run_call():
pass
def inner(*args, **kwargs):
fun(*args, **kwargs)
pydev_after_run_call()
return inner
def wrap_attr(obj, attr):
t_save_start = getattr(obj, attr)
setattr(obj, attr, wrapper(t_save_start))
obj._pydev_run_patched = True
class ObjectWrapper(object):
def __init__(self, obj):
self.wrapped_object = obj
try:
import functools
functools.update_wrapper(self, obj)
except:
pass
def __getattr__(self, attr):
orig_attr = getattr(self.wrapped_object, attr) # .__getattribute__(attr)
if callable(orig_attr):
def patched_attr(*args, **kwargs):
self.call_begin(attr)
result = orig_attr(*args, **kwargs)
self.call_end(attr)
if result == self.wrapped_object:
return self
return result
return patched_attr
else:
return orig_attr
def call_begin(self, attr):
pass
def call_end(self, attr):
pass
def __enter__(self):
self.call_begin("__enter__")
self.wrapped_object.__enter__()
self.call_end("__enter__")
def __exit__(self, exc_type, exc_val, exc_tb):
self.call_begin("__exit__")
self.wrapped_object.__exit__(exc_type, exc_val, exc_tb)
def factory_wrapper(fun):
def inner(*args, **kwargs):
obj = fun(*args, **kwargs)
return ObjectWrapper(obj)
return inner
def wrap_threads():
# TODO: add wrappers for thread and _thread
# import _thread as mod
# print("Thread imported")
# mod.start_new_thread = wrapper(mod.start_new_thread)
threading.Lock = factory_wrapper(threading.Lock)
threading.RLock = factory_wrapper(threading.RLock)
# queue patching
import queue # @UnresolvedImport
queue.Queue = factory_wrapper(queue.Queue)
| 2,039 | Python | 23.285714 | 81 | 0.57332 |
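A minimal sketch of the wrappers above: factory_wrapper() swaps the lock factory so every lock handed out is an ObjectWrapper, and each method call is bracketed by call_begin/call_end (the hooks the concurrency logger intercepts through the "attr" local):
import threading
from _pydevd_bundle.pydevd_concurrency_analyser.pydevd_thread_wrappers import ObjectWrapper, factory_wrapper
lock = ObjectWrapper(threading.Lock())
lock.acquire()  # routed through patched_attr("acquire")
lock.release()  # routed through patched_attr("release")
make_lock = factory_wrapper(threading.Lock)
assert isinstance(make_lock(), ObjectWrapper)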
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/_debug_adapter/__main__pydevd_gen_debug_adapter_protocol.py | '''
Run this module to regenerate the `pydevd_schema.py` file.
Note that it'll generate it based on the current debugProtocol.json. Erase it and rerun
to download the latest version.
'''
def is_variable_to_translate(cls_name, var_name):
if var_name in ('variablesReference', 'frameId', 'threadId'):
return True
if cls_name == 'StackFrame' and var_name == 'id':
# It's frameId everywhere except on StackFrame.
return True
if cls_name == 'Thread' and var_name == 'id':
# It's threadId everywhere except on Thread.
return True
return False
def _get_noqa_for_var(prop_name):
return ' # noqa (assign to builtin)' if prop_name in ('type', 'format', 'id', 'hex', 'breakpoint', 'filter') else ''
class _OrderedSet(object):
# Not a good ordered set (just something to be small without adding any deps)
def __init__(self, initial_contents=None):
self._contents = []
self._contents_as_set = set()
if initial_contents is not None:
for x in initial_contents:
self.add(x)
def add(self, x):
if x not in self._contents_as_set:
self._contents_as_set.add(x)
self._contents.append(x)
def discard(self, x):
if x in self._contents_as_set:
self._contents_as_set.remove(x)
self._contents.remove(x)
def copy(self):
return _OrderedSet(self._contents)
def update(self, contents):
for x in contents:
self.add(x)
def __iter__(self):
return iter(self._contents)
def __contains__(self, item):
return item in self._contents_as_set
def __len__(self):
return len(self._contents)
def set_repr(self):
if len(self) == 0:
return 'set()'
lst = [repr(x) for x in self]
return 'set([' + ', '.join(lst) + '])'
class Ref(object):
def __init__(self, ref, ref_data):
self.ref = ref
self.ref_data = ref_data
def __str__(self):
return self.ref
def load_schema_data():
import os.path
import json
json_file = os.path.join(os.path.dirname(__file__), 'debugProtocol.json')
if not os.path.exists(json_file):
import requests
req = requests.get('https://raw.githubusercontent.com/microsoft/debug-adapter-protocol/gh-pages/debugAdapterProtocol.json')
assert req.status_code == 200
with open(json_file, 'wb') as stream:
stream.write(req.content)
with open(json_file, 'rb') as json_contents:
json_schema_data = json.loads(json_contents.read())
return json_schema_data
def load_custom_schema_data():
import os.path
import json
json_file = os.path.join(os.path.dirname(__file__), 'debugProtocolCustom.json')
with open(json_file, 'rb') as json_contents:
json_schema_data = json.loads(json_contents.read())
return json_schema_data
def create_classes_to_generate_structure(json_schema_data):
definitions = json_schema_data['definitions']
    classes_to_generate = {}
for name, definition in definitions.items():
all_of = definition.get('allOf')
description = definition.get('description')
is_enum = definition.get('type') == 'string' and 'enum' in definition
enum_values = None
if is_enum:
enum_values = definition['enum']
properties = {}
properties.update(definition.get('properties', {}))
required = _OrderedSet(definition.get('required', _OrderedSet()))
base_definitions = []
if all_of is not None:
for definition in all_of:
ref = definition.get('$ref')
if ref is not None:
assert ref.startswith('#/definitions/')
ref = ref[len('#/definitions/'):]
base_definitions.append(ref)
else:
if not description:
description = definition.get('description')
properties.update(definition.get('properties', {}))
required.update(_OrderedSet(definition.get('required', _OrderedSet())))
if isinstance(description, (list, tuple)):
description = '\n'.join(description)
if name == 'ModulesRequest': # Hack to accept modules request without arguments (ptvsd: 2050).
required.discard('arguments')
        classes_to_generate[name] = dict(
name=name,
properties=properties,
base_definitions=base_definitions,
description=description,
required=required,
is_enum=is_enum,
enum_values=enum_values
)
    return classes_to_generate
def collect_bases(curr_class, classes_to_generate, memo=None):
ret = []
if memo is None:
memo = {}
base_definitions = curr_class['base_definitions']
for base_definition in base_definitions:
if base_definition not in memo:
ret.append(base_definition)
ret.extend(collect_bases(classes_to_generate[base_definition], classes_to_generate, memo))
return ret
def fill_properties_and_required_from_base(classes_to_generate):
# Now, resolve properties based on refs
for class_to_generate in classes_to_generate.values():
dct = {}
s = _OrderedSet()
for base_definition in reversed(collect_bases(class_to_generate, classes_to_generate)):
# Note: go from base to current so that the initial order of the properties has that
# same order.
dct.update(classes_to_generate[base_definition].get('properties', {}))
s.update(classes_to_generate[base_definition].get('required', _OrderedSet()))
dct.update(class_to_generate['properties'])
class_to_generate['properties'] = dct
s.update(class_to_generate['required'])
class_to_generate['required'] = s
return class_to_generate
def update_class_to_generate_description(class_to_generate):
import textwrap
description = class_to_generate['description']
lines = []
for line in description.splitlines():
wrapped = textwrap.wrap(line.strip(), 100)
lines.extend(wrapped)
lines.append('')
while lines and lines[-1] == '':
lines = lines[:-1]
class_to_generate['description'] = ' ' + ('\n '.join(lines))
def update_class_to_generate_type(classes_to_generate, class_to_generate):
properties = class_to_generate.get('properties')
for _prop_name, prop_val in properties.items():
prop_type = prop_val.get('type', '')
if not prop_type:
prop_type = prop_val.pop('$ref', '')
if prop_type:
assert prop_type.startswith('#/definitions/')
prop_type = prop_type[len('#/definitions/'):]
prop_val['type'] = Ref(prop_type, classes_to_generate[prop_type])
def update_class_to_generate_register_dec(classes_to_generate, class_to_generate):
# Default
class_to_generate['register_request'] = ''
class_to_generate['register_dec'] = '@register'
properties = class_to_generate.get('properties')
enum_type = properties.get('type', {}).get('enum')
command = None
event = None
if enum_type and len(enum_type) == 1 and next(iter(enum_type)) in ("request", "response", "event"):
msg_type = next(iter(enum_type))
if msg_type == 'response':
# The actual command is typed in the request
response_name = class_to_generate['name']
request_name = response_name[:-len('Response')] + 'Request'
if request_name in classes_to_generate:
command = classes_to_generate[request_name]['properties'].get('command')
else:
if response_name == 'ErrorResponse':
command = {'enum': ['error']}
else:
raise AssertionError('Unhandled: %s' % (response_name,))
elif msg_type == 'request':
command = properties.get('command')
elif msg_type == 'event':
command = properties.get('event')
else:
raise AssertionError('Unexpected condition.')
if command:
enum = command.get('enum')
if enum and len(enum) == 1:
class_to_generate['register_request'] = '@register_%s(%r)\n' % (msg_type, enum[0])
def extract_prop_name_and_prop(class_to_generate):
properties = class_to_generate.get('properties')
required = _OrderedSet(class_to_generate.get('required', _OrderedSet()))
# Sort so that required come first
prop_name_and_prop = list(properties.items())
def compute_sort_key(x):
key = x[0]
if key in required:
if key == 'seq':
return 0.5 # seq when required is after the other required keys (to have a default of -1).
return 0
return 1
prop_name_and_prop.sort(key=compute_sort_key)
return prop_name_and_prop
def update_class_to_generate_to_json(class_to_generate):
required = _OrderedSet(class_to_generate.get('required', _OrderedSet()))
prop_name_and_prop = extract_prop_name_and_prop(class_to_generate)
to_dict_body = ['def to_dict(self, update_ids_to_dap=False): # noqa (update_ids_to_dap may be unused)']
translate_prop_names = []
for prop_name, prop in prop_name_and_prop:
if is_variable_to_translate(class_to_generate['name'], prop_name):
translate_prop_names.append(prop_name)
for prop_name, prop in prop_name_and_prop:
namespace = dict(prop_name=prop_name, noqa=_get_noqa_for_var(prop_name))
to_dict_body.append(' %(prop_name)s = self.%(prop_name)s%(noqa)s' % namespace)
if prop.get('type') == 'array':
to_dict_body.append(' if %(prop_name)s and hasattr(%(prop_name)s[0], "to_dict"):' % namespace)
to_dict_body.append(' %(prop_name)s = [x.to_dict() for x in %(prop_name)s]' % namespace)
if translate_prop_names:
to_dict_body.append(' if update_ids_to_dap:')
for prop_name in translate_prop_names:
namespace = dict(prop_name=prop_name, noqa=_get_noqa_for_var(prop_name))
to_dict_body.append(' if %(prop_name)s is not None:' % namespace)
to_dict_body.append(' %(prop_name)s = self._translate_id_to_dap(%(prop_name)s)%(noqa)s' % namespace)
if not translate_prop_names:
update_dict_ids_from_dap_body = []
else:
update_dict_ids_from_dap_body = ['', '', '@classmethod', 'def update_dict_ids_from_dap(cls, dct):']
for prop_name in translate_prop_names:
namespace = dict(prop_name=prop_name)
update_dict_ids_from_dap_body.append(' if %(prop_name)r in dct:' % namespace)
update_dict_ids_from_dap_body.append(' dct[%(prop_name)r] = cls._translate_id_from_dap(dct[%(prop_name)r])' % namespace)
update_dict_ids_from_dap_body.append(' return dct')
class_to_generate['update_dict_ids_from_dap'] = _indent_lines('\n'.join(update_dict_ids_from_dap_body))
to_dict_body.append(' dct = {')
first_not_required = False
for prop_name, prop in prop_name_and_prop:
use_to_dict = prop['type'].__class__ == Ref and not prop['type'].ref_data.get('is_enum', False)
is_array = prop['type'] == 'array'
ref_array_cls_name = ''
if is_array:
ref = prop['items'].get('$ref')
if ref is not None:
ref_array_cls_name = ref.split('/')[-1]
namespace = dict(prop_name=prop_name, ref_array_cls_name=ref_array_cls_name)
if prop_name in required:
if use_to_dict:
to_dict_body.append(' %(prop_name)r: %(prop_name)s.to_dict(update_ids_to_dap=update_ids_to_dap),' % namespace)
else:
if ref_array_cls_name:
to_dict_body.append(' %(prop_name)r: [%(ref_array_cls_name)s.update_dict_ids_to_dap(o) for o in %(prop_name)s] if (update_ids_to_dap and %(prop_name)s) else %(prop_name)s,' % namespace)
else:
to_dict_body.append(' %(prop_name)r: %(prop_name)s,' % namespace)
else:
if not first_not_required:
first_not_required = True
to_dict_body.append(' }')
to_dict_body.append(' if %(prop_name)s is not None:' % namespace)
if use_to_dict:
to_dict_body.append(' dct[%(prop_name)r] = %(prop_name)s.to_dict(update_ids_to_dap=update_ids_to_dap)' % namespace)
else:
if ref_array_cls_name:
to_dict_body.append(' dct[%(prop_name)r] = [%(ref_array_cls_name)s.update_dict_ids_to_dap(o) for o in %(prop_name)s] if (update_ids_to_dap and %(prop_name)s) else %(prop_name)s' % namespace)
else:
to_dict_body.append(' dct[%(prop_name)r] = %(prop_name)s' % namespace)
if not first_not_required:
first_not_required = True
to_dict_body.append(' }')
to_dict_body.append(' dct.update(self.kwargs)')
to_dict_body.append(' return dct')
class_to_generate['to_dict'] = _indent_lines('\n'.join(to_dict_body))
if not translate_prop_names:
update_dict_ids_to_dap_body = []
else:
update_dict_ids_to_dap_body = ['', '', '@classmethod', 'def update_dict_ids_to_dap(cls, dct):']
for prop_name in translate_prop_names:
namespace = dict(prop_name=prop_name)
update_dict_ids_to_dap_body.append(' if %(prop_name)r in dct:' % namespace)
update_dict_ids_to_dap_body.append(' dct[%(prop_name)r] = cls._translate_id_to_dap(dct[%(prop_name)r])' % namespace)
update_dict_ids_to_dap_body.append(' return dct')
class_to_generate['update_dict_ids_to_dap'] = _indent_lines('\n'.join(update_dict_ids_to_dap_body))
def update_class_to_generate_init(class_to_generate):
args = []
init_body = []
docstring = []
required = _OrderedSet(class_to_generate.get('required', _OrderedSet()))
prop_name_and_prop = extract_prop_name_and_prop(class_to_generate)
translate_prop_names = []
for prop_name, prop in prop_name_and_prop:
if is_variable_to_translate(class_to_generate['name'], prop_name):
translate_prop_names.append(prop_name)
enum = prop.get('enum')
if enum and len(enum) == 1:
init_body.append(' self.%(prop_name)s = %(enum)r' % dict(prop_name=prop_name, enum=next(iter(enum))))
else:
if prop_name in required:
if prop_name == 'seq':
args.append(prop_name + '=-1')
else:
args.append(prop_name)
else:
args.append(prop_name + '=None')
if prop['type'].__class__ == Ref:
ref = prop['type']
ref_data = ref.ref_data
if ref_data.get('is_enum', False):
init_body.append(' if %s is not None:' % (prop_name,))
init_body.append(' assert %s in %s.VALID_VALUES' % (prop_name, str(ref)))
init_body.append(' self.%(prop_name)s = %(prop_name)s' % dict(
prop_name=prop_name))
else:
namespace = dict(
prop_name=prop_name,
ref_name=str(ref)
)
init_body.append(' if %(prop_name)s is None:' % namespace)
init_body.append(' self.%(prop_name)s = %(ref_name)s()' % namespace)
init_body.append(' else:')
init_body.append(' self.%(prop_name)s = %(ref_name)s(update_ids_from_dap=update_ids_from_dap, **%(prop_name)s) if %(prop_name)s.__class__ != %(ref_name)s else %(prop_name)s' % namespace
)
else:
init_body.append(' self.%(prop_name)s = %(prop_name)s' % dict(prop_name=prop_name))
if prop['type'] == 'array':
ref = prop['items'].get('$ref')
if ref is not None:
ref_array_cls_name = ref.split('/')[-1]
init_body.append(' if update_ids_from_dap and self.%(prop_name)s:' % dict(prop_name=prop_name))
init_body.append(' for o in self.%(prop_name)s:' % dict(prop_name=prop_name))
init_body.append(' %(ref_array_cls_name)s.update_dict_ids_from_dap(o)' % dict(ref_array_cls_name=ref_array_cls_name))
prop_type = prop['type']
prop_description = prop.get('description', '')
if isinstance(prop_description, (list, tuple)):
prop_description = '\n '.join(prop_description)
docstring.append(':param %(prop_type)s %(prop_name)s: %(prop_description)s' % dict(
prop_type=prop_type, prop_name=prop_name, prop_description=prop_description))
if translate_prop_names:
init_body.append(' if update_ids_from_dap:')
for prop_name in translate_prop_names:
init_body.append(' self.%(prop_name)s = self._translate_id_from_dap(self.%(prop_name)s)' % dict(prop_name=prop_name))
docstring = _indent_lines('\n'.join(docstring))
init_body = '\n'.join(init_body)
# Actually bundle the whole __init__ from the parts.
args = ', '.join(args)
if args:
args = ', ' + args
# Note: added kwargs because some messages are expected to be extended by the user (so, we'll actually
    # make all extendable so that we don't have to worry about which ones -- we lose a little on typing,
    # but it may be better than doing an allow list based on something only pointed out in the documentation).
class_to_generate['init'] = '''def __init__(self%(args)s, update_ids_from_dap=False, **kwargs): # noqa (update_ids_from_dap may be unused)
"""
%(docstring)s
"""
%(init_body)s
self.kwargs = kwargs
''' % dict(args=args, init_body=init_body, docstring=docstring)
class_to_generate['init'] = _indent_lines(class_to_generate['init'])
def update_class_to_generate_props(class_to_generate):
import json
def default(o):
if isinstance(o, Ref):
return o.ref
raise AssertionError('Unhandled: %s' % (o,))
properties = class_to_generate['properties']
class_to_generate['props'] = ' __props__ = %s' % _indent_lines(
json.dumps(properties, indent=4, default=default)).strip()
def update_class_to_generate_refs(class_to_generate):
properties = class_to_generate['properties']
class_to_generate['refs'] = ' __refs__ = %s' % _OrderedSet(
key for (key, val) in properties.items() if val['type'].__class__ == Ref).set_repr()
def update_class_to_generate_enums(class_to_generate):
class_to_generate['enums'] = ''
if class_to_generate.get('is_enum', False):
enums = ''
for enum in class_to_generate['enum_values']:
enums += ' %s = %r\n' % (enum.upper(), enum)
enums += '\n'
enums += ' VALID_VALUES = %s\n\n' % _OrderedSet(class_to_generate['enum_values']).set_repr()
class_to_generate['enums'] = enums
def update_class_to_generate_objects(classes_to_generate, class_to_generate):
properties = class_to_generate['properties']
for key, val in properties.items():
if 'type' not in val:
val['type'] = 'TypeNA'
continue
if val['type'] == 'object':
create_new = val.copy()
create_new.update({
'name': '%s%s' % (class_to_generate['name'], key.title()),
'description': ' "%s" of %s' % (key, class_to_generate['name'])
})
if 'properties' not in create_new:
create_new['properties'] = {}
assert create_new['name'] not in classes_to_generate
classes_to_generate[create_new['name']] = create_new
update_class_to_generate_type(classes_to_generate, create_new)
update_class_to_generate_props(create_new)
# Update nested object types
update_class_to_generate_objects(classes_to_generate, create_new)
val['type'] = Ref(create_new['name'], classes_to_generate[create_new['name']])
val.pop('properties', None)
def gen_debugger_protocol():
import os.path
import sys
if sys.version_info[:2] < (3, 6):
raise AssertionError('Must be run with Python 3.6 onwards (to keep dict order).')
classes_to_generate = create_classes_to_generate_structure(load_schema_data())
classes_to_generate.update(create_classes_to_generate_structure(load_custom_schema_data()))
class_to_generate = fill_properties_and_required_from_base(classes_to_generate)
for class_to_generate in list(classes_to_generate.values()):
update_class_to_generate_description(class_to_generate)
update_class_to_generate_type(classes_to_generate, class_to_generate)
update_class_to_generate_props(class_to_generate)
update_class_to_generate_objects(classes_to_generate, class_to_generate)
for class_to_generate in classes_to_generate.values():
update_class_to_generate_refs(class_to_generate)
update_class_to_generate_init(class_to_generate)
update_class_to_generate_enums(class_to_generate)
update_class_to_generate_to_json(class_to_generate)
update_class_to_generate_register_dec(classes_to_generate, class_to_generate)
class_template = '''
%(register_request)s%(register_dec)s
class %(name)s(BaseSchema):
"""
%(description)s
Note: automatically generated code. Do not edit manually.
"""
%(enums)s%(props)s
%(refs)s
__slots__ = list(__props__.keys()) + ['kwargs']
%(init)s%(update_dict_ids_from_dap)s
%(to_dict)s%(update_dict_ids_to_dap)s
'''
contents = []
contents.append('# coding: utf-8')
contents.append('# Automatically generated code.')
contents.append('# Do not edit manually.')
contents.append('# Generated by running: %s' % os.path.basename(__file__))
contents.append('from .pydevd_base_schema import BaseSchema, register, register_request, register_response, register_event')
contents.append('')
for class_to_generate in classes_to_generate.values():
contents.append(class_template % class_to_generate)
parent_dir = os.path.dirname(__file__)
schema = os.path.join(parent_dir, 'pydevd_schema.py')
with open(schema, 'w', encoding='utf-8') as stream:
stream.write('\n'.join(contents))
def _indent_lines(lines, indent=' '):
out_lines = []
for line in lines.splitlines(keepends=True):
out_lines.append(indent + line)
return ''.join(out_lines)
if __name__ == '__main__':
gen_debugger_protocol()
| 23,085 | Python | 37.93086 | 217 | 0.586788 |
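A quick sketch of the _OrderedSet helper above (assuming it is run inside the generator module, where the class lives); its insertion-order behavior is what keeps the generated __props__/__refs__ ordering stable across runs:
s = _OrderedSet(['seq', 'type', 'seq'])
s.add('command')
print(list(s))        # ['seq', 'type', 'command']
print(s.set_repr())   # set(['seq', 'type', 'command'])
print('type' in s)    # True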
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/_debug_adapter/pydevd_base_schema.py | from _pydevd_bundle._debug_adapter.pydevd_schema_log import debug_exception
import json
import itertools
from functools import partial
class BaseSchema(object):
@staticmethod
def initialize_ids_translation():
BaseSchema._dap_id_to_obj_id = {0:0, None:None}
BaseSchema._obj_id_to_dap_id = {0:0, None:None}
BaseSchema._next_dap_id = partial(next, itertools.count(1))
def to_json(self):
return json.dumps(self.to_dict())
@staticmethod
def _translate_id_to_dap(obj_id):
if obj_id == '*':
return '*'
# Note: we don't invalidate ids, so, if some object starts using the same id
# of another object, the same id will be used.
dap_id = BaseSchema._obj_id_to_dap_id.get(obj_id)
if dap_id is None:
dap_id = BaseSchema._obj_id_to_dap_id[obj_id] = BaseSchema._next_dap_id()
BaseSchema._dap_id_to_obj_id[dap_id] = obj_id
return dap_id
@staticmethod
def _translate_id_from_dap(dap_id):
if dap_id == '*':
return '*'
try:
return BaseSchema._dap_id_to_obj_id[dap_id]
except:
raise KeyError('Wrong ID sent from the client: %s' % (dap_id,))
@staticmethod
def update_dict_ids_to_dap(dct):
return dct
@staticmethod
def update_dict_ids_from_dap(dct):
return dct
BaseSchema.initialize_ids_translation()
_requests_to_types = {}
_responses_to_types = {}
_event_to_types = {}
_all_messages = {}
def register(cls):
_all_messages[cls.__name__] = cls
return cls
def register_request(command):
def do_register(cls):
_requests_to_types[command] = cls
return cls
return do_register
def register_response(command):
def do_register(cls):
_responses_to_types[command] = cls
return cls
return do_register
def register_event(event):
def do_register(cls):
_event_to_types[event] = cls
return cls
return do_register
def from_dict(dct, update_ids_from_dap=False):
msg_type = dct.get('type')
if msg_type is None:
raise ValueError('Unable to make sense of message: %s' % (dct,))
if msg_type == 'request':
to_type = _requests_to_types
use = dct['command']
elif msg_type == 'response':
to_type = _responses_to_types
use = dct['command']
else:
to_type = _event_to_types
use = dct['event']
cls = to_type.get(use)
if cls is None:
raise ValueError('Unable to create message from dict: %s. %s not in %s' % (dct, use, sorted(to_type.keys())))
try:
return cls(update_ids_from_dap=update_ids_from_dap, **dct)
except:
msg = 'Error creating %s from %s' % (cls, dct)
debug_exception(msg)
raise
def from_json(json_msg, update_ids_from_dap=False, on_dict_loaded=lambda dct:None):
if isinstance(json_msg, bytes):
json_msg = json_msg.decode('utf-8')
as_dict = json.loads(json_msg)
on_dict_loaded(as_dict)
try:
return from_dict(as_dict, update_ids_from_dap=update_ids_from_dap)
except:
if as_dict.get('type') == 'response' and not as_dict.get('success'):
# Error messages may not have required body (return as a generic Response).
Response = _all_messages['Response']
return Response(**as_dict)
else:
raise
def get_response_class(request):
if request.__class__ == dict:
return _responses_to_types[request['command']]
return _responses_to_types[request.command]
def build_response(request, kwargs=None):
if kwargs is None:
kwargs = {'success':True}
else:
if 'success' not in kwargs:
kwargs['success'] = True
response_class = _responses_to_types[request.command]
kwargs.setdefault('seq', -1) # To be overwritten before sending
return response_class(command=request.command, request_seq=request.seq, **kwargs)
| 3,998 | Python | 26.02027 | 117 | 0.608304 |
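A short sketch of the id translation above; the underscore methods are internal helpers, shown here only to illustrate the mapping (arbitrary internal ids become small sequential DAP ids, the mapping is reversible, and '*' passes through untouched):
from _pydevd_bundle._debug_adapter.pydevd_base_schema import BaseSchema
BaseSchema.initialize_ids_translation()
dap_id = BaseSchema._translate_id_to_dap(140231234)  # first real id handed out -> 1
assert BaseSchema._translate_id_from_dap(dap_id) == 140231234
assert BaseSchema._translate_id_to_dap('*') == '*'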
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/_pydevd_bundle/_debug_adapter/pydevd_schema_log.py | import os
import traceback
from _pydevd_bundle.pydevd_constants import ForkSafeLock
_pid = os.getpid()
_pid_msg = '%s: ' % (_pid,)
_debug_lock = ForkSafeLock()
DEBUG = False
DEBUG_FILE = os.path.join(os.path.dirname(__file__), '__debug_output__.txt')
def debug(msg):
if DEBUG:
with _debug_lock:
_pid_prefix = _pid_msg
if isinstance(msg, bytes):
_pid_prefix = _pid_prefix.encode('utf-8')
if not msg.endswith(b'\r') and not msg.endswith(b'\n'):
msg += b'\n'
mode = 'a+b'
else:
if not msg.endswith('\r') and not msg.endswith('\n'):
msg += '\n'
mode = 'a+'
with open(DEBUG_FILE, mode) as stream:
stream.write(_pid_prefix)
stream.write(msg)
def debug_exception(msg=None):
if DEBUG:
if msg:
debug(msg)
with _debug_lock:
with open(DEBUG_FILE, 'a+') as stream:
_pid_prefix = _pid_msg
if isinstance(msg, bytes):
_pid_prefix = _pid_prefix.encode('utf-8')
stream.write(_pid_prefix)
traceback.print_exc(file=stream)
| 1,255 | Python | 25.723404 | 76 | 0.494024 |
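A usage sketch of the helpers above; nothing is written unless the module-level DEBUG flag is flipped first, and every line lands pid-prefixed in __debug_output__.txt:
from _pydevd_bundle._debug_adapter import pydevd_schema_log
pydevd_schema_log.DEBUG = True
pydevd_schema_log.debug('handling request')
try:
    raise ValueError('boom')
except ValueError:
    pydevd_schema_log.debug_exception('request failed')  # message plus full traceback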
omniverse-code/kit/exts/omni.kit.debug.python/debugpy/_vendored/pydevd/pydev_sitecustomize/sitecustomize.py | '''
This module will:
- change the input() and raw_input() commands to change \r\n or \r into \n
- execute the user site customize -- if available
- change raw_input() and input() to also remove any trailing \r
Up to PyDev 3.4 it also was setting the default encoding, but it was removed because of differences when
running from a shell (i.e.: now we just set the PYTHONIOENCODING related to that -- which is properly
treated on Py 2.7 onwards).
'''
DEBUG = 0 #0 or 1 because of jython
import sys
encoding = None
IS_PYTHON_3_ONWARDS = 0
try:
IS_PYTHON_3_ONWARDS = sys.version_info[0] >= 3
except:
#That's OK, not all versions of python have sys.version_info
if DEBUG:
import traceback;traceback.print_exc() #@Reimport
#-----------------------------------------------------------------------------------------------------------------------
#Line buffering
if IS_PYTHON_3_ONWARDS:
#Python 3 has a bug (http://bugs.python.org/issue4705) in which -u doesn't properly make output/input unbuffered
#so, we need to enable that ourselves here.
try:
sys.stdout._line_buffering = True
except:
pass
try:
sys.stderr._line_buffering = True
except:
pass
try:
sys.stdin._line_buffering = True
except:
pass
try:
import org.python.core.PyDictionary #@UnresolvedImport @UnusedImport -- just to check if it could be valid
def dict_contains(d, key):
return d.has_key(key)
except:
try:
#Py3k does not have has_key anymore, and older versions don't have __contains__
dict_contains = dict.__contains__
except:
try:
dict_contains = dict.has_key
except NameError:
def dict_contains(d, key):
return d.has_key(key)
def install_breakpointhook():
def custom_sitecustomize_breakpointhook(*args, **kwargs):
import os
hookname = os.getenv('PYTHONBREAKPOINT')
if (
hookname is not None
and len(hookname) > 0
and hasattr(sys, '__breakpointhook__')
and sys.__breakpointhook__ != custom_sitecustomize_breakpointhook
):
sys.__breakpointhook__(*args, **kwargs)
else:
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
import pydevd
kwargs.setdefault('stop_at_frame', sys._getframe().f_back)
pydevd.settrace(*args, **kwargs)
if sys.version_info[0:2] >= (3, 7):
# There are some choices on how to provide the breakpoint hook. Namely, we can provide a
# PYTHONBREAKPOINT which provides the import path for a method to be executed or we
# can override sys.breakpointhook.
# pydevd overrides sys.breakpointhook instead of providing an environment variable because
# it's possible that the debugger starts the user program but is not available in the
# PYTHONPATH (and would thus fail to be imported if PYTHONBREAKPOINT was set to pydevd.settrace).
# Note that the implementation still takes PYTHONBREAKPOINT in account (so, if it was provided
# by someone else, it'd still work).
sys.breakpointhook = custom_sitecustomize_breakpointhook
else:
if sys.version_info[0] >= 3:
import builtins as __builtin__ # Py3
else:
import __builtin__
# In older versions, breakpoint() isn't really available, so, install the hook directly
# in the builtins.
__builtin__.breakpoint = custom_sitecustomize_breakpointhook
sys.__breakpointhook__ = custom_sitecustomize_breakpointhook
# Install the breakpoint hook at import time.
install_breakpointhook()
#-----------------------------------------------------------------------------------------------------------------------
#now that we've finished the needed pydev sitecustomize, let's run the default one (if available)
#Ok, some weirdness going on in Python 3k: when removing this module from sys.modules to import the 'real'
#sitecustomize, all the variables in this scope become None (as if it was garbage-collected), so, the reference
#below is now being kept to create a cyclic reference so that it never dies.
__pydev_sitecustomize_module__ = sys.modules.get('sitecustomize') #A ref to this module
#remove the pydev site customize (and the pythonpath for it)
paths_removed = []
try:
for c in sys.path[:]:
        #Pydev controls the whole classpath in Jython already, so, we don't want a duplicate for
#what we've already added there (this is needed to support Jython 2.5b1 onwards -- otherwise, as
#we added the sitecustomize to the pythonpath and to the classpath, we'd have to remove it from the
#classpath too -- and I don't think there's a way to do that... or not?)
if c.find('pydev_sitecustomize') != -1 or c == '__classpath__' or c == '__pyclasspath__' or \
c == '__classpath__/' or c == '__pyclasspath__/' or c == '__classpath__\\' or c == '__pyclasspath__\\':
sys.path.remove(c)
if c.find('pydev_sitecustomize') == -1:
#We'll re-add any paths removed but the pydev_sitecustomize we added from pydev.
paths_removed.append(c)
if dict_contains(sys.modules, 'sitecustomize'):
del sys.modules['sitecustomize'] #this module
except:
#print the error... should never happen (so, always show, and not only on debug)!
import traceback;traceback.print_exc() #@Reimport
else:
#Now, execute the default sitecustomize
try:
import sitecustomize #@UnusedImport
sitecustomize.__pydev_sitecustomize_module__ = __pydev_sitecustomize_module__
except:
pass
if not dict_contains(sys.modules, 'sitecustomize'):
#If there was no sitecustomize, re-add the pydev sitecustomize (pypy gives a KeyError if it's not there)
sys.modules['sitecustomize'] = __pydev_sitecustomize_module__
try:
if paths_removed:
if sys is None:
import sys
if sys is not None:
#And after executing the default sitecustomize, restore the paths (if we didn't remove it before,
#the import sitecustomize would recurse).
sys.path.extend(paths_removed)
except:
#print the error... should never happen (so, always show, and not only on debug)!
import traceback;traceback.print_exc() #@Reimport
if sys.version_info[0] < 3:
try:
#Redefine input and raw_input only after the original sitecustomize was executed
#(because otherwise, the original raw_input and input would still not be defined)
import __builtin__
original_raw_input = __builtin__.raw_input
original_input = __builtin__.input
def raw_input(prompt=''):
#the original raw_input would only remove a trailing \n, so, at
#this point if we had a \r\n the \r would remain (which is valid for eclipse)
#so, let's remove the remaining \r which python didn't expect.
ret = original_raw_input(prompt)
if ret.endswith('\r'):
return ret[:-1]
return ret
raw_input.__doc__ = original_raw_input.__doc__
def input(prompt=''):
            #input must also be rebound to use the new raw_input defined
return eval(raw_input(prompt))
input.__doc__ = original_input.__doc__
__builtin__.raw_input = raw_input
__builtin__.input = input
except:
#Don't report errors at this stage
if DEBUG:
import traceback;traceback.print_exc() #@Reimport
else:
try:
import builtins #Python 3.0 does not have the __builtin__ module @UnresolvedImport
original_input = builtins.input
def input(prompt=''):
#the original input would only remove a trailing \n, so, at
#this point if we had a \r\n the \r would remain (which is valid for eclipse)
#so, let's remove the remaining \r which python didn't expect.
ret = original_input(prompt)
if ret.endswith('\r'):
return ret[:-1]
return ret
input.__doc__ = original_input.__doc__
builtins.input = input
except:
#Don't report errors at this stage
if DEBUG:
import traceback;traceback.print_exc() #@Reimport
try:
#The original getpass doesn't work from the eclipse console, so, let's put a replacement
    #here (note that it'll not go into echo mode in the console, so, what the user writes
#will actually be seen)
#Note: same thing from the fix_getpass module -- but we don't want to import it in this
#custom sitecustomize.
def fix_get_pass():
try:
import getpass
except ImportError:
return #If we can't import it, we can't fix it
import warnings
fallback = getattr(getpass, 'fallback_getpass', None) # >= 2.6
if not fallback:
fallback = getpass.default_getpass # <= 2.5
getpass.getpass = fallback
if hasattr(getpass, 'GetPassWarning'):
warnings.simplefilter("ignore", category=getpass.GetPassWarning)
fix_get_pass()
except:
#Don't report errors at this stage
if DEBUG:
import traceback;traceback.print_exc() #@Reimport
| 9,473 | Python | 38.806723 | 120 | 0.616172 |
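A tiny sketch of the trailing-"\r" fix above: a console that transmits "\r\n" leaves the "\r" behind after input() strips the "\n", so the replacement drops it (the helper name below is hypothetical, mirroring the wrapper's logic):
def _strip_trailing_cr(ret):
    return ret[:-1] if ret.endswith('\r') else ret
assert _strip_trailing_cr('hello\r') == 'hello'  # what an Eclipse-style console delivers
assert _strip_trailing_cr('hello') == 'hello'    # normal input is untouched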