text_prompt (string, lengths 100–17.7k, nullable) | code_prompt (string, lengths 7–9.86k, nullable)
---|---|
<SYSTEM_TASK:>
Return a list of all mouse events in the current drag operation.
<END_TASK>
<USER_TASK:>
Description:
def drag_events(self):
""" Return a list of all mouse events in the current drag operation.
Returns None if there is no current drag operation.
""" |
if not self.is_dragging:
return None
event = self
events = []
while True:
# mouse_press events can only be the start of a trail
if event is None or event.type == 'mouse_press':
break
events.append(event)
event = event.last_event
return events[::-1] |
<SYSTEM_TASK:>
Set the maximum width of the widget.
<END_TASK>
<USER_TASK:>
Description:
def width_max(self, width_max):
"""Set the maximum width of the widget.
Parameters
----------
width_max : None | float
The maximum width of the widget. If None, the maximum width
is unbounded.
""" |
if width_max is None:
self._width_limits[1] = None
return
width_max = float(width_max)
assert(self.width_min <= width_max)
self._width_limits[1] = width_max
self._update_layout() |
<SYSTEM_TASK:>
Set the maximum height of the widget.
<END_TASK>
<USER_TASK:>
Description:
def height_max(self, height_max):
"""Set the maximum height of the widget.
Parameters
----------
height_max : None | float
The maximum height of the widget. If None, the maximum height
is unbounded.
""" |
if height_max is None:
self._height_limits[1] = None
return
height_max = float(height_max)
assert(0 <= self.height_min <= height_max)
self._height_limits[1] = height_max
self._update_layout() |
<SYSTEM_TASK:>
The rectangular area inside the margin, border, and padding.
<END_TASK>
<USER_TASK:>
Description:
def inner_rect(self):
"""The rectangular area inside the margin, border, and padding.
Generally widgets should avoid drawing or placing sub-widgets outside
this rectangle.
""" |
m = self.margin + self._border_width + self.padding
if not self.border_color.is_blank:
m += 1
return Rect((m, m), (self.size[0]-2*m, self.size[1]-2*m)) |
<SYSTEM_TASK:>
Called whenever the clipper for this widget may need to be updated.
<END_TASK>
<USER_TASK:>
Description:
def _update_clipper(self):
"""Called whenever the clipper for this widget may need to be updated.
""" |
if self.clip_children and self._clipper is None:
self._clipper = Clipper()
elif not self.clip_children:
self._clipper = None
if self._clipper is None:
return
self._clipper.rect = self.inner_rect
self._clipper.transform = self.get_transform('framebuffer', 'visual') |
<SYSTEM_TASK:>
Add a Widget as a managed child of this Widget.
<END_TASK>
<USER_TASK:>
Description:
def add_widget(self, widget):
"""
Add a Widget as a managed child of this Widget.
The child will be
automatically positioned and sized to fill the entire space inside
this Widget (unless _update_child_widgets is redefined).
Parameters
----------
widget : instance of Widget
The widget to add.
Returns
-------
widget : instance of Widget
The widget.
""" |
self._widgets.append(widget)
widget.parent = self
self._update_child_widgets()
return widget |
<SYSTEM_TASK:>
Remove a Widget as a managed child of this Widget.
<END_TASK>
<USER_TASK:>
Description:
def remove_widget(self, widget):
"""
Remove a Widget as a managed child of this Widget.
Parameters
----------
widget : instance of Widget
The widget to remove.
""" |
self._widgets.remove(widget)
widget.parent = None
self._update_child_widgets() |
<SYSTEM_TASK:>
Packs float ieee binary representation into 4 unsigned int8
<END_TASK>
<USER_TASK:>
Description:
def pack_ieee(value):
"""Packs float ieee binary representation into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
""" |
return np.frombuffer(value.tobytes(),
np.ubyte).reshape((value.shape + (4,))) |
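The packing above simply reinterprets the raw float32 bytes as groups of four unsigned bytes. A minimal, self-contained sketch of the same idea (plain NumPy, no VisPy required), including the reverse step to verify the round trip:

import numpy as np

kernel = np.linspace(0.0, 1.0, 8, dtype=np.float32)

# Pack: view each float32 as 4 unsigned bytes, as pack_ieee does above.
packed = np.frombuffer(kernel.tobytes(), np.ubyte).reshape(kernel.shape + (4,))

# Unpack: reinterpret the bytes as float32 again to verify the round trip.
restored = np.frombuffer(packed.tobytes(), np.float32).reshape(kernel.shape)
assert np.array_equal(kernel, restored)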
<SYSTEM_TASK:>
Load spatial-filters kernel
<END_TASK>
<USER_TASK:>
Description:
def load_spatial_filters(packed=True):
"""Load spatial-filters kernel
Parameters
----------
packed : bool
Whether or not the data should be in "packed" representation
for use in GLSL code.
Returns
-------
kernel : array
16x1024x4 (packed float in rgba) or
16x1024 (unpacked float)
16 interpolation kernels of length 1024 each.
names : tuple of strings
Respective interpolation names, plus "Nearest" which does
not require a filter but can still be used
""" |
names = ("Bilinear", "Hanning", "Hamming", "Hermite",
"Kaiser", "Quadric", "Bicubic", "CatRom",
"Mitchell", "Spline16", "Spline36", "Gaussian",
"Bessel", "Sinc", "Lanczos", "Blackman", "Nearest")
kernel = np.load(op.join(DATA_DIR, 'spatial-filters.npy'))
if packed:
# convert the kernel to a packed representation
kernel = pack_unit(kernel)
return kernel, names |
<SYSTEM_TASK:>
A decorator ensuring that the decorated function's run time does not
<END_TASK>
<USER_TASK:>
Description:
def timeout(limit, handler):
"""A decorator ensuring that the decorated function tun time does not
exceeds the argument limit.
:args limit: the time limit
:type limit: int
:args handler: the handler function called when the decorated
function times out.
:type handler: callable
Example:
>>>def timeout_handler(limit, f, *args, **kwargs):
... print "{func} call timed out after {lim}s.".format(
... func=f.__name__, lim=limit)
...
>>>@timeout(limit=5, handler=timeout_handler)
... def work(foo, bar, baz="spam")
... time.sleep(10)
>>>work("foo", "bar", "baz")
# time passes...
work call timed out after 5s.
>>>
""" |
def wrapper(f):
def wrapped_f(*args, **kwargs):
old_handler = signal.getsignal(signal.SIGALRM)
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(limit)
try:
res = f(*args, **kwargs)
except Timeout:
handler(limit, f, args, kwargs)
else:
return res
finally:
signal.signal(signal.SIGALRM, old_handler)
signal.alarm(0)
return wrapped_f
return wrapper |
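The wrapper above installs a module-level ``timeout_handler`` on SIGALRM and catches a ``Timeout`` exception, neither of which is shown in this snippet (that signal handler is distinct from the user-supplied ``handler`` callback). A hedged sketch of what those supporting pieces presumably look like; SIGALRM makes this POSIX-only:

class Timeout(Exception):
    """Assumed exception raised when the alarm fires."""


def timeout_handler(signum, frame):
    # Installed on SIGALRM by wrapped_f; turns the alarm into an
    # exception that the try/except above can catch.
    raise Timeout()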
<SYSTEM_TASK:>
Simple utility to retrieve kwargs in predetermined order.
<END_TASK>
<USER_TASK:>
Description:
def _process_backend_kwargs(self, kwargs):
""" Simple utility to retrieve kwargs in predetermined order.
Also checks whether the values of the backend arguments do not
violate the backend capabilities.
""" |
# Verify given argument with capability of the backend
app = self._vispy_canvas.app
capability = app.backend_module.capability
if kwargs['context'].shared.name: # name already assigned: shared
if not capability['context']:
raise RuntimeError('Cannot share context with this backend')
for key in [key for (key, val) in capability.items() if not val]:
if key in ['context', 'multi_window', 'scroll']:
continue
invert = key in ['resizable', 'decorate']
if bool(kwargs[key]) - invert:
raise RuntimeError('Config %s is not supported by backend %s'
% (key, app.backend_name))
# Return items in sequence
out = SimpleBunch()
keys = ['title', 'size', 'position', 'show', 'vsync', 'resizable',
'decorate', 'fullscreen', 'parent', 'context', 'always_on_top',
]
for key in keys:
out[key] = kwargs[key]
return out |
<SYSTEM_TASK:>
ViewBox key event handler
<END_TASK>
<USER_TASK:>
Description:
def viewbox_key_event(self, event):
"""ViewBox key event handler
Parameters
----------
event : instance of Event
The event.
""" |
PerspectiveCamera.viewbox_key_event(self, event)
if event.handled or not self.interactive:
return
# Ensure the timer runs
if not self._timer.running:
self._timer.start()
if event.key in self._keymap:
val_dims = self._keymap[event.key]
val = val_dims[0]
# Brake or accelerate?
if val == 0:
vec = self._brake
val = 1
else:
vec = self._acc
# Set
if event.type == 'key_release':
val = 0
for dim in val_dims[1:]:
factor = 1.0
vec[dim-1] = val * factor |
<SYSTEM_TASK:>
Set PyOS_InputHook to NULL and return the previous one.
<END_TASK>
<USER_TASK:>
Description:
def clear_inputhook(self, app=None):
"""Set PyOS_InputHook to NULL and return the previous one.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`.""" |
pyos_inputhook_ptr = self.get_pyos_inputhook()
original = self.get_pyos_inputhook_as_func()
pyos_inputhook_ptr.value = ctypes.c_void_p(None).value
allow_CTRL_C()
self._reset()
return original |
<SYSTEM_TASK:>
Make a canvas active. Used primarily by the canvas itself.
<END_TASK>
<USER_TASK:>
Description:
def set_current_canvas(canvas):
""" Make a canvas active. Used primarily by the canvas itself.
""" |
# Notify glir
canvas.context._do_CURRENT_command = True
# Try to be quick
if canvasses and canvasses[-1]() is canvas:
return
# Make this the current
cc = [c() for c in canvasses if c() is not None]
while canvas in cc:
cc.remove(canvas)
cc.append(canvas)
canvasses[:] = [weakref.ref(c) for c in cc] |
<SYSTEM_TASK:>
Forget about the given canvas. Used by the canvas when closed.
<END_TASK>
<USER_TASK:>
Description:
def forget_canvas(canvas):
""" Forget about the given canvas. Used by the canvas when closed.
""" |
cc = [c() for c in canvasses if c() is not None]
while canvas in cc:
cc.remove(canvas)
canvasses[:] = [weakref.ref(c) for c in cc] |
<SYSTEM_TASK:>
For the app backends to create the GLShared object.
<END_TASK>
<USER_TASK:>
Description:
def create_shared(self, name, ref):
""" For the app backends to create the GLShared object.
Parameters
----------
name : str
The name.
ref : object
The reference.
""" |
if self._shared is not None:
raise RuntimeError('Can only set_shared once.')
self._shared = GLShared(name, ref) |
<SYSTEM_TASK:>
Add a reference for the backend object that gives access
<END_TASK>
<USER_TASK:>
Description:
def add_ref(self, name, ref):
""" Add a reference for the backend object that gives access
to the low level context. Used in vispy.app.canvas.backends.
The given name must match with that of previously added
references.
""" |
if self._name is None:
self._name = name
elif name != self._name:
raise RuntimeError('Contexts can only share between backends of '
'the same type')
self._refs.append(weakref.ref(ref)) |
<SYSTEM_TASK:>
Create an exact copy of this quaternion.
<END_TASK>
<USER_TASK:>
Description:
def copy(self):
""" Create an exact copy of this quaternion.
""" |
return Quaternion(self.w, self.x, self.y, self.z, False) |
<SYSTEM_TASK:>
Make the quaternion unit length.
<END_TASK>
<USER_TASK:>
Description:
def _normalize(self):
""" Make the quaternion unit length.
""" |
# Get length
L = self.norm()
if not L:
raise ValueError('Quaternion cannot have 0-length.')
# Correct
self.w /= L
self.x /= L
self.y /= L
self.z /= L |
<SYSTEM_TASK:>
Rotate a Point instance using this quaternion.
<END_TASK>
<USER_TASK:>
Description:
def rotate_point(self, p):
""" Rotate a Point instance using this quaternion.
""" |
# Prepare
p = Quaternion(0, p[0], p[1], p[2], False) # Do not normalize!
q1 = self.normalize()
q2 = self.inverse()
# Apply rotation
r = (q1*p)*q2
# Make point and return
return r.x, r.y, r.z |
<SYSTEM_TASK:>
Create a 4x4 homography matrix that represents the rotation
<END_TASK>
<USER_TASK:>
Description:
def get_matrix(self):
""" Create a 4x4 homography matrix that represents the rotation
of the quaternion.
""" |
# Init matrix (remember, a matrix, not an array)
a = np.zeros((4, 4), dtype=np.float32)
w, x, y, z = self.w, self.x, self.y, self.z
# First row
a[0, 0] = - 2.0 * (y * y + z * z) + 1.0
a[1, 0] = + 2.0 * (x * y + z * w)
a[2, 0] = + 2.0 * (x * z - y * w)
a[3, 0] = 0.0
# Second row
a[0, 1] = + 2.0 * (x * y - z * w)
a[1, 1] = - 2.0 * (x * x + z * z) + 1.0
a[2, 1] = + 2.0 * (z * y + x * w)
a[3, 1] = 0.0
# Third row
a[0, 2] = + 2.0 * (x * z + y * w)
a[1, 2] = + 2.0 * (y * z - x * w)
a[2, 2] = - 2.0 * (x * x + y * y) + 1.0
a[3, 2] = 0.0
# Fourth row
a[0, 3] = 0.0
a[1, 3] = 0.0
a[2, 3] = 0.0
a[3, 3] = 1.0
return a |
<SYSTEM_TASK:>
Classmethod to create a quaternion given the euler angles.
<END_TASK>
<USER_TASK:>
Description:
def create_from_euler_angles(cls, rx, ry, rz, degrees=False):
""" Classmethod to create a quaternion given the euler angles.
""" |
if degrees:
rx, ry, rz = np.radians([rx, ry, rz])
# Obtain quaternions
qx = Quaternion(np.cos(rx/2), 0, 0, np.sin(rx/2))
qy = Quaternion(np.cos(ry/2), 0, np.sin(ry/2), 0)
qz = Quaternion(np.cos(rz/2), np.sin(rz/2), 0, 0)
# Almost done
return qx*qy*qz |
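A short usage sketch tying the quaternion methods above together. The import path is an assumption (in VisPy these methods appear to live on ``vispy.util.quaternion.Quaternion``), as is the assumption that ``create_from_euler_angles`` is exposed as a classmethod, as its ``cls`` argument suggests:

import numpy as np
from vispy.util.quaternion import Quaternion  # assumed import path

# Build a rotation from Euler angles, then use it two ways.
q = Quaternion.create_from_euler_angles(90, 0, 0, degrees=True)
p = q.rotate_point((1.0, 0.0, 0.0))   # rotate a single 3D point
M = q.get_matrix()                    # equivalent 4x4 rotation matrix
print(np.round(p, 6), M.shape)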
<SYSTEM_TASK:>
Turn a possibly string enum into an integer enum.
<END_TASK>
<USER_TASK:>
Description:
def as_enum(enum):
""" Turn a possibly string enum into an integer enum.
""" |
if isinstance(enum, string_types):
try:
enum = getattr(gl, 'GL_' + enum.upper())
except AttributeError:
try:
enum = _internalformats['GL_' + enum.upper()]
except KeyError:
raise ValueError('Could not find int value for enum %r' % enum)
return enum |
<SYSTEM_TASK:>
Modify shading code so that we can write code once
<END_TASK>
<USER_TASK:>
Description:
def convert_shaders(convert, shaders):
""" Modify shading code so that we can write code once
and make it run "everywhere".
""" |
# New version of the shaders
out = []
if convert == 'es2':
for isfragment, shader in enumerate(shaders):
has_version = False
has_prec_float = False
has_prec_int = False
lines = []
# Iterate over lines
for line in shader.lstrip().splitlines():
if line.startswith('#version'):
has_version = True
continue
if line.startswith('precision '):
has_prec_float = has_prec_float or 'float' in line
has_prec_int = has_prec_int or 'int' in line
lines.append(line.rstrip())
# Write
# BUG: fails on WebGL (Chrome)
# if True:
# lines.insert(has_version, '#line 0')
if not has_prec_float:
lines.insert(has_version, 'precision highp float;')
if not has_prec_int:
lines.insert(has_version, 'precision highp int;')
# BUG: fails on WebGL (Chrome)
# if not has_version:
# lines.insert(has_version, '#version 100')
out.append('\n'.join(lines))
elif convert == 'desktop':
for isfragment, shader in enumerate(shaders):
has_version = False
lines = []
# Iterate over lines
for line in shader.lstrip().splitlines():
has_version = has_version or line.startswith('#version')
if line.startswith('precision '):
line = ''
for prec in (' highp ', ' mediump ', ' lowp '):
line = line.replace(prec, ' ')
lines.append(line.rstrip())
# Write
if not has_version:
lines.insert(0, '#version 120\n')
out.append('\n'.join(lines))
else:
raise ValueError('Cannot convert shaders to %r.' % convert)
return tuple(out) |
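A small illustration of what the conversion does, assuming the ``convert_shaders`` function above is in scope:

vert = "attribute vec2 a_pos;\nvoid main() { gl_Position = vec4(a_pos, 0.0, 1.0); }"
frag = "precision mediump float;\nvoid main() { gl_FragColor = vec4(1.0); }"

# 'desktop' strips precision qualifiers and prepends a #version directive;
# 'es2' instead inserts default precision statements when they are missing.
desk_vert, desk_frag = convert_shaders('desktop', (vert, frag))
print(desk_frag.splitlines()[0])  # -> #version 120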
<SYSTEM_TASK:>
Modify a desktop command so it works on es2.
<END_TASK>
<USER_TASK:>
Description:
def as_es2_command(command):
""" Modify a desktop command so it works on es2.
""" |
if command[0] == 'FUNC':
return (command[0], re.sub(r'^gl([A-Z])',
lambda m: m.group(1).lower(), command[1])) + command[2:]
if command[0] == 'SHADERS':
return command[:2] + convert_shaders('es2', command[2:])
if command[0] == 'UNIFORM':
return command[:-1] + (command[-1].tolist(),)
return command |
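For example, a desktop-style GL function command gets its ``gl`` prefix stripped and its first letter lower-cased (a sketch assuming the function above and its ``re``/``convert_shaders`` dependencies are in scope):

# ('FUNC', 'glEnable', 3042) -> ('FUNC', 'enable', 3042)
print(as_es2_command(('FUNC', 'glEnable', 3042)))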
<SYSTEM_TASK:>
Print the list of commands currently in the queue. If filter is
<END_TASK>
<USER_TASK:>
Description:
def show(self, filter=None):
""" Print the list of commands currently in the queue. If filter is
given, print only commands that match the filter.
""" |
for command in self._commands:
if command[0] is None: # or command[1] in self._invalid_objects:
continue # Skip null commands
if filter and command[0] != filter:
continue
t = []
for e in command:
if isinstance(e, np.ndarray):
t.append('array %s' % str(e.shape))
elif isinstance(e, str):
s = e.strip()
if len(s) > 20:
s = s[:18] + '... %i lines' % (e.count('\n')+1)
t.append(s)
else:
t.append(e)
print(tuple(t)) |
<SYSTEM_TASK:>
Flush all current commands to the GLIR interpreter.
<END_TASK>
<USER_TASK:>
Description:
def flush(self, parser):
""" Flush all current commands to the GLIR interpreter.
""" |
if self._verbose:
show = self._verbose if isinstance(self._verbose, str) else None
self.show(show)
parser.parse(self._filter(self.clear(), parser)) |
<SYSTEM_TASK:>
Merge this queue with another.
<END_TASK>
<USER_TASK:>
Description:
def associate(self, queue):
"""Merge this queue with another.
Both queues will use a shared command list and either one can be used
to fill or flush the shared queue.
""" |
assert isinstance(queue, GlirQueue)
if queue._shared is self._shared:
return
# merge commands
self._shared._commands.extend(queue.clear())
self._shared._verbose |= queue._shared._verbose
self._shared._associations[queue] = None
# update queue and all related queues to use the same _shared object
for ch in queue._shared._associations:
ch._shared = self._shared
self._shared._associations[ch] = None
queue._shared = self._shared |
<SYSTEM_TASK:>
Parse a single command.
<END_TASK>
<USER_TASK:>
Description:
def _parse(self, command):
""" Parse a single command.
""" |
cmd, id_, args = command[0], command[1], command[2:]
if cmd == 'CURRENT':
# This context is made current
self.env.clear()
self._gl_initialize()
self.env['fbo'] = args[0]
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, args[0])
elif cmd == 'FUNC':
# GL function call
args = [as_enum(a) for a in args]
try:
getattr(gl, id_)(*args)
except AttributeError:
logger.warning('Invalid gl command: %r' % id_)
elif cmd == 'CREATE':
# Creating an object
if args[0] is not None:
klass = self._classmap[args[0]]
self._objects[id_] = klass(self, id_)
else:
self._invalid_objects.add(id_)
elif cmd == 'DELETE':
# Deleting an object
ob = self._objects.get(id_, None)
if ob is not None:
self._objects[id_] = JUST_DELETED
ob.delete()
else:
# Doing something to an object
ob = self._objects.get(id_, None)
if ob == JUST_DELETED:
return
if ob is None:
if id_ not in self._invalid_objects:
raise RuntimeError('Cannot %s object %i because it '
'does not exist' % (cmd, id_))
return
# Triage over command. Order of commands is set so most
# common ones occur first.
if cmd == 'DRAW': # Program
ob.draw(*args)
elif cmd == 'TEXTURE': # Program
ob.set_texture(*args)
elif cmd == 'UNIFORM': # Program
ob.set_uniform(*args)
elif cmd == 'ATTRIBUTE': # Program
ob.set_attribute(*args)
elif cmd == 'DATA': # VertexBuffer, IndexBuffer, Texture
ob.set_data(*args)
elif cmd == 'SIZE': # VertexBuffer, IndexBuffer,
ob.set_size(*args) # Texture[1D, 2D, 3D], RenderBuffer
elif cmd == 'ATTACH': # FrameBuffer
ob.attach(*args)
elif cmd == 'FRAMEBUFFER': # FrameBuffer
ob.set_framebuffer(*args)
elif cmd == 'SHADERS': # Program
ob.set_shaders(*args)
elif cmd == 'WRAPPING': # Texture1D, Texture2D, Texture3D
ob.set_wrapping(*args)
elif cmd == 'INTERPOLATION': # Texture1D, Texture2D, Texture3D
ob.set_interpolation(*args)
else:
logger.warning('Invalid GLIR command %r' % cmd) |
<SYSTEM_TASK:>
Parse a list of commands.
<END_TASK>
<USER_TASK:>
Description:
def parse(self, commands):
""" Parse a list of commands.
""" |
# Get rid of dummy objects that represented deleted objects in
# the last parsing round.
to_delete = []
for id_, val in self._objects.items():
if val == JUST_DELETED:
to_delete.append(id_)
for id_ in to_delete:
self._objects.pop(id_)
for command in commands:
self._parse(command) |
<SYSTEM_TASK:>
Deal with compatibility; desktop does not have sprites
<END_TASK>
<USER_TASK:>
Description:
def _gl_initialize(self):
""" Deal with compatibility; desktop does not have sprites
enabled by default, while ES does.
""" |
if '.es' in gl.current_backend.__name__:
pass # ES2: no action required
else:
# Desktop, enable sprites
GL_VERTEX_PROGRAM_POINT_SIZE = 34370
GL_POINT_SPRITE = 34913
gl.glEnable(GL_VERTEX_PROGRAM_POINT_SIZE)
gl.glEnable(GL_POINT_SPRITE)
if self.capabilities['max_texture_size'] is None: # only do once
self.capabilities['gl_version'] = gl.glGetParameter(gl.GL_VERSION)
self.capabilities['max_texture_size'] = \
gl.glGetParameter(gl.GL_MAX_TEXTURE_SIZE)
this_version = self.capabilities['gl_version'].split(' ')[0]
this_version = LooseVersion(this_version) |
<SYSTEM_TASK:>
This function takes care of setting the shading code and
<END_TASK>
<USER_TASK:>
Description:
def set_shaders(self, vert, frag):
""" This function takes care of setting the shading code and
compiling+linking it into a working program object that is ready
to use.
""" |
self._linked = False
# Create temporary shader objects
vert_handle = gl.glCreateShader(gl.GL_VERTEX_SHADER)
frag_handle = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)
# For both vertex and fragment shader: set source, compile, check
for code, handle, type_ in [(vert, vert_handle, 'vertex'),
(frag, frag_handle, 'fragment')]:
gl.glShaderSource(handle, code)
gl.glCompileShader(handle)
status = gl.glGetShaderParameter(handle, gl.GL_COMPILE_STATUS)
if not status:
errors = gl.glGetShaderInfoLog(handle)
errormsg = self._get_error(code, errors, 4)
raise RuntimeError("Shader compilation error in %s:\n%s" %
(type_ + ' shader', errormsg))
# Attach shaders
gl.glAttachShader(self._handle, vert_handle)
gl.glAttachShader(self._handle, frag_handle)
# Link the program and check
gl.glLinkProgram(self._handle)
if not gl.glGetProgramParameter(self._handle, gl.GL_LINK_STATUS):
raise RuntimeError('Program linking error:\n%s'
% gl.glGetProgramInfoLog(self._handle))
# Now we can remove the shaders. We no longer need them and it
# frees up precious GPU memory:
# http://gamedev.stackexchange.com/questions/47910
gl.glDetachShader(self._handle, vert_handle)
gl.glDetachShader(self._handle, frag_handle)
gl.glDeleteShader(vert_handle)
gl.glDeleteShader(frag_handle)
# Now we know what variables will be used by the program
self._unset_variables = self._get_active_attributes_and_uniforms()
self._handles = {}
self._known_invalid = set()
self._linked = True |
<SYSTEM_TASK:>
Parses a single GLSL error and extracts the linenr and description
<END_TASK>
<USER_TASK:>
Description:
def _parse_error(self, error):
""" Parses a single GLSL error and extracts the linenr and description
Other GLIR implementations may omit this.
""" |
error = str(error)
# Nvidia
# 0(7): error C1008: undefined variable "MV"
m = re.match(r'(\d+)\((\d+)\)\s*:\s(.*)', error)
if m:
return int(m.group(2)), m.group(3)
# ATI / Intel
# ERROR: 0:131: '{' : syntax error parse error
m = re.match(r'ERROR:\s(\d+):(\d+):\s(.*)', error)
if m:
return int(m.group(2)), m.group(3)
# Nouveau
# 0:28(16): error: syntax error, unexpected ')', expecting '('
m = re.match(r'(\d+):(\d+)\((\d+)\):\s(.*)', error)
if m:
return int(m.group(2)), m.group(4)
# Other ...
return None, error |
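The three regexes correspond to the error formats of different GL drivers. A standalone sketch applying the same patterns to representative messages (sample strings taken from the comments above):

import re

samples = ['0(7): error C1008: undefined variable "MV"',      # Nvidia
           "ERROR: 0:131: '{' : syntax error parse error",     # ATI / Intel
           "0:28(16): error: syntax error, unexpected ')'"]    # Nouveau
patterns = [r'(\d+)\((\d+)\)\s*:\s(.*)',
            r'ERROR:\s(\d+):(\d+):\s(.*)',
            r'(\d+):(\d+)\((\d+)\):\s(.*)']
for err in samples:
    for pat in patterns:
        m = re.match(pat, err)
        if m:
            # Line number is always group 2; the description is the last group.
            print(int(m.group(2)), m.groups()[-1])
            break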
<SYSTEM_TASK:>
Get error and show the faulty line + some context
<END_TASK>
<USER_TASK:>
Description:
def _get_error(self, code, errors, indentation=0):
"""Get error and show the faulty line + some context
Other GLIR implementations may omit this.
""" |
# Init
results = []
lines = None
if code is not None:
lines = [line.strip() for line in code.split('\n')]
for error in errors.split('\n'):
# Strip; skip empty lines
error = error.strip()
if not error:
continue
# Separate line number from description (if we can)
linenr, error = self._parse_error(error)
if None in (linenr, lines):
results.append('%s' % error)
else:
results.append('on line %i: %s' % (linenr, error))
if linenr > 0 and linenr < len(lines):
results.append(' %s' % lines[linenr - 1])
# Add indentation and return
results = [' ' * indentation + r for r in results]
return '\n'.join(results) |
<SYSTEM_TASK:>
Set a texture sampler. Value is the id of the texture to link.
<END_TASK>
<USER_TASK:>
Description:
def set_texture(self, name, value):
""" Set a texture sampler. Value is the id of the texture to link.
""" |
if not self._linked:
raise RuntimeError('Cannot set uniform when program has no code')
# Get handle for the uniform, first try cache
handle = self._handles.get(name, -1)
if handle < 0:
if name in self._known_invalid:
return
handle = gl.glGetUniformLocation(self._handle, name)
self._unset_variables.discard(name) # Mark as set
self._handles[name] = handle # Store in cache
if handle < 0:
self._known_invalid.add(name)
logger.info('Variable %s is not an active uniform' % name)
return
# Program needs to be active in order to set uniforms
self.activate()
if True:
# Sampler: the value is the id of the texture
tex = self._parser.get_object(value)
if tex == JUST_DELETED:
return
if tex is None:
raise RuntimeError('Could not find texture with id %i' % value)
unit = len(self._samplers)
if name in self._samplers:
unit = self._samplers[name][-1] # Use existing unit
self._samplers[name] = tex._target, tex.handle, unit
gl.glUniform1i(handle, unit) |
<SYSTEM_TASK:>
Set a uniform value. Value is assumed to have been checked.
<END_TASK>
<USER_TASK:>
Description:
def set_uniform(self, name, type_, value):
""" Set a uniform value. Value is assumed to have been checked.
""" |
if not self._linked:
raise RuntimeError('Cannot set uniform when program has no code')
# Get handle for the uniform, first try cache
handle = self._handles.get(name, -1)
count = 1
if handle < 0:
if name in self._known_invalid:
return
handle = gl.glGetUniformLocation(self._handle, name)
self._unset_variables.discard(name) # Mark as set
# if we set a uniform_array, mark all as set
if not type_.startswith('mat'):
count = value.nbytes // (4 * self.ATYPEINFO[type_][0])
if count > 1:
for ii in range(count):
if '%s[%s]' % (name, ii) in self._unset_variables:
self._unset_variables.discard('%s[%s]' % (name, ii))
self._handles[name] = handle # Store in cache
if handle < 0:
self._known_invalid.add(name)
logger.info('Variable %s is not an active uniform' % name)
return
# Look up function to call
funcname = self.UTYPEMAP[type_]
func = getattr(gl, funcname)
# Program needs to be active in order to set uniforms
self.activate()
# Triage depending on type
if type_.startswith('mat'):
# Value is matrix, these gl funcs have alternative signature
transpose = False # OpenGL ES 2.0 does not support transpose
func(handle, 1, transpose, value)
else:
# Regular uniform
func(handle, count, value) |
<SYSTEM_TASK:>
Set an attribute value. Value is assumed to have been checked.
<END_TASK>
<USER_TASK:>
Description:
def set_attribute(self, name, type_, value):
""" Set an attribute value. Value is assumed to have been checked.
""" |
if not self._linked:
raise RuntimeError('Cannot set attribute when program has no code')
# Get handle for the attribute, first try cache
handle = self._handles.get(name, -1)
if handle < 0:
if name in self._known_invalid:
return
handle = gl.glGetAttribLocation(self._handle, name)
self._unset_variables.discard(name) # Mark as set
self._handles[name] = handle # Store in cache
if handle < 0:
self._known_invalid.add(name)
if value[0] != 0 and value[2] > 0: # VBO with offset
return # Probably an unused element in a structured VBO
logger.info('Variable %s is not an active attribute' % name)
return
# Program needs to be active in order to set uniforms
self.activate()
# Triage depending on VBO or tuple data
if value[0] == 0:
# Look up function call
funcname = self.ATYPEMAP[type_]
func = getattr(gl, funcname)
# Set data
self._attributes[name] = 0, handle, func, value[1:]
else:
# Get meta data
vbo_id, stride, offset = value
size, gtype, dtype = self.ATYPEINFO[type_]
# Get associated VBO
vbo = self._parser.get_object(vbo_id)
if vbo == JUST_DELETED:
return
if vbo is None:
raise RuntimeError('Could not find VBO with id %i' % vbo_id)
# Set data
func = gl.glVertexAttribPointer
args = size, gtype, gl.GL_FALSE, stride, offset
self._attributes[name] = vbo.handle, handle, func, args |
<SYSTEM_TASK:>
Simplify a transform to a single matrix transform, which makes it a lot
<END_TASK>
<USER_TASK:>
Description:
def as_matrix_transform(transform):
"""
Simplify a transform to a single matrix transform, which makes it a lot
faster to compute transformations.
Raises a TypeError if the transform cannot be simplified.
""" |
if isinstance(transform, ChainTransform):
matrix = np.identity(4)
for tr in transform.transforms:
# We need to do the matrix multiplication manually because VisPy
# somehow doesn't multiply matrices if there is a perspective
# component. The equation below looks like it's the wrong way
# around, but the VisPy matrices are transposed.
matrix = np.matmul(as_matrix_transform(tr).matrix, matrix)
return MatrixTransform(matrix)
elif isinstance(transform, InverseTransform):
matrix = as_matrix_transform(transform._inverse)
return MatrixTransform(matrix.inv_matrix)
elif isinstance(transform, NullTransform):
return MatrixTransform()
elif isinstance(transform, STTransform):
return transform.as_matrix()
elif isinstance(transform, MatrixTransform):
return transform
else:
raise TypeError("Could not simplify transform of type {0}".format(type(transform))) |
<SYSTEM_TASK:>
Places all nodes on a single circle.
<END_TASK>
<USER_TASK:>
Description:
def circular(adjacency_mat, directed=False):
"""Places all nodes on a single circle.
Parameters
----------
adjacency_mat : matrix or sparse
The graph adjacency matrix
directed : bool
Whether the graph is directed. If this is True, it will also
generate the vertices for arrows, which can be passed to an
ArrowVisual.
Yields
------
(node_vertices, line_vertices, arrow_vertices) : tuple
Yields the node and line vertices in a tuple. This layout only yields a
single time, and has no builtin animation
""" |
if issparse(adjacency_mat):
adjacency_mat = adjacency_mat.tocoo()
num_nodes = adjacency_mat.shape[0]
t = np.linspace(0, 2 * np.pi, num_nodes, endpoint=False, dtype=np.float32)
# Visual coordinate system is between 0 and 1, so generate a circle with
# radius 0.5 and center it at the point (0.5, 0.5).
node_coords = (0.5 * np.array([np.cos(t), np.sin(t)]) + 0.5).T
line_vertices, arrows = _straight_line_vertices(adjacency_mat,
node_coords, directed)
yield node_coords, line_vertices, arrows |
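A usage sketch, assuming ``circular`` and its helper ``_straight_line_vertices`` (not shown here) are in scope:

import numpy as np

# Small undirected graph as a dense adjacency matrix.
adjacency = np.array([[0, 1, 1, 0],
                      [1, 0, 1, 0],
                      [1, 1, 0, 1],
                      [0, 0, 1, 0]], dtype=float)

# The layout is a generator that yields exactly once.
node_coords, line_vertices, arrows = next(circular(adjacency))
print(node_coords.shape)  # (4, 2): one (x, y) per node on a circle of radius 0.5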
<SYSTEM_TASK:>
Set the data used to display this visual.
<END_TASK>
<USER_TASK:>
Description:
def set_data(self, pos=None, symbol='o', size=10., edge_width=1.,
edge_width_rel=None, edge_color='black', face_color='white',
scaling=False):
""" Set the data used to display this visual.
Parameters
----------
pos : array
The array of locations to display each symbol.
symbol : str
The style of symbol to draw (see Notes).
size : float or array
The symbol size in px.
edge_width : float | None
The width of the symbol outline in pixels.
edge_width_rel : float | None
The width as a fraction of marker size. Exactly one of
`edge_width` and `edge_width_rel` must be supplied.
edge_color : Color | ColorArray
The color used to draw each symbol outline.
face_color : Color | ColorArray
The color used to draw each symbol interior.
scaling : bool
If set to True, the markers scale when zooming.
Notes
-----
Allowed style strings are: disc, arrow, ring, clobber, square, diamond,
vbar, hbar, cross, tailed_arrow, x, triangle_up, triangle_down,
and star.
""" |
assert (isinstance(pos, np.ndarray) and
pos.ndim == 2 and pos.shape[1] in (2, 3))
if (edge_width is not None) + (edge_width_rel is not None) != 1:
raise ValueError('exactly one of edge_width and edge_width_rel '
'must be non-None')
if edge_width is not None:
if edge_width < 0:
raise ValueError('edge_width cannot be negative')
else:
if edge_width_rel < 0:
raise ValueError('edge_width_rel cannot be negative')
self.symbol = symbol
self.scaling = scaling
edge_color = ColorArray(edge_color).rgba
if len(edge_color) == 1:
edge_color = edge_color[0]
face_color = ColorArray(face_color).rgba
if len(face_color) == 1:
face_color = face_color[0]
n = len(pos)
data = np.zeros(n, dtype=[('a_position', np.float32, 3),
('a_fg_color', np.float32, 4),
('a_bg_color', np.float32, 4),
('a_size', np.float32, 1),
('a_edgewidth', np.float32, 1)])
data['a_fg_color'] = edge_color
data['a_bg_color'] = face_color
if edge_width is not None:
data['a_edgewidth'] = edge_width
else:
data['a_edgewidth'] = size*edge_width_rel
data['a_position'][:, :pos.shape[1]] = pos
data['a_size'] = size
self.shared_program['u_antialias'] = self.antialias # XXX make prop
self._data = data
self._vbo.set_data(data)
self.shared_program.bind(self._vbo)
self.update() |
<SYSTEM_TASK:>
Get the total bounds based on the visuals present in the scene
<END_TASK>
<USER_TASK:>
Description:
def get_scene_bounds(self, dim=None):
"""Get the total bounds based on the visuals present in the scene
Parameters
----------
dim : int | None
Dimension to return.
Returns
-------
bounds : list | tuple
If ``dim is None``, Returns a list of 3 tuples, otherwise
the bounds for the requested dimension.
""" |
# todo: handle sub-children
# todo: handle transformations
# Init
bounds = [(np.inf, -np.inf), (np.inf, -np.inf), (np.inf, -np.inf)]
# Get bounds of all children
for ob in self.scene.children:
if hasattr(ob, 'bounds'):
for axis in (0, 1, 2):
if (dim is not None) and dim != axis:
continue
b = ob.bounds(axis)
if b is not None:
b = min(b), max(b) # Ensure correct order
bounds[axis] = (min(bounds[axis][0], b[0]),
max(bounds[axis][1], b[1]))
# Set defaults
for axis in (0, 1, 2):
if any(np.isinf(bounds[axis])):
bounds[axis] = -1, 1
if dim is not None:
return bounds[dim]
else:
return bounds |
<SYSTEM_TASK:>
Helper to turn val into array and clip between 0 and 1
<END_TASK>
<USER_TASK:>
Description:
def _array_clip_val(val):
"""Helper to turn val into array and clip between 0 and 1""" |
val = np.array(val)
if val.max() > 1 or val.min() < 0:
logger.warning('value will be clipped between 0 and 1')
val[...] = np.clip(val, 0, 1)
return val |
<SYSTEM_TASK:>
Extend a ColorArray with new colors
<END_TASK>
<USER_TASK:>
Description:
def extend(self, colors):
"""Extend a ColorArray with new colors
Parameters
----------
colors : instance of ColorArray
The new colors.
""" |
colors = ColorArray(colors)
self._rgba = np.vstack((self._rgba, colors._rgba))
return self |
<SYSTEM_TASK:>
Set the color using an Nx4 array of RGBA floats
<END_TASK>
<USER_TASK:>
Description:
def rgba(self, val):
"""Set the color using an Nx4 array of RGBA floats""" |
# Note: all other attribute sets get routed here!
# This method is meant to do the heavy lifting of setting data
rgba = _user_to_rgba(val, expand=False)
if self._rgba is None:
self._rgba = rgba # only on init
else:
self._rgba[:, :rgba.shape[1]] = rgba |
<SYSTEM_TASK:>
The ViewBox received a mouse event; update transform
<END_TASK>
<USER_TASK:>
Description:
def viewbox_mouse_event(self, event):
""" The ViewBox received a mouse event; update transform
accordingly.
The default implementation adjusts the scale factor when scrolling.
Parameters
----------
event : instance of Event
The event.
""" |
BaseCamera.viewbox_mouse_event(self, event)
if event.type == 'mouse_wheel':
s = 1.1 ** - event.delta[1]
self._scale_factor *= s
if self._distance is not None:
self._distance *= s
self.view_changed() |
<SYSTEM_TASK:>
Reset the camera view using the known limits.
<END_TASK>
<USER_TASK:>
Description:
def _set_range(self, init):
""" Reset the camera view using the known limits.
""" |
if init and (self._scale_factor is not None):
return # We don't have to set our scale factor
# Get window size (and store factor now to sync with resizing)
w, h = self._viewbox.size
w, h = float(w), float(h)
# Get range and translation for x and y
x1, y1, z1 = self._xlim[0], self._ylim[0], self._zlim[0]
x2, y2, z2 = self._xlim[1], self._ylim[1], self._zlim[1]
rx, ry, rz = (x2 - x1), (y2 - y1), (z2 - z1)
# Correct ranges for window size. Note that the window width
# influences the x and y data range, while the height influences
# the z data range.
if w / h > 1:
rx /= w / h
ry /= w / h
else:
rz /= h / w
# Convert to screen coordinates. In screen x, only x and y have effect.
# In screen y, all three dimensions have effect. The idea of the lines
# below is to calculate the range on screen when that will fit the
# data under any rotation.
rxs = (rx**2 + ry**2)**0.5
rys = (rx**2 + ry**2 + rz**2)**0.5
self.scale_factor = max(rxs, rys) * 1.04 |
<SYSTEM_TASK:>
The viewbox received a mouse event; update transform
<END_TASK>
<USER_TASK:>
Description:
def viewbox_mouse_event(self, event):
"""
The viewbox received a mouse event; update transform
accordingly.
Parameters
----------
event : instance of Event
The event.
""" |
if event.handled or not self.interactive:
return
PerspectiveCamera.viewbox_mouse_event(self, event)
if event.type == 'mouse_release':
self._event_value = None # Reset
elif event.type == 'mouse_press':
event.handled = True
elif event.type == 'mouse_move':
if event.press_event is None:
return
modifiers = event.mouse_event.modifiers
p1 = event.mouse_event.press_event.pos
p2 = event.mouse_event.pos
d = p2 - p1
if 1 in event.buttons and not modifiers:
# Rotate
self._update_rotation(event)
elif 2 in event.buttons and not modifiers:
# Zoom
if self._event_value is None:
self._event_value = (self._scale_factor, self._distance)
zoomy = (1 + self.zoom_factor) ** d[1]
self.scale_factor = self._event_value[0] * zoomy
# Modify distance if its given
if self._distance is not None:
self._distance = self._event_value[1] * zoomy
self.view_changed()
elif 1 in event.buttons and keys.SHIFT in modifiers:
# Translate
norm = np.mean(self._viewbox.size)
if self._event_value is None or len(self._event_value) == 2:
self._event_value = self.center
dist = (p1 - p2) / norm * self._scale_factor
dist[1] *= -1
# Black magic part 1: turn 2D into 3D translations
dx, dy, dz = self._dist_to_trans(dist)
# Black magic part 2: take up-vector and flipping into account
ff = self._flip_factors
up, forward, right = self._get_dim_vectors()
dx, dy, dz = right * dx + forward * dy + up * dz
dx, dy, dz = ff[0] * dx, ff[1] * dy, dz * ff[2]
c = self._event_value
self.center = c[0] + dx, c[1] + dy, c[2] + dz
elif 2 in event.buttons and keys.SHIFT in modifiers:
# Change fov
if self._event_value is None:
self._event_value = self._fov
fov = self._event_value - d[1] / 5.0
self.fov = min(180.0, max(0.0, fov)) |
<SYSTEM_TASK:>
Determine if the user requested interactive mode.
<END_TASK>
<USER_TASK:>
Description:
def is_interactive(self):
""" Determine if the user requested interactive mode.
""" |
# The Python interpreter sets sys.flags correctly, so use them!
if sys.flags.interactive:
return True
# IPython does not set sys.flags when -i is specified, so first
# check whether it is already imported.
if '__IPYTHON__' not in dir(six.moves.builtins):
return False
# Then we check the application singleton and determine based on
# a variable it sets.
try:
from IPython.config.application import Application as App
return App.initialized() and App.instance().interact
except (ImportError, AttributeError):
return False |
<SYSTEM_TASK:>
Enter the native GUI event loop.
<END_TASK>
<USER_TASK:>
Description:
def run(self, allow_interactive=True):
""" Enter the native GUI event loop.
Parameters
----------
allow_interactive : bool
Is the application allowed to handle interactive mode for console
terminals? By default, typing ``python -i main.py`` results in
an interactive shell that also regularly calls the VisPy event
loop. In this specific case, the run() function will terminate
immediately and rely on the interpreter's input loop to be run
after script execution.
""" |
if allow_interactive and self.is_interactive():
inputhook.set_interactive(enabled=True, app=self)
else:
return self._backend._vispy_run() |
<SYSTEM_TASK:>
Select a backend by name. See class docstring for details.
<END_TASK>
<USER_TASK:>
Description:
def _use(self, backend_name=None):
"""Select a backend by name. See class docstring for details.
""" |
# See if we're in a specific testing mode, if so DONT check to see
# if it's a valid backend. If it isn't, it's a good thing we
# get an error later because we should have decorated our test
# with requires_application()
test_name = os.getenv('_VISPY_TESTING_APP', None)
# Check whether the given name is valid
if backend_name is not None:
if backend_name.lower() == 'default':
backend_name = None # Explicitly use default, avoid using test
elif backend_name.lower() not in BACKENDMAP:
raise ValueError('backend_name must be one of %s or None, not '
'%r' % (BACKEND_NAMES, backend_name))
elif test_name is not None:
backend_name = test_name.lower()
assert backend_name in BACKENDMAP
# Should we try and load any backend, or just this specific one?
try_others = backend_name is None
# Get backends to try ...
imported_toolkits = [] # Backends for which the native lib is imported
backends_to_try = []
if not try_others:
# We should never hit this, since we check above
assert backend_name.lower() in BACKENDMAP.keys()
# Add it
backends_to_try.append(backend_name.lower())
else:
# See if a backend is loaded
for name, module_name, native_module_name in CORE_BACKENDS:
if native_module_name and native_module_name in sys.modules:
imported_toolkits.append(name.lower())
backends_to_try.append(name.lower())
# See if a default is given
default_backend = config['default_backend'].lower()
if default_backend.lower() in BACKENDMAP.keys():
if default_backend not in backends_to_try:
backends_to_try.append(default_backend)
# After this, try each one
for name, module_name, native_module_name in CORE_BACKENDS:
name = name.lower()
if name not in backends_to_try:
backends_to_try.append(name)
# Now try each one
for key in backends_to_try:
name, module_name, native_module_name = BACKENDMAP[key]
TRIED_BACKENDS.append(name)
mod_name = 'backends.' + module_name
__import__(mod_name, globals(), level=1)
mod = getattr(backends, module_name)
if not mod.available:
msg = ('Could not import backend "%s":\n%s'
% (name, str(mod.why_not)))
if not try_others:
# Fail if user wanted to use a specific backend
raise RuntimeError(msg)
elif key in imported_toolkits:
# Warn if we were unable to use an already imported toolkit
msg = ('Although %s is already imported, the %s backend '
'could not\nbe used ("%s"). \nNote that running '
'multiple GUI toolkits simultaneously can cause '
'side effects.' %
(native_module_name, name, str(mod.why_not)))
logger.warning(msg)
else:
# Inform otherwise
logger.info(msg)
else:
# Success!
self._backend_module = mod
logger.debug('Selected backend %s' % module_name)
break
else:
raise RuntimeError('Could not import any of the backends. '
'You need to install any of %s. We recommend '
'PyQt' % [b[0] for b in CORE_BACKENDS])
# Store classes for app backend and canvas backend
self._backend = self.backend_module.ApplicationBackend() |
<SYSTEM_TASK:>
Helper to extract list of mods from event
<END_TASK>
<USER_TASK:>
Description:
def _get_mods(evt):
"""Helper to extract list of mods from event""" |
mods = []
mods += [keys.CONTROL] if evt.ControlDown() else []
mods += [keys.ALT] if evt.AltDown() else []
mods += [keys.SHIFT] if evt.ShiftDown() else []
mods += [keys.META] if evt.MetaDown() else []
return mods |
<SYSTEM_TASK:>
Helper to convert from wx keycode to vispy keycode
<END_TASK>
<USER_TASK:>
Description:
def _process_key(evt):
"""Helper to convert from wx keycode to vispy keycode""" |
key = evt.GetKeyCode()
if key in KEYMAP:
return KEYMAP[key], ''
if 97 <= key <= 122:
key -= 32
if key >= 32 and key <= 127:
return keys.Key(chr(key)), chr(key)
else:
return None, None |
<SYSTEM_TASK:>
Check if a node is a child of the current node
<END_TASK>
<USER_TASK:>
Description:
def is_child(self, node):
"""Check if a node is a child of the current node
Parameters
----------
node : instance of Node
The potential child.
Returns
-------
child : bool
Whether or not the node is a child.
""" |
if node in self.children:
return True
for c in self.children:
if c.is_child(node):
return True
return False |
<SYSTEM_TASK:>
The first ancestor of this node that is a SubScene instance, or self
<END_TASK>
<USER_TASK:>
Description:
def scene_node(self):
"""The first ancestor of this node that is a SubScene instance, or self
if no such node exists.
""" |
if self._scene_node is None:
from .subscene import SubScene
p = self.parent
while True:
if isinstance(p, SubScene) or p is None:
self._scene_node = p
break
p = p.parent
if self._scene_node is None:
self._scene_node = self
return self._scene_node |
<SYSTEM_TASK:>
Emit an event to inform listeners that properties of this Node have
<END_TASK>
<USER_TASK:>
Description:
def update(self):
"""
Emit an event to inform listeners that properties of this Node have
changed. Also request a canvas update.
""" |
self.events.update()
c = getattr(self, 'canvas', None)
if c is not None:
c.update(node=self) |
<SYSTEM_TASK:>
Return the list of parents starting from this node. The chain ends
<END_TASK>
<USER_TASK:>
Description:
def parent_chain(self):
"""
Return the list of parents starting from this node. The chain ends
at the first node with no parents.
""" |
chain = [self]
while True:
try:
parent = chain[-1].parent
except Exception:
break
if parent is None:
break
chain.append(parent)
return chain |
<SYSTEM_TASK:>
Helper function to actually construct the tree
<END_TASK>
<USER_TASK:>
Description:
def _describe_tree(self, prefix, with_transform):
"""Helper function to actuall construct the tree""" |
extra = ': "%s"' % self.name if self.name is not None else ''
if with_transform:
extra += (' [%s]' % self.transform.__class__.__name__)
output = ''
if len(prefix) > 0:
output += prefix[:-3]
output += ' +--'
output += '%s%s\n' % (self.__class__.__name__, extra)
n_children = len(self.children)
for ii, child in enumerate(self.children):
sub_prefix = prefix + (' ' if ii+1 == n_children else ' |')
output += child._describe_tree(sub_prefix, with_transform)
return output |
<SYSTEM_TASK:>
Return the common parent of two entities
<END_TASK>
<USER_TASK:>
Description:
def common_parent(self, node):
"""
Return the common parent of two entities
If the entities have no common parent, return None.
Parameters
----------
node : instance of Node
The other node.
Returns
-------
parent : instance of Node | None
The parent.
""" |
p1 = self.parent_chain()
p2 = node.parent_chain()
for p in p1:
if p in p2:
return p
return None |
<SYSTEM_TASK:>
Return a list describing the path from this node to a child node
<END_TASK>
<USER_TASK:>
Description:
def node_path_to_child(self, node):
"""Return a list describing the path from this node to a child node
If *node* is not a (grand)child of this node, then raise RuntimeError.
Parameters
----------
node : instance of Node
The child node.
Returns
-------
path : list | None
The path.
""" |
if node is self:
return []
# Go up from the child node as far as we can
path1 = [node]
child = node
while child.parent is not None:
child = child.parent
path1.append(child)
# Early exit
if child is self:
return list(reversed(path1))
# Verify that we're not cut off
if path1[-1].parent is None:
raise RuntimeError('%r is not a child of %r' % (node, self))
def _is_child(path, parent, child):
path.append(parent)
if child in parent.children:
return path
else:
for c in parent.children:
possible_path = _is_child(path[:], c, child)
if possible_path:
return possible_path
return None
# Search from the parent towards the child
path2 = _is_child([], self, path1[-1])
if not path2:
raise RuntimeError('%r is not a child of %r' % (node, self))
# Return
return path2 + list(reversed(path1)) |
<SYSTEM_TASK:>
Return two lists describing the path from this node to another
<END_TASK>
<USER_TASK:>
Description:
def node_path(self, node):
"""Return two lists describing the path from this node to another
Parameters
----------
node : instance of Node
The other node.
Returns
-------
p1 : list
First path (see below).
p2 : list
Second path (see below).
Notes
-----
The first list starts with this node and ends with the common parent
between the endpoint nodes. The second list contains the remainder of
the path from the common parent to the specified ending node.
For example, consider the following scenegraph::
A --- B --- C --- D
\
--- E --- F
Calling `D.node_path(F)` will return::
([D, C, B], [E, F])
""" |
p1 = self.parent_chain()
p2 = node.parent_chain()
cp = None
for p in p1:
if p in p2:
cp = p
break
if cp is None:
raise RuntimeError("No single-path common parent between nodes %s "
"and %s." % (self, node))
p1 = p1[:p1.index(cp)+1]
p2 = p2[:p2.index(cp)][::-1]
return p1, p2 |
<SYSTEM_TASK:>
Return the list of transforms along the path to another node.
<END_TASK>
<USER_TASK:>
Description:
def node_path_transforms(self, node):
"""Return the list of transforms along the path to another node.
The transforms are listed in reverse order, such that the last
transform should be applied first when mapping from this node to
the other.
Parameters
----------
node : instance of Node
The other node.
Returns
-------
transforms : list
A list of Transform instances.
""" |
a, b = self.node_path(node)
return ([n.transform for n in a[:-1]] +
[n.transform.inverse for n in b])[::-1] |
<SYSTEM_TASK:>
The method that reads a line and processes it.
<END_TASK>
<USER_TASK:>
Description:
def readLine(self):
""" The method that reads a line and processes it.
""" |
# Read line
line = self._f.readline().decode('ascii', 'ignore')
if not line:
raise EOFError()
line = line.strip()
if line.startswith('v '):
# self._vertices.append( *self.readTuple(line) )
self._v.append(self.readTuple(line))
elif line.startswith('vt '):
self._vt.append(self.readTuple(line, 3))
elif line.startswith('vn '):
self._vn.append(self.readTuple(line))
elif line.startswith('f '):
self._faces.append(self.readFace(line))
elif line.startswith('#'):
pass # Comment
elif line.startswith('mtllib '):
logger.warning('Notice reading .OBJ: material properties are '
'ignored.')
elif any(line.startswith(x) for x in ('g ', 's ', 'o ', 'usemtl ')):
pass # Ignore groups and smoothing groups, obj names, material
elif not line.strip():
pass
else:
logger.warning('Notice reading .OBJ: ignoring %s command.'
% line.strip()) |
<SYSTEM_TASK:>
Converts gathered lists to numpy arrays and creates
<END_TASK>
<USER_TASK:>
Description:
def finish(self):
""" Converts gathere lists to numpy arrays and creates
BaseMesh instance.
""" |
self._vertices = np.array(self._vertices, 'float32')
if self._faces:
self._faces = np.array(self._faces, 'uint32')
else:
# Use vertices only
self._vertices = np.array(self._v, 'float32')
self._faces = None
if self._normals:
self._normals = np.array(self._normals, 'float32')
else:
self._normals = self._calculate_normals()
if self._texcords:
self._texcords = np.array(self._texcords, 'float32')
else:
self._texcords = None
return self._vertices, self._faces, self._normals, self._texcords |
<SYSTEM_TASK:>
This classmethod is the entry point for writing mesh data to OBJ.
<END_TASK>
<USER_TASK:>
Description:
def write(cls, fname, vertices, faces, normals,
texcoords, name='', reshape_faces=True):
""" This classmethod is the entry point for writing mesh data to OBJ.
Parameters
----------
fname : string
The filename to write to. Must end with ".obj" or ".gz".
vertices : numpy array
The vertex data
faces : numpy array
The face data
texcoords : numpy array
The texture coordinate per vertex
name : str
The name of the object (e.g. 'teapot')
reshape_faces : bool
Reshape the `faces` array to (Nf, 3). Set to `False`
if you need to write a mesh with non triangular faces.
""" |
# Open file
fmt = op.splitext(fname)[1].lower()
if fmt not in ('.obj', '.gz'):
raise ValueError('Filename must end with .obj or .gz, not "%s"'
% (fmt,))
opener = open if fmt == '.obj' else gzip_open
f = opener(fname, 'wb')
try:
writer = WavefrontWriter(f)
writer.writeMesh(vertices, faces, normals,
texcoords, name, reshape_faces=reshape_faces)
except EOFError:
pass
finally:
f.close() |
<SYSTEM_TASK:>
Write the face info to the next line.
<END_TASK>
<USER_TASK:>
Description:
def writeFace(self, val, what='f'):
""" Write the face info to the net line.
""" |
# OBJ counts from 1
val = [v + 1 for v in val]
# Make string
if self._hasValues and self._hasNormals:
val = ' '.join(['%i/%i/%i' % (v, v, v) for v in val])
elif self._hasNormals:
val = ' '.join(['%i//%i' % (v, v) for v in val])
elif self._hasValues:
val = ' '.join(['%i/%i' % (v, v) for v in val])
else:
val = ' '.join(['%i' % v for v in val])
# Write line
self.writeLine('%s %s' % (what, val)) |
<SYSTEM_TASK:>
Compute cross product between list of 3D vectors
<END_TASK>
<USER_TASK:>
Description:
def _fast_cross_3d(x, y):
"""Compute cross product between list of 3D vectors
Much faster than np.cross() when the number of cross products
becomes large (>500). This is because np.cross() methods become
less memory efficient at this stage.
Parameters
----------
x : array
Input array 1.
y : array
Input array 2.
Returns
-------
z : array
Cross product of x and y.
Notes
-----
x and y must both be 2D row vectors. One must have length 1, or both
lengths must match.
""" |
assert x.ndim == 2
assert y.ndim == 2
assert x.shape[1] == 3
assert y.shape[1] == 3
assert (x.shape[0] == 1 or y.shape[0] == 1) or x.shape[0] == y.shape[0]
if max([x.shape[0], y.shape[0]]) >= 500:
return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
else:
return np.cross(x, y) |
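A quick check of the helper above (assuming it and NumPy are in scope): for small inputs it simply defers to ``np.cross``, and for large ones the column-wise formula gives the same result:

import numpy as np

x = np.random.rand(1000, 3)
y = np.random.rand(1000, 3)
assert np.allclose(_fast_cross_3d(x, y), np.cross(x, y))

# Broadcasting against a single row also works, as the docstring describes.
assert np.allclose(_fast_cross_3d(x[:1], y), np.cross(x[:1], y))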
<SYSTEM_TASK:>
Efficiently compute vertex normals for triangulated surface
<END_TASK>
<USER_TASK:>
Description:
def _calculate_normals(rr, tris):
"""Efficiently compute vertex normals for triangulated surface""" |
# ensure highest precision for our summation/vectorization "trick"
rr = rr.astype(np.float64)
# first, compute triangle normals
r1 = rr[tris[:, 0], :]
r2 = rr[tris[:, 1], :]
r3 = rr[tris[:, 2], :]
tri_nn = _fast_cross_3d((r2 - r1), (r3 - r1))
# Triangle normals and areas
size = np.sqrt(np.sum(tri_nn * tri_nn, axis=1))
size[size == 0] = 1.0 # prevent ugly divide-by-zero
tri_nn /= size[:, np.newaxis]
npts = len(rr)
# the following code replaces this, but is faster (vectorized):
#
# for p, verts in enumerate(tris):
# nn[verts, :] += tri_nn[p, :]
#
nn = np.zeros((npts, 3))
for verts in tris.T: # note this only loops 3x (number of verts per tri)
for idx in range(3): # x, y, z
nn[:, idx] += np.bincount(verts.astype(np.int32),
tri_nn[:, idx], minlength=npts)
size = np.sqrt(np.sum(nn * nn, axis=1))
size[size == 0] = 1.0 # prevent ugly divide-by-zero
nn /= size[:, np.newaxis]
return nn |
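A tiny sanity check, assuming the two helpers above are in scope: for a flat square in the z=0 plane, every vertex normal should come out as (0, 0, 1):

import numpy as np

# Unit square split into two triangles.
rr = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]], dtype=float)
tris = np.array([[0, 1, 2], [0, 2, 3]], dtype=np.int32)

nn = _calculate_normals(rr, tris)
assert np.allclose(nn, [[0, 0, 1]] * 4)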
<SYSTEM_TASK:>
Parse the lines, and fill self.line_fields accordingly.
<END_TASK>
<USER_TASK:>
Description:
def parse(self):
"""Parse the lines, and fill self.line_fields accordingly.""" |
for line in self.lines:
# Parse the line
field_defs = self.parse_line(line)
fields = []
# Convert field parameters into Field objects
for (kind, options) in field_defs:
logger.debug("Creating field %s(%r)", kind, options)
fields.append(self.field_registry.create(kind, **options))
# Add the list of Field objects to the 'fields per line'.
self.line_fields.append(fields)
# Pre-fill the list of widgets
for field in fields:
self.widgets[field] = None |
<SYSTEM_TASK:>
Compute the relative position of the fields on a given line.
<END_TASK>
<USER_TASK:>
Description:
def compute_positions(cls, screen_width, line):
"""Compute the relative position of the fields on a given line.
Args:
screen_width (int): the width of the screen
line (mpdlcd.display_fields.Field list): the list of fields on the
line
Returns:
((int, mpdlcd.display_fields.Field) list): the positions of fields,
as (position, field) tuples.
Raises:
FormatError: if the line contains more than one flexible field, or
is too long for the screen size.
""" |
# First index
left = 1
# Last index
right = screen_width + 1
# Current 'flexible' field
flexible = None
# Compute the space to the left and to the right of the (optional)
# flexible field.
for field in line:
if field.is_flexible():
if flexible:
raise FormatError(
'There can be only one flexible field per line.')
flexible = field
elif not flexible:
left += field.width
else:
# Met a 'flexible', computing from the right
right -= field.width
# Available space for the 'flexible' field
available = right - left
if available <= 0:
raise FormatError("Too much data for screen width")
if flexible:
if available < 1:
raise FormatError(
"Not enough space to display flexible field %s" %
flexible.name)
flexible.width = available
positions = []
left = 1
for field in line:
positions.append((left, field))
left += field.width
logger.debug('Positions are %r', positions)
return positions |
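To make the layout rule concrete, here is a worked example with stand-in field objects (the ``FakeField`` class is hypothetical; only ``name``, ``width`` and ``is_flexible()`` matter to the computation above):

class FakeField(object):
    # Minimal stand-in for mpdlcd.display_fields.Field, for illustration only.
    def __init__(self, name, width, flexible=False):
        self.name = name
        self.width = width
        self._flexible = flexible

    def is_flexible(self):
        return self._flexible


line = [FakeField('artist', 6), FakeField('title', 0, flexible=True),
        FakeField('time', 5)]
# On a 20-column screen, compute_positions gives the flexible 'title' field
# the remaining 20 - 6 - 5 = 9 columns and returns 1-based positions:
# [(1, artist), (7, title), (16, time)].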
<SYSTEM_TASK:>
Add the pattern to a screen.
<END_TASK>
<USER_TASK:>
Description:
def add_to_screen(self, screen_width, screen):
"""Add the pattern to a screen.
Also fills self.widgets.
Args:
screen_width (int): the width of the screen
screen (lcdprod.Screen): the screen to fill.
""" |
for lineno, fields in enumerate(self.line_fields):
for left, field in self.compute_positions(screen_width, fields):
logger.debug(
"Adding field %s to screen %s at x=%d->%d, y=%d",
field, screen.ref, left, left + field.width - 1, 1 + lineno,
)
self.widgets[field] = field.add_to_screen(screen, left, 1 + lineno)
self.register_hooks(field) |
<SYSTEM_TASK:>
Register a field on its target hooks.
<END_TASK>
<USER_TASK:>
Description:
def register_hooks(self, field):
"""Register a field on its target hooks.""" |
for hook, subhooks in field.register_hooks():
self.hooks[hook].append(field)
self.subhooks[hook] |= set(subhooks) |
<SYSTEM_TASK:>
Called whenever the data for a hook changed.
<END_TASK>
<USER_TASK:>
Description:
def hook_changed(self, hook, new_data):
"""Called whenever the data for a hook changed.""" |
for field in self.hooks[hook]:
widget = self.widgets[field]
field.hook_changed(hook, widget, new_data) |
<SYSTEM_TASK:>
Add a pattern to the list.
<END_TASK>
<USER_TASK:>
Description:
def add(self, pattern_txt):
"""Add a pattern to the list.
Args:
pattern_txt (str list): the pattern, as a list of lines.
""" |
self.patterns[len(pattern_txt)] = pattern_txt
low = 0
high = len(pattern_txt) - 1
while not pattern_txt[low]:
low += 1
while not pattern_txt[high]:
high -= 1
min_pattern = pattern_txt[low:high + 1]
self.min_patterns[len(min_pattern)] = min_pattern |
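For illustration, the blank-line trimming above behaves like this (the pattern strings are made up; real mpdlcd patterns use their own field syntax):

pattern_txt = ['', '{artist} - {title}', '{elapsed}', '']
# low stops at index 1 and high at index 2, so the trimmed pattern is
# ['{artist} - {title}', '{elapsed}'] and is stored as min_patterns[2],
# while the full 4-line version remains available as patterns[4]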
<SYSTEM_TASK:>
Decorator to convert argument to array.
<END_TASK>
<USER_TASK:>
Description:
def arg_to_array(func):
"""
Decorator to convert argument to array.
Parameters
----------
func : function
The function to decorate.
Returns
-------
func : function
The decorated function.
""" |
def fn(self, arg, *args, **kwargs):
"""Function
Parameters
----------
arg : array-like
Argument to convert.
*args : tuple
Arguments.
**kwargs : dict
Keyword arguments.
Returns
-------
value : object
The return value of the function.
"""
return func(self, np.array(arg), *args, **kwargs)
return fn |
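A minimal usage sketch of the decorator (the Doubler class and its map method are hypothetical):

import numpy as np

class Doubler(object):
    @arg_to_array
    def map(self, coords):
        # coords is always an ndarray here, even when a plain list is passed in
        return coords * 2.0

Doubler().map([[1, 2], [3, 4]])   # -> array([[2., 4.], [6., 8.]])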
<SYSTEM_TASK:>
Decorator for converting argument to vec4 format suitable for 4x4 matrix
<END_TASK>
<USER_TASK:>
Description:
def arg_to_vec4(func, self_, arg, *args, **kwargs):
"""
Decorator for converting argument to vec4 format suitable for 4x4 matrix
multiplication.
[x, y] => [[x, y, 0, 1]]
[x, y, z] => [[x, y, z, 1]]
[[x1, y1], [[x1, y1, 0, 1],
[x2, y2], => [x2, y2, 0, 1],
[x3, y3]] [x3, y3, 0, 1]]
If 1D input is provided, then the return value will be flattened.
Accepts input of any dimension, as long as shape[-1] <= 4
Alternatively, any class may define its own transform conversion interface
by defining a _transform_in() method that returns an array with shape
(.., 4), and a _transform_out() method that accepts the same array shape
and returns a new (mapped) object.
""" |
if isinstance(arg, (tuple, list, np.ndarray)):
arg = np.array(arg)
flatten = arg.ndim == 1
arg = as_vec4(arg)
ret = func(self_, arg, *args, **kwargs)
if flatten and ret is not None:
return ret.flatten()
return ret
elif hasattr(arg, '_transform_in'):
arr = arg._transform_in()
ret = func(self_, arr, *args, **kwargs)
return arg._transform_out(ret)
else:
raise TypeError("Cannot convert argument to 4D vector: %s" % arg) |
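The padding rule can be sketched with plain NumPy; as_vec4_sketch below approximates the as_vec4 helper used above and is not the library function itself:

import numpy as np

def as_vec4_sketch(obj, default=(0., 0., 0., 1.)):
    # pad the last axis out to 4 columns, filling the missing ones from `default`
    arr = np.atleast_2d(np.asarray(obj, dtype=float))
    out = np.empty(arr.shape[:-1] + (4,))
    n = arr.shape[-1]
    out[..., :n] = arr
    out[..., n:] = np.asarray(default, dtype=float)[n:]
    return out

as_vec4_sketch([1, 2])        # -> array([[1., 2., 0., 1.]])
as_vec4_sketch([[1, 2, 3]])   # -> array([[1., 2., 3., 1.]])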
<SYSTEM_TASK:>
Increase the age of all items in the cache by 1. Items whose age
<END_TASK>
<USER_TASK:>
Description:
def roll(self):
""" Increase the age of all items in the cache by 1. Items whose age
is greater than self.max_age will be removed from the cache.
""" |
rem = []
for key, item in self._cache.items():
if item[0] > self.max_age:
rem.append(key)
item[0] += 1
for key in rem:
logger.debug("TransformCache remove: %s", key)
del self._cache[key] |
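A minimal sketch of the ageing policy, assuming each cache entry is a mutable [age, value] pair as the indexing above implies:

cache = {'a': [0, 'transform-A'], 'b': [3, 'transform-B']}
max_age = 2
expired = [key for key, item in cache.items() if item[0] > max_age]   # ['b']
for item in cache.values():
    item[0] += 1
for key in expired:
    del cache[key]
# cache is now {'a': [1, 'transform-A']}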
<SYSTEM_TASK:>
Calculate and show a histogram of data
<END_TASK>
<USER_TASK:>
Description:
def histogram(self, data, bins=10, color='w', orientation='h'):
"""Calculate and show a histogram of data
Parameters
----------
data : array-like
Data to histogram. Currently only 1D data is supported.
bins : int | array-like
Number of bins, or bin edges.
color : instance of Color
Color of the histogram.
orientation : {'h', 'v'}
Orientation of the histogram.
Returns
-------
hist : instance of Polygon
The histogram polygon.
""" |
self._configure_2d()
hist = scene.Histogram(data, bins, color, orientation)
self.view.add(hist)
self.view.camera.set_range()
return hist |
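A typical call through vispy's high-level plotting API might look like this (a sketch; the Fig indexing and show() arguments are assumed from vispy.plot conventions):

import numpy as np
import vispy.plot as vp

fig = vp.Fig(show=False)
data = np.random.normal(size=10000)
fig[0, 0].histogram(data, bins=100, color='w', orientation='h')
fig.show(run=True)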
<SYSTEM_TASK:>
Show an image
<END_TASK>
<USER_TASK:>
Description:
def image(self, data, cmap='cubehelix', clim='auto', fg_color=None):
"""Show an image
Parameters
----------
data : ndarray
Should have shape (N, M), (N, M, 3) or (N, M, 4).
cmap : str
Colormap name.
clim : str | tuple
Colormap limits. Should be ``'auto'`` or a two-element tuple of
min and max values.
fg_color : Color or None
Sets the plot foreground color if specified.
Returns
-------
image : instance of Image
The image.
Notes
-----
The colormap is only used if the image pixels are scalars.
""" |
self._configure_2d(fg_color)
image = scene.Image(data, cmap=cmap, clim=clim)
self.view.add(image)
self.view.camera.aspect = 1
self.view.camera.set_range()
return image |
<SYSTEM_TASK:>
Show a 3D mesh
<END_TASK>
<USER_TASK:>
Description:
def mesh(self, vertices=None, faces=None, vertex_colors=None,
face_colors=None, color=(0.5, 0.5, 1.), fname=None,
meshdata=None):
"""Show a 3D mesh
Parameters
----------
vertices : array
Vertices.
faces : array | None
Face definitions.
vertex_colors : array | None
Vertex colors.
face_colors : array | None
Face colors.
color : instance of Color
Color to use.
fname : str | None
Filename to load. If not None, then vertices, faces, and meshdata
must be None.
meshdata : MeshData | None
Meshdata to use. If not None, then vertices, faces, and fname
must be None.
Returns
-------
mesh : instance of Mesh
The mesh.
""" |
self._configure_3d()
if fname is not None:
if not all(x is None for x in (vertices, faces, meshdata)):
raise ValueError('vertices, faces, and meshdata must be None '
'if fname is not None')
vertices, faces = read_mesh(fname)[:2]
if meshdata is not None:
if not all(x is None for x in (vertices, faces, fname)):
raise ValueError('vertices, faces, and fname must be None if '
                                 'meshdata is not None')
else:
meshdata = MeshData(vertices, faces)
mesh = scene.Mesh(meshdata=meshdata, vertex_colors=vertex_colors,
face_colors=face_colors, color=color,
shading='smooth')
self.view.add(mesh)
self.view.camera.set_range()
return mesh |
<SYSTEM_TASK:>
Plot a series of data using lines and markers
<END_TASK>
<USER_TASK:>
Description:
def plot(self, data, color='k', symbol=None, line_kind='-', width=1.,
marker_size=10., edge_color='k', face_color='b', edge_width=1.,
title=None, xlabel=None, ylabel=None):
"""Plot a series of data using lines and markers
Parameters
----------
data : array | two arrays
Arguments can be passed as ``(Y,)``, ``(X, Y)`` or
``np.array((X, Y))``.
color : instance of Color
Color of the line.
symbol : str
Marker symbol to use.
line_kind : str
Kind of line to draw. For now, only solid lines (``'-'``)
are supported.
width : float
Line width.
marker_size : float
        Marker size. If `marker_size == 0`, markers will not be shown.
edge_color : instance of Color
Color of the marker edge.
face_color : instance of Color
Color of the marker face.
edge_width : float
Edge width of the marker.
title : str | None
The title string to be displayed above the plot
xlabel : str | None
The label to display along the bottom axis
ylabel : str | None
The label to display along the left axis.
Returns
-------
line : instance of LinePlot
The line plot.
See also
--------
marker_types, LinePlot
""" |
self._configure_2d()
line = scene.LinePlot(data, connect='strip', color=color,
symbol=symbol, line_kind=line_kind,
width=width, marker_size=marker_size,
edge_color=edge_color,
face_color=face_color,
edge_width=edge_width)
self.view.add(line)
self.view.camera.set_range()
self.visuals.append(line)
if title is not None:
self.title.text = title
if xlabel is not None:
self.xlabel.text = xlabel
if ylabel is not None:
self.ylabel.text = ylabel
return line |
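A usage sketch, again assuming the standard vispy.plot.Fig entry point; the same fig[row, col] pattern applies to the other helpers in this section (image, mesh, spectrogram, volume, colorbar):

import numpy as np
import vispy.plot as vp

x = np.linspace(0, 10, 200)
fig = vp.Fig(show=False)
fig[0, 0].plot(np.c_[x, np.sin(x)], marker_size=0,
               title='sin(x)', xlabel='x', ylabel='sin(x)')
fig.show(run=True)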
<SYSTEM_TASK:>
Calculate and show a spectrogram
<END_TASK>
<USER_TASK:>
Description:
def spectrogram(self, x, n_fft=256, step=None, fs=1., window='hann',
color_scale='log', cmap='cubehelix', clim='auto'):
"""Calculate and show a spectrogram
Parameters
----------
x : array-like
        1D signal to operate on. If ``len(x) < n_fft``, x will be
zero-padded to length ``n_fft``.
n_fft : int
Number of FFT points. Much faster for powers of two.
step : int | None
Step size between calculations. If None, ``n_fft // 2``
will be used.
fs : float
The sample rate of the data.
window : str | None
Window function to use. Can be ``'hann'`` for Hann window, or None
for no windowing.
color_scale : {'linear', 'log'}
Scale to apply to the result of the STFT.
``'log'`` will use ``10 * log10(power)``.
cmap : str
Colormap name.
clim : str | tuple
Colormap limits. Should be ``'auto'`` or a two-element tuple of
min and max values.
Returns
-------
spec : instance of Spectrogram
The spectrogram.
See also
--------
Image
""" |
self._configure_2d()
# XXX once we have axes, we should use "fft_freqs", too
spec = scene.Spectrogram(x, n_fft, step, fs, window,
color_scale, cmap, clim)
self.view.add(spec)
self.view.camera.set_range()
return spec |
<SYSTEM_TASK:>
Show a 3D volume
<END_TASK>
<USER_TASK:>
Description:
def volume(self, vol, clim=None, method='mip', threshold=None,
cmap='grays'):
"""Show a 3D volume
Parameters
----------
vol : ndarray
Volume to render.
clim : tuple of two floats | None
The contrast limits. The values in the volume are mapped to
black and white corresponding to these values. Default maps
between min and max.
method : {'mip', 'iso', 'translucent', 'additive'}
The render style to use. See corresponding docs for details.
Default 'mip'.
threshold : float
        The threshold to use for the isosurface render style. By default
the mean of the given volume is used.
cmap : str
The colormap to use.
Returns
-------
volume : instance of Volume
The volume visualization.
See also
--------
Volume
""" |
self._configure_3d()
volume = scene.Volume(vol, clim, method, threshold, cmap=cmap)
self.view.add(volume)
self.view.camera.set_range()
return volume |
<SYSTEM_TASK:>
Show a ColorBar
<END_TASK>
<USER_TASK:>
Description:
def colorbar(self, cmap, position="right",
label="", clim=("", ""),
border_width=0.0, border_color="black",
**kwargs):
"""Show a ColorBar
Parameters
----------
cmap : str | vispy.color.ColorMap
Either the name of the ColorMap to be used from the standard
set of names (refer to `vispy.color.get_colormap`),
or a custom ColorMap object.
The ColorMap is used to apply a gradient on the colorbar.
position : {'left', 'right', 'top', 'bottom'}
The position of the colorbar with respect to the plot.
'top' and 'bottom' are placed horizontally, while
'left' and 'right' are placed vertically
label : str
The label that is to be drawn with the colorbar
that provides information about the colorbar.
clim : tuple (min, max)
the minimum and maximum values of the data that
is given to the colorbar. This is used to draw the scale
on the side of the colorbar.
border_width : float (in px)
The width of the border the colormap should have. This measurement
is given in pixels
border_color : str | vispy.color.Color
The color of the border of the colormap. This can either be a
        str as the color's name or an actual instance of a vispy.color.Color
Returns
-------
colorbar : instance of ColorBarWidget
See also
--------
ColorBarWidget
""" |
self._configure_2d()
cbar = scene.ColorBarWidget(orientation=position,
label_str=label,
cmap=cmap,
clim=clim,
border_width=border_width,
border_color=border_color,
**kwargs)
CBAR_LONG_DIM = 50
if cbar.orientation == "bottom":
self.grid.remove_widget(self.cbar_bottom)
self.cbar_bottom = self.grid.add_widget(cbar, row=5, col=4)
self.cbar_bottom.height_max = \
                self.cbar_bottom.height_min = CBAR_LONG_DIM
elif cbar.orientation == "top":
self.grid.remove_widget(self.cbar_top)
self.cbar_top = self.grid.add_widget(cbar, row=1, col=4)
            self.cbar_top.height_max = self.cbar_top.height_min = CBAR_LONG_DIM
elif cbar.orientation == "left":
self.grid.remove_widget(self.cbar_left)
self.cbar_left = self.grid.add_widget(cbar, row=2, col=1)
self.cbar_left.width_max = self.cbar_left.width_min = CBAR_LONG_DIM
else: # cbar.orientation == "right"
self.grid.remove_widget(self.cbar_right)
self.cbar_right = self.grid.add_widget(cbar, row=2, col=5)
self.cbar_right.width_max = \
self.cbar_right.width_min = CBAR_LONG_DIM
return cbar |
<SYSTEM_TASK:>
Redraw the Vispy canvas
<END_TASK>
<USER_TASK:>
Description:
def redraw(self):
"""
Redraw the Vispy canvas
""" |
if self._multiscat is not None:
self._multiscat._update()
self.vispy_widget.canvas.update() |
<SYSTEM_TASK:>
Remove the layer artist from the visualization
<END_TASK>
<USER_TASK:>
Description:
def remove(self):
"""
Remove the layer artist from the visualization
""" |
if self._multiscat is None:
return
self._multiscat.deallocate(self.id)
self._multiscat = None
self._viewer_state.remove_global_callback(self._update_scatter)
self.state.remove_global_callback(self._update_scatter) |
<SYSTEM_TASK:>
Check for existence of key in dict, return value or raise error
<END_TASK>
<USER_TASK:>
Description:
def _check_conversion(key, valid_dict):
"""Check for existence of key in dict, return value or raise error""" |
if key not in valid_dict and key not in valid_dict.values():
# Only show users the nice string values
keys = [v for v in valid_dict.keys() if isinstance(v, string_types)]
raise ValueError('value must be one of %s, not %s' % (keys, key))
return valid_dict[key] if key in valid_dict else key |
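For example (the numeric values are the usual GL enum codes, shown only for illustration):

import numpy as np

valid = {'float': 5126, np.float32: 5126, 'unsigned_byte': 5121}
_check_conversion('float', valid)    # -> 5126 (key lookup)
_check_conversion(5126, valid)       # -> 5126 (already a value, passed through)
_check_conversion('int32', valid)    # raises ValueError, listing only the string keys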
<SYSTEM_TASK:>
Read pixels from the currently selected buffer.
<END_TASK>
<USER_TASK:>
Description:
def read_pixels(viewport=None, alpha=True, out_type='unsigned_byte'):
"""Read pixels from the currently selected buffer.
Under most circumstances, this function reads from the front buffer.
Unlike all other functions in vispy.gloo, this function directly executes
an OpenGL command.
Parameters
----------
viewport : array-like | None
4-element list of x, y, w, h parameters. If None (default),
the current GL viewport will be queried and used.
alpha : bool
If True (default), the returned array has 4 elements (RGBA).
If False, it has 3 (RGB).
out_type : str | dtype
Can be 'unsigned_byte' or 'float'. Note that this does not
use casting, but instead determines how values are read from
the current buffer. Can also be numpy dtypes ``np.uint8``,
``np.ubyte``, or ``np.float32``.
Returns
-------
pixels : array
3D array of pixels in np.uint8 or np.float32 format.
The array shape is (h, w, 3) or (h, w, 4), with the top-left corner
of the framebuffer at index [0, 0] in the returned array.
""" |
# Check whether the GL context is direct or remote
context = get_current_canvas().context
if context.shared.parser.is_remote():
raise RuntimeError('Cannot use read_pixels() with remote GLIR parser')
finish() # noqa - finish first, also flushes GLIR commands
type_dict = {'unsigned_byte': gl.GL_UNSIGNED_BYTE,
np.uint8: gl.GL_UNSIGNED_BYTE,
'float': gl.GL_FLOAT,
np.float32: gl.GL_FLOAT}
type_ = _check_conversion(out_type, type_dict)
if viewport is None:
viewport = gl.glGetParameter(gl.GL_VIEWPORT)
viewport = np.array(viewport, int)
if viewport.ndim != 1 or viewport.size != 4:
raise ValueError('viewport should be 1D 4-element array-like, not %s'
% (viewport,))
x, y, w, h = viewport
gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1) # PACK, not UNPACK
fmt = gl.GL_RGBA if alpha else gl.GL_RGB
im = gl.glReadPixels(x, y, w, h, fmt, type_)
gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 4)
# reshape, flip, and return
if not isinstance(im, np.ndarray):
np_dtype = np.uint8 if type_ == gl.GL_UNSIGNED_BYTE else np.float32
im = np.frombuffer(im, np_dtype)
im.shape = h, w, (4 if alpha else 3) # RGBA vs RGB
im = im[::-1, :, :] # flip the image
return im |
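A common use is grabbing a screenshot right after a draw; a sketch assuming the canvas has just rendered and vispy.io.write_png is available:

from vispy import gloo, io

img = gloo.read_pixels(alpha=False)   # (h, w, 3) uint8, row 0 is the top of the window
io.write_png('screenshot.png', img)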
<SYSTEM_TASK:>
Read the current gl configuration
<END_TASK>
<USER_TASK:>
Description:
def get_gl_configuration():
"""Read the current gl configuration
This function uses constants that are not in the OpenGL ES 2.1
namespace, so only use this on desktop systems.
Returns
-------
config : dict
The currently active OpenGL configuration.
""" |
# XXX eventually maybe we can ask `gl` whether or not we can access these
gl.check_error('pre-config check')
config = dict()
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
fb_param = gl.glGetFramebufferAttachmentParameter
# copied since they aren't in ES:
GL_FRONT_LEFT = 1024
GL_DEPTH = 6145
GL_STENCIL = 6146
GL_SRGB = 35904
GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING = 33296
GL_STEREO = 3123
GL_DOUBLEBUFFER = 3122
sizes = dict(red=(GL_FRONT_LEFT, 33298),
green=(GL_FRONT_LEFT, 33299),
blue=(GL_FRONT_LEFT, 33300),
alpha=(GL_FRONT_LEFT, 33301),
depth=(GL_DEPTH, 33302),
stencil=(GL_STENCIL, 33303))
for key, val in sizes.items():
config[key + '_size'] = fb_param(gl.GL_FRAMEBUFFER, val[0], val[1])
val = fb_param(gl.GL_FRAMEBUFFER, GL_FRONT_LEFT,
GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING)
if val not in (gl.GL_LINEAR, GL_SRGB):
raise RuntimeError('unknown value for SRGB: %s' % val)
config['srgb'] = True if val == GL_SRGB else False # GL_LINEAR
config['stereo'] = True if gl.glGetParameter(GL_STEREO) else False
config['double_buffer'] = (True if gl.glGetParameter(GL_DOUBLEBUFFER)
else False)
config['samples'] = gl.glGetParameter(gl.GL_SAMPLES)
gl.check_error('post-config check')
return config |
<SYSTEM_TASK:>
Set the screen clear color
<END_TASK>
<USER_TASK:>
Description:
def set_clear_color(self, color='black', alpha=None):
"""Set the screen clear color
This is a wrapper for gl.glClearColor.
Parameters
----------
color : str | tuple | instance of Color
Color to use. See vispy.color.Color for options.
alpha : float | None
Alpha to use.
""" |
self.glir.command('FUNC', 'glClearColor', *Color(color, alpha).rgba) |
<SYSTEM_TASK:>
The GLIR queue corresponding to the current canvas
<END_TASK>
<USER_TASK:>
Description:
def glir(self):
""" The GLIR queue corresponding to the current canvas
""" |
canvas = get_current_canvas()
if canvas is None:
msg = ("If you want to use gloo without vispy.app, " +
"use a gloo.context.FakeCanvas.")
raise RuntimeError('Gloo requires a Canvas to run.\n' + msg)
return canvas.context.glir |
<SYSTEM_TASK:>
Clear names that are not part of the strict ES API
<END_TASK>
<USER_TASK:>
Description:
def _clear_namespace():
""" Clear names that are not part of the strict ES API
""" |
ok_names = set(default_backend.__dict__)
ok_names.update(['gl2', 'glplus']) # don't remove the module
NS = globals()
for name in list(NS.keys()):
if name.lower().startswith('gl'):
if name not in ok_names:
del NS[name] |
<SYSTEM_TASK:>
Inject all objects that start with 'gl' from the source
<END_TASK>
<USER_TASK:>
Description:
def _copy_gl_functions(source, dest, constants=False):
""" Inject all objects that start with 'gl' from the source
into the dest. source and dest can be dicts, modules or BaseGLProxy's.
""" |
# Get dicts
if isinstance(source, BaseGLProxy):
s = {}
for key in dir(source):
s[key] = getattr(source, key)
source = s
elif not isinstance(source, dict):
source = source.__dict__
if not isinstance(dest, dict):
dest = dest.__dict__
# Copy names
funcnames = [name for name in source.keys() if name.startswith('gl')]
for name in funcnames:
dest[name] = source[name]
# Copy constants
if constants:
constnames = [name for name in source.keys() if name.startswith('GL_')]
for name in constnames:
dest[name] = source[name] |
<SYSTEM_TASK:>
Check this from time to time to detect GL errors.
<END_TASK>
<USER_TASK:>
Description:
def check_error(when='periodic check'):
""" Check this from time to time to detect GL errors.
Parameters
----------
when : str
Shown in the exception to help the developer determine when
this check was done.
""" |
errors = []
while True:
err = glGetError()
if err == GL_NO_ERROR or (errors and err == errors[-1]):
break
errors.append(err)
if errors:
msg = ', '.join([repr(ENUM_MAP.get(e, e)) for e in errors])
err = RuntimeError('OpenGL got errors (%s): %s' % (when, msg))
err.errors = errors
err.err = errors[-1] # pyopengl compat
raise err |
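Typical usage brackets a block of raw GL calls so that a failure is reported close to its source (sketch):

check_error('before manual texture upload')
# ... raw gl.* calls that bypass gloo would go here ...
check_error('after manual texture upload')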
<SYSTEM_TASK:>
retrieve vertices and connects from given paths-list
<END_TASK>
<USER_TASK:>
Description:
def _get_verts_and_connect(self, paths):
""" retrieve vertices and connects from given paths-list
""" |
verts = np.vstack(paths)
gaps = np.add.accumulate(np.array([len(x) for x in paths])) - 1
connect = np.ones(gaps[-1], dtype=bool)
connect[gaps[:-1]] = False
return verts, connect |
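To see what the connect mask encodes, consider two short paths (a standalone NumPy illustration of the same arithmetic):

import numpy as np

paths = [np.array([[0., 0.], [1., 0.], [1., 1.]]),   # 3 vertices
         np.array([[2., 2.], [3., 2.]])]             # 2 vertices
verts = np.vstack(paths)                                 # shape (5, 2)
gaps = np.add.accumulate([len(p) for p in paths]) - 1    # array([2, 4])
connect = np.ones(gaps[-1], dtype=bool)                  # 4 candidate segments
connect[gaps[:-1]] = False                               # [True, True, False, True]
# the False entry suppresses the segment joining the last vertex of the first
# path to the first vertex of the second path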
<SYSTEM_TASK:>
compute LineVisual vertices, connects and color-index
<END_TASK>
<USER_TASK:>
Description:
def _compute_iso_line(self):
""" compute LineVisual vertices, connects and color-index
""" |
level_index = []
connects = []
verts = []
# calculate which level are within data range
# this works for now and the existing examples, but should be tested
# thoroughly also with the data-sanity check in set_data-function
        choice = np.nonzero((self.levels > self._data.min()) &
                            (self.levels < self._data.max()))
levels_to_calc = np.array(self.levels)[choice]
# save minimum level index
self._level_min = choice[0][0]
for level in levels_to_calc:
# if we use matplotlib isoline algorithm we need to add half a
# pixel in both (x,y) dimensions because isolines are aligned to
# pixel centers
if _HAS_MPL:
nlist = self._iso.trace(level, level, 0)
paths = nlist[:len(nlist)//2]
v, c = self._get_verts_and_connect(paths)
v += np.array([0.5, 0.5])
else:
paths = isocurve(self._data.astype(float).T, level,
extend_to_edge=True, connected=True)
v, c = self._get_verts_and_connect(paths)
level_index.append(v.shape[0])
connects.append(np.hstack((c, [False])))
verts.append(v)
self._li = np.hstack(level_index)
self._connect = np.hstack(connects)
self._verts = np.vstack(verts) |
<SYSTEM_TASK:>
Connect this emitter to a new callback.
<END_TASK>
<USER_TASK:>
Description:
def connect(self, callback, ref=False, position='first',
before=None, after=None):
"""Connect this emitter to a new callback.
Parameters
----------
callback : function | tuple
*callback* may be either a callable object or a tuple
(object, attr_name) where object.attr_name will point to a
callable object. Note that only a weak reference to ``object``
will be kept.
ref : bool | str
Reference used to identify the callback in ``before``/``after``.
        If True, the callback ref will be automatically determined (see
Notes). If False, the callback cannot be referred to by a string.
If str, the given string will be used. Note that if ``ref``
is not unique in ``callback_refs``, an error will be thrown.
position : str
If ``'first'``, the first eligible position is used (that
meets the before and after criteria), ``'last'`` will use
the last position.
before : str | callback | list of str or callback | None
List of callbacks that the current callback should precede.
Can be None if no before-criteria should be used.
after : str | callback | list of str or callback | None
List of callbacks that the current callback should follow.
Can be None if no after-criteria should be used.
Notes
-----
If ``ref=True``, the callback reference will be determined from:
    1. If ``callback`` is a ``tuple``, the second element in the tuple.
2. The ``__name__`` attribute.
3. The ``__class__.__name__`` attribute.
The current list of callback refs can be obtained using
``event.callback_refs``. Callbacks can be referred to by either
their string reference (if given), or by the actual callback that
was attached (e.g., ``(canvas, 'swap_buffers')``).
If the specified callback is already connected, then the request is
ignored.
If before is None and after is None (default), the new callback will
be added to the beginning of the callback list. Thus the
callback that is connected _last_ will be the _first_ to receive
events from the emitter.
""" |
callbacks = self.callbacks
callback_refs = self.callback_refs
callback = self._normalize_cb(callback)
if callback in callbacks:
return
# deal with the ref
if isinstance(ref, bool):
if ref:
if isinstance(callback, tuple):
ref = callback[1]
elif hasattr(callback, '__name__'): # function
ref = callback.__name__
else: # Method, or other
ref = callback.__class__.__name__
else:
ref = None
elif not isinstance(ref, string_types):
raise TypeError('ref must be a bool or string')
if ref is not None and ref in self._callback_refs:
raise ValueError('ref "%s" is not unique' % ref)
# positions
if position not in ('first', 'last'):
raise ValueError('position must be "first" or "last", not %s'
% position)
# bounds
bounds = list() # upper & lower bnds (inclusive) of possible cb locs
for ri, criteria in enumerate((before, after)):
if criteria is None or criteria == []:
bounds.append(len(callback_refs) if ri == 0 else 0)
else:
if not isinstance(criteria, list):
criteria = [criteria]
for c in criteria:
count = sum([(c == cn or c == cc) for cn, cc
in zip(callback_refs, callbacks)])
if count != 1:
raise ValueError('criteria "%s" is in the current '
'callback list %s times:\n%s\n%s'
% (criteria, count,
callback_refs, callbacks))
matches = [ci for ci, (cn, cc) in enumerate(zip(callback_refs,
callbacks))
if (cc in criteria or cn in criteria)]
bounds.append(matches[0] if ri == 0 else (matches[-1] + 1))
if bounds[0] < bounds[1]: # i.e., "place before" < "place after"
raise RuntimeError('cannot place callback before "%s" '
'and after "%s" for callbacks: %s'
% (before, after, callback_refs))
idx = bounds[1] if position == 'first' else bounds[0] # 'last'
# actually add the callback
self._callbacks.insert(idx, callback)
self._callback_refs.insert(idx, ref)
return callback |
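A small end-to-end sketch with vispy's EventEmitter; the callback names are arbitrary, and ref=True is needed so the second connection can refer to the first one by name:

from vispy.util.event import EventEmitter

em = EventEmitter(type='demo')

def log_second(event):
    print('second')

def log_first(event):
    print('first')

em.connect(log_second, ref=True)             # ref becomes 'log_second'
em.connect(log_first, before='log_second')   # placed ahead of log_second
em()                                         # prints 'first', then 'second'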
<SYSTEM_TASK:>
Disconnect a callback from this emitter.
<END_TASK>
<USER_TASK:>
Description:
def disconnect(self, callback=None):
"""Disconnect a callback from this emitter.
If no callback is specified, then *all* callbacks are removed.
If the callback was not already connected, then the call does nothing.
""" |
if callback is None:
self._callbacks = []
self._callback_refs = []
else:
callback = self._normalize_cb(callback)
if callback in self._callbacks:
idx = self._callbacks.index(callback)
self._callbacks.pop(idx)
self._callback_refs.pop(idx) |
<SYSTEM_TASK:>
Block all emitters in this group.
<END_TASK>
<USER_TASK:>
Description:
def block_all(self):
""" Block all emitters in this group.
""" |
self.block()
for em in self._emitters.values():
em.block() |
<SYSTEM_TASK:>
Unblock all emitters in this group.
<END_TASK>
<USER_TASK:>
Description:
def unblock_all(self):
""" Unblock all emitters in this group.
""" |
self.unblock()
for em in self._emitters.values():
em.unblock() |
<SYSTEM_TASK:>
Create a JSON-serializable message of GLIR commands. NumPy arrays
<END_TASK>
<USER_TASK:>
Description:
def create_glir_message(commands, array_serialization=None):
"""Create a JSON-serializable message of GLIR commands. NumPy arrays
are serialized according to the specified method.
Arguments
---------
commands : list
List of GLIR commands.
array_serialization : string or None
Serialization method for NumPy arrays. Possible values are:
'binary' (default) : use a binary string
'base64' : base64 encoded string of the array
""" |
# Default serialization method for NumPy arrays.
if array_serialization is None:
array_serialization = 'binary'
# Extract the buffers.
commands_modified, buffers = _extract_buffers(commands)
# Serialize the modified commands (with buffer pointers) and the buffers.
commands_serialized = [_serialize_command(command_modified)
for command_modified in commands_modified]
buffers_serialized = [_serialize_buffer(buffer, array_serialization)
for buffer in buffers]
# Create the final message.
msg = {
'msg_type': 'glir_commands',
'commands': commands_serialized,
'buffers': buffers_serialized,
}
return msg |
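For instance, a tiny command list containing one NumPy array (object id 4 is hypothetical) is split into JSON-friendly commands plus an out-of-band buffer list:

import numpy as np

commands = [('CREATE', 4, 'VertexBuffer'),
            ('DATA', 4, 0, np.zeros(3, np.float32))]
msg = create_glir_message(commands, array_serialization='base64')
msg['msg_type']        # 'glir_commands'
len(msg['buffers'])    # 1 -- the ndarray was extracted and serialized separately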