INSTRUCTION
stringlengths 1
46.3k
| RESPONSE
stringlengths 75
80.2k
|
---|---|
Draws an oval between 2 points
:param int x1:
The x position of the starting point.
:param int y1:
The y position of the starting point.
:param int x2:
The x position of the end point.
:param int y2:
The y position of the end point.
:param str color:
The color of the shape. Defaults to `"black"`.
:param int outline:
`0` or `False` is no outline. `True` or value > 1 sets an outline. Defaults to `False`.
:param str outline_color:
The color of the outline. Defaults to `"black"`.
:return:
def oval(self, x1, y1, x2, y2, color="black", outline=False, outline_color="black"):
    """
    Draws an oval between 2 points.

    :param int x1: The x position of the starting point.
    :param int y1: The y position of the starting point.
    :param int x2: The x position of the end point.
    :param int y2: The y position of the end point.
    :param str color: The color of the shape. Defaults to `"black"`.
    :param int outline: `0` or `False` is no outline. `True` or value > 1
        sets an outline. Defaults to `False`.
    :param str outline_color: The color of the outline. Defaults to `"black"`.
    :return: The id of the shape.
    """
    # tk treats an empty string as "no fill" / "no outline".
    fill_value = utils.convert_color(color) if color is not None else ""
    outline_value = utils.convert_color(outline_color) if outline else ""
    return self.tk.create_oval(
        x1, y1, x2, y2,
        outline=outline_value,
        width=int(outline),
        fill=fill_value,
    )
Draws a rectangle between 2 points
:param int x1:
The x position of the starting point.
:param int y1:
The y position of the starting point.
:param int x2:
The x position of the end point.
:param int y2:
The y position of the end point.
:param str color:
The color of the shape. Defaults to `"black"`.
:param int outline:
`0` or `False` is no outline. `True` or value > 1 sets an outline. Defaults to `False`.
:param str outline_color:
The color of the outline. Defaults to `"black"`.
:return:
def rectangle(self, x1, y1, x2, y2, color="black", outline=False, outline_color="black"):
    """
    Draws a rectangle between 2 points.

    :param int x1: The x position of the starting point.
    :param int y1: The y position of the starting point.
    :param int x2: The x position of the end point.
    :param int y2: The y position of the end point.
    :param str color: The color of the shape. Defaults to `"black"`.
    :param int outline: `0` or `False` is no outline. `True` or value > 1
        sets an outline. Defaults to `False`.
    :param str outline_color: The color of the outline. Defaults to `"black"`.
    :return: The id of the shape.
    """
    # tk treats an empty string as "no fill" / "no outline".
    fill_value = utils.convert_color(color) if color is not None else ""
    outline_value = utils.convert_color(outline_color) if outline else ""
    return self.tk.create_rectangle(
        x1, y1, x2, y2,
        outline=outline_value,
        width=int(outline),
        fill=fill_value,
    )
Draws a polygon from a list of co-ordinates
:param int *coords:
Pairs of x and y positions which make up the polygon.
:param str color:
The color of the shape. Defaults to `"black"`.
:param int outline:
`0` or `False` is no outline. `True` or value > 1 sets an outline. Defaults to `False`.
:param str outline_color:
The color of the outline. Defaults to `"black"`.
:return:
def polygon(self, *coords, color="black", outline=False, outline_color="black"):
    """
    Draws a polygon from a list of co-ordinates.

    :param int *coords: Pairs of x and y positions which make up the polygon.
    :param str color: The color of the shape. Defaults to `"black"`.
    :param int outline: `0` or `False` is no outline. `True` or value > 1
        sets an outline. Defaults to `False`.
    :param str outline_color: The color of the outline. Defaults to `"black"`.
    :return: The id of the shape.
    """
    # tk treats an empty string as "no fill" / "no outline".
    fill_value = utils.convert_color(color) if color is not None else ""
    outline_value = utils.convert_color(outline_color) if outline else ""
    return self.tk.create_polygon(
        *coords,
        outline=outline_value,
        width=int(outline),
        fill=fill_value,
    )
Draws a triangle between 3 points
:param int x1:
The x position of the starting point.
:param int y1:
The y position of the starting point.
:param int x2:
The x position of the middle point.
:param int y2:
The y position of the middle point.
:param int x3:
The x position of the end point.
:param int y3:
The y position of the end point.
:param str color:
The color of the shape. Defaults to `"black"`.
:param int outline:
`0` or `False` is no outline. `True` or value > 1 sets an outline. Defaults to `False`.
:param str outline_color:
The color of the outline. Defaults to `"black"`.
:return:
def triangle(self, x1, y1, x2, y2, x3, y3, color="black", outline=False, outline_color="black"):
    """
    Draws a triangle between 3 points.

    :param int x1: The x position of the starting point.
    :param int y1: The y position of the starting point.
    :param int x2: The x position of the middle point.
    :param int y2: The y position of the middle point.
    :param int x3: The x position of the end point.
    :param int y3: The y position of the end point.
    :param str color: The color of the shape. Defaults to `"black"`.
    :param int outline: `0` or `False` is no outline. `True` or value > 1
        sets an outline. Defaults to `False`.
    :param str outline_color: The color of the outline. Defaults to `"black"`.
    :return: The id of the shape.
    """
    # A triangle is just a polygon with three vertices.
    vertices = (x1, y1, x2, y2, x3, y3)
    return self.polygon(
        *vertices,
        color=color,
        outline=outline,
        outline_color=outline_color,
    )
Inserts an image into the drawing, position by its top-left corner.
:param int x:
The x position to insert the image.
:param int y:
The y position to insert the image.
:param str image:
The file path or a PhotoImage or PIL.Image object.
:param str width:
The width to scale the image to; setting to `None` will use the
actual width of the Image. Defaults to `None`.
:param str height:
The height to scale the image to; setting to `None` will use the
actual height of the Image. Defaults to `None`.
:return:
def image(self, x, y, image, width=None, height=None):
    """
    Inserts an image into the drawing, positioned by its top-left corner.

    :param int x: The x position to insert the image.
    :param int y: The y position to insert the image.
    :param str image: The file path or a PhotoImage or PIL.Image object.
    :param int width: The width to scale the image to; `None` uses the
        actual width of the image. Defaults to `None`.
    :param int height: The height to scale the image to; `None` uses the
        actual height of the image. Defaults to `None`.
    :return: The id of the image.
    """
    # Load the image and keep a reference in the dict, otherwise tk
    # destroys the reference and the canvas draws nothing.
    _image = utils.GUIZeroImage(image, width, height)
    # renamed from `id` to avoid shadowing the builtin
    image_id = self.tk.create_image(x, y, image=_image.tk_image, anchor="nw")
    self._images[image_id] = _image
    return image_id
Inserts text into the drawing, position by its top-left corner.
:param int x:
The x position of the text.
:param int y:
The y position of the text.
:param str color:
The color of the text. Defaults to `"black"`.
:param str font:
The font to use. Defaults to `None` and will use the system
default font.
:param int size:
The size of the text. Defaults to `None` and will use the system
default font size.
:param int max_width:
Maximum line length. Lines longer than this value are wrapped.
def text(self, x, y, text, color="black", font=None, size=None, max_width=None):
    """
    Inserts text into the drawing, positioned by its top-left corner.

    :param int x: The x position of the text.
    :param int y: The y position of the text.
    :param str color: The color of the text. Defaults to `"black"`.
    :param str font: The font to use. Defaults to `None` and will use the
        system default font.
    :param int size: The size of the text. Defaults to `None` and will use
        the system default font size.
    :param int max_width: Maximum line length. Lines longer than this value
        are wrapped. Default is `None` (no wrapping).
    """
    # Build the font; only pass `size` through when one was given so the
    # system default size is used otherwise.
    font_options = {"family": font}
    if size is not None:
        font_options["size"] = size
    text_font = Font(self.tk, **font_options)
    # An empty fill means invisible text to tk.
    text_fill = "" if color is None else utils.convert_color(color)
    return self.tk.create_text(
        x, y,
        text=text,
        fill=text_fill,
        font=text_font,
        width=max_width,
        anchor="nw",
    )
Deletes an "object" (line, triangle, image, etc) from the drawing.
:param int id:
def delete(self, id):
    """
    Deletes an "object" (line, triangle, image, etc) from the drawing.

    :param int id: The id of the object.
    """
    # Drop the cached image reference, if any (kept in _images to stop tk
    # destroying it) — `in dict` instead of `in dict.keys()`.
    if id in self._images:
        del self._images[id]
    self.tk.delete(id)
Gets the config from the widget's tk object.
:param string key:
The tk config key.
:param bool default:
Returns the default value for this key. Defaults to `False`. | def _get_tk_config(self, key, default=False):
"""
Gets the config from the widget's tk object.
:param string key:
The tk config key.
:param bool default:
Returns the default value for this key. Defaults to `False`.
"""
if default:
return self._tk_defaults[key]
else:
return self.tk[key] |
Gets the config from the widget's tk object
:param string/List keys:
The tk config key or a list of tk keys.
:param variable value:
The value to set. If the value is `None`, the config value will be
reset to its default. | def _set_tk_config(self, keys, value):
"""
Gets the config from the widget's tk object
:param string/List keys:
The tk config key or a list of tk keys.
:param variable value:
The value to set. If the value is `None`, the config value will be
reset to its default.
"""
# if a single key is passed, convert to list
if isinstance(keys, str):
keys = [keys]
# loop through all the keys
for key in keys:
if key in self.tk.keys():
if value is None:
# reset to default
self.tk[key] = self._tk_defaults[key]
else:
self.tk[key] = value |
def destroy(self):
    """Destroy the tk widget."""
    # Detach from the master container first, if there is one.
    master = self.master
    if master is not None:
        master._remove_child(self)
    self.tk.destroy()
Adds a tk widget into a guizero container.
:param tkinter.Widget tk_widget:
The Container (App, Box, etc) the tk widget will belong too.
:param List grid:
Grid co-ordinates for the widget, required if the master layout
is 'grid', defaults to `None`.
:param string align:
How to align the widget within the grid, defaults to None.
:param bool visible:
If the widget should be visible, defaults to `True`.
:param bool enabled:
If the widget should be enabled, defaults to `None`. If `None`
the value is inherited from the master.
:param int width:
The starting width of the widget. Defaults to `None` and will auto
size.
:param int height:
The starting height of the widget. Defaults to `None` and will auto
def add_tk_widget(self, tk_widget, grid=None, align=None, visible=True, enabled=None, width=None, height=None):
    """
    Adds a tk widget into a guizero container.

    :param tkinter.Widget tk_widget: The tk widget to add to this container.
        (Original docstring wrongly described this parameter as the
        container itself.)
    :param List grid: Grid co-ordinates for the widget, required if the
        master layout is 'grid', defaults to `None`.
    :param string align: How to align the widget within the grid, defaults
        to `None`.
    :param bool visible: If the widget should be visible, defaults to `True`.
    :param bool enabled: If the widget should be enabled, defaults to `None`.
        If `None` the value is inherited from the master.
    :param int width: The starting width of the widget. Defaults to `None`
        and will auto size.
    :param int height: The starting height of the widget. Defaults to `None`
        and will auto size.
    :return: The guizero Widget wrapping the tk widget.
    """
    return Widget(self, tk_widget, "tk widget", grid, align, visible, enabled, width, height)
Displays all the widgets associated with this Container.
def display_widgets(self):
    """
    Displays all the widgets associated with this Container.

    Should be called when the widgets need to be "re-packed/gridded".
    """
    # Every displayable widget is removed and then re-displayed so that the
    # on-screen order always matches the creation order.
    use_grid = self.layout == "grid"
    for child in self.children:
        if not child.displayable:
            continue
        # remove the widget from its layout manager
        if use_grid:
            child.tk.grid_forget()
        else:
            child.tk.pack_forget()
        # re-display it only if it should be visible
        if child.visible:
            if use_grid:
                self._grid_widget(child)
            else:
                self._pack_widget(child)
def disable(self):
    """Disable all the widgets in this container."""
    self._enabled = False
    # Cascade to every guizero child; plain tk widgets are skipped.
    for child in self.children:
        if isinstance(child, (Container, Widget)):
            child.disable()
def enable(self):
    """Enable all the widgets in this container."""
    self._enabled = True
    # Cascade to every guizero child; plain tk widgets are skipped.
    for child in self.children:
        if isinstance(child, (Container, Widget)):
            child.enable()
def set_full_screen(self, keybind="<Escape>"):
    """
    Make this window full screen and bind the Escape key (or given key)
    to exit full screen mode.
    """
    self.tk.attributes("-fullscreen", True)
    self._full_screen = True
    # Register the key that drops back out of full screen.
    self.events.set_event("<FullScreen.Escape>", keybind, self.exit_full_screen)
def exit_full_screen(self):
    """Change from full screen to windowed mode and remove the key binding."""
    self.tk.attributes("-fullscreen", False)
    self._full_screen = False
    # Unregister the escape key binding set by set_full_screen.
    self.events.remove_event("<FullScreen.Escape>")
Set the propagation value of the tk widget dependent on the width and height
:param int width:
The width of the widget.
:param int height:
The height of the widget. | def _set_propagation(self, width, height):
"""
Set the propagation value of the tk widget dependent on the width and height
:param int width:
The width of the widget.
:param int height:
The height of the widget.
"""
if width is None:
width = 0
if height is None:
height = 0
# set the propagate value
propagate_function = self.tk.pack_propagate
if self.layout == "grid":
propagate_function = self.tk.grid_propagate
propagate_value = True
# if height or width > 0 need to stop propagation
if isinstance(width, int):
if width > 0:
propagate_value = False
if isinstance(height, int):
if height > 0:
propagate_value = False
# if you specify a height or width you must specify they other
# (unless its a fill)
if isinstance(width, int) and isinstance(height, int):
if (width == 0 and height > 0) or (height == 0 and width > 0):
utils.error_format("You must specify a width and a height for {}".format(self.description))
propagate_function(propagate_value) |
This is the main body of the "lifting" for the instruction.
This can/should be overriden to provide the general flow of how instructions in your arch work.
For example, in MSP430, this is:
- Figure out what your operands are by parsing the addressing, and load them into temporary registers
- Do the actual operation, and commit the result, if needed.
def lift(self, irsb_c, past_instructions, future_instructions):  # pylint: disable=unused-argument
    """
    This is the main body of the "lifting" for the instruction.

    This can/should be overriden to provide the general flow of how
    instructions in your arch work.  For example, in MSP430, this is:

    - Figure out what your operands are by parsing the addressing, and
      load them into temporary registers
    - Do the actual operation, and commit the result, if needed.
    - Compute the flags
    """
    self.irsb_c = irsb_c
    # Always call this first!
    self.mark_instruction_start()
    # Then do the actual stuff.
    operands = self.fetch_operands()
    result = self.compute_result(*operands)
    # A None result means there is nothing to commit back.
    if result is not None:
        self.commit_result(result)
    # Flags see the operands followed by the (possibly None) result.
    self.compute_flags(*operands, result)
Load a value from memory into a VEX temporary register.
:param addr: The VexValue containing the addr to load from.
:param ty: The Type of the resulting data
def load(self, addr, ty):
    """
    Load a value from memory into a VEX temporary register.

    :param addr: The VexValue containing the addr to load from.
    :param ty: The Type of the resulting data
    :return: a VexValue
    """
    # Emit the load and wrap the resulting temporary.
    return VexValue(self.irsb_c, self.irsb_c.load(addr.rdt, ty))
Creates a constant as a VexValue
:param val: The value, as an integer
:param ty: The type of the resulting VexValue
def constant(self, val, ty):
    """
    Creates a constant as a VexValue.

    :param val: The value, as an integer
    :param ty: The type of the resulting VexValue
    :return: a VexValue
    """
    # Guard against accidentally wrapping an already-lifted value.
    if isinstance(val, VexValue) and not isinstance(val, IRExpr):
        raise Exception('Constant cannot be made from VexValue or IRExpr')
    return VexValue(self.irsb_c, self.irsb_c.mkconst(val, ty))
Load a value from a machine register into a VEX temporary register.
All values must be loaded out of registers before they can be used with operations, etc
and stored back into them when the instruction is over. See Put().
:param reg: Register number as an integer, or register string name
:param ty: The Type to use.
def get(self, reg, ty):
    """
    Load a value from a machine register into a VEX temporary register.

    All values must be loaded out of registers before they can be used with
    operations, etc and stored back into them when the instruction is over.
    See Put().

    :param reg: Register number as an integer, or register string name
    :param ty: The Type to use.
    :return: A VexValue of the gotten value.
    """
    offset = self.lookup_register(self.irsb_c.irsb.arch, reg)
    # Reads of the instruction pointer resolve to this instruction's address.
    if offset == self.irsb_c.irsb.arch.ip_offset:
        return self.constant(self.addr, ty)
    return VexValue(self.irsb_c, self.irsb_c.rdreg(offset, ty))
Puts a value from a VEX temporary register into a machine register.
This is how the results of operations done to registers get committed to the machine's state.
:param val: The VexValue to store (Want to store a constant? See Constant() first)
:param reg: The integer register number to store into, or register name
def put(self, val, reg):
    """
    Puts a value from a VEX temporary register into a machine register.

    This is how the results of operations done to registers get committed
    to the machine's state.

    :param val: The VexValue to store (Want to store a constant? See Constant() first)
    :param reg: The integer register number to store into, or register name
    :return: None
    """
    arch = self.irsb_c.irsb.arch
    self.irsb_c.put(val.rdt, self.lookup_register(arch, reg))
Like put, except it checks a condition
to decide what to put in the destination register.
:param cond: The VexValue representing the logical expression for the condition
(if your expression only has constants, don't use this method!)
:param valiftrue: the VexValue to put in reg if cond evals as true
:param validfalse: the VexValue to put in reg if cond evals as false
:param reg: The integer register number to store into, or register name
def put_conditional(self, cond, valiftrue, valiffalse, reg):
    """
    Like put, except it checks a condition to decide what to put in the
    destination register.

    :param cond: The VexValue representing the logical expression for the
        condition (if your expression only has constants, don't use this method!)
    :param valiftrue: the VexValue to put in reg if cond evals as true
    :param valiffalse: the VexValue to put in reg if cond evals as false
    :param reg: The integer register number to store into, or register name
    :return: None
    """
    # Select between the two values at runtime with an if-then-else expr.
    chosen = self.irsb_c.ite(cond.rdt, valiftrue.rdt, valiffalse.rdt)
    offset = self.lookup_register(self.irsb_c.irsb.arch, reg)
    self.irsb_c.put(chosen, offset)
Store a VexValue in memory at the specified loaction.
:param val: The VexValue of the value to store
:param addr: The VexValue of the address to store into
def store(self, val, addr):
    """
    Store a VexValue in memory at the specified location.

    :param val: The VexValue of the value to store
    :param addr: The VexValue of the address to store into
    :return: None
    """
    # The IRSB helper takes (address, value) — note the argument order.
    destination = addr.rdt
    payload = val.rdt
    self.irsb_c.store(destination, payload)
Jump to a specified destination, under the specified condition.
Used for branches, jumps, calls, returns, etc.
:param condition: The VexValue representing the expression for the guard, or None for an unconditional jump
:param to_addr: The address to jump to.
:param jumpkind: The JumpKind to use. See the VEX docs for what these are; you only need them for things
aren't normal jumps (e.g., calls, interrupts, program exits, etc etc)
def jump(self, condition, to_addr, jumpkind=JumpKind.Boring, ip_offset=None):
    """
    Jump to a specified destination, under the specified condition.
    Used for branches, jumps, calls, returns, etc.

    :param condition: The VexValue representing the expression for the guard, or None for an unconditional jump
    :param to_addr: The address to jump to.
    :param jumpkind: The JumpKind to use. See the VEX docs for what these are; you only need them for things
        aren't normal jumps (e.g., calls, interrupts, program exits, etc etc)
    :param ip_offset: Register-file offset of the instruction pointer; when None it is taken from self.arch.
    :return: None
    """
    # Normalize the destination into an RdT expression plus its VEX type.
    to_addr_ty = None
    if isinstance(to_addr, VexValue):
        # Unpack a VV
        to_addr_rdt = to_addr.rdt
        to_addr_ty = to_addr.ty
    elif isinstance(to_addr, int):
        # Direct jump to an int, make an RdT and Ty
        to_addr_ty = vex_int_class(self.irsb_c.irsb.arch.bits).type
        to_addr = self.constant(to_addr, to_addr_ty) # TODO archinfo may be changing
        to_addr_rdt = to_addr.rdt
    elif isinstance(to_addr, RdTmp):
        # An RdT; just get the Ty of the arch's pointer type
        to_addr_ty = vex_int_class(self.irsb_c.irsb.arch.bits).type
        to_addr_rdt = to_addr
    else:
        raise ValueError("Jump destination has unknown type: " + repr(type(to_addr)))
    if not condition:
        # This is the default exit.
        self.irsb_c.irsb.jumpkind = jumpkind
        self.irsb_c.irsb.next = to_addr_rdt
    else:
        # Conditional: the fall-through address is added as an exit guarded
        # by the NEGATED condition, and the taken target becomes the
        # block's default next.
        # add another exit
        # EDG says: We should make sure folks set ArchXYZ.ip_offset like they're supposed to
        if ip_offset is None:
            ip_offset = self.arch.ip_offset
        assert ip_offset is not None
        negated_condition_rdt = self.ite(condition, self.constant(0, condition.ty), self.constant(1, condition.ty))
        # self.addr + instruction length = address of the next instruction
        direct_exit_target = self.constant(self.addr + (self.bitwidth // 8), to_addr_ty)
        self.irsb_c.add_exit(negated_condition_rdt, direct_exit_target.rdt, jumpkind, ip_offset)
        self.irsb_c.irsb.jumpkind = jumpkind
        self.irsb_c.irsb.next = to_addr_rdt
Creates a CCall operation.
A CCall is a procedure that calculates a value at *runtime*, not at lift-time.
You can use these for flags, unresolvable jump targets, etc.
We caution you to avoid using them when at all possible though.
For an example of how to write and use a CCall, see gymrat/bf/lift_bf.py
:param ret_type: The return type of the CCall
:param func_obj: The function object to eventually call.
:param args: List of arguments to the function
def ccall(self, ret_type, func_obj, args):
    """
    Creates a CCall operation.

    A CCall is a procedure that calculates a value at *runtime*, not at
    lift-time.  You can use these for flags, unresolvable jump targets, etc.
    We caution you to avoid using them when at all possible though.

    For an example of how to write and use a CCall, see gymrat/bf/lift_bf.py

    :param ret_type: The return type of the CCall
    :param func_obj: The function object to eventually call.
    :param args: List of arguments to the function
    :return: A VexValue of the result.
    """
    # HACK: FIXME: If you're reading this, I'm sorry. It's truly a crime against Python...
    from angr.engines.vex import ccall
    # Unwrap any VexValue arguments down to their underlying temporaries.
    args = tuple(a.rdt if isinstance(a, VexValue) else a for a in args)
    # Support calling ccalls via string name
    if isinstance(func_obj, str):
        func_obj = getattr(ccall, func_obj)
    elif not hasattr(ccall, func_obj.__name__):
        # ew, monkey-patch in the user-provided CCall
        setattr(ccall, func_obj.__name__, func_obj)
    cc = self.irsb_c.op_ccall(ret_type, func_obj.__name__, args)
    return VexValue(self.irsb_c, cc)
Recursively lifts blocks using the registered lifters and postprocessors. Tries each lifter in the order in
which they are registered on the data to lift.
If a lifter raises a LiftingException on the data, it is skipped.
If it succeeds and returns a block with a jumpkind of Ijk_NoDecode, all of the lifters are tried on the rest
of the data and if they work, their output is appended to the first block.
:param arch: The arch to lift the data as.
:type arch: :class:`archinfo.Arch`
:param addr: The starting address of the block. Effects the IMarks.
:param data: The bytes to lift as either a python string of bytes or a cffi buffer object.
:param max_bytes: The maximum number of bytes to lift. If set to None, no byte limit is used.
:param max_inst: The maximum number of instructions to lift. If set to None, no instruction limit is used.
:param bytes_offset: The offset into `data` to start lifting at.
:param opt_level: The level of optimization to apply to the IR, -1 through 2. -1 is the strictest
unoptimized level, 0 is unoptimized but will perform some lookahead/lookbehind
optimizations, 1 performs constant propogation, and 2 performs loop unrolling,
which honestly doesn't make much sense in the context of pyvex. The default is 1.
:param traceflags: The libVEX traceflags, controlling VEX debug prints.
.. note:: Explicitly specifying the number of instructions to lift (`max_inst`) may not always work
exactly as expected. For example, on MIPS, it is meaningless to lift a branch or jump
instruction without its delay slot. VEX attempts to Do The Right Thing by possibly decoding
fewer instructions than requested. Specifically, this means that lifting a branch or jump
on MIPS as a single instruction (`max_inst=1`) will result in an empty IRSB, and subsequent
attempts to run this block will raise `SimIRSBError('Empty IRSB passed to SimIRSB.')`.
.. note:: If no instruction and byte limit is used, pyvex will continue lifting the block until the block
ends properly or until it runs out of data to lift. | def lift(data, addr, arch, max_bytes=None, max_inst=None, bytes_offset=0, opt_level=1, traceflags=0,
strict_block_end=True, inner=False, skip_stmts=False, collect_data_refs=False):
"""
Recursively lifts blocks using the registered lifters and postprocessors. Tries each lifter in the order in
which they are registered on the data to lift.
If a lifter raises a LiftingException on the data, it is skipped.
If it succeeds and returns a block with a jumpkind of Ijk_NoDecode, all of the lifters are tried on the rest
of the data and if they work, their output is appended to the first block.
:param arch: The arch to lift the data as.
:type arch: :class:`archinfo.Arch`
:param addr: The starting address of the block. Effects the IMarks.
:param data: The bytes to lift as either a python string of bytes or a cffi buffer object.
:param max_bytes: The maximum number of bytes to lift. If set to None, no byte limit is used.
:param max_inst: The maximum number of instructions to lift. If set to None, no instruction limit is used.
:param bytes_offset: The offset into `data` to start lifting at.
:param opt_level: The level of optimization to apply to the IR, -1 through 2. -1 is the strictest
unoptimized level, 0 is unoptimized but will perform some lookahead/lookbehind
optimizations, 1 performs constant propogation, and 2 performs loop unrolling,
which honestly doesn't make much sense in the context of pyvex. The default is 1.
:param traceflags: The libVEX traceflags, controlling VEX debug prints.
.. note:: Explicitly specifying the number of instructions to lift (`max_inst`) may not always work
exactly as expected. For example, on MIPS, it is meaningless to lift a branch or jump
instruction without its delay slot. VEX attempts to Do The Right Thing by possibly decoding
fewer instructions than requested. Specifically, this means that lifting a branch or jump
on MIPS as a single instruction (`max_inst=1`) will result in an empty IRSB, and subsequent
attempts to run this block will raise `SimIRSBError('Empty IRSB passed to SimIRSB.')`.
.. note:: If no instruction and byte limit is used, pyvex will continue lifting the block until the block
ends properly or until it runs out of data to lift.
"""
if max_bytes is not None and max_bytes <= 0:
raise PyVEXError("Cannot lift block with no data (max_bytes <= 0)")
if not data:
raise PyVEXError("Cannot lift block with no data (data is empty)")
if isinstance(data, str):
raise TypeError("Cannot pass unicode string as data to lifter")
if isinstance(data, bytes):
py_data = data
c_data = None
allow_arch_optimizations = False
else:
if max_bytes is None:
raise PyVEXError("Cannot lift block with ffi pointer and no size (max_bytes is None)")
c_data = data
py_data = None
allow_arch_optimizations = True
# In order to attempt to preserve the property that
# VEX lifts the same bytes to the same IR at all times when optimizations are disabled
# we hack off all of VEX's non-IROpt optimizations when opt_level == -1.
# This is intended to enable comparisons of the lifted IR between code that happens to be
# found in different contexts.
if opt_level < 0:
allow_arch_optimizations = False
opt_level = 0
for lifter in lifters[arch.name]:
try:
u_data = data
if lifter.REQUIRE_DATA_C:
if c_data is None:
u_data = ffi.new('unsigned char [%d]' % (len(py_data) + 8), py_data + b'\0' * 8)
max_bytes = min(len(py_data), max_bytes) if max_bytes is not None else len(py_data)
else:
u_data = c_data
elif lifter.REQUIRE_DATA_PY:
if py_data is None:
if max_bytes is None:
l.debug('Cannot create py_data from c_data when no max length is given')
continue
u_data = ffi.buffer(c_data, max_bytes)[:]
else:
u_data = py_data
try:
final_irsb = lifter(arch, addr)._lift(u_data, bytes_offset, max_bytes, max_inst, opt_level, traceflags,
allow_arch_optimizations, strict_block_end, skip_stmts, collect_data_refs,
)
except SkipStatementsError:
assert skip_stmts is True
final_irsb = lifter(arch, addr)._lift(u_data, bytes_offset, max_bytes, max_inst, opt_level, traceflags,
allow_arch_optimizations, strict_block_end, skip_stmts=False,
collect_data_refs=collect_data_refs,
)
#l.debug('block lifted by %s' % str(lifter))
#l.debug(str(final_irsb))
break
except LiftingException as ex:
l.debug('Lifting Exception: %s', str(ex))
continue
else:
final_irsb = IRSB.empty_block(arch,
addr,
size=0,
nxt=Const(const.vex_int_class(arch.bits)(addr)),
jumpkind='Ijk_NoDecode',
)
final_irsb.invalidate_direct_next()
return final_irsb
if final_irsb.size > 0 and final_irsb.jumpkind == 'Ijk_NoDecode':
# We have decoded a few bytes before we hit an undecodeable instruction.
# Determine if this is an intentional NoDecode, like the ud2 instruction on AMD64
nodecode_addr_expr = final_irsb.next
if type(nodecode_addr_expr) is Const:
nodecode_addr = nodecode_addr_expr.con.value
next_irsb_start_addr = addr + final_irsb.size
if nodecode_addr != next_irsb_start_addr:
# The last instruction of the IRSB has a non-zero length. This is an intentional NoDecode.
# The very last instruction has been decoded
final_irsb.jumpkind = 'Ijk_NoDecode'
final_irsb.next = final_irsb.next
final_irsb.invalidate_direct_next()
return final_irsb
# Decode more bytes
if skip_stmts:
# When gymrat will be invoked, we will merge future basic blocks to the current basic block. In this case,
# statements are usually required.
# TODO: In the future, we may further optimize it to handle cases where getting statements in gymrat is not
# TODO: required.
return lift(data, addr, arch,
max_bytes=max_bytes,
max_inst=max_inst,
bytes_offset=bytes_offset,
opt_level=opt_level,
traceflags=traceflags,
strict_block_end=strict_block_end,
skip_stmts=False,
collect_data_refs=collect_data_refs,
)
next_addr = addr + final_irsb.size
if max_bytes is not None:
max_bytes -= final_irsb.size
if isinstance(data, (str, bytes)):
data_left = data[final_irsb.size:]
else:
data_left = data + final_irsb.size
if max_inst is not None:
max_inst -= final_irsb.instructions
if (max_bytes is None or max_bytes > 0) and (max_inst is None or max_inst > 0) and data_left:
more_irsb = lift(data_left, next_addr, arch,
max_bytes=max_bytes,
max_inst=max_inst,
bytes_offset=bytes_offset,
opt_level=opt_level,
traceflags=traceflags,
strict_block_end=strict_block_end,
inner=True,
skip_stmts=False,
collect_data_refs=collect_data_refs,
)
if more_irsb.size:
# Successfully decoded more bytes
final_irsb.extend(more_irsb)
elif max_bytes == 0:
# We have no more bytes left. Mark the jumpkind of the IRSB as Ijk_Boring
if final_irsb.size > 0 and final_irsb.jumpkind == 'Ijk_NoDecode':
final_irsb.jumpkind = 'Ijk_Boring'
final_irsb.next = Const(vex_int_class(arch.bits)(final_irsb.addr + final_irsb.size))
if not inner:
for postprocessor in postprocessors[arch.name]:
try:
postprocessor(final_irsb).postprocess()
except NeedStatementsNotification:
# The post-processor cannot work without statements. Re-lift the current block with skip_stmts=False
if not skip_stmts:
# sanity check
# Why does the post-processor raise NeedStatementsNotification when skip_stmts is False?
raise TypeError("Bad post-processor %s: "
"NeedStatementsNotification is raised when statements are available." %
postprocessor.__class__)
# Re-lift the current IRSB
return lift(data, addr, arch,
max_bytes=max_bytes,
max_inst=max_inst,
bytes_offset=bytes_offset,
opt_level=opt_level,
traceflags=traceflags,
strict_block_end=strict_block_end,
inner=inner,
skip_stmts=False,
collect_data_refs=collect_data_refs,
)
except LiftingException:
continue
return final_irsb |
Registers a Lifter or Postprocessor to be used by pyvex. Lifters are given priority based on the order
in which they are registered. Postprocessors will be run in registration order.
:param lifter: The Lifter or Postprocessor to register
def register(lifter, arch_name):
    """
    Registers a Lifter or Postprocessor to be used by pyvex. Lifters are given
    priority based on the order in which they are registered; postprocessors
    will be run in registration order.

    :param lifter: The Lifter or Postprocessor to register
    :vartype lifter: :class:`Lifter` or :class:`Postprocessor`
    """
    # A class could in principle play both roles, so each is checked independently.
    if issubclass(lifter, Lifter):
        l.debug("Registering lifter %s for architecture %s.", lifter.__name__, arch_name)
        lifters[arch_name].append(lifter)
    if issubclass(lifter, Postprocessor):
        l.debug("Registering postprocessor %s for architecture %s.", lifter.__name__, arch_name)
        postprocessors[arch_name].append(lifter)
def child_expressions(self):
    """
    A list of all of the expressions that this expression ends up evaluating.
    """
    found = []
    # Children live in __slots__ attributes; recurse into each IRExpr child.
    for slot in self.__slots__:
        child = getattr(self, slot)
        if isinstance(child, IRExpr):
            found.append(child)
            found.extend(child.child_expressions)
    return found
def constants(self):
    """
    A list of all of the constants that this expression ends up using.
    """
    collected = []
    for slot in self.__slots__:
        child = getattr(self, slot)
        if isinstance(child, IRExpr):
            # Sub-expressions contribute all of their own constants.
            collected.extend(child.constants)
        elif isinstance(child, IRConst):
            collected.append(child)
    return collected
Replace child expressions in-place.
:param IRExpr expr: The expression to look for.
:param IRExpr replacement: The expression to replace with.
:return: None | def replace_expression(self, expr, replacement):
"""
Replace child expressions in-place.
:param IRExpr expr: The expression to look for.
:param IRExpr replacement: The expression to replace with.
:return: None
"""
# Walk every slot of this node; a child may be stored directly, inside a
# list, or inside a tuple, and each storage form is handled below.
for k in self.__slots__:
v = getattr(self, k)
# Identity match on the attribute itself: swap it directly.
if v is expr:
setattr(self, k, replacement)
elif type(v) is list:
# Replace the instance in the list
for i, expr_ in enumerate(v):
if expr_ is expr:
v[i] = replacement
elif type(v) is tuple:
# Rebuild the tuple
# (tuples are immutable, so a new tuple is built and stored back
# only if at least one element was actually replaced)
_lst = [ ]
replaced = False
for i, expr_ in enumerate(v):
if expr_ is expr:
_lst.append(replacement)
replaced = True
else:
_lst.append(expr_)
if replaced:
setattr(self, k, tuple(_lst))
elif isinstance(v, IRExpr):
# No direct match: recurse into the child IR expression.
v.replace_expression(expr, replacement)
Appends an irsb to the current irsb. The irsb that is appended is invalidated. The appended irsb's jumpkind and
default exit are used.
:param extendwith: The IRSB to append to this IRSB
:vartype extendwith: :class:`IRSB` | def extend(self, extendwith):
"""
Appends an irsb to the current irsb. The irsb that is appended is invalidated. The appended irsb's jumpkind and
default exit are used.
:param extendwith: The IRSB to append to this IRSB
:vartype extendwith: :class:`IRSB`
"""
# An empty block has nothing to merge into; just become a copy of the other block.
if self.stmts_used == 0:
self._from_py(extendwith)
return
# Maps tmp numbers of `extendwith` to fresh tmp numbers in this block.
conversion_dict = { }
# Sentinel tmp values VEX uses to mean "no temporary".
invalid_vals = (0xffffffff, -1)
new_size = self.size + extendwith.size
new_instructions = self.instructions + extendwith.instructions
new_direct_next = extendwith.direct_next
def convert_tmp(tmp):
"""
Converts a tmp from the appended-block into one in the appended-to-block. Creates a new tmp if it does not
already exist. Prevents collisions in tmp numbers between the two blocks.
:param tmp: The tmp number to convert
"""
if tmp not in conversion_dict:
tmp_type = extendwith.tyenv.lookup(tmp)
conversion_dict[tmp] = self.tyenv.add(tmp_type)
return conversion_dict[tmp]
def convert_expr(expr_):
"""
Converts a VEX expression to use tmps in the appended-block instead of the appended-to-block. Used to prevent
collisions in tmp numbers between the two blocks.
:param expr_: The VEX expression to convert
:vartype expr_: :class:`IRExpr`
"""
if type(expr_) is RdTmp:
return RdTmp.get_instance(convert_tmp(expr_.tmp))
return expr_
# Renumber every tmp that the appended block's statements define or read.
for stmt_ in extendwith.statements:
stmttype = type(stmt_)
if stmttype is WrTmp:
stmt_.tmp = convert_tmp(stmt_.tmp)
elif stmttype is LoadG:
stmt_.dst = convert_tmp(stmt_.dst)
elif stmttype is LLSC:
stmt_.result = convert_tmp(stmt_.result)
elif stmttype is Dirty:
if stmt_.tmp not in invalid_vals:
stmt_.tmp = convert_tmp(stmt_.tmp)
for e in stmt_.args:
convert_expr(e)
elif stmttype is CAS:
if stmt_.oldLo not in invalid_vals: stmt_.oldLo = convert_tmp(stmt_.oldLo)
if stmt_.oldHi not in invalid_vals: stmt_.oldHi = convert_tmp(stmt_.oldHi)
# Convert all expressions
to_replace = { }
for expr_ in stmt_.expressions:
replacement = convert_expr(expr_)
if replacement is not expr_:
to_replace[expr_] = replacement
for expr_, replacement in to_replace.items():
stmt_.replace_expression(expr_, replacement)
# Add the converted statement to self.statements
self.statements.append(stmt_)
# Adopt the appended block's default exit and update cached totals.
extendwith.next = convert_expr(extendwith.next)
self.next = extendwith.next
self.jumpkind = extendwith.jumpkind
self._size = new_size
self._instructions = new_instructions
self._direct_next = new_direct_next
def expressions(self):
    """
    Return an iterator of all expressions contained in the IRSB.
    """
    # Yield each statement's expressions in order, then the default-exit target.
    for statement in self.statements:
        yield from statement.expressions
    yield self.next
def instructions(self):
    """
    The number of instructions in this block (cached after first computation).
    """
    if self._instructions is None:
        stmts = self.statements
        if stmts is None:
            count = 0
        else:
            # One IMark statement marks the start of each machine instruction.
            count = sum(1 for s in stmts if type(s) is stmt.IMark)
        self._instructions = count
    return self._instructions
def instruction_addresses(self):
    """
    Addresses of instructions in this block (cached after first computation).
    """
    if self._instruction_addresses is None:
        stmts = self.statements
        if stmts is None:
            self._instruction_addresses = []
        else:
            # Each IMark carries the instruction's address plus a delta.
            self._instruction_addresses = [
                s.addr + s.delta for s in stmts if type(s) is stmt.IMark
            ]
    return self._instruction_addresses
def size(self):
    """
    The size of this block, in bytes (cached after first computation).
    """
    if self._size is None:
        # Sum the lengths recorded by the per-instruction IMark statements.
        total = 0
        for s in self.statements:
            if type(s) is stmt.IMark:
                total += s.len
        self._size = total
    return self._size
def operations(self):
    """
    A list of all operations done by the IRSB, as libVEX enum names.
    """
    # Only expressions that carry an `op` attribute represent operations.
    return [e.op for e in self.expressions if hasattr(e, 'op')]
def constants(self):
    """
    The constants (excluding updates of the program counter) in the IRSB as :class:`pyvex.const.IRConst`.
    """
    collected = []
    for s in self.statements:
        # Skip writes to the instruction pointer: those constants are just
        # the PC being advanced, not interesting program data.
        if type(s) is stmt.Put and s.offset == self.offsIP:
            continue
        collected.extend(s.constants)
    return collected
def constant_jump_targets(self):
    """
    A set of the static jump targets of the basic block.
    """
    targets = set()
    # Side exits taken from the middle of the block.
    if self.exit_statements:
        targets.update(s.dst.value for _, _, s in self.exit_statements)
    # The default (fall-through) exit, when it is a known constant.
    default_target = self.default_exit_target
    if default_target is not None:
        targets.add(default_target)
    return targets
def constant_jump_targets_and_jumpkinds(self):
    """
    A dict of the static jump targets of the basic block to their jumpkind.
    """
    targets = {}
    # Side exits carry their own jumpkind.
    if self.exit_statements:
        for _, _, exit_stmt in self.exit_statements:
            targets[exit_stmt.dst.value] = exit_stmt.jumpkind
    # The default exit uses the block-level jumpkind.
    default_target = self.default_exit_target
    if default_target is not None:
        targets[default_target] = self.jumpkind
    return targets
Return the pretty-printed IRSB.
:rtype: str | def _pp_str(self):
"""
Return the pretty-printed IRSB.
:rtype: str
"""
sa = []
sa.append("IRSB {")
if self.statements is not None:
sa.append(" %s" % self.tyenv)
sa.append("")
if self.statements is not None:
for i, s in enumerate(self.statements):
# For Put/Get/Exit statements, resolve raw register offsets to
# architecture register names so the dump is human-readable.
if isinstance(s, stmt.Put):
stmt_str = s.__str__(reg_name=self.arch.translate_register_name(s.offset, s.data.result_size(self.tyenv) // 8))
elif isinstance(s, stmt.WrTmp) and isinstance(s.data, expr.Get):
stmt_str = s.__str__(reg_name=self.arch.translate_register_name(s.data.offset, s.data.result_size(self.tyenv) // 8))
elif isinstance(s, stmt.Exit):
stmt_str = s.__str__(reg_name=self.arch.translate_register_name(s.offsIP, self.arch.bits // 8))
else:
stmt_str = s.__str__()
sa.append(" %02d | %s" % (i, stmt_str))
else:
sa.append(" Statements are omitted.")
# The default exit is rendered as an implicit PUT to the IP register.
sa.append(
" NEXT: PUT(%s) = %s; %s" % (self.arch.translate_register_name(self.offsIP), self.next, self.jumpkind))
sa.append("}")
return '\n'.join(sa)
def _is_defaultexit_direct_jump(self):
    """
    Checks if the default exit of this IRSB is a direct jump or not.
    """
    # Only these jumpkinds can possibly be direct control transfers.
    if self.jumpkind not in ('Ijk_InvalICache', 'Ijk_Boring', 'Ijk_Call'):
        return False
    # A direct jump requires a known constant target.
    return self.default_exit_target is not None
def lookup(self, tmp):
    """
    Return the type of temporary variable `tmp` as an enum string.

    :param int tmp: The temporary variable number.
    :return: The type string, e.g. ``'Ity_I32'``.
    :raises IndexError: If `tmp` is not a valid temporary number.
    """
    # Valid temporaries are 0 .. types_used - 1. The guard previously used
    # `tmp > self.types_used`, which let tmp == types_used slip past and
    # raise an unlogged IndexError from the list access; `>=` makes the
    # bounds check exact.
    if tmp < 0 or tmp >= self.types_used:
        l.debug("Invalid temporary number %d", tmp)
        raise IndexError(tmp)
    return self.types[tmp]
Returns the size, in BITS, of a VEX type specifier
e.g., Ity_I16 -> 16
:param ty:
def get_type_size(ty):
    """
    Returns the size, in BITS, of a VEX type specifier
    e.g., Ity_I16 -> 16

    :param ty: the VEX type string (e.g. ``'Ity_I16'``)
    :return: the size in bits, as an int
    :raises ValueError: if the type string carries no size
    """
    match = type_str_re.match(ty)
    if not match:
        raise ValueError('Type %s does not have size' % ty)
    return int(match.group('size'))
Get the width of a "type specifier"
like I16U
or F16
or just 16
(Yes, this really just takes the int out. If we must special-case, do it here.
:param tyspec:
:return: | def get_type_spec_size(ty):
"""
Get the width of a "type specifier"
like I16U
or F16
or just 16
(Yes, this really just takes the int out. If we must special-case, do it here.)
:param ty: the type-tag specifier string to parse
:return: the integer width embedded in the specifier
:raises ValueError: if the specifier contains no size
"""
m = type_tag_str_re.match(ty)
if m is None:
raise ValueError('Type specifier %s does not have size' % ty)
return int(m.group('size'))
Wrapper around the `lift` method on Lifters. Should not be overridden in child classes.
:param data: The bytes to lift as either a python string of bytes or a cffi buffer object.
:param bytes_offset: The offset into `data` to start lifting at.
:param max_bytes: The maximum number of bytes to lift. If set to None, no byte limit is used.
:param max_inst: The maximum number of instructions to lift. If set to None, no instruction limit is used.
:param opt_level: The level of optimization to apply to the IR, 0-2. Most likely will be ignored in any lifter
other than LibVEX.
:param traceflags: The libVEX traceflags, controlling VEX debug prints. Most likely will be ignored in any
lifter other than LibVEX.
:param allow_arch_optimizations: Should the LibVEX lifter be allowed to perform lift-time preprocessing optimizations
(e.g., lookback ITSTATE optimization on THUMB)
Most likely will be ignored in any lifter other than LibVEX.
:param strict_block_end: Should the LibVEX arm-thumb split block at some instructions, for example CB{N}Z.
:param skip_stmts: Should the lifter skip transferring IRStmts from C to Python.
:param collect_data_refs: Should the LibVEX lifter collect data references in C. | def _lift(self,
data,
bytes_offset=None,
max_bytes=None,
max_inst=None,
opt_level=1,
traceflags=None,
allow_arch_optimizations=None,
strict_block_end=None,
skip_stmts=False,
collect_data_refs=False):
"""
Wrapper around the `lift` method on Lifters. Should not be overridden in child classes.
:param data: The bytes to lift as either a python string of bytes or a cffi buffer object.
:param bytes_offset: The offset into `data` to start lifting at.
:param max_bytes: The maximum number of bytes to lift. If set to None, no byte limit is used.
:param max_inst: The maximum number of instructions to lift. If set to None, no instruction limit is used.
:param opt_level: The level of optimization to apply to the IR, 0-2. Most likely will be ignored in any lifter
other than LibVEX.
:param traceflags: The libVEX traceflags, controlling VEX debug prints. Most likely will be ignored in any
lifter other than LibVEX.
:param allow_arch_optimizations: Should the LibVEX lifter be allowed to perform lift-time preprocessing optimizations
(e.g., lookback ITSTATE optimization on THUMB)
Most likely will be ignored in any lifter other than LibVEX.
:param strict_block_end: Should the LibVEX arm-thumb split block at some instructions, for example CB{N}Z.
:param skip_stmts: Should the lifter skip transferring IRStmts from C to Python.
:param collect_data_refs: Should the LibVEX lifter collect data references in C.
:return: the lifted IRSB
"""
# Start from an empty IRSB; the concrete lifter's lift() fills it in.
irsb = IRSB.empty_block(self.arch, self.addr)
# Stash every lift parameter on the instance so subclasses' lift()
# implementations can read them as attributes.
self.data = data
self.bytes_offset = bytes_offset
self.opt_level = opt_level
self.traceflags = traceflags
self.allow_arch_optimizations = allow_arch_optimizations
self.strict_block_end = strict_block_end
self.collect_data_refs = collect_data_refs
self.max_inst = max_inst
self.max_bytes = max_bytes
self.skip_stmts = skip_stmts
self.irsb = irsb
self.lift()
return self.irsb
Return a function which generates an op format (just a string of the vex instruction)
def make_format_op_generator(fmt_string):
    """
    Return a function which generates an op format (just a string of the vex instruction).
    Functions by formatting the fmt_string with the types of the arguments.
    """
    def gen(arg_types):
        # Map each IRConst type to its op-format name, then substitute into
        # the format template as the `arg_t` list.
        return fmt_string.format(arg_t=[get_op_format_from_const_ty(t) for t in arg_types])
    return gen
Add an exit out of the middle of an IRSB.
(e.g., a conditional jump)
:param guard: An expression, the exit is taken if true
:param dst: the destination of the exit (a Const)
:param jk: the JumpKind of this exit (probably Ijk_Boring)
def add_exit(self, guard, dst, jk, ip):
    """
    Add an exit out of the middle of an IRSB.
    (e.g., a conditional jump)

    :param guard: An expression, the exit is taken if true
    :param dst: the destination of the exit (a Const)
    :param jk: the JumpKind of this exit (probably Ijk_Boring)
    :param ip: The address of this exit's source
    """
    exit_stmt = Exit(guard, dst.con, jk, ip)
    self.irsb.statements.append(exit_stmt)
Creates a constant as a VexValue
:param irsb_c: The IRSBCustomizer to use
:param val: The value, as an integer
:param ty: The type of the resulting VexValue
def Constant(cls, irsb_c, val, ty):
    """
    Creates a constant as a VexValue

    :param irsb_c: The IRSBCustomizer to use
    :param val: The value, as an integer
    :param ty: The type of the resulting VexValue
    :return: a VexValue
    """
    # Only raw values are accepted -- wrapping an existing VexValue or
    # IRExpr here would indicate a caller bug.
    assert not isinstance(val, (VexValue, IRExpr))
    return cls(irsb_c, irsb_c.mkconst(val, ty))
Replace child expressions in-place.
:param IRExpr expression: The expression to look for.
:param IRExpr replacement: The expression to replace with.
:return: None | def replace_expression(self, expression, replacement):
"""
Replace child expressions in-place.
:param IRExpr expression: The expression to look for.
:param IRExpr replacement: The expression to replace with.
:return: None
"""
# Walk each slot; a child may be stored directly, as a nested IRExpr to
# recurse into, or inside a tuple of expressions.
for k in self.__slots__:
v = getattr(self, k)
# Direct identity match: swap the attribute itself.
if v is expression:
setattr(self, k, replacement)
elif isinstance(v, IRExpr):
# No direct match: recurse into the child expression.
v.replace_expression(expression, replacement)
elif type(v) is tuple:
# Rebuild the tuple
# (tuples are immutable, so a new tuple is stored back only if
# at least one element was actually replaced)
_lst = [ ]
replaced = False
for expr_ in v:
if expr_ is expression:
_lst.append(replacement)
replaced = True
else:
_lst.append(expr_)
if replaced:
setattr(self, k, tuple(_lst))
def exp_backoff(attempt, cap=3600, base=300):
    """
    Exponential backoff time.

    Equivalent to ``min(cap, base * 2 ** attempt)``, computed in a
    numerically stable way: the attempt count is compared against a
    logarithm instead of materializing a potentially huge power of two.

    :param attempt: zero-based retry count
    :param cap: upper bound on the returned delay, in seconds
    :param base: delay for the first attempt, in seconds
    """
    attempts_until_cap = math.log(cap / base, 2)
    if attempt > attempts_until_cap:
        return cap
    return base * 2 ** attempt
def get_random(self):
    """ Return a random available proxy (either good or unchecked) """
    candidates = list(self.unchecked | self.good)
    # No proxy available at all -> None.
    return random.choice(candidates) if candidates else None
Return complete proxy name associated with a hostport of a given
``proxy_address``. If ``proxy_address`` is unknown or empty,
def get_proxy(self, proxy_address):
    """
    Return complete proxy name associated with a hostport of a given
    ``proxy_address``. If ``proxy_address`` is unknown or empty,
    return None.
    """
    if not proxy_address:
        return None
    hostport = extract_proxy_hostport(proxy_address)
    return self.proxies_by_hostport.get(hostport, None)
Mark a proxy as dead | def mark_dead(self, proxy, _time=None):
""" Mark a proxy as dead """
if proxy not in self.proxies:
logger.warn("Proxy <%s> was not found in proxies list" % proxy)
return
if proxy in self.good:
logger.debug("GOOD proxy became DEAD: <%s>" % proxy)
else:
logger.debug("Proxy <%s> is DEAD" % proxy)
self.unchecked.discard(proxy)
self.good.discard(proxy)
self.dead.add(proxy)
# `_time` is an injection point for tests; default to the wall clock.
now = _time or time.time()
state = self.proxies[proxy]
# Schedule the next re-check using exponential backoff on the number of
# consecutive failed attempts.
state.backoff_time = self.backoff(state.failed_attempts)
state.next_check = now + state.backoff_time
state.failed_attempts += 1
Mark a proxy as good | def mark_good(self, proxy):
""" Mark a proxy as good """
if proxy not in self.proxies:
logger.warn("Proxy <%s> was not found in proxies list" % proxy)
return
if proxy not in self.good:
logger.debug("Proxy <%s> is GOOD" % proxy)
self.unchecked.discard(proxy)
self.dead.discard(proxy)
self.good.add(proxy)
# A success resets the exponential-backoff failure counter.
self.proxies[proxy].failed_attempts = 0
def reanimate(self, _time=None):
    """ Move dead proxies to unchecked if a backoff timeout passes """
    revived = 0
    # `_time` is an injection point for tests; default to the wall clock.
    now = _time or time.time()
    # Iterate over a snapshot because the set is mutated inside the loop.
    for proxy in list(self.dead):
        state = self.proxies[proxy]
        assert state.next_check is not None
        if state.next_check <= now:
            self.dead.remove(proxy)
            self.unchecked.add(proxy)
            revived += 1
    return revived
def reset(self):
    """ Mark all dead proxies as unchecked """
    # Drain the dead set, re-queueing each proxy for checking.
    while self.dead:
        self.unchecked.add(self.dead.pop())
get real-time quotes (index, last trade price, last trade time, etc) for stocks, using google api: http://finance.google.com/finance/info?client=ig&q=symbols
Unlike python package 'yahoo-finance' (15 min delay), There is no delay for NYSE and NASDAQ stocks in 'googlefinance' package.
example:
quotes = getQuotes('AAPL')
return:
[{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09', u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09', u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST', u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47', u'StockSymbol': u'AAPL', u'ID': u'22144'}]
quotes = getQuotes(['AAPL', 'GOOG'])
return:
[{u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'129.09', u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'129.09', u'Yield': u'1.46', u'LastTradeTime': u'4:04PM EST', u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'0.47', u'StockSymbol': u'AAPL', u'ID': u'22144'}, {u'Index': u'NASDAQ', u'LastTradeWithCurrency': u'571.34', u'LastTradeDateTime': u'2015-03-02T16:04:29Z', u'LastTradePrice': u'571.34', u'Yield': u'', u'LastTradeTime': u'4:04PM EST', u'LastTradeDateTimeLong': u'Mar 2, 4:04PM EST', u'Dividend': u'', u'StockSymbol': u'GOOG', u'ID': u'304466804484872'}]
:param symbols: a single symbol or a list of stock symbols
def getQuotes(symbols):
    '''
    get real-time quotes (index, last trade price, last trade time, etc) for stocks, using google api: http://finance.google.com/finance/info?client=ig&q=symbols

    Unlike python package 'yahoo-finance' (15 min delay), there is no delay for NYSE and NASDAQ stocks in 'googlefinance' package.

    example:
    quotes = getQuotes('AAPL')
    returns a list with one dict per requested symbol, e.g.
    [{u'Index': u'NASDAQ', u'LastTradePrice': u'129.09', u'LastTradeTime': u'4:04PM EST', u'StockSymbol': u'AAPL', ...}]

    :param symbols: a single symbol or a list of stock symbols
    :return: real-time quotes list
    '''
    # Normalize a lone ticker string into a one-element list so the request
    # helper can treat its argument uniformly. The previous check,
    # `type(symbols) == type('str')`, was a brittle exact-type comparison;
    # isinstance also handles str subclasses.
    if isinstance(symbols, str):
        symbols = [symbols]
    content = json.loads(request(symbols))
    return replaceKeys(content)
def add(self, name, monitor=True):
    """Add a folder, library (.py) or resource file (.robot, .tsv, .txt) to the database
    """
    if os.path.isdir(name):
        # Hidden directories (leading dot) are intentionally skipped.
        if not os.path.basename(name).startswith("."):
            self.add_folder(name)
    elif os.path.isfile(name):
        looks_loadable = (self._looks_like_resource_file(name)
                          or self._looks_like_libdoc_file(name)
                          or self._looks_like_library_file(name))
        if looks_loadable:
            self.add_file(name)
    else:
        # let's hope it's a library name!
        self.add_library(name)
Respond to changes in the file system
This method will be given the path to a file that
has changed on disk. We need to reload the keywords
def on_change(self, path, event_type):
    """Respond to changes in the file system

    This method will be given the path to a file that
    has changed on disk. We need to reload the keywords
    from that file
    """
    # Look up the collection row for this path, purge its cached keywords,
    # then re-read them from disk.
    find_sql = """SELECT collection_id
                  FROM collection_table
                  WHERE path == ?
               """
    cursor = self._execute(find_sql, (path,))
    # There should always be exactly one match, but looping over the
    # result set costs nothing and is robust.
    for (collection_id,) in cursor.fetchall():
        self._execute("""DELETE from keyword_table
                         WHERE collection_id == ?
                      """, (collection_id,))
        self._load_keywords(collection_id, path=path)
Load a collection of keywords
def _load_keywords(self, collection_id, path=None, libdoc=None):
    """Load a collection of keywords

    One of path or libdoc needs to be passed in...
    """
    if libdoc is None and path is None:
        raise Exception("You must provide either a path or libdoc argument")
    if libdoc is None:
        libdoc = LibraryDocumentation(path)
    # Looping directly handles the empty-keyword case naturally.
    for keyword in libdoc.keywords:
        self._add_keyword(collection_id, keyword.name, keyword.doc, keyword.args)
def add_file(self, path):
    """Add a resource file or library file to the database"""
    libdoc = LibraryDocumentation(path)
    # Nothing to store for files without keywords.
    if len(libdoc.keywords) == 0:
        return
    if libdoc.doc.startswith("Documentation for resource file"):
        # bah! The file doesn't have an file-level documentation
        # and libdoc substitutes some placeholder text.
        libdoc.doc = ""
    collection_id = self.add_collection(path, libdoc.name, libdoc.type,
                                        libdoc.doc, libdoc.version,
                                        libdoc.scope, libdoc.named_args,
                                        libdoc.doc_format)
    self._load_keywords(collection_id, libdoc=libdoc)
Add a library to the database
This method is for adding a library by name (eg: "BuiltIn")
def add_library(self, name):
    """Add a library to the database

    This method is for adding a library by name (eg: "BuiltIn")
    rather than by a file.
    """
    libdoc = LibraryDocumentation(name)
    # Nothing to store for libraries without keywords.
    if not libdoc.keywords:
        return
    # FIXME: figure out the path to the library file
    collection_id = self.add_collection(None, libdoc.name, libdoc.type,
                                        libdoc.doc, libdoc.version,
                                        libdoc.scope, libdoc.named_args,
                                        libdoc.doc_format)
    self._load_keywords(collection_id, libdoc=libdoc)
Recursively add all files in a folder to the database
By "all files" I mean, "all files that are resource files
or library files". It will silently ignore files that don't
look like they belong in the database. Pity the fool who
uses non-standard suffixes.
N.B. folders with names that begin with '." will be skipped | def add_folder(self, dirname, watch=True):
"""Recursively add all files in a folder to the database
By "all files" I mean, "all files that are resource files
or library files". It will silently ignore files that don't
look like they belong in the database. Pity the fool who
uses non-standard suffixes.
N.B. folders with names that begin with '.' will be skipped
"""
# Per-folder exclusions may be listed in a .rfhubignore file
# (one pattern per line; '#' lines are comments).
ignore_file = os.path.join(dirname, ".rfhubignore")
exclude_patterns = []
try:
with open(ignore_file, "r") as f:
exclude_patterns = []
for line in f.readlines():
line = line.strip()
if (re.match(r'^\s*#', line)): continue
if len(line.strip()) > 0:
exclude_patterns.append(line)
except:
# should probably warn the user?
pass
for filename in os.listdir(dirname):
path = os.path.join(dirname, filename)
(basename, ext) = os.path.splitext(filename.lower())
try:
if (os.path.isdir(path)):
# Recurse into readable, non-hidden subdirectories; only the
# top-level call installs a filesystem watcher (watch=False here).
if (not basename.startswith(".")):
if os.access(path, os.R_OK):
self.add_folder(path, watch=False)
else:
if (ext in (".xml", ".robot", ".txt", ".py", ".tsv")):
if os.access(path, os.R_OK):
self.add(path)
except Exception as e:
# I really need to get the logging situation figured out.
print("bummer:", str(e))
# FIXME:
# instead of passing a flag around, I should just keep track
# of which folders we're watching, and don't add watchers for
# any subfolders. That will work better in the case where
# the user accidentally starts up the hub giving the same
# folder, or a folder and its children, on the command line...
if watch:
# add watcher on normalized path
dirname = os.path.abspath(dirname)
event_handler = WatchdogHandler(self, dirname)
self.observer.schedule(event_handler, dirname, recursive=True)
def add_collection(self, path, c_name, c_type, c_doc, c_version="unknown",
                   c_scope="", c_namedargs="yes", c_doc_format="ROBOT"):
    """Insert data into the collection table

    Returns the id of the newly inserted row.
    """
    if path is not None:
        # We want to store the normalized form of the path in the database
        path = os.path.abspath(path)
    cursor = self.db.cursor()
    insert_sql = """
        INSERT INTO collection_table
            (name, type, version, scope, namedargs, path, doc, doc_format)
        VALUES
            (?,?,?,?,?,?,?,?)
    """
    cursor.execute(insert_sql,
                   (c_name, c_type, c_version, c_scope, c_namedargs, path, c_doc, c_doc_format))
    return cursor.lastrowid
Add any installed libraries that we can find
We do this by looking in the `libraries` folder where
robot is installed. If you have libraries installed
in a non-standard place, this won't pick them up. | def add_installed_libraries(self, extra_libs = ["Selenium2Library",
"SudsLibrary",
"RequestsLibrary"]):
"""Add any installed libraries that we can find
We do this by looking in the `libraries` folder where
robot is installed. If you have libraries installed
in a non-standard place, this won't pick them up.
"""
# NOTE(review): `extra_libs` is a mutable default argument. It is only
# read, never mutated, so this is harmless here, but a tuple would be safer.
libdir = os.path.dirname(robot.libraries.__file__)
loaded = []
for filename in os.listdir(libdir):
if filename.endswith(".py") or filename.endswith(".pyc"):
libname, ext = os.path.splitext(filename)
# `loaded` tracks lower-cased names so the same library is not
# registered twice (e.g. both foo.py and foo.pyc).
if (libname.lower() not in loaded and
not self._should_ignore(libname)):
try:
self.add(libname)
loaded.append(libname.lower())
except Exception as e:
# need a better way to log this...
self.log.debug("unable to add library: " + str(e))
# I hate how I implemented this, but I don't think there's
# any way to find out which installed python packages are
# robot libraries.
for library in extra_libs:
if (library.lower() not in loaded and
not self._should_ignore(library)):
try:
self.add(library)
loaded.append(library.lower())
except Exception as e:
self.log.debug("unable to add external library %s: %s" % \
(library, str(e)))
def get_collection(self, collection_id):
    """Get a specific collection

    :param collection_id: either a numeric collection id, or a name
        pattern matched with SQL LIKE against the collection name.
    :return: a dictionary describing the collection.
    """
    sql = """SELECT collection.collection_id, collection.type,
                    collection.name, collection.path,
                    collection.doc,
                    collection.version, collection.scope,
                    collection.namedargs,
                    collection.doc_format
             FROM collection_table as collection
             WHERE collection_id == ? OR collection.name like ?
          """
    cursor = self._execute(sql, (collection_id, collection_id))
    # need to handle the case where we get more than one result...
    sql_result = cursor.fetchone()
    # The trailing `return sql_result` that followed this return was
    # unreachable dead code and has been removed.
    return {
        "collection_id": sql_result[0],
        "type": sql_result[1],
        "name": sql_result[2],
        "path": sql_result[3],
        "doc": sql_result[4],
        "version": sql_result[5],
        "scope": sql_result[6],
        "namedargs": sql_result[7],
        "doc_format": sql_result[8],
    }
def get_collections(self, pattern="*", libtype="*"):
    """Returns a list of collection name/summary tuples"""
    sql = """SELECT collection.collection_id, collection.name, collection.doc,
             collection.type, collection.path
             FROM collection_table as collection
             WHERE name like ?
             AND type like ?
             ORDER BY collection.name
    """
    cursor = self._execute(sql, (self._glob_to_sql(pattern),
                                 self._glob_to_sql(libtype)))
    # "synopsis" is just the first line of the full documentation
    return [
        {"collection_id": c_id,
         "name": c_name,
         "synopsis": c_doc.split("\n")[0],
         "type": c_type,
         "path": c_path,
         }
        for (c_id, c_name, c_doc, c_type, c_path) in cursor.fetchall()
    ]
def get_keyword(self, collection_id, name):
    """Get a specific keyword from a library"""
    sql = """SELECT keyword.name, keyword.args, keyword.doc
             FROM keyword_table as keyword
             WHERE keyword.collection_id == ?
             AND keyword.name like ?
    """
    cursor = self._execute(sql, (collection_id, name))
    # Keyword names are assumed unique within a collection, so the
    # first row is taken as *the* match.
    row = cursor.fetchone()
    if row is None:
        return {}
    kw_name, kw_args, kw_doc = row
    return {"name": kw_name,
            # args are stored as JSON text; hand back a real list
            "args": json.loads(kw_args),
            "doc": kw_doc,
            "collection_id": collection_id,
            }
Returns all keywords that match a glob-style pattern
The result is a list of dictionaries, sorted by collection
name.
The pattern matching is insensitive to case. The function
returns a list of (library_name, keyword_name,
def get_keyword_hierarchy(self, pattern="*"):
    """Returns all keywords that match a glob-style pattern

    The result is a list of dictionaries (one per collection, each
    holding its keywords), sorted by collection name. The pattern
    matching is insensitive to case.
    """
    sql = """SELECT collection.collection_id, collection.name, collection.path,
             keyword.name, keyword.doc
             FROM collection_table as collection
             JOIN keyword_table as keyword
             WHERE collection.collection_id == keyword.collection_id
             AND keyword.name like ?
             ORDER by collection.name, collection.collection_id, keyword.name
    """
    cursor = self._execute(sql, (self._glob_to_sql(pattern),))
    libraries = []
    previous_id = None
    for (c_id, c_name, c_path, kw_name, kw_doc) in cursor.fetchall():
        if c_id != previous_id:
            # rows arrive grouped by collection, so a new id means
            # we start a new library entry
            previous_id = c_id
            libraries.append({"name": c_name,
                              "collection_id": c_id,
                              "keywords": [],
                              "path": c_path})
        libraries[-1]["keywords"].append({"name": kw_name, "doc": kw_doc})
    return libraries
Perform a pattern-based search on keyword names and documentation
The pattern matching is insensitive to case. The function
returns a list of tuples of the form library_id, library_name,
keyword_name, keyword_synopsis, sorted by library id,
library name, and then keyword name
If a pattern begins with "name:", only the keyword names will
be searched. Otherwise, the pattern is searched for in both
the name and keyword documentation.
You can limit the search to a single library by specifying
"in:" followed by the name of the library or resource
file. For example, "screenshot in:Selenium2Library" will only
def search(self, pattern="*", mode="both"):
    """Perform a pattern-based search on keyword names and documentation

    Matching is case-insensitive. Returns a de-duplicated list of
    (collection_id, collection_name, keyword_name, synopsis) tuples,
    where the synopsis is the first line of the keyword doc.

    When mode is "name", only keyword names are searched; otherwise
    both the name and the documentation are searched.
    """
    pattern = self._glob_to_sql(pattern)
    if mode == "name":
        condition = "(keyword.name like ?)"
        args = [pattern]
    else:
        condition = "(keyword.name like ? OR keyword.doc like ?)"
        args = [pattern, pattern]
    sql = """SELECT collection.collection_id, collection.name, keyword.name, keyword.doc
             FROM collection_table as collection
             JOIN keyword_table as keyword
             WHERE collection.collection_id == keyword.collection_id
             AND %s
             ORDER by collection.collection_id, collection.name, keyword.name
    """ % condition
    cursor = self._execute(sql, args)
    rows = [(c_id, c_name, kw_name, kw_doc.strip().split("\n")[0])
            for (c_id, c_name, kw_name, kw_doc) in cursor.fetchall()]
    # set() removes duplicates (at the cost of the ORDER BY ordering)
    return list(set(rows))
Returns all keywords that match a glob-style pattern
The pattern matching is insensitive to case. The function
returns a list of (library_name, keyword_name,
def get_keywords(self, pattern="*"):
    """Returns all keywords that match a glob-style pattern

    Matching is insensitive to case. The result is a de-duplicated
    list of (collection_id, collection_name, keyword_name, doc,
    args) tuples.
    """
    sql = """SELECT collection.collection_id, collection.name,
             keyword.name, keyword.doc, keyword.args
             FROM collection_table as collection
             JOIN keyword_table as keyword
             WHERE collection.collection_id == keyword.collection_id
             AND keyword.name like ?
             ORDER by collection.name, keyword.name
    """
    cursor = self._execute(sql, (self._glob_to_sql(pattern),))
    # set() drops duplicate rows (and discards the sort order)
    return list(set(tuple(row) for row in cursor.fetchall()))
Return true if an xml file looks like a libdoc file | def _looks_like_libdoc_file(self, name):
"""Return true if an xml file looks like a libdoc file"""
# inefficient since we end up reading the file twice,
# but it's fast enough for our purposes, and prevents
# us from doing a full parse of files that are obviously
# not libdoc files
if name.lower().endswith(".xml"):
with open(name, "r") as f:
# read the first few lines; if we don't see
# what looks like libdoc data, return false
data = f.read(200)
index = data.lower().find("<keywordspec ")
if index > 0:
return True
return False |
Return true if the file has a keyword table but not a testcase table | def _looks_like_resource_file(self, name):
"""Return true if the file has a keyword table but not a testcase table"""
# inefficient since we end up reading the file twice,
# but it's fast enough for our purposes, and prevents
# us from doing a full parse of files that are obviously
# not robot files
if (re.search(r'__init__.(txt|robot|html|tsv)$', name)):
# These are initialize files, not resource files
return False
found_keyword_table = False
if (name.lower().endswith(".robot") or
name.lower().endswith(".txt") or
name.lower().endswith(".tsv")):
with open(name, "r") as f:
data = f.read()
for match in re.finditer(r'^\*+\s*(Test Cases?|(?:User )?Keywords?)',
data, re.MULTILINE|re.IGNORECASE):
if (re.match(r'Test Cases?', match.group(1), re.IGNORECASE)):
# if there's a test case table, it's not a keyword file
return False
if (not found_keyword_table and
re.match(r'(User )?Keywords?', match.group(1), re.IGNORECASE)):
found_keyword_table = True
return found_keyword_table |
Return True if a given library name should be ignored
This is necessary because not all files we find in the library
folder are libraries. I wish there was a public robot API
for "give me a list of installed libraries"... | def _should_ignore(self, name):
"""Return True if a given library name should be ignored
This is necessary because not all files we find in the library
folder are libraries. I wish there was a public robot API
for "give me a list of installed libraries"...
"""
_name = name.lower()
return (_name.startswith("deprecated") or
_name.startswith("_") or
_name in ("remote", "reserved",
"dialogs_py", "dialogs_ipy", "dialogs_jy")) |
Execute an SQL query
This exists because I think it's tedious to get a cursor and
then use a cursor. | def _execute(self, *args):
"""Execute an SQL query
This exists because I think it's tedious to get a cursor and
then use a cursor.
"""
cursor = self.db.cursor()
cursor.execute(*args)
return cursor |
Insert data into the keyword table
'args' should be a list, but since we can't store a list in an
sqlite database, we'll make it json so we can convert it back
to a list later. | def _add_keyword(self, collection_id, name, doc, args):
"""Insert data into the keyword table
'args' should be a list, but since we can't store a list in an
sqlite database we'll make it json we can can convert it back
to a list later.
"""
argstring = json.dumps(args)
self.db.execute("""
INSERT INTO keyword_table
(collection_id, name, doc, args)
VALUES
(?,?,?,?)
""", (collection_id, name, doc, argstring)) |
def doc():
    """Show a list of libraries, along with the nav panel on the left"""
    kwdb = current_app.kwdb
    context = {"libraries": get_collections(kwdb, libtype="library"),
               "resource_files": get_collections(kwdb, libtype="resource"),
               "hierarchy": get_navpanel_data(kwdb),
               "version": __version__,
               # no specific library selected on the home page
               "libdoc": None}
    return flask.render_template("home.html", data=context)
def index():
    """Show a list of available libraries, and resource files"""
    kwdb = current_app.kwdb
    return flask.render_template(
        "libraryNames.html",
        data={"libraries": get_collections(kwdb, libtype="library"),
              "resource_files": get_collections(kwdb, libtype="resource"),
              "version": __version__})
def search():
    """Show all keywords that match a pattern"""
    pattern = flask.request.args.get('pattern', "*").strip().lower()

    # names of every known collection, lowercased, for "in:" filtering
    known = [c["name"].lower() for c in current_app.kwdb.get_collections()]

    # a leading "name:" restricts the search to keyword names only
    if pattern.startswith("name:"):
        pattern = pattern[5:].strip()
        mode = "name"
    else:
        mode = "both"

    # words of the form "in:<collection>" act as collection filters
    # (eg: "screenshot in:Selenium2Library"); everything else is
    # rejoined into the actual search pattern
    filters = []
    words = []
    for word in pattern.split(" "):
        if word.lower().startswith("in:"):
            prefix = word[3:]
            filters.extend([name for name in known if name.startswith(prefix)])
        else:
            words.append(word)
    pattern = " ".join(words)

    keywords = []
    for (c_id, c_name, kw_name, synopsis) in current_app.kwdb.search(pattern, mode):
        if filters and c_name.lower() not in filters:
            continue
        keywords.append({"collection_id": c_id,
                         "collection_name": c_name,
                         "name": kw_name,
                         "synopsis": synopsis,
                         "version": __version__,
                         "url": flask.url_for(".doc_for_library",
                                              collection_id=c_id,
                                              keyword=kw_name),
                         "row_id": "row-%s.%s" % (c_name.lower(),
                                                  kw_name.lower().replace(" ", "-")),
                         })
    keywords.sort(key=lambda kw: kw["name"])
    return flask.render_template("search.html",
                                 data={"keywords": keywords,
                                       "version": __version__,
                                       "pattern": pattern})
def get_collections(kwdb, libtype="*"):
    """Get list of collections from kwdb, then add urls necessary for hyperlinks"""
    collections = kwdb.get_collections(libtype=libtype)
    for entry in collections:
        entry["url"] = flask.url_for(".doc_for_library",
                                     collection_id=entry["collection_id"])
    return collections
def get_navpanel_data(kwdb):
    """Get navpanel data from kwdb, and add urls necessary for hyperlinks"""
    data = kwdb.get_keyword_hierarchy()
    for library in data:
        c_id = library["collection_id"]
        library["url"] = flask.url_for(".doc_for_library", collection_id=c_id)
        for keyword in library["keywords"]:
            keyword["url"] = flask.url_for(".doc_for_library",
                                           collection_id=c_id,
                                           keyword=keyword["name"])
    return data
def doc_to_html(doc, doc_format="ROBOT"):
    """Convert documentation to HTML"""
    # imported lazily so the blueprint can load without libdoc
    from robot.libdocpkg.htmlwriter import DocToHtml
    converter = DocToHtml(doc_format)
    return converter(doc)
def start(self):
    """Start the app"""
    if self.args.debug:
        # flask's builtin server gives us the debugger and reloader
        self.app.run(port=self.args.port, debug=self.args.debug,
                     host=self.args.interface)
        return
    root = "http://%s:%s" % (self.args.interface, self.args.port)
    print("tornado web server running on " + root)
    self.shutdown_requested = False
    http_server = HTTPServer(WSGIContainer(self.app))
    http_server.listen(port=self.args.port, address=self.args.interface)
    # ctrl-c sets the shutdown flag; a periodic callback notices it
    signal.signal(signal.SIGINT, self.signal_handler)
    tornado.ioloop.PeriodicCallback(self.check_shutdown_flag, 500).start()
    tornado.ioloop.IOLoop.instance().start()
def check_shutdown_flag(self):
    """Shutdown the server if the flag has been set"""
    if not self.shutdown_requested:
        return
    tornado.ioloop.IOLoop.instance().stop()
    print("web server stopped.")
Yields the coordinates from a Feature or Geometry.
:param obj: A geometry or feature to extract the coordinates from.
:type obj: Feature, Geometry
:return: A generator with coordinate tuples from the geometry or feature.
def coords(obj):
    """
    Yields the coordinates from a Feature or Geometry.

    :param obj: A geometry or feature to extract the coordinates from.
    :type obj: Feature, Geometry
    :return: A generator with coordinate tuples from the geometry or feature.
    :rtype: generator
    """
    if 'features' in obj:
        # FeatureCollection: recurse into each feature.
        # (Explicit inner loop instead of `yield from` keeps
        # Python 2 compatibility.)
        for feature in obj['features']:
            for coord in coords(feature):
                yield coord
        return
    if isinstance(obj, (tuple, list)):
        coordinates = obj
    elif 'geometry' in obj:
        coordinates = obj['geometry']['coordinates']
    else:
        coordinates = obj.get('coordinates', obj)
    for element in coordinates:
        if isinstance(element, (float, int)):
            # a flat list of numbers *is* a coordinate tuple
            yield tuple(coordinates)
            break
        for coord in coords(element):
            yield coord
Returns the mapped coordinates from a Geometry after applying the provided
function to each dimension in tuples list (ie, linear scaling).
:param func: Function to apply to individual coordinate values
independently
:type func: function
:param obj: A geometry or feature to extract the coordinates from.
:type obj: Point, LineString, MultiPoint, MultiLineString, Polygon,
MultiPolygon
:return: The result of applying the function to each dimension in the
array.
:rtype: list
def map_coords(func, obj):
    """
    Returns the mapped coordinates from a Geometry after applying the provided
    function to each dimension in tuples list (ie, linear scaling).

    :param func: Function to apply to individual coordinate values
    independently
    :type func: function
    :param obj: A geometry or feature to extract the coordinates from.
    :type obj: Point, LineString, MultiPoint, MultiLineString, Polygon,
    MultiPolygon
    :return: The result of applying the function to each dimension in the
    array.
    :rtype: list
    :raises ValueError: if the provided object is not GeoJSON.
    """
    # lift the per-dimension function to a per-tuple function and
    # delegate the geometry traversal to map_tuples
    return map_tuples(lambda coord: (func(coord[0]), func(coord[1])), obj)
Returns the mapped coordinates from a Geometry after applying the provided
function to each coordinate.
:param func: Function to apply to tuples
:type func: function
:param obj: A geometry or feature to extract the coordinates from.
:type obj: Point, LineString, MultiPoint, MultiLineString, Polygon,
MultiPolygon
:return: The result of applying the function to each dimension in the
array.
:rtype: list
def map_tuples(func, obj):
    """
    Returns the mapped coordinates from a Geometry after applying the provided
    function to each coordinate.

    :param func: Function to apply to tuples
    :type func: function
    :param obj: A geometry or feature to extract the coordinates from.
    :type obj: Point, LineString, MultiPoint, MultiLineString, Polygon,
    MultiPolygon
    :return: The result of applying the function to each dimension in the
    array.
    :rtype: list
    :raises ValueError: if the provided object is not GeoJSON.
    """
    geom_type = obj['type']
    if geom_type == 'Point':
        coordinates = tuple(func(obj['coordinates']))
    elif geom_type in ('LineString', 'MultiPoint'):
        coordinates = [tuple(func(pt)) for pt in obj['coordinates']]
    elif geom_type in ('MultiLineString', 'Polygon'):
        coordinates = [[tuple(func(pt)) for pt in ring]
                       for ring in obj['coordinates']]
    elif geom_type == 'MultiPolygon':
        coordinates = [[[tuple(func(pt)) for pt in ring]
                        for ring in polygon]
                       for polygon in obj['coordinates']]
    elif geom_type in ('Feature', 'FeatureCollection', 'GeometryCollection'):
        # container objects: recurse through every geometry they hold
        return map_geometries(lambda geom: map_tuples(func, geom), obj)
    else:
        raise ValueError("Invalid geometry object %s" % repr(obj))
    return {'type': geom_type, 'coordinates': coordinates}
Returns the result of passing every geometry in the given geojson object
through func.
:param func: Function to apply to tuples
:type func: function
:param obj: A geometry or feature to extract the coordinates from.
:type obj: GeoJSON
:return: The result of applying the function to each geometry
:rtype: list
def map_geometries(func, obj):
    """
    Returns the result of passing every geometry in the given geojson object
    through func.

    :param func: Function to apply to tuples
    :type func: function
    :param obj: A geometry or feature to extract the coordinates from.
    :type obj: GeoJSON
    :return: The result of applying the function to each geometry
    :rtype: list
    :raises ValueError: if the provided object is not geojson.
    """
    obj_type = obj['type']
    if obj_type in ('Point', 'LineString', 'MultiPoint',
                    'MultiLineString', 'Polygon', 'MultiPolygon'):
        # a bare geometry is passed straight through func
        return func(obj)
    if obj_type == 'GeometryCollection':
        return {'type': obj_type,
                'geometries': [func(geom) if geom else None
                               for geom in obj['geometries']]}
    if obj_type == 'Feature':
        geom = func(obj['geometry']) if obj['geometry'] else None
        return {'type': obj_type,
                'geometry': geom,
                'properties': obj['properties']}
    if obj_type == 'FeatureCollection':
        return {'type': obj_type,
                'features': [map_geometries(func, feat)
                             for feat in obj['features']]}
    raise ValueError("Invalid GeoJSON object %s" % repr(obj))
Generates random geojson features depending on the parameters
passed through.
The bounding box defaults to the world - [-180.0, -90.0, 180.0, 90.0].
The number of vertices defaults to 3.
:param featureType: A geometry type
:type featureType: Point, LineString, Polygon
:param numberVertices: The number vertices that a linestring or polygon
will have
:type numberVertices: int
:param boundingBox: A bounding box in which features will be restricted to
:type boundingBox: list
:return: The resulting random geojson object or geometry collection.
:rtype: object
def generate_random(featureType, numberVertices=3,
                    boundingBox=[-180.0, -90.0, 180.0, 90.0]):
    """
    Generates random geojson features depending on the parameters
    passed through.
    The bounding box defaults to the world - [-180.0, -90.0, 180.0, 90.0].
    The number of vertices defaults to 3.

    :param featureType: A geometry type
    :type featureType: Point, LineString, Polygon
    :param numberVertices: The number vertices that a linestring or polygon
    will have
    :type numberVertices: int
    :param boundingBox: A bounding box in which features will be restricted to
    :type boundingBox: list
    :return: The resulting random geojson object or geometry collection.
    :rtype: object

    :raises ValueError: if there is no featureType provided.
    """
    from geojson import Point, LineString, Polygon
    import random
    import math

    lon_min, lat_min, lon_max, lat_max = boundingBox

    def random_lon():
        return random.uniform(lon_min, lon_max)

    def random_lat():
        return random.uniform(lat_min, lat_max)

    def make_point():
        # longitude is drawn first, then latitude
        return Point((random_lon(), random_lat()))

    def make_line():
        return LineString([make_point() for _ in range(numberVertices)])

    def clamp(value, lo, hi):
        # a reversed range (lo > hi) is treated as a no-op
        if lo > hi:
            return value
        if value < lo:
            return lo
        if value > hi:
            return hi
        return value

    def make_polygon():
        # Build an irregular, star-like ring around a fixed centre,
        # then close the ring by repeating the first vertex.
        # NOTE(review): the polygon ignores boundingBox and uses a
        # hard-coded centre/radius, mirroring the original behavior.
        ave_radius = 60
        ctr_x, ctr_y = 0.1, 0.2
        irregularity = clamp(0.1, 0, 1) * 2 * math.pi / numberVertices
        spikeyness = clamp(0.5, 0, 1) * ave_radius

        lower = (2 * math.pi / numberVertices) - irregularity
        upper = (2 * math.pi / numberVertices) + irregularity
        angle_steps = []
        total = 0
        for _ in range(numberVertices):
            step = random.uniform(lower, upper)
            angle_steps.append(step)
            total += step
        # normalize the steps so they sum to exactly one full turn
        norm = total / (2 * math.pi)
        angle_steps = [step / norm for step in angle_steps]

        points = []
        angle = random.uniform(0, 2 * math.pi)
        for step in angle_steps:
            radius = clamp(random.gauss(ave_radius, spikeyness),
                           0, 2 * ave_radius)
            points.append((int(ctr_x + radius * math.cos(angle)),
                           int(ctr_y + radius * math.sin(angle))))
            angle += step
        points.append(points[0])
        return Polygon([points])

    if featureType == 'Point':
        return make_point()
    if featureType == 'LineString':
        return make_line()
    if featureType == 'Polygon':
        return make_polygon()
Create an instance of SimpleWebFeature from a dict, o. If o does not
match a Python feature object, simply return o. This function serves as a
json decoder hook. See coding.load().
:param o: A dict to create the SimpleWebFeature from.
:type o: dict
:return: A SimpleWebFeature from the dict provided.
def create_simple_web_feature(o):
    """
    Create an instance of SimpleWebFeature from a dict, o. If o does not
    match a Python feature object, simply return o. This function serves as a
    json decoder hook. See coding.load().

    :param o: A dict to create the SimpleWebFeature from.
    :type o: dict
    :return: A SimpleWebFeature from the dict provided.
    :rtype: SimpleWebFeature
    """
    try:
        feature_id = o['id']
        geometry = o['geometry']
        properties = o['properties']
        return SimpleWebFeature(
            str(feature_id),
            {'type': str(geometry.get('type')),
             'coordinates': geometry.get('coordinates', [])},
            title=properties.get('title'),
            summary=properties.get('summary'),
            link=str(properties.get('link')))
    except (KeyError, TypeError):
        # not feature-shaped; hand the object back untouched
        pass
    return o
Encode a GeoJSON dict into an GeoJSON object.
Assumes the caller knows that the dict should satisfy a GeoJSON type.
:param cls: Dict containing the elements to be encoded into a GeoJSON
object.
:type cls: dict
:param ob: GeoJSON object into which to encode the dict provided in
`cls`.
:type ob: GeoJSON
:param default: A default instance to append the content of the dict
to if none is provided.
:type default: GeoJSON
:param strict: Raise error if unable to coerce particular keys or
attributes to a valid GeoJSON structure.
:type strict: bool
:return: A GeoJSON object with the dict's elements as its constituents.
:rtype: GeoJSON
:raises TypeError: If the input dict contains items that are not valid
GeoJSON types.
:raises UnicodeEncodeError: If the input dict contains items of a type
that contain non-ASCII characters.
:raises AttributeError: If the input dict contains items that are not
def to_instance(cls, ob, default=None, strict=False):
    """Encode a GeoJSON dict into an GeoJSON object.
    Assumes the caller knows that the dict should satisfy a GeoJSON type.

    :param cls: Dict containing the elements to be encoded into a GeoJSON
    object.
    :type cls: dict
    :param ob: GeoJSON object into which to encode the dict provided in
    `cls`.
    :type ob: GeoJSON
    :param default: A default instance to append the content of the dict
    to if none is provided.
    :type default: GeoJSON
    :param strict: Raise error if unable to coerce particular keys or
    attributes to a valid GeoJSON structure.
    :type strict: bool
    :return: A GeoJSON object with the dict's elements as its constituents.
    :rtype: GeoJSON
    :raises TypeError: If the input dict contains items that are not valid
    GeoJSON types.
    :raises UnicodeEncodeError: If the input dict contains items of a type
    that contain non-ASCII characters.
    :raises AttributeError: If the input dict contains items that are not
    valid GeoJSON types.
    """
    if ob is None and default is not None:
        instance = default()
    elif isinstance(ob, GeoJSON):
        instance = ob
    else:
        mapping = to_mapping(ob)
        d = {key: mapping[key] for key in mapping}
        try:
            type_ = d.pop("type")
            try:
                type_ = str(type_)
            except UnicodeEncodeError:
                # If the type contains non-ascii characters, we can assume
                # it's not a valid GeoJSON type.
                # Bug fix: .format() used to be chained onto the raise
                # expression (raising the unformatted message and then
                # never calling format); apply it to the string instead.
                raise AttributeError(
                    "{0} is not a GeoJSON type".format(type_))
            geojson_factory = getattr(geojson.factory, type_)
            instance = geojson_factory(**d)
        except (AttributeError, KeyError) as invalid:
            if strict:
                msg = "Cannot coerce %r into a valid GeoJSON structure: %s"
                msg %= (ob, invalid)
                raise ValueError(msg)
            instance = ob
    return instance
def check_list_errors(self, checkFunc, lst):
    """Validation helper function.

    Runs checkFunc over every item and returns only the truthy
    error values it produced.
    """
    errors = []
    for item in lst:
        err = checkFunc(item)
        if err:
            errors.append(err)
    return errors
def _glob_escape(pathname):
    """
    Escape all special characters.
    """
    drive, rest = os.path.splitdrive(pathname)
    # wrap each glob metacharacter in [] so it matches literally;
    # the drive part is left untouched
    return drive + _magic_check.sub(r'[\1]', rest)
Runs a keyword only once in one of the parallel processes.
As the keyword will be called
only in one process and the return value could basically be anything.
The "Run Only Once" can't return the actual return value.
If the keyword fails, "Run Only Once" fails.
Others executing "Run Only Once" wait before going through this
keyword before the actual command has been executed.
NOTE! This is a potential "shoot yourself in the knee" keyword
Especially note that all the namespace changes are only visible
in the process that actually executed the keyword.
Also note that this might lead to odd situations if used inside
of other keywords.
Also at this point the keyword will be identified to be same
def run_only_once(self, keyword):
    """
    Runs a keyword only once in one of the parallel processes.

    The keyword executes in whichever process arrives first; every
    other process blocks on the lock until that execution finishes,
    then skips the keyword. Because only one process runs it, the
    actual return value cannot be propagated. If the keyword failed
    in the executing process, this keyword fails everywhere.

    NOTE! This is a potential "shoot yourself in the knee" keyword:
    any namespace changes are visible only in the process that
    actually executed the keyword, using it inside other keywords
    may lead to odd situations, and keywords are identified by name
    alone.
    """
    lock_name = 'pabot_run_only_once_%s' % keyword
    try:
        self.acquire_lock(lock_name)
        state = self.get_parallel_value_for_key(lock_name)
        if state == 'FAILED':
            raise AssertionError('Keyword failed in other process')
        if state == '':
            # first process to get here actually runs the keyword
            BuiltIn().run_keyword(keyword)
            self.set_parallel_value_for_key(lock_name, 'PASSED')
    except:
        self.set_parallel_value_for_key(lock_name, 'FAILED')
        raise
    finally:
        self.release_lock(lock_name)
Set a globally available key and value that can be accessed
def set_parallel_value_for_key(self, key, value):
    """
    Set a globally available key and value that can be accessed
    from all the pabot processes.
    """
    remote = self._remotelib
    if remote:
        # forward to the shared pabot server
        remote.run_keyword('set_parallel_value_for_key', [key, value], {})
    else:
        # no remote: fall back to the local in-process store
        _PabotLib.set_parallel_value_for_key(self, key, value)
Get the value for a key. If there is no value for the key then empty
def get_parallel_value_for_key(self, key):
    """
    Get the value for a key. If there is no value for the key then empty
    string is returned.
    """
    remote = self._remotelib
    if not remote:
        # no remote: read from the local in-process store
        return _PabotLib.get_parallel_value_for_key(self, key)
    return remote.run_keyword('get_parallel_value_for_key', [key], {})
Wait for a lock with name.
This will prevent other processes from acquiring the lock with
the name while it is held. Thus they will wait in the position
where they are acquiring the lock until the process that has it
def acquire_lock(self, name):
    """
    Wait for a lock with name.

    This will prevent other processes from acquiring the lock with
    the name while it is held. Thus they will wait in the position
    where they are acquiring the lock until the process that has it
    releases it.
    """
    if not self._remotelib:
        return _PabotLib.acquire_lock(self, name, self._my_id)
    try:
        # poll the remote server until it grants the lock
        while not self._remotelib.run_keyword('acquire_lock',
                                              [name, self._my_id], {}):
            time.sleep(0.1)
            logger.debug('waiting for lock to release')
        return True
    except RuntimeError:
        # connection lost: drop the cached remote and fall back to
        # the local implementation below
        logger.warn('no connection')
        self.__remotelib = None
    return _PabotLib.acquire_lock(self, name, self._my_id)
Release a lock with name.
def release_lock(self, name):
    """
    Release a lock with name.

    This will enable others to acquire the lock.
    """
    remote = self._remotelib
    if remote:
        remote.run_keyword('release_lock', [name, self._my_id], {})
    else:
        _PabotLib.release_lock(self, name, self._my_id)
def release_locks(self):
    """
    Release all locks called by instance.
    """
    remote = self._remotelib
    if remote:
        remote.run_keyword('release_locks', [self._my_id], {})
    else:
        _PabotLib.release_locks(self, self._my_id)
Subsets and Splits