Dataset columns:
- code: string, 26 to 870k characters
- docstring: string, 1 to 65.6k characters
- func_name: string, 1 to 194 characters
- language: string, 1 distinct value
- repo: string, 8 to 68 characters
- path: string, 5 to 194 characters
- url: string, 46 to 254 characters
- license: string, 4 distinct values
def test_hash(self):
    """Test it is hashable"""
    r = Rectangle(1, 1, 1, 1)
    d = {r: 43}
    self.assertEqual(d[r], 43)
Test it is hashable
test_hash
python
secnot/rectpack
tests/test_geometry.py
https://github.com/secnot/rectpack/blob/master/tests/test_geometry.py
Apache-2.0
def random_rectangle_generator(num, max_side=30, min_side=8):
    """
    Generate a random rectangle list with dimensions within specified parameters.

    Arguments:
        num (number): Number of rectangles to generate
        max_side (number): Max rectangle side length
        min_side (number): Min rectangle side length

    Returns:
        Rectangle list
    """
    return (random_rectangle(max_side, min_side) for i in range(0, num))
Generate a random rectangle list with dimensions within specified parameters. Arguments: num (number): Number of rectangles to generate max_side (number): Max rectangle side length min_side (number): Min rectangle side length Returns: Rectangle list
random_rectangle_generator
python
secnot/rectpack
tests/test_decimal.py
https://github.com/secnot/rectpack/blob/master/tests/test_decimal.py
Apache-2.0
def test_rounding(self):
    """Test rounding is always up"""
    d = float2dec(3.141511, 3)
    self.assertEqual(decimal.Decimal('3.142'), d)

    d = float2dec(3.444444, 3)
    self.assertEqual(decimal.Decimal('3.445'), d)

    d = float2dec(3.243234, 0)
    self.assertEqual(decimal.Decimal('4'), d)

    d = float2dec(7.234234, 0)
    self.assertEqual(decimal.Decimal('8'), d)
Test rounding is always up
test_rounding
python
secnot/rectpack
tests/test_decimal.py
https://github.com/secnot/rectpack/blob/master/tests/test_decimal.py
Apache-2.0
def test_decimal_places(self):
    """Test rounded to correct decimal place"""
    d = float2dec(4.2, 3)
    self.assertEqual(decimal.Decimal('4.201'), d)

    d = float2dec(5.7, 3)
    self.assertEqual(decimal.Decimal('5.701'), d)

    d = float2dec(2.2, 4)
    self.assertEqual(decimal.Decimal('2.2001'), d)
Test rounded to correct decimal place
test_decimal_places
python
secnot/rectpack
tests/test_decimal.py
https://github.com/secnot/rectpack/blob/master/tests/test_decimal.py
Apache-2.0
def test_integer(self):
    """Test integers are also converted, but not rounded"""
    d = float2dec(7, 3)
    self.assertEqual(decimal.Decimal('7.000'), d)

    d = float2dec(2, 3)
    self.assertEqual(decimal.Decimal('2.000'), d)
Test integers are also converted, but not rounded
test_integer
python
secnot/rectpack
tests/test_decimal.py
https://github.com/secnot/rectpack/blob/master/tests/test_decimal.py
Apache-2.0
def test_not_rounded(self):
    """Test floats are only rounded when needed"""
    d = float2dec(3.0, 3)
    self.assertEqual(decimal.Decimal('3.000'), d)
Test floats are only rounded when needed
test_not_rounded
python
secnot/rectpack
tests/test_decimal.py
https://github.com/secnot/rectpack/blob/master/tests/test_decimal.py
Apache-2.0
def _rect_fitness(self, max_rect, width, height):
    """
    Arguments:
        max_rect (Rectangle): Destination max_rect
        width (int, float): Rectangle width
        height (int, float): Rectangle height

    Returns:
        None: Rectangle couldn't be placed into max_rect
        integer, float: fitness value
    """
    if width <= max_rect.width and height <= max_rect.height:
        return 0
    else:
        return None
Arguments: max_rect (Rectangle): Destination max_rect width (int, float): Rectangle width height (int, float): Rectangle height Returns: None: Rectangle couldn't be placed into max_rect integer, float: fitness value
_rect_fitness
python
secnot/rectpack
rectpack/maxrects.py
https://github.com/secnot/rectpack/blob/master/rectpack/maxrects.py
Apache-2.0
def _select_position(self, w, h):
    """
    Find the max_rect with the best fitness for placing a rectangle
    of dimensions w*h

    Arguments:
        w (int, float): Rectangle width
        h (int, float): Rectangle height

    Returns:
        (rect, max_rect)
        rect (Rectangle): Placed rectangle or None if placement failed.
        max_rect (Rectangle): Maximal rectangle where rect was placed
    """
    if not self._max_rects:
        return None, None

    # Normal rectangle
    fitn = ((self._rect_fitness(m, w, h), w, h, m) for m in self._max_rects
            if self._rect_fitness(m, w, h) is not None)

    # Rotated rectangle
    fitr = ((self._rect_fitness(m, h, w), h, w, m) for m in self._max_rects
            if self._rect_fitness(m, h, w) is not None)

    if not self.rot:
        fitr = []

    fit = itertools.chain(fitn, fitr)

    try:
        _, w, h, m = min(fit, key=first_item)
    except ValueError:
        return None, None

    return Rectangle(m.x, m.y, w, h), m
Find the max_rect with the best fitness for placing a rectangle of dimensions w*h Arguments: w (int, float): Rectangle width h (int, float): Rectangle height Returns: (rect, max_rect) rect (Rectangle): Placed rectangle or None if placement failed. max_rect (Rectangle): Maximal rectangle where rect was placed
_select_position
python
secnot/rectpack
rectpack/maxrects.py
https://github.com/secnot/rectpack/blob/master/rectpack/maxrects.py
Apache-2.0
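A minimal, self-contained sketch of the selection pattern used in _select_position above: build (fitness, ...) tuples for the normal and rotated orientations, chain them, and take the minimum, treating ValueError from min() as "nothing fits". The tuple-based free rectangles and the zero-returning fitness function here are simplifying assumptions for illustration, not rectpack's API.

import itertools
import operator

def best_candidate(free_rects, w, h, allow_rotation=True):
    # Toy fitness: 0 when the w x h rectangle fits in the free rect, else None.
    def fitness(free, rw, rh):
        fw, fh = free
        return 0 if rw <= fw and rh <= fh else None

    normal = ((fitness(m, w, h), w, h, m) for m in free_rects
              if fitness(m, w, h) is not None)
    rotated = ((fitness(m, h, w), h, w, m) for m in free_rects
               if fitness(m, h, w) is not None)
    if not allow_rotation:
        rotated = []

    try:
        # min() raises ValueError on an empty iterable: nothing fits.
        return min(itertools.chain(normal, rotated), key=operator.itemgetter(0))
    except ValueError:
        return None

print(best_candidate([(10, 5), (8, 8)], 6, 7))   # -> (0, 6, 7, (8, 8))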
def _generate_splits(self, m, r):
    """
    When a rectangle is placed inside a maximal rectangle, it stops being one
    and up to 4 new maximal rectangles may appear depending on the placement.
    _generate_splits calculates them.

    Arguments:
        m (Rectangle): max_rect rectangle
        r (Rectangle): rectangle placed

    Returns:
        list : list containing new maximal rectangles or an empty list
    """
    new_rects = []

    if r.left > m.left:
        new_rects.append(Rectangle(m.left, m.bottom, r.left-m.left, m.height))
    if r.right < m.right:
        new_rects.append(Rectangle(r.right, m.bottom, m.right-r.right, m.height))
    if r.top < m.top:
        new_rects.append(Rectangle(m.left, r.top, m.width, m.top-r.top))
    if r.bottom > m.bottom:
        new_rects.append(Rectangle(m.left, m.bottom, m.width, r.bottom-m.bottom))

    return new_rects
When a rectangle is placed inside a maximal rectangle, it stops being one and up to 4 new maximal rectangles may appear depending on the placement. _generate_splits calculates them. Arguments: m (Rectangle): max_rect rectangle r (Rectangle): rectangle placed Returns: list : list containing new maximal rectangles or an empty list
_generate_splits
python
secnot/rectpack
rectpack/maxrects.py
https://github.com/secnot/rectpack/blob/master/rectpack/maxrects.py
Apache-2.0
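To make the split rule concrete, here is a self-contained sketch of the same computation using plain (x, y, width, height) tuples instead of rectpack's Rectangle class; the tuple layout is an assumption made for illustration only.

# Standalone sketch of the maximal-rectangle split rule, using plain tuples.
def generate_splits(m, r):
    mx, my, mw, mh = m
    rx, ry, rw, rh = r
    new_rects = []
    if rx > mx:                                  # strip left of the placed rect
        new_rects.append((mx, my, rx - mx, mh))
    if rx + rw < mx + mw:                        # strip right of the placed rect
        new_rects.append((rx + rw, my, (mx + mw) - (rx + rw), mh))
    if ry + rh < my + mh:                        # strip above the placed rect
        new_rects.append((mx, ry + rh, mw, (my + mh) - (ry + rh)))
    if ry > my:                                  # strip below the placed rect
        new_rects.append((mx, my, mw, ry - my))
    return new_rects

# Placing a 30x30 rect at the origin of a 100x100 maximal rectangle leaves
# two overlapping maximal rectangles: 70x100 on the right and 100x70 on top.
print(generate_splits((0, 0, 100, 100), (0, 0, 30, 30)))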
def _split(self, rect):
    """
    Split all max_rects intersecting the rectangle rect into up to
    4 new max_rects. The result replaces self._max_rects; nothing is
    returned.

    Arguments:
        rect (Rectangle): Rectangle
    """
    max_rects = collections.deque()

    for r in self._max_rects:
        if r.intersects(rect):
            max_rects.extend(self._generate_splits(r, rect))
        else:
            max_rects.append(r)

    # Add newly generated max_rects
    self._max_rects = list(max_rects)
Split all max_rects intersecting the rectangle rect into up to 4 new max_rects. The result replaces self._max_rects; nothing is returned. Arguments: rect (Rectangle): Rectangle
_split
python
secnot/rectpack
rectpack/maxrects.py
https://github.com/secnot/rectpack/blob/master/rectpack/maxrects.py
Apache-2.0
def _remove_duplicates(self):
    """
    Remove every maximal rectangle contained by another one.
    """
    contained = set()
    for m1, m2 in itertools.combinations(self._max_rects, 2):
        if m1.contains(m2):
            contained.add(m2)
        elif m2.contains(m1):
            contained.add(m1)

    # Remove from max_rects
    self._max_rects = [m for m in self._max_rects if m not in contained]
Remove every maximal rectangle contained by another one.
_remove_duplicates
python
secnot/rectpack
rectpack/maxrects.py
https://github.com/secnot/rectpack/blob/master/rectpack/maxrects.py
Apache-2.0
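The pruning step above can be demonstrated standalone; this sketch assumes plain (x, y, width, height) tuples and a containment test equivalent to Rectangle.contains.

import itertools

def contains(a, b):
    """True if rectangle a fully contains rectangle b."""
    return (b[0] >= a[0] and b[1] >= a[1]
            and b[0] + b[2] <= a[0] + a[2]
            and b[1] + b[3] <= a[1] + a[3])

def remove_contained(rects):
    dropped = set()
    for r1, r2 in itertools.combinations(rects, 2):
        if contains(r1, r2):
            dropped.add(r2)
        elif contains(r2, r1):
            dropped.add(r1)
    return [r for r in rects if r not in dropped]

print(remove_contained([(0, 0, 10, 10), (2, 2, 3, 3), (5, 5, 10, 10)]))
# -> [(0, 0, 10, 10), (5, 5, 10, 10)]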
def fitness(self, width, height):
    """
    Metric used to rate how much space is wasted if a rectangle is placed.
    Returns a value greater or equal to zero, the smaller the value the more
    'fit' is the rectangle. If the rectangle can't be placed, returns None.

    Arguments:
        width (int, float): Rectangle width
        height (int, float): Rectangle height

    Returns:
        int, float: Rectangle fitness
        None: Rectangle can't be placed
    """
    assert(width > 0 and height > 0)

    rect, max_rect = self._select_position(width, height)
    if rect is None:
        return None

    # Return fitness
    return self._rect_fitness(max_rect, rect.width, rect.height)
Metric used to rate how much space is wasted if a rectangle is placed. Returns a value greater or equal to zero, the smaller the value the more 'fit' is the rectangle. If the rectangle can't be placed, returns None. Arguments: width (int, float): Rectangle width height (int, float): Rectangle height Returns: int, float: Rectangle fitness None: Rectangle can't be placed
fitness
python
secnot/rectpack
rectpack/maxrects.py
https://github.com/secnot/rectpack/blob/master/rectpack/maxrects.py
Apache-2.0
def add_rect(self, width, height, rid=None):
    """
    Add rectangle of width x height dimensions.

    Arguments:
        width (int, float): Rectangle width
        height (int, float): Rectangle height
        rid: Optional rectangle user id

    Returns:
        Rectangle: Rectangle with placement coordinates
        None: If the rectangle couldn't be placed.
    """
    assert(width > 0 and height > 0)

    # Search best position and orientation
    rect, _ = self._select_position(width, height)
    if not rect:
        return None

    # Subdivide all the max rectangles intersecting with the selected
    # rectangle.
    self._split(rect)

    # Remove any max_rect contained by another
    self._remove_duplicates()

    # Store and return rectangle position.
    rect.rid = rid
    self.rectangles.append(rect)
    return rect
Add rectangle of width x height dimensions. Arguments: width (int, float): Rectangle width height (int, float): Rectangle height rid: Optional rectangle user id Returns: Rectangle: Rectangle with placement coordinates None: If the rectangle couldn't be placed.
add_rect
python
secnot/rectpack
rectpack/maxrects.py
https://github.com/secnot/rectpack/blob/master/rectpack/maxrects.py
Apache-2.0
def _select_position(self, w, h):
    """
    Select the position where the y coordinate of the top of the rectangle
    is lowest; if there are several, pick the one with the smallest x
    coordinate.
    """
    fitn = ((m.y+h, m.x, w, h, m) for m in self._max_rects
            if self._rect_fitness(m, w, h) is not None)
    fitr = ((m.y+w, m.x, h, w, m) for m in self._max_rects
            if self._rect_fitness(m, h, w) is not None)

    if not self.rot:
        fitr = []

    fit = itertools.chain(fitn, fitr)

    try:
        _, _, w, h, m = min(fit, key=first_item)
    except ValueError:
        return None, None

    return Rectangle(m.x, m.y, w, h), m
Select the position where the y coordinate of the top of the rectangle is lowest; if there are several, pick the one with the smallest x coordinate
_select_position
python
secnot/rectpack
rectpack/maxrects.py
https://github.com/secnot/rectpack/blob/master/rectpack/maxrects.py
Apache-2.0
def __init__(self, width, height, rot=True, bid=None, *args, **kwargs):
    """
    Initialize packing algorithm

    Arguments:
        width (int, float): Packing surface width
        height (int, float): Packing surface height
        rot (bool): Rectangle rotation enabled or disabled
        bid (string|int|...): Packing surface identification
    """
    self.width = width
    self.height = height
    self.rot = rot
    self.rectangles = []
    self.bid = bid
    self._surface = Rectangle(0, 0, width, height)
    self.reset()
Initialize packing algorithm Arguments: width (int, float): Packing surface width height (int, float): Packing surface height rot (bool): Rectangle rotation enabled or disabled bid (string|int|...): Packing surface identification
__init__
python
secnot/rectpack
rectpack/pack_algo.py
https://github.com/secnot/rectpack/blob/master/rectpack/pack_algo.py
Apache-2.0
def _fits_surface(self, width, height):
    """
    Test surface is big enough to place a rectangle

    Arguments:
        width (int, float): Rectangle width
        height (int, float): Rectangle height

    Returns:
        boolean: True if it could be placed, False otherwise
    """
    assert(width > 0 and height > 0)
    if self.rot and (width > self.width or height > self.height):
        width, height = height, width

    if width > self.width or height > self.height:
        return False
    else:
        return True
Test surface is big enough to place a rectangle Arguments: width (int, float): Rectangle width height (int, float): Rectangle height Returns: boolean: True if it could be placed, False otherwise
_fits_surface
python
secnot/rectpack
rectpack/pack_algo.py
https://github.com/secnot/rectpack/blob/master/rectpack/pack_algo.py
Apache-2.0
def __getitem__(self, key):
    """
    Return rectangle in selected position.
    """
    return self.rectangles[key]
Return rectangle in selected position.
__getitem__
python
secnot/rectpack
rectpack/pack_algo.py
https://github.com/secnot/rectpack/blob/master/rectpack/pack_algo.py
Apache-2.0
def used_area(self):
    """
    Total area of rectangles placed

    Returns:
        int, float: Area
    """
    return sum(r.area() for r in self)
Total area of rectangles placed Returns: int, float: Area
used_area
python
secnot/rectpack
rectpack/pack_algo.py
https://github.com/secnot/rectpack/blob/master/rectpack/pack_algo.py
Apache-2.0
def fitness(self, width, height, rot=False):
    """
    Metric used to rate how much space is wasted if a rectangle is placed.
    Returns a value greater or equal to zero, the smaller the value the more
    'fit' is the rectangle. If the rectangle can't be placed, returns None.

    Arguments:
        width (int, float): Rectangle width
        height (int, float): Rectangle height
        rot (bool): Enable rectangle rotation

    Returns:
        int, float: Rectangle fitness
        None: Rectangle can't be placed
    """
    raise NotImplementedError
Metric used to rate how much space is wasted if a rectangle is placed. Returns a value greater or equal to zero, the smaller the value the more 'fit' is the rectangle. If the rectangle can't be placed, returns None. Arguments: width (int, float): Rectangle width height (int, float): Rectangle height rot (bool): Enable rectangle rotation Returns: int, float: Rectangle fitness None: Rectangle can't be placed
fitness
python
secnot/rectpack
rectpack/pack_algo.py
https://github.com/secnot/rectpack/blob/master/rectpack/pack_algo.py
Apache-2.0
def add_rect(self, width, height, rid=None):
    """
    Add rectangle of width x height dimensions.

    Arguments:
        width (int, float): Rectangle width
        height (int, float): Rectangle height
        rid: Optional rectangle user id

    Returns:
        Rectangle: Rectangle with placement coordinates
        None: If the rectangle couldn't be placed.
    """
    raise NotImplementedError
Add rectangle of width x height dimensions. Arguments: width (int, float): Rectangle width height (int, float): Rectangle height rid: Optional rectangle user id Returns: Rectangle: Rectangle with placement coordinates None: If the rectangle couldn't be placed.
add_rect
python
secnot/rectpack
rectpack/pack_algo.py
https://github.com/secnot/rectpack/blob/master/rectpack/pack_algo.py
Apache-2.0
def rect_list(self):
    """
    Returns a list with all rectangles placed into the surface.

    Returns:
        List: Format [(x, y, width, height, rid), ...]
    """
    rectangle_list = []
    for r in self:
        rectangle_list.append((r.x, r.y, r.width, r.height, r.rid))

    return rectangle_list
Returns a list with all rectangles placed into the surface. Returns: List: Format [(x, y, width, height, rid), ...]
rect_list
python
secnot/rectpack
rectpack/pack_algo.py
https://github.com/secnot/rectpack/blob/master/rectpack/pack_algo.py
Apache-2.0
def validate_packing(self):
    """
    Check for collisions between rectangles, and check that all of them
    are placed inside the surface.
    """
    surface = Rectangle(0, 0, self.width, self.height)

    for r in self:
        if not surface.contains(r):
            raise Exception("Rectangle placed outside surface")

    rectangles = [r for r in self]
    if len(rectangles) <= 1:
        return

    # Check every pair of rectangles exactly once (the original ranges
    # stopped one index short and skipped pairs involving the last rectangle).
    for r1 in range(0, len(rectangles)-1):
        for r2 in range(r1+1, len(rectangles)):
            if rectangles[r1].intersects(rectangles[r2]):
                raise Exception("Rectangle collision detected")
Check for collisions between rectangles, and check that all of them are placed inside the surface.
validate_packing
python
secnot/rectpack
rectpack/pack_algo.py
https://github.com/secnot/rectpack/blob/master/rectpack/pack_algo.py
Apache-2.0
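The same validation can be written with itertools.combinations so that every pair is visited exactly once; this sketch uses plain (x, y, width, height) tuples and treats edge-touching rectangles as non-colliding, mirroring the default intersects behaviour. It is an illustration only, not rectpack's implementation.

import itertools

def overlaps(a, b):
    """True if the interiors of two axis-aligned rectangles intersect."""
    return not (a[0] + a[2] <= b[0] or b[0] + b[2] <= a[0] or
                a[1] + a[3] <= b[1] or b[1] + b[3] <= a[1])

def validate(rects, width, height):
    for x, y, w, h in rects:
        if x < 0 or y < 0 or x + w > width or y + h > height:
            raise ValueError("rectangle placed outside surface")
    for a, b in itertools.combinations(rects, 2):
        if overlaps(a, b):
            raise ValueError("rectangle collision detected")

validate([(0, 0, 10, 10), (10, 0, 5, 5)], 20, 20)   # touching edges is fine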
def add_waste(self, x, y, width, height):
    """Add new waste section"""
    self._add_section(Rectangle(x, y, width, height))
Add new waste section
add_waste
python
secnot/rectpack
rectpack/waste.py
https://github.com/secnot/rectpack/blob/master/rectpack/waste.py
Apache-2.0
def __init__(self, width, height, rot=True, merge=True, *args, **kwargs):
    """
    Arguments:
        width (int, float):
        height (int, float):
        merge (bool): Optional keyword argument
    """
    self._merge = merge
    super(Guillotine, self).__init__(width, height, rot, *args, **kwargs)
Arguments: width (int, float): height (int, float): merge (bool): Optional keyword argument
__init__
python
secnot/rectpack
rectpack/guillotine.py
https://github.com/secnot/rectpack/blob/master/rectpack/guillotine.py
Apache-2.0
def _add_section(self, section):
    """Adds a new section to the free section list. If section merge is
    enabled, the section is first joined with every existing section it can
    be merged with; each successful join is merged again with the remaining
    sections until no further join succeeds. The result is then appended to
    the list.

    Arguments:
        section (Rectangle): New free section.
    """
    section.rid = 0
    plen = 0

    while self._merge and self._sections and plen != len(self._sections):
        plen = len(self._sections)
        self._sections = [s for s in self._sections if not section.join(s)]

    self._sections.append(section)
Adds a new section to the free section list. If section merge is enabled, the section is first joined with every existing section it can be merged with; each successful join is merged again with the remaining sections until no further join succeeds. The result is then appended to the list. Arguments: section (Rectangle): New free section.
_add_section
python
secnot/rectpack
rectpack/guillotine.py
https://github.com/secnot/rectpack/blob/master/rectpack/guillotine.py
Apache-2.0
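The "merge until no further join succeeds" loop above is easier to see on 1-D intervals; this sketch stands in for Rectangle.join and only illustrates the control flow, under the assumption that sections are (start, end) tuples.

def try_join(a, b):
    """Return the union of two intervals if they touch or overlap, else None."""
    if a[0] <= b[1] and b[0] <= a[1]:
        return (min(a[0], b[0]), max(a[1], b[1]))
    return None

def add_section(sections, new):
    prev_len = -1
    while sections and prev_len != len(sections):
        prev_len = len(sections)
        remaining = []
        for s in sections:
            joined = try_join(new, s)
            if joined:
                new = joined          # absorb s into the growing section
            else:
                remaining.append(s)
        sections = remaining
    sections.append(new)
    return sections

print(add_section([(0, 2), (5, 7)], (2, 5)))   # -> [(0, 7)]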
def _split_horizontal(self, section, width, height):
    """For a horizontal split the rectangle is placed in the lower
    left corner of the section (section's xy coordinates); the topmost
    side of the rectangle and its horizontal continuation mark the line
    of division for the split.
    +-----------------+
    |                 |
    |                 |
    |                 |
    |                 |
    +-------+---------+
    |#######|         |
    |#######|         |
    |#######|         |
    +-------+---------+

    If the rectangle width is equal to the section width, only one
    section is created over the rectangle. If the rectangle height is
    equal to the section height, only one section to the right of the
    rectangle is created. If both width and height are equal, no sections
    are created.
    """
    # First remove the section we are splitting so it doesn't
    # interfere when later we try to merge the resulting split
    # rectangles, with the rest of free sections.
    #self._sections.remove(section)

    # Creates two new empty sections, and returns the new rectangle.
    if height < section.height:
        self._add_section(Rectangle(section.x, section.y+height,
            section.width, section.height-height))

    if width < section.width:
        self._add_section(Rectangle(section.x+width, section.y,
            section.width-width, height))
For a horizontal split the rectangle is placed in the lower left corner of the section (section's xy coordinates); the topmost side of the rectangle and its horizontal continuation mark the line of division for the split. +-----------------+ | | | | | | | | +-------+---------+ |#######| | |#######| | |#######| | +-------+---------+ If the rectangle width is equal to the section width, only one section is created over the rectangle. If the rectangle height is equal to the section height, only one section to the right of the rectangle is created. If both width and height are equal, no sections are created.
_split_horizontal
python
secnot/rectpack
rectpack/guillotine.py
https://github.com/secnot/rectpack/blob/master/rectpack/guillotine.py
Apache-2.0
def _split_vertical(self, section, width, height):
    """For a vertical split the rectangle is placed in the lower
    left corner of the section (section's xy coordinates); the rightmost
    side of the rectangle and its vertical continuation mark the line
    of division for the split.
    +-------+---------+
    |       |         |
    |       |         |
    |       |         |
    |       |         |
    +-------+         |
    |#######|         |
    |#######|         |
    |#######|         |
    +-------+---------+

    If the rectangle width is equal to the section width, only one
    section is created over the rectangle. If the rectangle height is
    equal to the section height, only one section to the right of the
    rectangle is created. If both width and height are equal, no sections
    are created.
    """
    # When a section is split, depending on the rectangle size
    # two, one, or no new sections will be created.
    if height < section.height:
        self._add_section(Rectangle(section.x, section.y+height,
            width, section.height-height))

    if width < section.width:
        self._add_section(Rectangle(section.x+width, section.y,
            section.width-width, section.height))
For a vertical split the rectangle is placed in the lower left corner of the section (section's xy coordinates); the rightmost side of the rectangle and its vertical continuation mark the line of division for the split. +-------+---------+ | | | | | | | | | | | | +-------+ | |#######| | |#######| | |#######| | +-------+---------+ If the rectangle width is equal to the section width, only one section is created over the rectangle. If the rectangle height is equal to the section height, only one section to the right of the rectangle is created. If both width and height are equal, no sections are created.
_split_vertical
python
secnot/rectpack
rectpack/guillotine.py
https://github.com/secnot/rectpack/blob/master/rectpack/guillotine.py
Apache-2.0
def _split(self, section, width, height):
    """
    Selects the best split for a section, given a rectangle of dimensions
    width and height, then calls _split_vertical or _split_horizontal
    to do the dirty work.

    Arguments:
        section (Rectangle): Section to split
        width (int, float): Rectangle width
        height (int, float): Rectangle height
    """
    raise NotImplementedError
Selects the best split for a section, given a rectangle of dimensions width and height, then calls _split_vertical or _split_horizontal to do the dirty work. Arguments: section (Rectangle): Section to split width (int, float): Rectangle width height (int, float): Rectangle height
_split
python
secnot/rectpack
rectpack/guillotine.py
https://github.com/secnot/rectpack/blob/master/rectpack/guillotine.py
Apache-2.0
def _section_fitness(self, section, width, height):
    """The subclass for each of the Guillotine selection methods
    (BAF, BLSF, ...) overrides this method; it is defined here only to
    ensure a valid return value if the worst happens.
    """
    raise NotImplementedError
The subclass for each of the Guillotine selection methods (BAF, BLSF, ...) overrides this method; it is defined here only to ensure a valid return value if the worst happens.
_section_fitness
python
secnot/rectpack
rectpack/guillotine.py
https://github.com/secnot/rectpack/blob/master/rectpack/guillotine.py
Apache-2.0
def _select_fittest_section(self, w, h):
    """Calls _section_fitness for each of the sections in the free section
    list and returns the section with the minimal fitness value. The rest is
    boilerplate to make the fitness comparison, to rotate the rectangles,
    and to handle _section_fitness returning None when the rectangle
    couldn't be placed.

    Arguments:
        w (int, float): Rectangle width
        h (int, float): Rectangle height

    Returns:
        (section, was_rotated): Returns the tuple
            section (Rectangle): Section with best fitness
            was_rotated (bool): The rectangle was rotated
    """
    fitn = ((self._section_fitness(s, w, h), s, False) for s in self._sections
            if self._section_fitness(s, w, h) is not None)
    fitr = ((self._section_fitness(s, h, w), s, True) for s in self._sections
            if self._section_fitness(s, h, w) is not None)

    if not self.rot:
        fitr = []

    fit = itertools.chain(fitn, fitr)

    try:
        _, sec, rot = min(fit, key=operator.itemgetter(0))
    except ValueError:
        return None, None

    return sec, rot
Calls _section_fitness for each of the sections in the free section list and returns the section with the minimal fitness value. The rest is boilerplate to make the fitness comparison, to rotate the rectangles, and to handle _section_fitness returning None when the rectangle couldn't be placed. Arguments: w (int, float): Rectangle width h (int, float): Rectangle height Returns: (section, was_rotated): Returns the tuple section (Rectangle): Section with best fitness was_rotated (bool): The rectangle was rotated
_select_fittest_section
python
secnot/rectpack
rectpack/guillotine.py
https://github.com/secnot/rectpack/blob/master/rectpack/guillotine.py
Apache-2.0
def add_rect(self, width, height, rid=None):
    """
    Add rectangle of width x height dimensions.

    Arguments:
        width (int, float): Rectangle width
        height (int, float): Rectangle height
        rid: Optional rectangle user id

    Returns:
        Rectangle: Rectangle with placement coordinates
        None: If the rectangle couldn't be placed.
    """
    assert(width > 0 and height > 0)

    # Obtain the best section to place the rectangle.
    section, rotated = self._select_fittest_section(width, height)
    if not section:
        return None

    if rotated:
        width, height = height, width

    # Remove section, split and store results
    self._sections.remove(section)
    self._split(section, width, height)

    # Store rectangle in the selected position
    rect = Rectangle(section.x, section.y, width, height, rid)
    self.rectangles.append(rect)
    return rect
Add rectangle of width x height dimensions. Arguments: width (int, float): Rectangle width height (int, float): Rectangle height rid: Optional rectangle user id Returns: Rectangle: Rectangle with placement coordinates None: If the rectangle couldn't be placed.
add_rect
python
secnot/rectpack
rectpack/guillotine.py
https://github.com/secnot/rectpack/blob/master/rectpack/guillotine.py
Apache-2.0
def fitness(self, width, height):
    """
    For the guillotine algorithm, returns the min of the fitness of all
    free sections for the given dimensions, both normal and rotated
    (if rotation is enabled).
    """
    assert(width > 0 and height > 0)

    # Get best fitness section.
    section, rotated = self._select_fittest_section(width, height)
    if not section:
        return None

    # Return fitness of returned section, with correct dimensions if the
    # rectangle was rotated.
    if rotated:
        return self._section_fitness(section, height, width)
    else:
        return self._section_fitness(section, width, height)
For the guillotine algorithm, returns the min of the fitness of all free sections for the given dimensions, both normal and rotated (if rotation is enabled).
fitness
python
secnot/rectpack
rectpack/guillotine.py
https://github.com/secnot/rectpack/blob/master/rectpack/guillotine.py
Apache-2.0
def distance(self, point):
    """
    Calculate distance to another point
    """
    return sqrt((self.x-point.x)**2+(self.y-point.y)**2)
Calculate distance to another point
distance
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def __init__(self, start, end):
    """
    Arguments:
        start (Point): Segment start point
        end (Point): Segment end point
    """
    assert(isinstance(start, Point) and isinstance(end, Point))
    self.start = start
    self.end = end
Arguments: start (Point): Segment start point end (Point): Segment end point
__init__
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def length_squared(self):
    """Faster than length and useful for some comparisons"""
    return self.start.distance_squared(self.end)
Faster than length and useful for some comparisons
length_squared
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def __init__(self, start, length):
    """
    Create a horizontal segment given its leftmost end point and its
    length.

    Arguments:
        - start (Point): Starting Point
        - length (number): segment length
    """
    assert(isinstance(start, Point) and not isinstance(length, Point))
    super(HSegment, self).__init__(start, Point(start.x+length, start.y))
Create a horizontal segment given its leftmost end point and its length. Arguments: - start (Point): Starting Point - length (number): segment length
__init__
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def __init__(self, start, length):
    """
    Create a vertical segment given its bottommost end point and its
    length.

    Arguments:
        - start (Point): Starting Point
        - length (number): segment length
    """
    assert(isinstance(start, Point) and not isinstance(length, Point))
    super(VSegment, self).__init__(start, Point(start.x, start.y+length))
Create a vertical segment given its bottommost end point and its length. Arguments: - start (Point): Starting Point - length (number): segment length
__init__
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def __init__(self, x, y, width, height, rid=None):
    """
    Args:
        x (int, float):
        y (int, float):
        width (int, float):
        height (int, float):
        rid (int):
    """
    assert(height >= 0 and width >= 0)

    self.width = width
    self.height = height
    self.x = x
    self.y = y
    self.rid = rid
Args: x (int, float): y (int, float): width (int, float): height (int, float): rid (int):
__init__
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def bottom(self):
    """
    Rectangle bottom edge y coordinate
    """
    return self.y
Rectangle bottom edge y coordinate
bottom
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def top(self):
    """
    Rectangle top edge y coordinate
    """
    return self.y+self.height
Rectangle top edge y coordinate
top
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def left(self):
    """
    Rectangle left edge x coordinate
    """
    return self.x
Rectangle left edge x coordinate
left
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def right(self):
    """
    Rectangle right edge x coordinate
    """
    return self.x+self.width
Rectangle right edge x coordinate
right
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def __lt__(self, other):
    """
    Compare rectangles by area (used for sorting)
    """
    return self.area() < other.area()
Compare rectangles by area (used for sorting)
__lt__
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def __eq__(self, other):
    """
    Rectangles are equal when they have the same position and dimensions.
    """
    if not isinstance(other, self.__class__):
        return False

    return (self.width == other.width and
            self.height == other.height and
            self.x == other.x and
            self.y == other.y)
Rectangles are equal when they have the same position and dimensions.
__eq__
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def __iter__(self):
    """
    Iterate through rectangle corners
    """
    yield self.corner_top_l
    yield self.corner_top_r
    yield self.corner_bot_r
    yield self.corner_bot_l
Iterate through rectangle corners
__iter__
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def area(self):
    """
    Rectangle area
    """
    return self.width * self.height
Rectangle area
area
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def move(self, x, y):
    """
    Move Rectangle to x,y coordinates

    Arguments:
        x (int, float): X coordinate
        y (int, float): Y coordinate
    """
    self.x = x
    self.y = y
Move Rectangle to x,y coordinates Arguments: x (int, float): X coordinate y (int, float): Y coordinate
move
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def contains(self, rect):
    """
    Tests if another rectangle is contained by this one

    Arguments:
        rect (Rectangle): The other rectangle

    Returns:
        bool: True if this rectangle contains rect, False otherwise
    """
    return (rect.y >= self.y and
            rect.x >= self.x and
            rect.y+rect.height <= self.y+self.height and
            rect.x+rect.width <= self.x+self.width)
Tests if another rectangle is contained by this one Arguments: rect (Rectangle): The other rectangle Returns: bool: True if this rectangle contains rect, False otherwise
contains
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def intersects(self, rect, edges=False):
    """
    Detect intersections between this and another Rectangle.

    Parameters:
        rect (Rectangle): The other rectangle.
        edges (bool): True to consider rectangles touching by their
            edges or corners to be intersecting.
            (Should have been named include_touching)

    Returns:
        bool: True if the rectangles intersect, False otherwise
    """
    if edges:
        if (self.bottom > rect.top or self.top < rect.bottom or
                self.left > rect.right or self.right < rect.left):
            return False
    else:
        if (self.bottom >= rect.top or self.top <= rect.bottom or
                self.left >= rect.right or self.right <= rect.left):
            return False

    return True
Detect intersections between this and another Rectangle. Parameters: rect (Rectangle): The other rectangle. edges (bool): True to consider rectangles touching by their edges or corners to be intersecting. (Should have been named include_touching) Returns: bool: True if the rectangles intersect, False otherwise
intersects
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
def intersection(self, rect, edges=False):
    """
    Returns the rectangle resulting from the intersection between this and
    another rectangle. If the rectangles are only touching by their edges
    and the argument 'edges' is True, the returned rectangle will have an
    area of 0. Returns None if there is no intersection.

    Arguments:
        rect (Rectangle): The other rectangle.
        edges (bool): If True, rectangles touching by their edges are
            considered to intersect, and a rectangle of zero height and/or
            width will be returned.

    Returns:
        Rectangle: Intersection.
        None: There was no intersection.
    """
    if not self.intersects(rect, edges=edges):
        return None

    bottom = max(self.bottom, rect.bottom)
    left = max(self.left, rect.left)
    top = min(self.top, rect.top)
    right = min(self.right, rect.right)

    return Rectangle(left, bottom, right-left, top-bottom)
Returns the rectangle resulting from the intersection between this and another rectangle. If the rectangles are only touching by their edges and the argument 'edges' is True, the returned rectangle will have an area of 0. Returns None if there is no intersection. Arguments: rect (Rectangle): The other rectangle. edges (bool): If True, rectangles touching by their edges are considered to intersect, and a rectangle of zero height and/or width will be returned. Returns: Rectangle: Intersection. None: There was no intersection.
intersection
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
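A standalone sketch of the same axis-aligned intersection computation with plain (x, y, width, height) tuples; it ignores the edges flag and returns None whenever the overlap has no area, so it is only an illustration of the max/min arithmetic used above.

def intersection(a, b):
    left = max(a[0], b[0])
    bottom = max(a[1], b[1])
    right = min(a[0] + a[2], b[0] + b[2])
    top = min(a[1] + a[3], b[1] + b[3])
    if right <= left or top <= bottom:
        return None
    return (left, bottom, right - left, top - bottom)

print(intersection((0, 0, 10, 10), (5, 5, 10, 10)))   # -> (5, 5, 5, 5)
print(intersection((0, 0, 2, 2), (5, 5, 1, 1)))       # -> None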
def join(self, other):
    """
    Try to join a rectangle to this one; if the union is also a rectangle
    the operation succeeds and this rectangle is modified to the union.

    Arguments:
        other (Rectangle): Rectangle to join

    Returns:
        bool: True when successfully joined, False otherwise
    """
    if self.contains(other):
        return True

    if other.contains(self):
        self.x = other.x
        self.y = other.y
        self.width = other.width
        self.height = other.height
        return True

    if not self.intersects(other, edges=True):
        return False

    # Other rectangle is Up/Down from this
    if self.left == other.left and self.width == other.width:
        y_min = min(self.bottom, other.bottom)
        y_max = max(self.top, other.top)
        self.y = y_min
        self.height = y_max-y_min
        return True

    # Other rectangle is Right/Left from this
    if self.bottom == other.bottom and self.height == other.height:
        x_min = min(self.left, other.left)
        x_max = max(self.right, other.right)
        self.x = x_min
        self.width = x_max-x_min
        return True

    return False
Try to join a rectangle to this one; if the union is also a rectangle the operation succeeds and this rectangle is modified to the union. Arguments: other (Rectangle): Rectangle to join Returns: bool: True when successfully joined, False otherwise
join
python
secnot/rectpack
rectpack/geometry.py
https://github.com/secnot/rectpack/blob/master/rectpack/geometry.py
Apache-2.0
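A reduced sketch of the join rule for the simple "full shared edge" cases, again with plain (x, y, width, height) tuples; rectpack's version above also handles containment and partially overlapping inputs, which this illustration deliberately omits.

def join(a, b):
    ax, ay, aw, ah = a
    bx, by, bw, bh = b
    # Same column: stacked vertically and touching.
    if ax == bx and aw == bw and (ay + ah == by or by + bh == ay):
        return (ax, min(ay, by), aw, ah + bh)
    # Same row: side by side and touching.
    if ay == by and ah == bh and (ax + aw == bx or bx + bw == ax):
        return (min(ax, bx), ay, aw + bw, ah)
    return None

print(join((0, 0, 4, 2), (0, 2, 4, 3)))   # -> (0, 0, 4, 5)
print(join((0, 0, 4, 2), (5, 0, 4, 2)))   # -> None (gap between them)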
def __init__(self, rectangles=[], max_width=None, max_height=None, rotation=True):
    """
    Arguments:
        rectangles (list): Rectangles to be enveloped
            [(width1, height1), (width2, height2), ...]
        max_width (number|None): Enveloping rectangle max allowed width.
        max_height (number|None): Enveloping rectangle max allowed height.
        rotation (boolean): Enable/Disable rectangle rotation.
    """
    # Enclosing rectangle max width
    self._max_width = max_width

    # Enclosing rectangle max height
    self._max_height = max_height

    # Enable or disable rectangle rotation
    self._rotation = rotation

    # Default packing algorithm
    self._pack_algo = SkylineBlWm

    # Rectangles to enclose [(width, height), (width, height), ...]
    self._rectangles = []
    for r in rectangles:
        self.add_rect(*r)
Arguments: rectangles (list): Rectangles to be enveloped [(width1, height1), (width2, height2), ...] max_width (number|None): Enveloping rectangle max allowed width. max_height (number|None): Enveloping rectangle max allowed height. rotation (boolean): Enable/Disable rectangle rotation.
__init__
python
secnot/rectpack
rectpack/enclose.py
https://github.com/secnot/rectpack/blob/master/rectpack/enclose.py
Apache-2.0
def _container_candidates(self):
    """Generate container candidate list

    Returns:
        tuple list: [(width1, height1), (width2, height2), ...]
    """
    if not self._rectangles:
        return []

    if self._rotation:
        sides = sorted(side for rect in self._rectangles for side in rect)
        max_height = sum(max(r[0], r[1]) for r in self._rectangles)
        min_width = max(min(r[0], r[1]) for r in self._rectangles)
        max_width = max_height
    else:
        sides = sorted(r[0] for r in self._rectangles)
        max_height = sum(r[1] for r in self._rectangles)
        min_width = max(r[0] for r in self._rectangles)
        max_width = sum(sides)

    if self._max_width and self._max_width < max_width:
        max_width = self._max_width

    if self._max_height and self._max_height < max_height:
        max_height = self._max_height

    assert(max_width > min_width)

    # Generate initial container widths
    candidates = [max_width, min_width]

    width = 0
    for s in reversed(sides):
        width += s
        candidates.append(width)

    width = 0
    for s in sides:
        width += s
        candidates.append(width)

    candidates.append(max_width)
    candidates.append(min_width)

    # Remove duplicates and widths too big or small
    seen = set()
    seen_add = seen.add
    candidates = [x for x in candidates if not(x in seen or seen_add(x))]
    candidates = [x for x in candidates if not(x > max_width or x < min_width)]

    # Remove candidates too small to fit all the rectangles
    min_area = sum(r[0]*r[1] for r in self._rectangles)

    return [(c, max_height) for c in candidates if c*max_height >= min_area]
Generate container candidate list Returns: tuple list: [(width1, height1), (width2, height2), ...]
_container_candidates
python
secnot/rectpack
rectpack/enclose.py
https://github.com/secnot/rectpack/blob/master/rectpack/enclose.py
Apache-2.0
def _refine_candidate(self, width, height):
    """
    Use bottom-left packing algorithm to find a lower height for the
    container.

    Arguments:
        width
        height

    Returns:
        tuple (width, height, PackingAlgorithm):
    """
    packer = newPacker(PackingMode.Offline, PackingBin.BFF,
        pack_algo=self._pack_algo, sort_algo=SORT_LSIDE,
        rotation=self._rotation)

    packer.add_bin(width, height)

    for r in self._rectangles:
        packer.add_rect(*r)

    packer.pack()

    # Check all rectangles were packed
    if len(packer[0]) != len(self._rectangles):
        return None

    # Find highest rectangle
    new_height = max(packer[0], key=lambda x: x.top).top
    return (width, new_height, packer)
Use bottom-left packing algorithm to find a lower height for the container. Arguments: width height Returns: tuple (width, height, PackingAlgorithm):
_refine_candidate
python
secnot/rectpack
rectpack/enclose.py
https://github.com/secnot/rectpack/blob/master/rectpack/enclose.py
Apache-2.0
def add_rect(self, width, height):
    """
    Add another rectangle to be enclosed

    Arguments:
        width (number): Rectangle width
        height (number): Rectangle height
    """
    self._rectangles.append((width, height))
Add another rectangle to be enclosed Arguments: width (number): Rectangle width height (number): Rectangle height
add_rect
python
secnot/rectpack
rectpack/enclose.py
https://github.com/secnot/rectpack/blob/master/rectpack/enclose.py
Apache-2.0
def __init__(self, width, height, rot=True, *args, **kwargs):
    """
    _skyline is the list used to store all the skyline segments; each
    one is a list with the format [x, y, width] where x is the x
    coordinate of the leftmost point of the segment, y the y coordinate
    of the segment, and width the length of the segment. The initial
    segment is always [0, 0, surface_width]

    Arguments:
        width (int, float):
        height (int, float):
        rot (bool): Enable or disable rectangle rotation
    """
    self._waste_management = False
    self._waste = WasteManager(rot=rot)
    super(Skyline, self).__init__(width, height, rot, merge=False, *args, **kwargs)
_skyline is the list used to store all the skyline segments; each one is a list with the format [x, y, width] where x is the x coordinate of the leftmost point of the segment, y the y coordinate of the segment, and width the length of the segment. The initial segment is always [0, 0, surface_width] Arguments: width (int, float): height (int, float): rot (bool): Enable or disable rectangle rotation
__init__
python
secnot/rectpack
rectpack/skyline.py
https://github.com/secnot/rectpack/blob/master/rectpack/skyline.py
Apache-2.0
def _placement_points_generator(self, skyline, width):
    """Returns a generator for the x coordinates of all the placement
    points on the skyline for a given rectangle.

    WARNING: In some cases the points may be duplicated, but it is faster
    to compute them twice than to remove them.

    Arguments:
        skyline (list): Skyline HSegment list
        width (int, float): Rectangle width

    Returns:
        generator
    """
    skyline_r = skyline[-1].right
    skyline_l = skyline[0].left

    # Placements using skyline segment left point
    ppointsl = (s.left for s in skyline if s.left+width <= skyline_r)

    # Placements using skyline segment right point
    ppointsr = (s.right-width for s in skyline if s.right-width >= skyline_l)

    # Merge positions
    return heapq.merge(ppointsl, ppointsr)
Returns a generator for the x coordinates of all the placement points on the skyline for a given rectangle. WARNING: In some cases the points may be duplicated, but it is faster to compute them twice than to remove them. Arguments: skyline (list): Skyline HSegment list width (int, float): Rectangle width Returns: generator
_placement_points_generator
python
secnot/rectpack
rectpack/skyline.py
https://github.com/secnot/rectpack/blob/master/rectpack/skyline.py
Apache-2.0
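The heapq.merge trick above can be shown in isolation; this sketch assumes skyline segments represented as (left, right, top) tuples sorted by their left coordinate, which is an illustration-only layout rather than rectpack's HSegment class.

import heapq

def placement_points(skyline, width):
    surface_right = skyline[-1][1]
    surface_left = skyline[0][0]
    lefts = (left for left, right, top in skyline
             if left + width <= surface_right)
    rights = (right - width for left, right, top in skyline
              if right - width >= surface_left)
    # Both generators are already sorted, so heapq.merge keeps the
    # combined stream sorted without materialising a list.
    return heapq.merge(lefts, rights)

sky = [(0, 4, 0), (4, 10, 3)]
print(list(placement_points(sky, 3)))   # -> [0, 1, 4, 7]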
def _generate_placements(self, width, height):
    """
    Generate a list of valid placement points for a rectangle of the
    given dimensions.

    Arguments:
        width (number): Rectangle width
        height (number): Rectangle height

    Returns:
        list of tuples (Rectangle, left_skyline, right_skyline) where
            Rectangle: Rectangle in a valid position
            left_skyline: Index for the skyline under the rectangle left edge.
            right_skyline: Index for the skyline under the rectangle right edge.
    """
    skyline = self._skyline

    points = collections.deque()

    left_index = right_index = 0  # Left and right side skyline index
    support_height = skyline[0].top
    support_index = 0

    placements = self._placement_points_generator(skyline, width)
    for p in placements:

        # If the rectangle's right side changed segment, find new support
        if p+width > skyline[right_index].right:
            for right_index in range(right_index+1, len(skyline)):
                if skyline[right_index].top >= support_height:
                    support_index = right_index
                    support_height = skyline[right_index].top
                if p+width <= skyline[right_index].right:
                    break

        # If the left side changed segment.
        if p >= skyline[left_index].right:
            left_index += 1

            # Find new support if the previous one was shifted out.
            if support_index < left_index:
                support_index = left_index
                support_height = skyline[left_index].top
                for i in range(left_index, right_index+1):
                    if skyline[i].top >= support_height:
                        support_index = i
                        support_height = skyline[i].top

        # Add point if there is enough room at the top
        if support_height+height <= self.height:
            points.append((Rectangle(p, support_height, width, height),
                left_index, right_index))

    return points
Generate a list of valid placement points for a rectangle of the given dimensions. Arguments: width (number): Rectangle width height (number): Rectangle height Returns: list of tuples (Rectangle, left_skyline, right_skyline): Rectangle: Rectangle in a valid position left_skyline: Index for the skyline under the rectangle left edge. right_skyline: Index for the skyline under the rectangle right edge.
_generate_placements
python
secnot/rectpack
rectpack/skyline.py
https://github.com/secnot/rectpack/blob/master/rectpack/skyline.py
Apache-2.0
def _merge_skyline(self, skylineq, segment):
    """
    Arguments:
        skylineq (collections.deque):
        segment (HSegment):
    """
    if len(skylineq) == 0:
        skylineq.append(segment)
        return

    if skylineq[-1].top == segment.top:
        s = skylineq[-1]
        skylineq[-1] = HSegment(s.start, s.length+segment.length)
    else:
        skylineq.append(segment)
Arguments: skylineq (collections.deque): segment (HSegment):
_merge_skyline
python
secnot/rectpack
rectpack/skyline.py
https://github.com/secnot/rectpack/blob/master/rectpack/skyline.py
Apache-2.0
def _add_skyline(self, rect):
    """
    Arguments:
        rect (Rectangle):
    """
    skylineq = collections.deque([])  # Skyline after adding new one

    for sky in self._skyline:
        if sky.right <= rect.left or sky.left >= rect.right:
            self._merge_skyline(skylineq, sky)
            continue

        if sky.left < rect.left and sky.right > rect.left:
            # Skyline section partially under segment left
            self._merge_skyline(skylineq,
                HSegment(sky.start, rect.left-sky.left))
            sky = HSegment(P(rect.left, sky.top), sky.right-rect.left)

        if sky.left < rect.right:
            if sky.left == rect.left:
                self._merge_skyline(skylineq,
                    HSegment(P(rect.left, rect.top), rect.width))
            # Skyline section partially under segment right
            if sky.right > rect.right:
                self._merge_skyline(skylineq,
                    HSegment(P(rect.right, sky.top), sky.right-rect.right))
                sky = HSegment(sky.start, rect.right-sky.left)

        if sky.left >= rect.left and sky.right <= rect.right:
            # Skyline section fully under segment, account for wasted space
            if self._waste_management and sky.top < rect.bottom:
                self._waste.add_waste(sky.left, sky.top,
                    sky.length, rect.bottom - sky.top)
        else:
            # Segment
            self._merge_skyline(skylineq, sky)

    # Aaaaand ..... Done
    self._skyline = list(skylineq)
Arguments: rect (Rectangle):
_add_skyline
python
secnot/rectpack
rectpack/skyline.py
https://github.com/secnot/rectpack/blob/master/rectpack/skyline.py
Apache-2.0
def _select_position(self, width, height):
    """
    Search for the placement with the best fitness for the rectangle.

    Returns:
        tuple (Rectangle, fitness) - Rectangle placed in the fittest position
        None - Rectangle couldn't be placed
    """
    positions = self._generate_placements(width, height)
    if self.rot and width != height:
        positions += self._generate_placements(height, width)
    if not positions:
        return None, None
    return min(((p[0], self._rect_fitness(*p)) for p in positions),
        key=operator.itemgetter(1))
Search for the placement with the best fitness for the rectangle. Returns: tuple (Rectangle, fitness) - Rectangle placed in the fittest position None - Rectangle couldn't be placed
_select_position
python
secnot/rectpack
rectpack/skyline.py
https://github.com/secnot/rectpack/blob/master/rectpack/skyline.py
Apache-2.0
def fitness(self, width, height):
    """Search for the best fitness"""
    assert(width > 0 and height > 0)
    if width > max(self.width, self.height) or \
            height > max(self.height, self.width):
        return None

    # If there is room in wasted space, FREE PACKING!!
    if self._waste_management:
        if self._waste.fitness(width, height) is not None:
            return 0

    # Get best fitness segment, for normal rectangle, and for
    # rotated rectangle if rotation is enabled.
    rect, fitness = self._select_position(width, height)
    return fitness
Search for the best fitness
fitness
python
secnot/rectpack
rectpack/skyline.py
https://github.com/secnot/rectpack/blob/master/rectpack/skyline.py
Apache-2.0
def add_rect(self, width, height, rid=None):
    """
    Add new rectangle
    """
    assert(width > 0 and height > 0)
    if width > max(self.width, self.height) or \
            height > max(self.height, self.width):
        return None

    rect = None
    # If waste management is enabled, first try to place the rectangle there
    if self._waste_management:
        rect = self._waste.add_rect(width, height, rid)

    # Get best possible rectangle position
    if not rect:
        rect, _ = self._select_position(width, height)
        if rect:
            self._add_skyline(rect)

    if rect is None:
        return None

    # Store rectangle, and recalculate skyline
    rect.rid = rid
    self.rectangles.append(rect)
    return rect
Add new rectangle
add_rect
python
secnot/rectpack
rectpack/skyline.py
https://github.com/secnot/rectpack/blob/master/rectpack/skyline.py
Apache-2.0
def float2dec(ft, decimal_digits):
    """
    Convert float (or int) to Decimal (rounding up) with the
    requested number of decimal digits.

    Arguments:
        ft (float, int): Number to convert
        decimal_digits (int): Number of digits after decimal point

    Return:
        Decimal: Number converted to Decimal
    """
    with decimal.localcontext() as ctx:
        ctx.rounding = decimal.ROUND_UP
        places = decimal.Decimal(10)**(-decimal_digits)
        return decimal.Decimal.from_float(float(ft)).quantize(places)
Convert float (or int) to Decimal (rounding up) with the requested number of decimal digits. Arguments: ft (float, int): Number to convert decimal_digits (int): Number of digits after decimal point Return: Decimal: Number converted to Decimal
float2dec
python
secnot/rectpack
rectpack/packer.py
https://github.com/secnot/rectpack/blob/master/rectpack/packer.py
Apache-2.0
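A usage sketch for float2dec, with the function copied from the field above so the snippet runs standalone; as the decimal tests earlier in this set show, rounding is always upward and integers only gain the requested digits.

import decimal

def float2dec(ft, decimal_digits):
    with decimal.localcontext() as ctx:
        ctx.rounding = decimal.ROUND_UP
        places = decimal.Decimal(10)**(-decimal_digits)
        return decimal.Decimal.from_float(float(ft)).quantize(places)

print(float2dec(3.141511, 3))   # Decimal('3.142') -- rounded up, never down
print(float2dec(7, 3))          # Decimal('7.000') -- converted, not rounded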
def __init__(self, pack_algo=MaxRectsBssf, rotation=True):
    """
    Arguments:
        pack_algo (PackingAlgorithm): What packing algo to use
        rotation (bool): Enable/Disable rectangle rotation
    """
    self._rotation = rotation
    self._pack_algo = pack_algo
    self.reset()
Arguments: pack_algo (PackingAlgorithm): What packing algo to use rotation (bool): Enable/Disable rectangle rotation
__init__
python
secnot/rectpack
rectpack/packer.py
https://github.com/secnot/rectpack/blob/master/rectpack/packer.py
Apache-2.0
def __getitem__(self, key):
    """
    Return bin in selected position. (excluding empty bins)
    """
    if not isinstance(key, int):
        raise TypeError("Indices must be integers")

    size = len(self)  # avoid recalculations

    if key < 0:
        key += size

    if not 0 <= key < size:
        raise IndexError("Index out of range")

    if key < len(self._closed_bins):
        return self._closed_bins[key]
    else:
        return self._open_bins[key-len(self._closed_bins)]
Return bin in selected position. (excluding empty bins)
__getitem__
python
secnot/rectpack
rectpack/packer.py
https://github.com/secnot/rectpack/blob/master/rectpack/packer.py
Apache-2.0
def _new_open_bin(self, width=None, height=None, rid=None):
    """
    Extract the next empty bin and append it to open bins

    Returns:
        PackingAlgorithm: Initialized empty packing bin.
        None: No bin big enough for the rectangle was found
    """
    factories_to_delete = set()
    new_bin = None

    for key, binfac in self._empty_bins.items():

        # Only return the new bin if the rect fits.
        # (If width or height is None, caller doesn't know the size.)
        if not binfac.fits_inside(width, height):
            continue

        # Create bin and add to open_bins
        new_bin = binfac.new_bin()
        if new_bin is None:
            continue
        self._open_bins.append(new_bin)

        # If the factory was depleted mark for deletion
        if binfac.is_empty():
            factories_to_delete.add(key)

        break

    # Delete marked factories
    for f in factories_to_delete:
        del self._empty_bins[f]

    return new_bin
Extract the next empty bin and append it to open bins Returns: PackingAlgorithm: Initialized empty packing bin. None: No bin big enough for the rectangle was found
_new_open_bin
python
secnot/rectpack
rectpack/packer.py
https://github.com/secnot/rectpack/blob/master/rectpack/packer.py
Apache-2.0
def bin_list(self):
    """
    Return a list of the dimensions of the bins in use, that is, closed
    bins or open bins containing at least one rectangle.
    """
    return [(b.width, b.height) for b in self]
Return a list of the dimensions of the bins in use, that is, closed bins or open bins containing at least one rectangle.
bin_list
python
secnot/rectpack
rectpack/packer.py
https://github.com/secnot/rectpack/blob/master/rectpack/packer.py
Apache-2.0
def _find_best_fit(self, pbin):
    """
    Return best fitness rectangle from rectangles packing _sorted_rect list

    Arguments:
        pbin (PackingAlgorithm): Packing bin

    Returns:
        key of the rectangle with best fitness
    """
    fit = ((pbin.fitness(r[0], r[1]), k) for k, r in self._sorted_rect.items())
    fit = (f for f in fit if f[0] is not None)
    try:
        _, rect = min(fit, key=self.first_item)
        return rect
    except ValueError:
        return None
Return best fitness rectangle from rectangles packing _sorted_rect list Arguments: pbin (PackingAlgorithm): Packing bin Returns: key of the rectangle with best fitness
_find_best_fit
python
secnot/rectpack
rectpack/packer.py
https://github.com/secnot/rectpack/blob/master/rectpack/packer.py
Apache-2.0
def _new_open_bin(self, remaining_rect):
    """
    Extract the next bin where at least one of the remaining
    rectangles fits.

    Arguments:
        remaining_rect (dict): rectangles not placed yet

    Returns:
        PackingAlgorithm: Initialized empty packing bin.
        None: No bin big enough for the rectangle was found
    """
    factories_to_delete = set()
    new_bin = None

    for key, binfac in self._empty_bins.items():

        # Only return the new bin if at least one of the remaining
        # rectangles fits inside.
        a_rectangle_fits = False
        for _, rect in remaining_rect.items():
            if binfac.fits_inside(rect[0], rect[1]):
                a_rectangle_fits = True
                break

        if not a_rectangle_fits:
            factories_to_delete.add(key)
            continue

        # Create bin and add to open_bins
        new_bin = binfac.new_bin()
        if new_bin is None:
            continue
        self._open_bins.append(new_bin)

        # If the factory was depleted mark for deletion
        if binfac.is_empty():
            factories_to_delete.add(key)

        break

    # Delete marked factories
    for f in factories_to_delete:
        del self._empty_bins[f]

    return new_bin
Extract the next bin where at least one of the remaining rectangles fits. Arguments: remaining_rect (dict): rectangles not placed yet Returns: PackingAlgorithm: Initialized empty packing bin. None: No bin big enough for the rectangle was found
_new_open_bin
python
secnot/rectpack
rectpack/packer.py
https://github.com/secnot/rectpack/blob/master/rectpack/packer.py
Apache-2.0
def newPacker(mode=PackingMode.Offline,
        bin_algo=PackingBin.BBF,
        pack_algo=MaxRectsBssf,
        sort_algo=SORT_AREA,
        rotation=True):
    """
    Packer factory helper function

    Arguments:
        mode (PackingMode): Packing mode
            Online: Rectangles are packed as soon as they are added
            Offline: Rectangles aren't packed until pack() is called
        bin_algo (PackingBin): Bin selection heuristic
        pack_algo (PackingAlgorithm): Algorithm used
        rotation (boolean): Enable or disable rectangle rotation.

    Returns:
        Packer: Initialized packer instance.
    """
    packer_class = None

    # Online Mode
    if mode == PackingMode.Online:
        sort_algo = None
        if bin_algo == PackingBin.BNF:
            packer_class = PackerOnlineBNF
        elif bin_algo == PackingBin.BFF:
            packer_class = PackerOnlineBFF
        elif bin_algo == PackingBin.BBF:
            packer_class = PackerOnlineBBF
        else:
            raise AttributeError("Unsupported bin selection heuristic")

    # Offline Mode
    elif mode == PackingMode.Offline:
        if bin_algo == PackingBin.BNF:
            packer_class = PackerBNF
        elif bin_algo == PackingBin.BFF:
            packer_class = PackerBFF
        elif bin_algo == PackingBin.BBF:
            packer_class = PackerBBF
        elif bin_algo == PackingBin.Global:
            packer_class = PackerGlobal
            sort_algo = None
        else:
            raise AttributeError("Unsupported bin selection heuristic")

    else:
        raise AttributeError("Unknown packing mode.")

    if sort_algo:
        return packer_class(pack_algo=pack_algo, sort_algo=sort_algo,
            rotation=rotation)
    else:
        return packer_class(pack_algo=pack_algo, rotation=rotation)
Packer factory helper function Arguments: mode (PackingMode): Packing mode Online: Rectangles are packed as soon as they are added Offline: Rectangles aren't packed until pack() is called bin_algo (PackingBin): Bin selection heuristic pack_algo (PackingAlgorithm): Algorithm used rotation (boolean): Enable or disable rectangle rotation. Returns: Packer: Initialized packer instance.
newPacker
python
secnot/rectpack
rectpack/packer.py
https://github.com/secnot/rectpack/blob/master/rectpack/packer.py
Apache-2.0
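An end-to-end usage sketch for the newPacker factory, following the pattern shown in the rectpack README (offline mode, a single bin); the bin and rectangle sizes here are arbitrary, and rect_list() is assumed to yield (bin, x, y, width, height, rid) tuples as documented there.

from rectpack import newPacker

packer = newPacker(rotation=True)        # defaults: offline mode, BBF, MaxRectsBssf

for width, height in [(100, 30), (40, 60), (30, 30), (70, 70), (100, 50)]:
    packer.add_rect(width, height)

packer.add_bin(300, 450)                 # one packing surface
packer.pack()

for b, x, y, w, h, rid in packer.rect_list():
    print(f"bin {b}: rect {w}x{h} placed at ({x}, {y})")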
def start_node_server(
    server_name: str | None = None,
    server_port: int | None = None,
    node_path: str | None = None,
) -> tuple[str | None, subprocess.Popen[bytes] | None, int | None]:
    """Launches a local server running the provided Interface

    Parameters:
        server_name: to make app accessible on local network, set this to "0.0.0.0". Can be set by environment variable GRADIO_SERVER_NAME.
        server_port: will start gradio app on this port (if available). Can be set by environment variable GRADIO_SERVER_PORT.
        node_path: the path to the node executable. Can be set by environment variable GRADIO_NODE_PATH.
        ssr_mode: If False, will not start the node server and will serve the SPA from the Python server
    Returns:
        server_name: the name of the server (default is "localhost")
        node_process: the node process that is running the SSR app
        node_port: the port the node server is running on
    """
    server_name = server_name or LOCALHOST_NAME

    # Strip IPv6 brackets from the address if they exist.
    # This is needed as http://[::1]:port/ is a valid browser address,
    # but not a valid IPv6 address, so asyncio will throw an exception.
    if server_name.startswith("[") and server_name.endswith("]"):
        host = server_name[1:-1]
    else:
        host = server_name

    server_ports = (
        [server_port]
        if server_port is not None
        else range(INITIAL_PORT_VALUE + 1, INITIAL_PORT_VALUE + 1 + TRY_NUM_PORTS)
    )

    node_process, node_port = start_node_process(
        node_path=node_path or os.getenv("GRADIO_NODE_PATH"),
        server_name=host,
        server_ports=server_ports,
    )

    return server_name, node_process, node_port
Launches a local server running the provided Interface

Parameters:
    server_name: to make app accessible on local network, set this to "0.0.0.0". Can be set by environment variable GRADIO_SERVER_NAME.
    server_port: will start gradio app on this port (if available). Can be set by environment variable GRADIO_SERVER_PORT.
    node_path: the path to the node executable. Can be set by environment variable GRADIO_NODE_PATH.
    ssr_mode: If False, will not start the node server and will serve the SPA from the Python server
Returns:
    server_name: the name of the server (default is "localhost")
    node_process: the node process that is running the SSR app
    node_port: the port the node server is running on
start_node_server
python
gradio-app/gradio
gradio/node_server.py
https://github.com/gradio-app/gradio/blob/master/gradio/node_server.py
Apache-2.0
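A hypothetical caller sketch for the helper above, assuming it is importable from gradio/node_server.py as the path field indicates; terminating the returned Popen is ordinary subprocess cleanup rather than anything the excerpt prescribes.

import os
from gradio.node_server import start_node_server  # import path assumed from the record above

server_name, node_process, node_port = start_node_server(
    server_name="127.0.0.1",
    server_port=None,   # let the helper scan its default port range
    node_path=os.getenv("GRADIO_NODE_PATH"),
)

if node_process is not None:
    print(f"SSR node server running on {server_name}:{node_port}")
    node_process.terminate()   # ordinary subprocess cleanup
    node_process.wait()
else:
    print("Node server was not started")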
def attempt_connection(host: str, port: int) -> bool:
    """Attempts a single connection to the server."""
    try:
        with closing(socket.create_connection((host, port), timeout=1)):
            return True
    except (TimeoutError, ConnectionRefusedError):
        return False
    except Exception:
        return False
Attempts a single connection to the server.
attempt_connection
python
gradio-app/gradio
gradio/node_server.py
https://github.com/gradio-app/gradio/blob/master/gradio/node_server.py
Apache-2.0
def verify_server_startup(host: str, port: int, timeout: float = 5.0) -> bool:
    """Verifies if a server is up and running by attempting to connect."""
    start_time = time.time()
    while time.time() - start_time < timeout:
        try:
            with socket.create_connection((host, port), timeout=1):
                return True
        except (TimeoutError, OSError):
            time.sleep(0.1)
    return False
Verifies if a server is up and running by attempting to connect.
verify_server_startup
python
gradio-app/gradio
gradio/node_server.py
https://github.com/gradio-app/gradio/blob/master/gradio/node_server.py
Apache-2.0
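A self-contained sketch of the same poll-until-reachable pattern the two helpers above implement, using only the standard library; the throwaway listener exists purely so the check has something to connect to.

import socket
import time

def wait_for_port(host: str, port: int, timeout: float = 5.0) -> bool:
    # Same idea as verify_server_startup: retry short connects until the deadline.
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=1):
                return True
        except OSError:
            time.sleep(0.1)
    return False

listener = socket.socket()                 # throwaway server socket
listener.bind(("127.0.0.1", 0))            # port 0 = pick any free port
listener.listen(1)
_, port = listener.getsockname()

print(wait_for_port("127.0.0.1", port))                # True: port is accepting
listener.close()
print(wait_for_port("127.0.0.1", port, timeout=1.0))   # False once it is gone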
def heartbeat( session_hash: str, request: fastapi.Request, background_tasks: BackgroundTasks, username: str = Depends(get_current_user), ): """Clients make a persistent connection to this endpoint to keep the session alive. When the client disconnects, the session state is deleted. """ heartbeat_rate = 0.25 if os.getenv("GRADIO_IS_E2E_TEST", None) else 15 async def wait(): await asyncio.sleep(heartbeat_rate) return "wait" async def stop_stream(): await app.stop_event.wait() return "stop" async def iterator(): while True: try: yield "data: ALIVE\n\n" # We need to close the heartbeat connections as soon as the server stops # otherwise the server can take forever to close wait_task = asyncio.create_task(wait()) stop_stream_task = asyncio.create_task(stop_stream()) done, _ = await asyncio.wait( [wait_task, stop_stream_task], return_when=asyncio.FIRST_COMPLETED, ) done = [d.result() for d in done] if "stop" in done: raise asyncio.CancelledError() except asyncio.CancelledError: req = Request(request, username, session_hash=session_hash) root_path = route_utils.get_root_url( request=request, route_path=f"{API_PREFIX}/hearbeat/{session_hash}", root_path=app.root_path, ) body = PredictBodyInternal( session_hash=session_hash, data=[], request=request ) unload_fn_indices = [ i for i, dep in app.get_blocks().fns.items() if any(t for t in dep.targets if t[1] == "unload") ] for fn_index in unload_fn_indices: # The task runnning this loop has been cancelled # so we add tasks in the background background_tasks.add_task( route_utils.call_process_api, app=app, body=body, gr_request=req, fn=app.get_blocks().fns[fn_index], root_path=root_path, ) # This will mark the state to be deleted in an hour if session_hash in app.state_holder.session_data: app.state_holder.session_data[session_hash].is_closed = True for ( event_id ) in app.get_blocks()._queue.pending_event_ids_session.get( session_hash, [] ): event = app.get_blocks()._queue.event_ids_to_events[ event_id ] event.run_time = math.inf event.signal.set() return return StreamingResponse(iterator(), media_type="text/event-stream")
Clients make a persistent connection to this endpoint to keep the session alive. When the client disconnects, the session state is deleted.
create_app.heartbeat
python
gradio-app/gradio
gradio/routes.py
https://github.com/gradio-app/gradio/blob/master/gradio/routes.py
Apache-2.0
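A hedged client-side sketch of what holding this heartbeat stream open looks like. The "/gradio_api" prefix is an assumption (the route is registered on an APIRouter with prefix API_PREFIX), and `requests` is used only because its streaming API is widely known; any HTTP client that can keep the response open works.

import uuid
import requests

session_hash = uuid.uuid4().hex
# Route shape assumed: API_PREFIX ("/gradio_api" in recent Gradio) + /heartbeat/<session_hash>
url = f"http://127.0.0.1:7860/gradio_api/heartbeat/{session_hash}"

# Keeping the response open keeps the session alive; closing it (leaving the
# with-block) is what lets the server run its unload/cleanup path.
with requests.get(url, stream=True, timeout=(3.05, None)) as resp:
    for line in resp.iter_lines():
        if line:
            print(line.decode())   # "data: ALIVE" roughly every 15 seconds
            break                  # stop after the first beat for this demo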
def create_app( blocks: gradio.Blocks, app_kwargs: dict[str, Any] | None = None, auth_dependency: Callable[[fastapi.Request], str | None] | None = None, strict_cors: bool = True, ssr_mode: bool = False, ) -> App: app_kwargs = app_kwargs or {} app_kwargs.setdefault("default_response_class", ORJSONResponse) delete_cache = blocks.delete_cache or (None, None) app_kwargs["lifespan"] = create_lifespan_handler( app_kwargs.get("lifespan", None), *delete_cache ) app = App(auth_dependency=auth_dependency, **app_kwargs, debug=True) router = APIRouter(prefix=API_PREFIX) app.configure_app(blocks) if not wasm_utils.IS_WASM: app.add_middleware(CustomCORSMiddleware, strict_cors=strict_cors) if ssr_mode: @app.middleware("http") async def conditional_routing_middleware( request: fastapi.Request, call_next ): custom_mount_path = blocks.custom_mount_path path = ( request.url.path.replace(blocks.custom_mount_path or "", "") if custom_mount_path is not None else request.url.path ) if ( getattr(blocks, "node_process", None) is not None and blocks.node_port is not None and not any(path.startswith(f"/{url}") for url in INTERNAL_ROUTES) ): if App.app_port is None: App.app_port = request.url.port or int( os.getenv("GRADIO_SERVER_PORT", "7860") ) try: return await App.proxy_to_node( request, blocks.node_server_name or "0.0.0.0", blocks.node_port, App.app_port, request.url.scheme, custom_mount_path or "", ) except Exception as e: print(e) response = await call_next(request) return response @router.get("/user") @router.get("/user/") def get_current_user(request: fastapi.Request) -> Optional[str]: if app.auth_dependency is not None: return app.auth_dependency(request) token = request.cookies.get( f"access-token-{app.cookie_id}" ) or request.cookies.get(f"access-token-unsecure-{app.cookie_id}") return app.tokens.get(token) @router.get("/login_check") @router.get("/login_check/") def login_check(user: str = Depends(get_current_user)): if (app.auth is None and app.auth_dependency is None) or user is not None: return raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Not authenticated" ) @router.get("/token") @router.get("/token/") def get_token(request: fastapi.Request) -> dict: token = request.cookies.get(f"access-token-{app.cookie_id}") return {"token": token, "user": app.tokens.get(token)} @router.get("/app_id") @router.get("/app_id/") def app_id(request: fastapi.Request) -> dict: # noqa: ARG001 return {"app_id": app.get_blocks().app_id} @router.get("/dev/reload", dependencies=[Depends(login_check)]) async def notify_changes( request: fastapi.Request, ): async def reload_checker(request: fastapi.Request): heartbeat_rate = 15 check_rate = 0.05 last_heartbeat = time.perf_counter() current_count = app.change_count while True: if await request.is_disconnected(): return if app.change_count != current_count: current_count = app.change_count msg = ( json.dumps(f"{app.reload_error_message}") if app.change_type == "error" else "{}" ) yield f"""event: {app.change_type}\ndata: {msg}\n\n""" await asyncio.sleep(check_rate) if time.perf_counter() - last_heartbeat > heartbeat_rate: yield """event: heartbeat\ndata: {}\n\n""" last_heartbeat = time.time() return StreamingResponse( reload_checker(request), media_type="text/event-stream", ) @app.post("/login") @app.post("/login/") def login(form_data: OAuth2PasswordRequestForm = Depends()): username, password = form_data.username.strip(), form_data.password if app.auth is None: return RedirectResponse(url="/", status_code=status.HTTP_302_FOUND) if ( not 
callable(app.auth) and username in app.auth and compare_passwords_securely(password, app.auth[username]) # type: ignore ) or (callable(app.auth) and app.auth.__call__(username, password)): # type: ignore token = secrets.token_urlsafe(16) app.tokens[token] = username response = JSONResponse(content={"success": True}) response.set_cookie( key=f"access-token-{app.cookie_id}", value=token, httponly=True, samesite="none", secure=True, ) response.set_cookie( key=f"access-token-unsecure-{app.cookie_id}", value=token, httponly=True, ) return response else: raise HTTPException(status_code=400, detail="Incorrect credentials.") ############### # OAuth Routes ############### # Define OAuth routes if the app expects it (i.e. a LoginButton is defined). # It allows users to "Sign in with HuggingFace". Otherwise, add the default # logout route. if app.blocks is not None and app.blocks.expects_oauth: attach_oauth(app) else: @app.get("/logout") def logout(user: str = Depends(get_current_user)): response = RedirectResponse(url="/", status_code=status.HTTP_302_FOUND) response.delete_cookie(key=f"access-token-{app.cookie_id}", path="/") response.delete_cookie( key=f"access-token-unsecure-{app.cookie_id}", path="/" ) # A user may have multiple tokens, so we need to delete all of them. for token in list(app.tokens.keys()): if app.tokens[token] == user: del app.tokens[token] return response ############### # Main Routes ############### @app.get("/svelte/{path:path}") def _(path: str): svelte_path = Path(BUILD_PATH_LIB) / "svelte" return FileResponse( routes_safe_join( DeveloperPath(str(svelte_path)), UserProvidedPath(path) ) ) def attach_page(page): @app.get(f"/{page}", response_class=HTMLResponse) @app.get(f"/{page}/", response_class=HTMLResponse) def page_route( request: fastapi.Request, user: str = Depends(get_current_user), ): return main(request, user, page) for pageset in blocks.pages: page = pageset[0] if page != "": attach_page(page) @app.head("/", response_class=HTMLResponse) @app.get("/", response_class=HTMLResponse) def main( request: fastapi.Request, user: str = Depends(get_current_user), page: str = "", ): mimetypes.add_type("application/javascript", ".js") blocks = app.get_blocks() root = route_utils.get_root_url( request=request, route_path=f"/{page}", root_path=app.root_path, ) if (app.auth is None and app.auth_dependency is None) or user is not None: config = utils.safe_deepcopy(blocks.config) config = route_utils.update_root_in_config(config, root) config["username"] = user config["components"] = [ component for component in config["components"] if component["id"] in config["page"][page]["components"] ] config["dependencies"] = [ dependency for dependency in config.get("dependencies", []) if dependency["id"] in config["page"][page]["dependencies"] ] config["layout"] = config["page"][page]["layout"] config["current_page"] = page elif app.auth_dependency: raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Not authenticated" ) else: config = { "auth_required": True, "auth_message": blocks.auth_message, "space_id": blocks.space_id, "root": root, "page": {"": {"layout": {}}}, "pages": [""], "components": [], "dependencies": [], "current_page": "", } try: template = ( "frontend/share.html" if blocks.share else "frontend/index.html" ) gradio_api_info = api_info(request) resp = templates.TemplateResponse( request=request, name=template, context={ "config": config, "gradio_api_info": gradio_api_info, }, ) return resp except TemplateNotFound as err: if blocks.share: raise ValueError( 
"Did you install Gradio from source files? Share mode only " "works when Gradio is installed through the pip package." ) from err else: raise ValueError( "Did you install Gradio from source files? You need to build " "the frontend by running /scripts/build_frontend.sh" ) from err @router.get("/info/", dependencies=[Depends(login_check)]) @router.get("/info", dependencies=[Depends(login_check)]) def api_info(request: fastapi.Request): all_endpoints = request.query_params.get("all_endpoints", False) if all_endpoints: if not app.all_app_info: app.all_app_info = app.get_blocks().get_api_info(all_endpoints=True) return app.all_app_info if not app.api_info: api_info = utils.safe_deepcopy(app.get_blocks().get_api_info()) api_info = cast(dict[str, Any], api_info) api_info = route_utils.update_example_values_to_use_public_url(api_info) app.api_info = api_info return app.api_info @app.get("/config/", dependencies=[Depends(login_check)]) @app.get("/config", dependencies=[Depends(login_check)]) def get_config(request: fastapi.Request): config = utils.safe_deepcopy(app.get_blocks().config) root = route_utils.get_root_url( request=request, route_path="/config", root_path=app.root_path ) config = route_utils.update_root_in_config(config, root) config["username"] = get_current_user(request) return ORJSONResponse(content=config) @app.get("/static/{path:path}") def static_resource(path: str): static_file = routes_safe_join(STATIC_PATH_LIB, UserProvidedPath(path)) return FileResponse(static_file) @router.get("/custom_component/{id}/{environment}/{type}/{file_name}") def custom_component_path( id: str, environment: Literal["client", "server"], type: str, file_name: str, req: fastapi.Request, ): if environment not in ["client", "server"]: raise HTTPException( status_code=404, detail="Environment not supported." ) components = utils.get_all_components() location = next( (item for item in components if item.get_component_class_id() == id), None, ) if location is None: raise HTTPException(status_code=404, detail="Component not found.") module_name = location.__module__ module_path = sys.modules[module_name].__file__ if module_path is None: raise HTTPException(status_code=404, detail="Component not found.") try: requested_path = utils.safe_join( location.TEMPLATE_DIR, UserProvidedPath(f"{type}/{file_name}"), ) except InvalidPathError: raise HTTPException( status_code=404, detail="Component not found." 
) from None path = routes_safe_join( DeveloperPath(str(Path(module_path).parent)), UserProvidedPath(requested_path), ) # Uncomment when we support custom component SSR # if environment == "server": # return PlainTextResponse(path) key = f"{id}-{type}-{file_name}" if key not in app.custom_component_hashes: app.custom_component_hashes[key] = hashlib.sha256( Path(path).read_text(encoding="utf-8").encode() ).hexdigest() version = app.custom_component_hashes.get(key) headers = {"Cache-Control": "max-age=0, must-revalidate"} if version: headers["ETag"] = version if version and req.headers.get("if-none-match") == version: return PlainTextResponse(status_code=304, headers=headers) return FileResponse(path, headers=headers) @app.get("/assets/{path:path}") def build_resource(path: str): build_file = routes_safe_join(BUILD_PATH_LIB, UserProvidedPath(path)) return FileResponse(build_file) @app.get("/favicon.ico") async def favicon(): blocks = app.get_blocks() if blocks.favicon_path is None: return static_resource("img/logo.svg") else: return FileResponse(blocks.favicon_path) @router.head("/proxy={url_path:path}", dependencies=[Depends(login_check)]) @router.get("/proxy={url_path:path}", dependencies=[Depends(login_check)]) async def reverse_proxy(url_path: str): # Adapted from: https://github.com/tiangolo/fastapi/issues/1788 try: rp_req = app.build_proxy_request(url_path) except PermissionError as err: raise HTTPException(status_code=400, detail=str(err)) from err rp_resp = await client.send(rp_req, stream=True) mime_type, _ = mimetypes.guess_type(url_path) if mime_type not in XSS_SAFE_MIMETYPES: rp_resp.headers.update({"Content-Disposition": "attachment"}) rp_resp.headers.update({"Content-Type": "application/octet-stream"}) return StreamingResponse( rp_resp.aiter_raw(), status_code=rp_resp.status_code, headers=rp_resp.headers, # type: ignore background=BackgroundTask(rp_resp.aclose), ) @router.head("/file={path_or_url:path}", dependencies=[Depends(login_check)]) @router.get("/file={path_or_url:path}", dependencies=[Depends(login_check)]) async def file(path_or_url: str, request: fastapi.Request): blocks = app.get_blocks() if client_utils.is_http_url_like(path_or_url): return RedirectResponse( url=path_or_url, status_code=status.HTTP_302_FOUND ) if route_utils.starts_with_protocol(path_or_url): raise HTTPException(403, f"File not allowed: {path_or_url}.") abs_path = utils.abspath(path_or_url) if abs_path.is_dir() or not abs_path.exists(): raise HTTPException(403, f"File not allowed: {path_or_url}.") from gradio.data_classes import _StaticFiles allowed, reason = utils.is_allowed_file( abs_path, blocked_paths=blocks.blocked_paths, allowed_paths=blocks.allowed_paths + _StaticFiles.all_paths, created_paths=[app.uploaded_file_dir, utils.get_cache_folder()], ) if not allowed: raise HTTPException(403, f"File not allowed: {path_or_url}.") mime_type, _ = mimetypes.guess_type(abs_path) if mime_type in XSS_SAFE_MIMETYPES or reason == "allowed": media_type = mime_type or "application/octet-stream" content_disposition_type = "inline" else: media_type = "application/octet-stream" content_disposition_type = "attachment" range_val = request.headers.get("Range", "").strip() if range_val.startswith("bytes=") and "-" in range_val: range_val = range_val[6:] start, end = range_val.split("-") if start.isnumeric() and end.isnumeric(): start = int(start) end = int(end) headers = dict(request.headers) headers["Content-Disposition"] = content_disposition_type headers["Content-Type"] = media_type response = 
ranged_response.RangedFileResponse( abs_path, ranged_response.OpenRange(start, end), headers, stat_result=os.stat(abs_path), ) return response return FileResponse( abs_path, headers={"Accept-Ranges": "bytes"}, content_disposition_type=content_disposition_type, media_type=media_type, filename=abs_path.name, ) @router.post("/stream/{event_id}") async def _(event_id: str, body: PredictBody, request: fastapi.Request): event = app.get_blocks()._queue.event_ids_to_events[event_id] body = PredictBodyInternal(**body.model_dump(), request=request) event.data = body event.signal.set() return {"msg": "success"} @router.websocket("/stream/{event_id}") async def websocket_endpoint(websocket: WebSocket, event_id: str): await websocket.accept() try: while True: data = await websocket.receive_json() body = PredictBody(**data) event = app.get_blocks()._queue.event_ids_to_events[event_id] body_internal = PredictBodyInternal( **body.model_dump(), request=None ) event.data = body_internal event.signal.set() await websocket.send_json({"msg": "success"}) except WebSocketDisconnect: pass @router.post("/stream/{event_id}/close") async def _(event_id: str): event = app.get_blocks()._queue.event_ids_to_events[event_id] event.run_time = math.inf event.signal.set() return {"msg": "success"} @router.get("/stream/{session_hash}/{run}/{component_id}/playlist.m3u8") async def _(session_hash: str, run: int, component_id: int): stream: route_utils.MediaStream | None = ( app.get_blocks() .pending_streams[session_hash] .get(run, {}) .get(component_id, None) ) if not stream: return Response(status_code=404) playlist = f"#EXTM3U\n#EXT-X-PLAYLIST-TYPE:EVENT\n#EXT-X-TARGETDURATION:{stream.max_duration}\n#EXT-X-VERSION:4\n#EXT-X-MEDIA-SEQUENCE:0\n" for segment in stream.segments: playlist += f"#EXTINF:{segment['duration']:.3f},\n" playlist += f"{segment['id']}{segment['extension']}\n" # type: ignore if stream.ended: playlist += "#EXT-X-ENDLIST\n" return Response( content=playlist, media_type="application/vnd.apple.mpegurl" ) @router.get("/stream/{session_hash}/{run}/{component_id}/{segment_id}.{ext}") async def _( session_hash: str, run: int, component_id: int, segment_id: str, ext: str ): if ext not in ["aac", "ts"]: return Response(status_code=400, content="Unsupported file extension") stream: route_utils.MediaStream | None = ( app.get_blocks() .pending_streams[session_hash] .get(run, {}) .get(component_id, None) ) if not stream: return Response(status_code=404, content="Stream not found") segment = next((s for s in stream.segments if s["id"] == segment_id), None) # type: ignore if segment is None: return Response(status_code=404, content="Segment not found") if ext == "aac": return Response(content=segment["data"], media_type="audio/aac") else: return Response(content=segment["data"], media_type="video/MP2T") @router.get("/stream/{session_hash}/{run}/{component_id}/playlist-file") async def _(session_hash: str, run: int, component_id: int): stream: route_utils.MediaStream | None = ( app.get_blocks() .pending_streams[session_hash] .get(run, {}) .get(component_id, None) ) if not stream: return Response(status_code=404) if not stream.combined_file: stream_data = [s["data"] for s in stream.segments] combined_file = ( await app.get_blocks() .get_component(component_id) .combine_stream( # type: ignore stream_data, only_file=True, desired_output_format=stream.desired_output_format, ) ) stream.combined_file = combined_file.path return FileResponse(stream.combined_file) @router.get("/file/{path:path}", 
dependencies=[Depends(login_check)]) async def file_deprecated(path: str, request: fastapi.Request): return await file(path, request) @router.post("/reset/") @router.post("/reset") async def reset_iterator(body: ResetBody): # noqa: ARG001 # No-op, all the cancelling/reset logic handled by /cancel return {"success": True} @router.get("/heartbeat/{session_hash}") def heartbeat( session_hash: str, request: fastapi.Request, background_tasks: BackgroundTasks, username: str = Depends(get_current_user), ): """Clients make a persistent connection to this endpoint to keep the session alive. When the client disconnects, the session state is deleted. """ heartbeat_rate = 0.25 if os.getenv("GRADIO_IS_E2E_TEST", None) else 15 async def wait(): await asyncio.sleep(heartbeat_rate) return "wait" async def stop_stream(): await app.stop_event.wait() return "stop" async def iterator(): while True: try: yield "data: ALIVE\n\n" # We need to close the heartbeat connections as soon as the server stops # otherwise the server can take forever to close wait_task = asyncio.create_task(wait()) stop_stream_task = asyncio.create_task(stop_stream()) done, _ = await asyncio.wait( [wait_task, stop_stream_task], return_when=asyncio.FIRST_COMPLETED, ) done = [d.result() for d in done] if "stop" in done: raise asyncio.CancelledError() except asyncio.CancelledError: req = Request(request, username, session_hash=session_hash) root_path = route_utils.get_root_url( request=request, route_path=f"{API_PREFIX}/hearbeat/{session_hash}", root_path=app.root_path, ) body = PredictBodyInternal( session_hash=session_hash, data=[], request=request ) unload_fn_indices = [ i for i, dep in app.get_blocks().fns.items() if any(t for t in dep.targets if t[1] == "unload") ] for fn_index in unload_fn_indices: # The task runnning this loop has been cancelled # so we add tasks in the background background_tasks.add_task( route_utils.call_process_api, app=app, body=body, gr_request=req, fn=app.get_blocks().fns[fn_index], root_path=root_path, ) # This will mark the state to be deleted in an hour if session_hash in app.state_holder.session_data: app.state_holder.session_data[session_hash].is_closed = True for ( event_id ) in app.get_blocks()._queue.pending_event_ids_session.get( session_hash, [] ): event = app.get_blocks()._queue.event_ids_to_events[ event_id ] event.run_time = math.inf event.signal.set() return return StreamingResponse(iterator(), media_type="text/event-stream") # had to use '/run' endpoint for Colab compatibility, '/api' supported for backwards compatibility @router.post("/run/{api_name}", dependencies=[Depends(login_check)]) @router.post("/run/{api_name}/", dependencies=[Depends(login_check)]) @router.post("/api/{api_name}", dependencies=[Depends(login_check)]) @router.post("/api/{api_name}/", dependencies=[Depends(login_check)]) async def predict( api_name: str, body: PredictBody, request: fastapi.Request, username: str = Depends(get_current_user), ): body = PredictBodyInternal(**body.model_dump(), request=request) fn = route_utils.get_fn( blocks=app.get_blocks(), api_name=api_name, body=body ) if not app.get_blocks().api_open and fn.queue: raise HTTPException( detail="This API endpoint does not accept direct HTTP POST requests. 
Please join the queue to use this API.", status_code=status.HTTP_404_NOT_FOUND, ) gr_request = route_utils.compile_gr_request( body, fn=fn, username=username, request=request, ) root_path = route_utils.get_root_url( request=request, route_path=f"{API_PREFIX}/api/{api_name}", root_path=app.root_path, ) try: output = await route_utils.call_process_api( app=app, body=body, gr_request=gr_request, fn=fn, root_path=root_path, ) except BaseException as error: content = utils.error_payload(error, app.get_blocks().show_error) if not isinstance(error, Error) or error.print_exception: traceback.print_exc() return JSONResponse( content=content, status_code=500, ) return output @router.post("/call/{api_name}", dependencies=[Depends(login_check)]) @router.post("/call/{api_name}/", dependencies=[Depends(login_check)]) async def simple_predict_post( api_name: str, body: SimplePredictBody, request: fastapi.Request, username: str = Depends(get_current_user), ): full_body = PredictBody(**body.model_dump(), simple_format=True) fn = route_utils.get_fn( blocks=app.get_blocks(), api_name=api_name, body=full_body ) full_body.fn_index = fn._id return await queue_join_helper(full_body, request, username) @router.post("/queue/join", dependencies=[Depends(login_check)]) async def queue_join( body: PredictBody, request: fastapi.Request, username: str = Depends(get_current_user), ): if body.session_hash is None: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="Session hash not found.", ) return await queue_join_helper(body, request, username) async def queue_join_helper( body: PredictBody, request: fastapi.Request, username: str, ): blocks = app.get_blocks() if blocks._queue.server_app is None: blocks._queue.set_server_app(app) if blocks._queue.stopped: raise HTTPException( status_code=status.HTTP_503_SERVICE_UNAVAILABLE, detail="Queue is stopped.", ) body = PredictBodyInternal(**body.model_dump(), request=request) success, event_id = await blocks._queue.push( body=body, request=request, username=username ) if not success: status_code = ( status.HTTP_503_SERVICE_UNAVAILABLE if "Queue is full." 
in event_id else status.HTTP_400_BAD_REQUEST ) raise HTTPException(status_code=status_code, detail=event_id) return {"event_id": event_id} @router.post("/cancel") async def cancel_event(body: CancelBody): await cancel_tasks({f"{body.session_hash}_{body.fn_index}"}) blocks = app.get_blocks() # Need to complete the job so that the client disconnects session_open = ( body.session_hash in blocks._queue.pending_messages_per_session ) event_running = ( body.event_id in blocks._queue.pending_event_ids_session.get(body.session_hash, {}) ) if session_open and event_running: message = ProcessCompletedMessage( output={}, success=True, event_id=body.event_id ) blocks._queue.pending_messages_per_session[ body.session_hash ].put_nowait(message) if body.event_id in app.iterators: async with app.lock: del app.iterators[body.event_id] app.iterators_to_reset.add(body.event_id) return {"success": True} @router.get("/call/{api_name}/{event_id}", dependencies=[Depends(login_check)]) async def simple_predict_get( request: fastapi.Request, event_id: str, ): def process_msg(message: EventMessage) -> str | None: msg = message.model_dump() if isinstance(message, ProcessCompletedMessage): event = "complete" if message.success else "error" data = msg["output"].get("data") elif isinstance(message, ProcessGeneratingMessage): event = "generating" if message.success else "error" data = msg["output"].get("data") elif isinstance(message, HeartbeatMessage): event = "heartbeat" data = None elif isinstance(message, UnexpectedErrorMessage): event = "error" data = message.message else: return None return f"event: {event}\ndata: {json.dumps(data)}\n\n" return await queue_data_helper(request, event_id, process_msg) @router.get("/queue/data", dependencies=[Depends(login_check)]) async def queue_data( request: fastapi.Request, session_hash: str, ): def process_msg(message: EventMessage) -> str: return f"data: {orjson.dumps(message.model_dump(), default=str).decode('utf-8')}\n\n" return await queue_data_helper(request, session_hash, process_msg) async def queue_data_helper( request: fastapi.Request, session_hash: str, process_msg: Callable[[EventMessage], str | None], ): blocks = app.get_blocks() async def sse_stream(request: fastapi.Request): try: last_heartbeat = time.perf_counter() while True: if await request.is_disconnected(): await blocks._queue.clean_events(session_hash=session_hash) return if ( session_hash not in blocks._queue.pending_messages_per_session ): raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="Session not found.", ) heartbeat_rate = 15 check_rate = 0.05 message = None try: messages = blocks._queue.pending_messages_per_session[ session_hash ] message = messages.get_nowait() except EmptyQueue: await asyncio.sleep(check_rate) if time.perf_counter() - last_heartbeat > heartbeat_rate: # Fix this message = HeartbeatMessage() # Need to reset last_heartbeat with perf_counter # otherwise only a single hearbeat msg will be sent # and then the stream will retry leading to infinite queue 😬 last_heartbeat = time.perf_counter() if blocks._queue.stopped: message = UnexpectedErrorMessage( message="Server stopped unexpectedly.", success=False, ) if message: response = process_msg(message) if response is not None: yield response if ( isinstance(message, ProcessCompletedMessage) and message.event_id ): blocks._queue.pending_event_ids_session[ session_hash ].remove(message.event_id) if message.msg == ServerMessage.server_stopped or ( message.msg == ServerMessage.process_completed and ( len( 
blocks._queue.pending_event_ids_session[ session_hash ] ) == 0 ) ): message = CloseStreamMessage() response = process_msg(message) if response is not None: yield response return except BaseException as e: message = UnexpectedErrorMessage( message=str(e), ) response = process_msg(message) if isinstance(e, asyncio.CancelledError): del blocks._queue.pending_messages_per_session[session_hash] await blocks._queue.clean_events(session_hash=session_hash) if response is not None: yield response raise e return StreamingResponse( sse_stream(request), media_type="text/event-stream", ) async def get_item_or_file( request: fastapi.Request, ) -> Union[ComponentServerJSONBody, ComponentServerBlobBody]: content_type = request.headers.get("Content-Type") if isinstance(content_type, str) and content_type.startswith( "multipart/form-data" ): files = [] data = {} async with request.form() as form: for key, value in form.items(): if ( isinstance(value, list) and len(value) > 1 and isinstance(value[0], StarletteUploadFile) ): for i, v in enumerate(value): if isinstance(v, StarletteUploadFile): filename = v.filename contents = await v.read() files.append((filename, contents)) else: data[f"{key}-{i}"] = v elif isinstance(value, StarletteUploadFile): filename = value.filename contents = await value.read() files.append((filename, contents)) else: data[key] = value return ComponentServerBlobBody( data=DataWithFiles(data=data, files=files), component_id=data["component_id"], session_hash=data["session_hash"], fn_name=data["fn_name"], ) else: try: data = await request.json() return ComponentServerJSONBody( data=data["data"], component_id=data["component_id"], session_hash=data["session_hash"], fn_name=data["fn_name"], ) except Exception: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid JSON body.", ) from None @router.post( "/component_server", dependencies=[Depends(login_check)], ) @router.post( "/component_server/", dependencies=[Depends(login_check)], ) async def component_server( request: fastapi.Request, ): body = await get_item_or_file(request) state = app.state_holder[body.session_hash] component_id = body.component_id block: Block if component_id in state: block = state[component_id] else: block = app.get_blocks().blocks[component_id] fn = getattr(block, body.fn_name, None) if fn is None or not getattr(fn, "_is_server_fn", False): raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="Function not found.", ) if inspect.iscoroutinefunction(fn): return await fn(body.data) else: return fn(body.data) @router.get( "/queue/status", dependencies=[Depends(login_check)], response_model=EstimationMessage, ) async def get_queue_status(): return app.get_blocks()._queue.get_status() @router.get("/upload_progress") def get_upload_progress(upload_id: str, request: fastapi.Request): async def sse_stream(request: fastapi.Request): last_heartbeat = time.perf_counter() is_done = False while True: if await request.is_disconnected(): file_upload_statuses.stop_tracking(upload_id) return if is_done: file_upload_statuses.stop_tracking(upload_id) return heartbeat_rate = 15 check_rate = 0.05 try: if file_upload_statuses.is_done(upload_id): message = {"msg": "done"} is_done = True else: update = file_upload_statuses.pop(upload_id) message = { "msg": "update", "orig_name": update.filename, "chunk_size": update.chunk_size, } yield f"data: {json.dumps(message)}\n\n" except FileUploadProgressNotTrackedError: return except FileUploadProgressNotQueuedError: await asyncio.sleep(check_rate) if 
time.perf_counter() - last_heartbeat > heartbeat_rate: message = {"msg": "heartbeat"} yield f"data: {json.dumps(message)}\n\n" last_heartbeat = time.perf_counter() return StreamingResponse( sse_stream(request), media_type="text/event-stream", ) @router.post("/upload", dependencies=[Depends(login_check)]) async def upload_file( request: fastapi.Request, bg_tasks: BackgroundTasks, upload_id: Optional[str] = None, ): content_type_header = request.headers.get("Content-Type") content_type: bytes content_type, _ = parse_options_header(content_type_header or "") if content_type != b"multipart/form-data": raise HTTPException(status_code=400, detail="Invalid content type.") try: if upload_id: file_upload_statuses.track(upload_id) max_file_size = app.get_blocks().max_file_size max_file_size = max_file_size if max_file_size is not None else math.inf multipart_parser = GradioMultiPartParser( request.headers, request.stream(), max_files=1000, max_fields=1000, max_file_size=max_file_size, upload_id=upload_id if upload_id else None, upload_progress=file_upload_statuses if upload_id else None, ) form = await multipart_parser.parse() except MultiPartException as exc: code = 413 if "maximum allowed size" in exc.message else 400 return PlainTextResponse(exc.message, status_code=code) output_files = [] files_to_copy = [] locations: list[str] = [] for temp_file in form.getlist("files"): if not isinstance(temp_file, GradioUploadFile): raise TypeError("File is not an instance of GradioUploadFile") if temp_file.filename: file_name = Path(temp_file.filename).name name = client_utils.strip_invalid_filename_characters(file_name) else: name = f"tmp{secrets.token_hex(5)}" directory = Path(app.uploaded_file_dir) / temp_file.sha.hexdigest() directory.mkdir(exist_ok=True, parents=True) try: dest = utils.safe_join( DeveloperPath(str(directory)), UserProvidedPath(name) ) except InvalidPathError as err: raise HTTPException( status_code=400, detail=f"Invalid file name: {name}" ) from err temp_file.file.close() # we need to move the temp file to the cache directory # but that's possibly blocking and we're in an async function # so we try to rename (this is what shutil.move tries first) # which should be super fast. # if that fails, we move in the background. 
try: os.rename(temp_file.file.name, dest) except OSError: files_to_copy.append(temp_file.file.name) locations.append(dest) output_files.append(dest) blocks.upload_file_set.add(dest) if files_to_copy: bg_tasks.add_task( move_uploaded_files_to_cache, files_to_copy, locations ) return output_files @router.get("/startup-events") async def startup_events(): if not app.startup_events_triggered: app.get_blocks().run_startup_events() await app.get_blocks().run_extra_startup_events() app.startup_events_triggered = True return True return False @router.get("/theme.css", response_class=PlainTextResponse) @app.get("/theme.css", response_class=PlainTextResponse) def theme_css(): return PlainTextResponse(app.get_blocks().theme_css, media_type="text/css") @app.get("/robots.txt", response_class=PlainTextResponse) def robots_txt(): if app.get_blocks().share: return "User-agent: *\nDisallow: /" else: return "User-agent: *\nDisallow: " @app.get("/pwa_icon") @app.get("/pwa_icon/{size}") async def pwa_icon(size: int | None = None): blocks = app.get_blocks() favicon_path = blocks.favicon_path if favicon_path is None: raise HTTPException(status_code=404) if size is None: return FileResponse(favicon_path) import PIL.Image img = PIL.Image.open(favicon_path) img = img.resize((size, size)) img_byte_array = io.BytesIO() img.save(img_byte_array, format="PNG") img_byte_array.seek(0) return StreamingResponse( io.BytesIO(img_byte_array.read()), media_type="image/png" ) @app.get("/manifest.json") def manifest_json(): if not blocks.pwa: raise HTTPException(status_code=404) favicon_path = blocks.favicon_path if favicon_path is None: icons = [ { "src": "static/img/logo_nosize.svg", "sizes": "any", "type": "image/svg+xml", "purpose": "any", }, ] elif favicon_path.endswith(".svg"): icons = [ { "src": app.url_path_for("pwa_icon"), "sizes": "any", "type": "image/svg+xml", "purpose": "any", }, ] else: icons = [ { "src": app.url_path_for("pwa_icon", size=192), "sizes": "192x192", "type": "image/png", "purpose": "any", }, { "src": app.url_path_for("pwa_icon", size=512), "sizes": "512x512", "type": "image/png", "purpose": "any", }, ] return ORJSONResponse( content={ # NOTE: Required members: https://developer.mozilla.org/en-US/docs/Web/Progressive_web_apps/Guides/Making_PWAs_installable#required_manifest_members "name": app.get_blocks().title or "Gradio", "icons": icons, "start_url": "./", "display": "standalone", }, media_type="application/manifest+json", ) @router.get("/monitoring", dependencies=[Depends(login_check)]) async def analytics_login(request: fastapi.Request): if not blocks.enable_monitoring: raise HTTPException( status_code=403, detail="Monitoring is not enabled." ) root_url = route_utils.get_root_url( request=request, route_path=f"{API_PREFIX}/monitoring", root_path=app.root_path, ) monitoring_url = f"{root_url}/monitoring/{app.analytics_key}" print(f"* Monitoring URL: {monitoring_url} *") return HTMLResponse("See console for monitoring URL.") @router.get("/monitoring/{key}") async def analytics_dashboard(key: str): if not blocks.enable_monitoring: raise HTTPException( status_code=403, detail="Monitoring is not enabled." 
) if compare_passwords_securely(key, app.analytics_key): analytics_url = f"/monitoring/{app.analytics_key}/dashboard" if not app.monitoring_enabled: from gradio.monitoring_dashboard import data from gradio.monitoring_dashboard import demo as dashboard mount_gradio_app(app, dashboard, path=analytics_url) dashboard._queue.start() analytics = app.get_blocks()._queue.event_analytics data["data"] = analytics app.monitoring_enabled = True return RedirectResponse( url=analytics_url, status_code=status.HTTP_302_FOUND ) else: raise HTTPException(status_code=403, detail="Invalid key.") app.include_router(router) return app
Clients make a persistent connection to this endpoint to keep the session alive. When the client disconnects, the session state is deleted.
create_app
python
gradio-app/gradio
gradio/routes.py
https://github.com/gradio-app/gradio/blob/master/gradio/routes.py
Apache-2.0
def routes_safe_join(directory: DeveloperPath, path: UserProvidedPath) -> str:
    """Safely join the user path to the directory while performing some additional http-related checks,
    e.g. ensuring that the full path exists on the local file system and is not a directory"""
    if path == "":
        raise fastapi.HTTPException(400)
    if route_utils.starts_with_protocol(path):
        raise fastapi.HTTPException(403)
    try:
        fullpath = Path(utils.safe_join(directory, path))
    except InvalidPathError as e:
        raise fastapi.HTTPException(403) from e
    if fullpath.is_dir():
        raise fastapi.HTTPException(403)
    if not fullpath.exists():
        raise fastapi.HTTPException(404)
    return str(fullpath)
Safely join the user path to the directory while performing some additional http-related checks, e.g. ensuring that the full path exists on the local file system and is not a directory
routes_safe_join
python
gradio-app/gradio
gradio/routes.py
https://github.com/gradio-app/gradio/blob/master/gradio/routes.py
Apache-2.0
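The traversal guard itself lives in utils.safe_join, which the excerpt does not show; below is a generic sketch of that kind of check built only on pathlib, not Gradio's actual implementation.

from pathlib import Path

class InvalidPath(Exception):
    pass

def safe_join_sketch(directory: str, user_path: str) -> Path:
    # Resolve both paths and require the result to stay inside `directory`.
    base = Path(directory).resolve()
    candidate = (base / user_path).resolve()
    if candidate != base and base not in candidate.parents:
        raise InvalidPath(f"{user_path!r} escapes {directory!r}")
    return candidate

print(safe_join_sketch("/tmp", "sub/file.txt"))       # stays under /tmp
try:
    safe_join_sketch("/tmp", "../etc/passwd")         # rejected
except InvalidPath as exc:
    print("blocked:", exc)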
def mount_gradio_app( app: fastapi.FastAPI, blocks: gradio.Blocks, path: str, server_name: str = "0.0.0.0", server_port: int = 7860, show_api: bool | None = None, app_kwargs: dict[str, Any] | None = None, *, auth: Callable | tuple[str, str] | list[tuple[str, str]] | None = None, auth_message: str | None = None, auth_dependency: Callable[[fastapi.Request], str | None] | None = None, root_path: str | None = None, allowed_paths: list[str] | None = None, blocked_paths: list[str] | None = None, favicon_path: str | None = None, show_error: bool = True, max_file_size: str | int | None = None, ssr_mode: bool | None = None, node_server_name: str | None = None, node_port: int | None = None, enable_monitoring: bool | None = None, pwa: bool | None = None, ) -> fastapi.FastAPI: """Mount a gradio.Blocks to an existing FastAPI application. Parameters: app: The parent FastAPI application. blocks: The blocks object we want to mount to the parent app. path: The path at which the gradio application will be mounted, e.g. "/gradio". server_name: The server name on which the Gradio app will be run. server_port: The port on which the Gradio app will be run. app_kwargs: Additional keyword arguments to pass to the underlying FastAPI app as a dictionary of parameter keys and argument values. For example, `{"docs_url": "/docs"}` auth: If provided, username and password (or list of username-password tuples) required to access the gradio app. Can also provide function that takes username and password and returns True if valid login. auth_message: If provided, HTML message provided on login page for this gradio app. auth_dependency: A function that takes a FastAPI request and returns a string user ID or None. If the function returns None for a specific request, that user is not authorized to access the gradio app (they will see a 401 Unauthorized response). To be used with external authentication systems like OAuth. Cannot be used with `auth`. root_path: The subpath corresponding to the public deployment of this FastAPI application. For example, if the application is served at "https://example.com/myapp", the `root_path` should be set to "/myapp". A full URL beginning with http:// or https:// can be provided, which will be used in its entirety. Normally, this does not need to provided (even if you are using a custom `path`). However, if you are serving the FastAPI app behind a proxy, the proxy may not provide the full path to the Gradio app in the request headers. In which case, you can provide the root path here. allowed_paths: List of complete filepaths or parent directories that this gradio app is allowed to serve. Must be absolute paths. Warning: if you provide directories, any files in these directories or their subdirectories are accessible to all users of your app. blocked_paths: List of complete filepaths or parent directories that this gradio app is not allowed to serve (i.e. users of your app are not allowed to access). Must be absolute paths. Warning: takes precedence over `allowed_paths` and all other directories exposed by Gradio by default. favicon_path: If a path to a file (.png, .gif, or .ico) is provided, it will be used as the favicon for this gradio app's page. show_error: If True, any errors in the gradio app will be displayed in an alert modal and printed in the browser console log. Otherwise, errors will only be visible in the terminal session running the Gradio app. max_file_size: The maximum file size in bytes that can be uploaded. 
Can be a string of the form "<value><unit>", where value is any positive integer and unit is one of "b", "kb", "mb", "gb", "tb". If None, no limit is set. show_api: If False, hides the "Use via API" button on the Gradio interface. ssr_mode: If True, the Gradio app will be rendered using server-side rendering mode, which is typically more performant and provides better SEO, but this requires Node 20+ to be installed on the system. If False, the app will be rendered using client-side rendering mode. If None, will use GRADIO_SSR_MODE environment variable or default to False. node_server_name: The name of the Node server to use for SSR. If None, will use GRADIO_NODE_SERVER_NAME environment variable or search for a node binary in the system. node_port: The port on which the Node server should run. If None, will use GRADIO_NODE_SERVER_PORT environment variable or find a free port. Example: from fastapi import FastAPI import gradio as gr app = FastAPI() @app.get("/") def read_main(): return {"message": "This is your main app"} io = gr.Interface(lambda x: "Hello, " + x + "!", "textbox", "textbox") app = gr.mount_gradio_app(app, io, path="/gradio") # Then run `uvicorn run:app` from the terminal and navigate to http://localhost:8000/gradio. """ if favicon_path is not None and path != "/": warnings.warn( "The 'favicon_path' parameter is set but will be ignored because 'path' is not '/'. " "Please add the favicon directly to your FastAPI app." ) blocks.dev_mode = False if show_api is not None: blocks.show_api = show_api blocks.max_file_size = utils._parse_file_size(max_file_size) blocks.config = blocks.get_config_file() blocks.validate_queue_settings() blocks.custom_mount_path = path blocks.server_port = server_port blocks.server_name = server_name blocks.enable_monitoring = enable_monitoring if pwa is not None: blocks.pwa = pwa if auth is not None and auth_dependency is not None: raise ValueError( "You cannot provide both `auth` and `auth_dependency` in mount_gradio_app(). Please choose one." 
) if ( auth and not callable(auth) and not isinstance(auth[0], tuple) and not isinstance(auth[0], list) ): blocks.auth = [auth] else: blocks.auth = auth blocks.auth_message = auth_message blocks.favicon_path = favicon_path blocks.allowed_paths = allowed_paths or [] blocks.blocked_paths = blocked_paths or [] blocks.show_error = show_error if not isinstance(blocks.allowed_paths, list): raise ValueError("`allowed_paths` must be a list of directories.") if not isinstance(blocks.blocked_paths, list): raise ValueError("`blocked_paths` must be a list of directories.") if root_path is not None: blocks.root_path = root_path blocks.ssr_mode = ( False if wasm_utils.IS_WASM else ( ssr_mode if ssr_mode is not None else os.getenv("GRADIO_SSR_MODE", "False").lower() == "true" ) ) if blocks.ssr_mode: blocks.node_path = os.environ.get( "GRADIO_NODE_PATH", "" if wasm_utils.IS_WASM else get_node_path() ) blocks.node_server_name = node_server_name blocks.node_port = node_port blocks.node_server_name, blocks.node_process, blocks.node_port = ( start_node_server( server_name=blocks.node_server_name, server_port=blocks.node_port, node_path=blocks.node_path, ) ) gradio_app = App.create_app( blocks, app_kwargs=app_kwargs, auth_dependency=auth_dependency, ssr_mode=blocks.ssr_mode, ) old_lifespan = app.router.lifespan_context @contextlib.asynccontextmanager async def new_lifespan(app: FastAPI): async with old_lifespan( app ): # Insert the startup events inside the FastAPI context manager async with gradio_app.router.lifespan_context(gradio_app): gradio_app.get_blocks().run_startup_events() await gradio_app.get_blocks().run_extra_startup_events() yield app.router.lifespan_context = new_lifespan app.mount(path, gradio_app) return app
Mount a gradio.Blocks to an existing FastAPI application. Parameters: app: The parent FastAPI application. blocks: The blocks object we want to mount to the parent app. path: The path at which the gradio application will be mounted, e.g. "/gradio". server_name: The server name on which the Gradio app will be run. server_port: The port on which the Gradio app will be run. app_kwargs: Additional keyword arguments to pass to the underlying FastAPI app as a dictionary of parameter keys and argument values. For example, `{"docs_url": "/docs"}` auth: If provided, username and password (or list of username-password tuples) required to access the gradio app. Can also provide function that takes username and password and returns True if valid login. auth_message: If provided, HTML message provided on login page for this gradio app. auth_dependency: A function that takes a FastAPI request and returns a string user ID or None. If the function returns None for a specific request, that user is not authorized to access the gradio app (they will see a 401 Unauthorized response). To be used with external authentication systems like OAuth. Cannot be used with `auth`. root_path: The subpath corresponding to the public deployment of this FastAPI application. For example, if the application is served at "https://example.com/myapp", the `root_path` should be set to "/myapp". A full URL beginning with http:// or https:// can be provided, which will be used in its entirety. Normally, this does not need to provided (even if you are using a custom `path`). However, if you are serving the FastAPI app behind a proxy, the proxy may not provide the full path to the Gradio app in the request headers. In which case, you can provide the root path here. allowed_paths: List of complete filepaths or parent directories that this gradio app is allowed to serve. Must be absolute paths. Warning: if you provide directories, any files in these directories or their subdirectories are accessible to all users of your app. blocked_paths: List of complete filepaths or parent directories that this gradio app is not allowed to serve (i.e. users of your app are not allowed to access). Must be absolute paths. Warning: takes precedence over `allowed_paths` and all other directories exposed by Gradio by default. favicon_path: If a path to a file (.png, .gif, or .ico) is provided, it will be used as the favicon for this gradio app's page. show_error: If True, any errors in the gradio app will be displayed in an alert modal and printed in the browser console log. Otherwise, errors will only be visible in the terminal session running the Gradio app. max_file_size: The maximum file size in bytes that can be uploaded. Can be a string of the form "<value><unit>", where value is any positive integer and unit is one of "b", "kb", "mb", "gb", "tb". If None, no limit is set. show_api: If False, hides the "Use via API" button on the Gradio interface. ssr_mode: If True, the Gradio app will be rendered using server-side rendering mode, which is typically more performant and provides better SEO, but this requires Node 20+ to be installed on the system. If False, the app will be rendered using client-side rendering mode. If None, will use GRADIO_SSR_MODE environment variable or default to False. node_server_name: The name of the Node server to use for SSR. If None, will use GRADIO_NODE_SERVER_NAME environment variable or search for a node binary in the system. node_port: The port on which the Node server should run. 
If None, will use GRADIO_NODE_SERVER_PORT environment variable or find a free port. Example: from fastapi import FastAPI import gradio as gr app = FastAPI() @app.get("/") def read_main(): return {"message": "This is your main app"} io = gr.Interface(lambda x: "Hello, " + x + "!", "textbox", "textbox") app = gr.mount_gradio_app(app, io, path="/gradio") # Then run `uvicorn run:app` from the terminal and navigate to http://localhost:8000/gradio.
mount_gradio_app
python
gradio-app/gradio
gradio/routes.py
https://github.com/gradio-app/gradio/blob/master/gradio/routes.py
Apache-2.0
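Extending the docstring's example with a programmatic uvicorn run plus two of the keyword options from the signature above; the header-based auth_dependency is purely illustrative and assumes a trusted reverse proxy sets that header.

from fastapi import FastAPI, Request
import gradio as gr
import uvicorn

app = FastAPI()

def header_user(request: Request):
    # Illustrative only: trust an upstream proxy header for the user id.
    return request.headers.get("x-forwarded-user")

io = gr.Interface(lambda x: "Hello, " + x + "!", "textbox", "textbox")
app = gr.mount_gradio_app(
    app,
    io,
    path="/gradio",
    auth_dependency=header_user,   # requests without the header get a 401
    root_path="/myapp",            # per the docstring: set when the app is publicly served under example.com/myapp
)

if __name__ == "__main__":
    uvicorn.run(app, host="127.0.0.1", port=8000)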
def __init__( self, fn: Callable, *, multimodal: bool = False, type: Literal["messages", "tuples"] | None = None, chatbot: Chatbot | None = None, textbox: Textbox | MultimodalTextbox | None = None, additional_inputs: str | Component | list[str | Component] | None = None, additional_inputs_accordion: str | Accordion | None = None, additional_outputs: Component | list[Component] | None = None, editable: bool = False, examples: list[str] | list[MultimodalValue] | list[list] | None = None, example_labels: list[str] | None = None, example_icons: list[str] | None = None, run_examples_on_click: bool = True, cache_examples: bool | None = None, cache_mode: Literal["eager", "lazy"] | None = None, title: str | None = None, description: str | None = None, theme: Theme | str | None = None, flagging_mode: Literal["never", "manual"] | None = None, flagging_options: list[str] | tuple[str, ...] | None = ("Like", "Dislike"), flagging_dir: str = ".gradio/flagged", css: str | None = None, css_paths: str | Path | Sequence[str | Path] | None = None, js: str | Literal[True] | None = None, head: str | None = None, head_paths: str | Path | Sequence[str | Path] | None = None, analytics_enabled: bool | None = None, autofocus: bool = True, autoscroll: bool = True, submit_btn: str | bool | None = True, stop_btn: str | bool | None = True, concurrency_limit: int | None | Literal["default"] = "default", delete_cache: tuple[int, int] | None = None, show_progress: Literal["full", "minimal", "hidden"] = "minimal", fill_height: bool = True, fill_width: bool = False, api_name: str | Literal[False] = "chat", save_history: bool = False, ): """ Parameters: fn: the function to wrap the chat interface around. Normally (assuming `type` is set to "messages"), the function should accept two parameters: a `str` representing the input message and `list` of openai-style dictionaries: {"role": "user" | "assistant", "content": `str` | {"path": `str`} | `gr.Component`} representing the chat history. The function should return/yield a `str` (for a simple message), a supported Gradio component (e.g. gr.Image to return an image), a `dict` (for a complete openai-style message response), or a `list` of such messages. multimodal: if True, the chat interface will use a `gr.MultimodalTextbox` component for the input, which allows for the uploading of multimedia files. If False, the chat interface will use a gr.Textbox component for the input. If this is True, the first argument of `fn` should accept not a `str` message but a `dict` message with keys "text" and "files" type: The format of the messages passed into the chat history parameter of `fn`. If "messages", passes the history as a list of dictionaries with openai-style "role" and "content" keys. The "content" key's value should be one of the following - (1) strings in valid Markdown (2) a dictionary with a "path" key and value corresponding to the file to display or (3) an instance of a Gradio component: at the moment gr.Image, gr.Plot, gr.Video, gr.Gallery, gr.Audio, and gr.HTML are supported. The "role" key should be one of 'user' or 'assistant'. Any other roles will not be displayed in the output. If this parameter is 'tuples' (deprecated), passes the chat history as a `list[list[str | None | tuple]]`, i.e. a list of lists. The inner list should have 2 elements: the user message and the response message. chatbot: an instance of the gr.Chatbot component to use for the chat interface, if you would like to customize the chatbot properties. 
If not provided, a default gr.Chatbot component will be created. textbox: an instance of the gr.Textbox or gr.MultimodalTextbox component to use for the chat interface, if you would like to customize the textbox properties. If not provided, a default gr.Textbox or gr.MultimodalTextbox component will be created. editable: if True, users can edit past messages to regenerate responses. additional_inputs: an instance or list of instances of gradio components (or their string shortcuts) to use as additional inputs to the chatbot. If the components are not already rendered in a surrounding Blocks, then the components will be displayed under the chatbot, in an accordion. The values of these components will be passed into `fn` as arguments in order after the chat history. additional_inputs_accordion: if a string is provided, this is the label of the `gr.Accordion` to use to contain additional inputs. A `gr.Accordion` object can be provided as well to configure other properties of the container holding the additional inputs. Defaults to a `gr.Accordion(label="Additional Inputs", open=False)`. This parameter is only used if `additional_inputs` is provided. additional_outputs: an instance or list of instances of gradio components to use as additional outputs from the chat function. These must be components that are already defined in the same Blocks scope. If provided, the chat function should return additional values for these components. See $demo/chatinterface_artifacts. examples: sample inputs for the function; if provided, appear within the chatbot and can be clicked to populate the chatbot input. Should be a list of strings representing text-only examples, or a list of dictionaries (with keys `text` and `files`) representing multimodal examples. If `additional_inputs` are provided, the examples must be a list of lists, where the first element of each inner list is the string or dictionary example message and the remaining elements are the example values for the additional inputs -- in this case, the examples will appear under the chatbot. example_labels: labels for the examples, to be displayed instead of the examples themselves. If provided, should be a list of strings with the same length as the examples list. Only applies when examples are displayed within the chatbot (i.e. when `additional_inputs` is not provided). example_icons: icons for the examples, to be displayed above the examples. If provided, should be a list of string URLs or local paths with the same length as the examples list. Only applies when examples are displayed within the chatbot (i.e. when `additional_inputs` is not provided). cache_examples: if True, caches examples in the server for fast runtime in examples. The default option in HuggingFace Spaces is True. The default option elsewhere is False. cache_mode: if "eager", all examples are cached at app launch. If "lazy", examples are cached for all users after the first use by any user of the app. If None, will use the GRADIO_CACHE_MODE environment variable if defined, or default to "eager". run_examples_on_click: if True, clicking on an example will run the example through the chatbot fn and the response will be displayed in the chatbot. If False, clicking on an example will only populate the chatbot input with the example message. Has no effect if `cache_examples` is True title: a title for the interface; if provided, appears above chatbot in large font. Also used as the tab title when opened in a browser window. 
description: a description for the interface; if provided, appears above the chatbot and beneath the title in regular font. Accepts Markdown and HTML content. theme: a Theme object or a string representing a theme. If a string, will look for a built-in theme with that name (e.g. "soft" or "default"), or will attempt to load a theme from the Hugging Face Hub (e.g. "gradio/monochrome"). If None, will use the Default theme. flagging_mode: one of "never", "manual". If "never", users will not see a button to flag an input and output. If "manual", users will see a button to flag. flagging_options: a list of strings representing the options that users can choose from when flagging a message. Defaults to ["Like", "Dislike"]. These two case-sensitive strings will render as "thumbs up" and "thumbs down" icon respectively next to each bot message, but any other strings appear under a separate flag icon. flagging_dir: path to the the directory where flagged data is stored. If the directory does not exist, it will be created. css: Custom css as a code string. This css will be included in the demo webpage. css_paths: Custom css as a pathlib.Path to a css file or a list of such paths. This css files will be read, concatenated, and included in the demo webpage. If the `css` parameter is also set, the css from `css` will be included first. js: Custom js as a code string. The custom js should be in the form of a single js function. This function will automatically be executed when the page loads. For more flexibility, use the head parameter to insert js inside <script> tags. head: Custom html code to insert into the head of the demo webpage. This can be used to add custom meta tags, multiple scripts, stylesheets, etc. to the page. head_paths: Custom html code as a pathlib.Path to a html file or a list of such paths. This html files will be read, concatenated, and included in the head of the demo webpage. If the `head` parameter is also set, the html from `head` will be included first. analytics_enabled: whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable if defined, or default to True. autofocus: if True, autofocuses to the textbox when the page loads. autoscroll: If True, will automatically scroll to the bottom of the chatbot when a new message appears, unless the user scrolls up. If False, will not scroll to the bottom of the chatbot automatically. submit_btn: If True, will show a submit button with a submit icon within the textbox. If a string, will use that string as the submit button text in place of the icon. If False, will not show a submit button. stop_btn: If True, will show a button with a stop icon during generator executions, to stop generating. If a string, will use that string as the submit button text in place of the stop icon. If False, will not show a stop button. concurrency_limit: if set, this is the maximum number of chatbot submissions that can be running simultaneously. Can be set to None to mean no limit (any number of chatbot submissions can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `.queue()`, which is 1 by default). delete_cache: a tuple corresponding [frequency, age] both expressed in number of seconds. Every `frequency` seconds, the temporary files created by this Blocks instance will be deleted if more than `age` seconds have passed since the file was created. 
For example, setting this to (86400, 86400) will delete temporary files every day. The cache will be deleted entirely when the server restarts. If None, no cache deletion will occur. show_progress: how to show the progress animation while event is running: "full" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, "minimal" only shows the runtime display, "hidden" shows no progress animation at all fill_height: if True, the chat interface will expand to the height of window. fill_width: Whether to horizontally expand to fill container fully. If False, centers and constrains app to a maximum width. api_name: the name of the API endpoint to use for the chat interface. Defaults to "chat". Set to False to disable the API endpoint. save_history: if True, will save the chat history to the browser's local storage and display previous conversations in a side panel. """ super().__init__( analytics_enabled=analytics_enabled, mode="chat_interface", title=title or "Gradio", theme=theme, css=css, css_paths=css_paths, js=js, head=head, head_paths=head_paths, fill_height=fill_height, fill_width=fill_width, delete_cache=delete_cache, ) self.api_name = api_name self.type = type self.multimodal = multimodal self.concurrency_limit = concurrency_limit if isinstance(fn, ChatInterface): self.fn = fn.fn else: self.fn = fn self.is_async = inspect.iscoroutinefunction( self.fn ) or inspect.isasyncgenfunction(self.fn) self.is_generator = inspect.isgeneratorfunction( self.fn ) or inspect.isasyncgenfunction(self.fn) self.provided_chatbot = chatbot is not None self.examples = examples self.examples_messages = self._setup_example_messages( examples, example_labels, example_icons ) self.run_examples_on_click = run_examples_on_click self.cache_examples = cache_examples self.cache_mode = cache_mode self.editable = editable self.fill_height = fill_height self.autoscroll = autoscroll self.autofocus = autofocus self.title = title self.description = description self.show_progress = show_progress if save_history and not type == "messages": raise ValueError("save_history is only supported for type='messages'") self.save_history = save_history self.additional_inputs = [ get_component_instance(i) for i in utils.none_or_singleton_to_list(additional_inputs) ] self.additional_outputs = utils.none_or_singleton_to_list(additional_outputs) if additional_inputs_accordion is None: self.additional_inputs_accordion_params = { "label": "Additional Inputs", "open": False, } elif isinstance(additional_inputs_accordion, str): self.additional_inputs_accordion_params = { "label": additional_inputs_accordion } elif isinstance(additional_inputs_accordion, Accordion): self.additional_inputs_accordion_params = ( additional_inputs_accordion.recover_kwargs( additional_inputs_accordion.get_config() ) ) else: raise ValueError( f"The `additional_inputs_accordion` parameter must be a string or gr.Accordion, not {builtins.type(additional_inputs_accordion)}" ) self._additional_inputs_in_examples = False if self.additional_inputs and self.examples is not None: for example in self.examples: if not isinstance(example, list): raise ValueError( "Examples must be a list of lists when additional inputs are provided." 
) for idx, example_for_input in enumerate(example): if example_for_input is not None and idx > 0: self._additional_inputs_in_examples = True break if self._additional_inputs_in_examples: break if flagging_mode is None: flagging_mode = os.getenv("GRADIO_CHAT_FLAGGING_MODE", "never") # type: ignore if flagging_mode in ["manual", "never"]: self.flagging_mode = flagging_mode else: raise ValueError( "Invalid value for `flagging_mode` parameter." "Must be: 'manual' or 'never'." ) self.flagging_options = flagging_options self.flagging_dir = flagging_dir with self: self.saved_conversations = BrowserState( [], storage_key=f"_saved_conversations_{self._id}" ) self.conversation_id = State(None) self.saved_input = State() # Stores the most recent user message self.null_component = State() # Used to discard unneeded values with Column(): self._render_header() if self.save_history: with Row(scale=1): self._render_history_area() with Column(scale=6): self._render_chatbot_area( chatbot, textbox, submit_btn, stop_btn ) self._render_footer() else: self._render_chatbot_area(chatbot, textbox, submit_btn, stop_btn) self._render_footer() self._setup_events()
Parameters: fn: the function to wrap the chat interface around. Normally (assuming `type` is set to "messages"), the function should accept two parameters: a `str` representing the input message and `list` of openai-style dictionaries: {"role": "user" | "assistant", "content": `str` | {"path": `str`} | `gr.Component`} representing the chat history. The function should return/yield a `str` (for a simple message), a supported Gradio component (e.g. gr.Image to return an image), a `dict` (for a complete openai-style message response), or a `list` of such messages. multimodal: if True, the chat interface will use a `gr.MultimodalTextbox` component for the input, which allows for the uploading of multimedia files. If False, the chat interface will use a gr.Textbox component for the input. If this is True, the first argument of `fn` should accept not a `str` message but a `dict` message with keys "text" and "files" type: The format of the messages passed into the chat history parameter of `fn`. If "messages", passes the history as a list of dictionaries with openai-style "role" and "content" keys. The "content" key's value should be one of the following - (1) strings in valid Markdown (2) a dictionary with a "path" key and value corresponding to the file to display or (3) an instance of a Gradio component: at the moment gr.Image, gr.Plot, gr.Video, gr.Gallery, gr.Audio, and gr.HTML are supported. The "role" key should be one of 'user' or 'assistant'. Any other roles will not be displayed in the output. If this parameter is 'tuples' (deprecated), passes the chat history as a `list[list[str | None | tuple]]`, i.e. a list of lists. The inner list should have 2 elements: the user message and the response message. chatbot: an instance of the gr.Chatbot component to use for the chat interface, if you would like to customize the chatbot properties. If not provided, a default gr.Chatbot component will be created. textbox: an instance of the gr.Textbox or gr.MultimodalTextbox component to use for the chat interface, if you would like to customize the textbox properties. If not provided, a default gr.Textbox or gr.MultimodalTextbox component will be created. editable: if True, users can edit past messages to regenerate responses. additional_inputs: an instance or list of instances of gradio components (or their string shortcuts) to use as additional inputs to the chatbot. If the components are not already rendered in a surrounding Blocks, then the components will be displayed under the chatbot, in an accordion. The values of these components will be passed into `fn` as arguments in order after the chat history. additional_inputs_accordion: if a string is provided, this is the label of the `gr.Accordion` to use to contain additional inputs. A `gr.Accordion` object can be provided as well to configure other properties of the container holding the additional inputs. Defaults to a `gr.Accordion(label="Additional Inputs", open=False)`. This parameter is only used if `additional_inputs` is provided. additional_outputs: an instance or list of instances of gradio components to use as additional outputs from the chat function. These must be components that are already defined in the same Blocks scope. If provided, the chat function should return additional values for these components. See $demo/chatinterface_artifacts. examples: sample inputs for the function; if provided, appear within the chatbot and can be clicked to populate the chatbot input. 
Should be a list of strings representing text-only examples, or a list of dictionaries (with keys `text` and `files`) representing multimodal examples. If `additional_inputs` are provided, the examples must be a list of lists, where the first element of each inner list is the string or dictionary example message and the remaining elements are the example values for the additional inputs -- in this case, the examples will appear under the chatbot. example_labels: labels for the examples, to be displayed instead of the examples themselves. If provided, should be a list of strings with the same length as the examples list. Only applies when examples are displayed within the chatbot (i.e. when `additional_inputs` is not provided). example_icons: icons for the examples, to be displayed above the examples. If provided, should be a list of string URLs or local paths with the same length as the examples list. Only applies when examples are displayed within the chatbot (i.e. when `additional_inputs` is not provided). cache_examples: if True, caches examples in the server for fast runtime in examples. The default option in HuggingFace Spaces is True. The default option elsewhere is False. cache_mode: if "eager", all examples are cached at app launch. If "lazy", examples are cached for all users after the first use by any user of the app. If None, will use the GRADIO_CACHE_MODE environment variable if defined, or default to "eager". run_examples_on_click: if True, clicking on an example will run the example through the chatbot fn and the response will be displayed in the chatbot. If False, clicking on an example will only populate the chatbot input with the example message. Has no effect if `cache_examples` is True title: a title for the interface; if provided, appears above chatbot in large font. Also used as the tab title when opened in a browser window. description: a description for the interface; if provided, appears above the chatbot and beneath the title in regular font. Accepts Markdown and HTML content. theme: a Theme object or a string representing a theme. If a string, will look for a built-in theme with that name (e.g. "soft" or "default"), or will attempt to load a theme from the Hugging Face Hub (e.g. "gradio/monochrome"). If None, will use the Default theme. flagging_mode: one of "never", "manual". If "never", users will not see a button to flag an input and output. If "manual", users will see a button to flag. flagging_options: a list of strings representing the options that users can choose from when flagging a message. Defaults to ["Like", "Dislike"]. These two case-sensitive strings will render as "thumbs up" and "thumbs down" icon respectively next to each bot message, but any other strings appear under a separate flag icon. flagging_dir: path to the the directory where flagged data is stored. If the directory does not exist, it will be created. css: Custom css as a code string. This css will be included in the demo webpage. css_paths: Custom css as a pathlib.Path to a css file or a list of such paths. This css files will be read, concatenated, and included in the demo webpage. If the `css` parameter is also set, the css from `css` will be included first. js: Custom js as a code string. The custom js should be in the form of a single js function. This function will automatically be executed when the page loads. For more flexibility, use the head parameter to insert js inside <script> tags. head: Custom html code to insert into the head of the demo webpage. 
This can be used to add custom meta tags, multiple scripts, stylesheets, etc. to the page. head_paths: Custom html code as a pathlib.Path to a html file or a list of such paths. This html files will be read, concatenated, and included in the head of the demo webpage. If the `head` parameter is also set, the html from `head` will be included first. analytics_enabled: whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable if defined, or default to True. autofocus: if True, autofocuses to the textbox when the page loads. autoscroll: If True, will automatically scroll to the bottom of the chatbot when a new message appears, unless the user scrolls up. If False, will not scroll to the bottom of the chatbot automatically. submit_btn: If True, will show a submit button with a submit icon within the textbox. If a string, will use that string as the submit button text in place of the icon. If False, will not show a submit button. stop_btn: If True, will show a button with a stop icon during generator executions, to stop generating. If a string, will use that string as the submit button text in place of the stop icon. If False, will not show a stop button. concurrency_limit: if set, this is the maximum number of chatbot submissions that can be running simultaneously. Can be set to None to mean no limit (any number of chatbot submissions can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `.queue()`, which is 1 by default). delete_cache: a tuple corresponding [frequency, age] both expressed in number of seconds. Every `frequency` seconds, the temporary files created by this Blocks instance will be deleted if more than `age` seconds have passed since the file was created. For example, setting this to (86400, 86400) will delete temporary files every day. The cache will be deleted entirely when the server restarts. If None, no cache deletion will occur. show_progress: how to show the progress animation while event is running: "full" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, "minimal" only shows the runtime display, "hidden" shows no progress animation at all fill_height: if True, the chat interface will expand to the height of window. fill_width: Whether to horizontally expand to fill container fully. If False, centers and constrains app to a maximum width. api_name: the name of the API endpoint to use for the chat interface. Defaults to "chat". Set to False to disable the API endpoint. save_history: if True, will save the chat history to the browser's local storage and display previous conversations in a side panel.
__init__
python
gradio-app/gradio
gradio/chat_interface.py
https://github.com/gradio-app/gradio/blob/master/gradio/chat_interface.py
Apache-2.0
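To see how the constructor parameters in this record fit together, here is a minimal, hedged usage sketch of gr.ChatInterface in messages mode; the echo function and its behavior are illustrative assumptions, not part of the record above.

import gradio as gr

def echo(message, history):
    # With type="messages", history arrives as a list of openai-style
    # {"role": ..., "content": ...} dictionaries.
    return f"You said: {message}"

demo = gr.ChatInterface(fn=echo, type="messages", title="Echo Bot")
# demo.launch()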
def _generate_chat_title(self, conversation: list[MessageDict]) -> str: """ Generate a title for a conversation by taking the first user message that is a string and truncating it to 40 characters. If files are present, add a 📎 to the title. """ title = "" for message in conversation: if message["role"] == "user": if isinstance(message["content"], str): title += message["content"] break else: title += "📎 " if len(title) > 40: title = title[:40] + "..." return title or "Conversation"
Generate a title for a conversation by taking the first user message that is a string and truncating it to 40 characters. If files are present, add a 📎 to the title.
_generate_chat_title
python
gradio-app/gradio
gradio/chat_interface.py
https://github.com/gradio-app/gradio/blob/master/gradio/chat_interface.py
Apache-2.0
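As an illustration, the truncation behavior described above can be reproduced with a standalone sketch; the helper name and the sample conversation are hypothetical.

def generate_chat_title(conversation):
    # First string user message becomes the title; file-only turns add a paperclip.
    title = ""
    for message in conversation:
        if message["role"] == "user":
            if isinstance(message["content"], str):
                title += message["content"]
                break
            else:
                title += "📎 "
    if len(title) > 40:
        title = title[:40] + "..."
    return title or "Conversation"

print(generate_chat_title([{"role": "user", "content": "Summarize the quarterly sales report for me"}]))
# prints the message truncated to 40 characters, followed by "..."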
def _message_as_message_dict( self, message: MessageDict | Message | str | Component | MultimodalPostprocess | list, role: Literal["user", "assistant"], ) -> list[MessageDict]: """ Converts a user message, example message, or response from the chat function to a list of MessageDict objects that can be appended to the chat history. """ message_dicts = [] if not isinstance(message, list): message = [message] for msg in message: if isinstance(msg, Message): message_dicts.append(msg.model_dump()) elif isinstance(msg, ChatMessage): msg.role = role message_dicts.append( dataclasses.asdict(msg, dict_factory=utils.dict_factory) ) elif isinstance(msg, (str, Component)): message_dicts.append({"role": role, "content": msg}) elif ( isinstance(msg, dict) and "content" in msg ): # in MessageDict format already msg["role"] = role message_dicts.append(msg) else: # in MultimodalPostprocess format for x in msg.get("files", []): if isinstance(x, dict): x = x.get("path") message_dicts.append({"role": role, "content": (x,)}) if msg["text"] is None or not isinstance(msg["text"], str): pass else: message_dicts.append({"role": role, "content": msg["text"]}) return message_dicts
Converts a user message, example message, or response from the chat function to a list of MessageDict objects that can be appended to the chat history.
_message_as_message_dict
python
gradio-app/gradio
gradio/chat_interface.py
https://github.com/gradio-app/gradio/blob/master/gradio/chat_interface.py
Apache-2.0
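The multimodal branch of that conversion can be illustrated with a small, self-contained sketch; the function name and payload below are assumptions for demonstration only.

def to_message_dicts(msg, role):
    # A {"text": ..., "files": [...]} payload becomes one dict per file
    # (content as a one-element tuple) plus one dict for the text, if any.
    out = []
    for f in msg.get("files", []):
        path = f["path"] if isinstance(f, dict) else f
        out.append({"role": role, "content": (path,)})
    if isinstance(msg.get("text"), str):
        out.append({"role": role, "content": msg["text"]})
    return out

print(to_message_dicts({"text": "look at this", "files": ["cat.png"]}, "user"))
# [{'role': 'user', 'content': ('cat.png',)}, {'role': 'user', 'content': 'look at this'}]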
def option_clicked( self, history: list[MessageDict], option: SelectData ) -> tuple[TupleFormat | list[MessageDict], str | MultimodalPostprocess]: """ When an option is clicked, the chat history is appended with the option value. The saved input value is also set to option value. Note that event can only be called if self.type is "messages" since options are only available for this chatbot type. """ history.append({"role": "user", "content": option.value}) return history, option.value
When an option is clicked, the chat history is appended with the option value. The saved input value is also set to the option value. Note that this event can only be called if self.type is "messages", since options are only available for that chatbot type.
option_clicked
python
gradio-app/gradio
gradio/chat_interface.py
https://github.com/gradio-app/gradio/blob/master/gradio/chat_interface.py
Apache-2.0
def _flatten_example_files(self, example: SelectData): """ Returns an example with the files flattened to just the file path. Also ensures that the `files` key is always present in the example. """ example.value["files"] = [f["path"] for f in example.value.get("files", [])] return example
Returns an example with the files flattened to just the file path. Also ensures that the `files` key is always present in the example.
_flatten_example_files
python
gradio-app/gradio
gradio/chat_interface.py
https://github.com/gradio-app/gradio/blob/master/gradio/chat_interface.py
Apache-2.0
def _pop_last_user_message( self, history: list[MessageDict] | TupleFormat, ) -> tuple[list[MessageDict] | TupleFormat, str | MultimodalPostprocess]: """ Removes the message (or set of messages) that the user last sent from the chat history and returns them. If self.multimodal is True, returns a MultimodalPostprocess (dict) object with text and files. If self.multimodal is False, returns just the message text as a string. """ if not history: return history, "" if not self.multimodal else {"text": "", "files": []} if self.type == "tuples": history = self._tuples_to_messages(history) # type: ignore i = len(history) - 1 while i >= 0 and history[i]["role"] == "assistant": # type: ignore i -= 1 while i >= 0 and history[i]["role"] == "user": # type: ignore i -= 1 last_messages = history[i + 1 :] last_user_message = "" files = [] for msg in last_messages: assert isinstance(msg, dict) # noqa: S101 if msg["role"] == "user": content = msg["content"] if isinstance(content, tuple): files.append(content[0]) else: last_user_message = content return_message = ( {"text": last_user_message, "files": files} if self.multimodal else last_user_message ) history_ = history[: i + 1] if self.type == "tuples": history_ = self._messages_to_tuples(history_) # type: ignore return history_, return_message # type: ignore
Removes the message (or set of messages) that the user last sent from the chat history and returns them. If self.multimodal is True, returns a MultimodalPostprocess (dict) object with text and files. If self.multimodal is False, returns just the message text as a string.
_pop_last_user_message
python
gradio-app/gradio
gradio/chat_interface.py
https://github.com/gradio-app/gradio/blob/master/gradio/chat_interface.py
Apache-2.0
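The index walk in that method is easiest to follow on concrete data; the following standalone sketch assumes a text-only, messages-format history.

history = [
    {"role": "user", "content": "hi"},
    {"role": "assistant", "content": "hello!"},
    {"role": "user", "content": "tell me a joke"},
    {"role": "assistant", "content": "why did the chicken..."},
]
# Walk back over the trailing assistant messages, then over the user turn before them.
i = len(history) - 1
while i >= 0 and history[i]["role"] == "assistant":
    i -= 1
while i >= 0 and history[i]["role"] == "user":
    i -= 1
popped, history = history[i + 1:], history[: i + 1]
print(popped)   # the last user turn and the responses that followed it
print(history)  # the first exchange only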
def constructor_args(self) -> dict[str, Any]: """Get the arguments passed to the component's initializer. Only set classes whose metaclass is ComponentMeta """ # the _constructor_args list is appended based on the mro of the class # so the first entry is for the bottom of the hierarchy return self._constructor_args[0] if self._constructor_args else {}
Get the arguments passed to the component's initializer. Only set for classes whose metaclass is ComponentMeta.
constructor_args
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
def render(self): """ Adds self into appropriate BlockContext """ root_context = get_blocks_context() render_context = get_render_context() self.rendered_in = LocalContext.renderable.get() if root_context is not None and self._id in root_context.blocks: raise DuplicateBlockError( f"A block with id: {self._id} has already been rendered in the current Blocks." ) if render_context is not None: if root_context: self.page = root_context.root_block.current_page render_context.add(self) if root_context is not None: root_context.blocks[self._id] = self self.is_rendered = True if isinstance(self, components.Component): root_context.root_block.temp_file_sets.append(self.temp_files) return self
Adds self into appropriate BlockContext
render
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
def unrender(self): """ Removes self from BlockContext if it has been rendered (otherwise does nothing). Removes self from the layout and collection of blocks, but does not delete any event triggers. """ root_context = get_blocks_context() render_context = get_render_context() if render_context is not None: try: render_context.children.remove(self) except ValueError: pass if root_context is not None: try: del root_context.blocks[self._id] self.is_rendered = False except KeyError: pass return self
Removes self from BlockContext if it has been rendered (otherwise does nothing). Removes self from the layout and collection of blocks, but does not delete any event triggers.
unrender
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
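A public pattern that exercises render() is constructing a component with render=False and placing it into a layout later; a minimal sketch:

import gradio as gr

textbox = gr.Textbox(render=False)  # created, but not yet part of any layout

with gr.Blocks() as demo:
    gr.Markdown("## Demo")
    textbox.render()  # adds the pre-built component at this point in the layout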
def get_block_name(self) -> str: """ Gets block's class name. If it is template component it gets the parent's class name. This is used to identify the Svelte file to use in the frontend. Override this method if a component should use a different Svelte file than the default naming convention. """ return ( self.__class__.__base__.__name__.lower() # type: ignore if hasattr(self, "is_template") else self.__class__.__name__.lower() )
Gets block's class name. If it is template component it gets the parent's class name. This is used to identify the Svelte file to use in the frontend. Override this method if a component should use a different Svelte file than the default naming convention.
get_block_name
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
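For example, template components report their parent's name, which is what ties them to the parent's frontend code. The sketch below assumes gr.TextArea is still defined as a Textbox template.

import gradio as gr

print(gr.Textbox().get_block_name())   # "textbox"
print(gr.TextArea().get_block_name())  # also "textbox", via the is_template fallback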
def get_block_class(self) -> str: """ Gets block's class name. If it is template component it gets the parent's class name. Very similar to the get_block_name method, but this method is used to reconstruct a Gradio app that is loaded from a Space using gr.load(). This should generally NOT be overridden. """ return ( self.__class__.__base__.__name__.lower() # type: ignore if hasattr(self, "is_template") else self.__class__.__name__.lower() )
Gets block's class name. If it is template component it gets the parent's class name. Very similar to the get_block_name method, but this method is used to reconstruct a Gradio app that is loaded from a Space using gr.load(). This should generally NOT be overridden.
get_block_class
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
def recover_kwargs( cls, props: dict[str, Any], additional_keys: list[str] | None = None ): """ Recovers kwargs from a dict of props. """ additional_keys = additional_keys or [] signature = inspect.signature(cls.__init__) kwargs = {} for parameter in signature.parameters.values(): if parameter.name in props and parameter.name not in additional_keys: kwargs[parameter.name] = props[parameter.name] return kwargs
Recovers kwargs from a dict of props.
recover_kwargs
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
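Assuming recover_kwargs is exposed as a classmethod on components (as its cls parameter suggests), the filtering it performs looks like this:

import gradio as gr

props = {"label": "Name", "lines": 2, "not_a_constructor_kwarg": 123}
print(gr.Textbox.recover_kwargs(props))
# {'label': 'Name', 'lines': 2} -- keys that are not __init__ parameters are dropped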
async def async_move_resource_to_block_cache( self, url_or_file_path: str | Path | None ) -> str | None: """Moves a file or downloads a file from a url to a block's cache directory, adds to to the block's temp_files, and returns the path to the file in cache. This ensures that the file is accessible to the Block and can be served to users. This async version of the function is used when this is being called within a FastAPI route, as this is not blocking. """ if url_or_file_path is None: return None if isinstance(url_or_file_path, Path): url_or_file_path = str(url_or_file_path) if client_utils.is_http_url_like(url_or_file_path): temp_file_path = await processing_utils.async_ssrf_protected_download( url_or_file_path, cache_dir=self.GRADIO_CACHE ) self.temp_files.add(temp_file_path) else: url_or_file_path = str(utils.abspath(url_or_file_path)) if not utils.is_in_or_equal(url_or_file_path, self.GRADIO_CACHE): try: temp_file_path = processing_utils.save_file_to_cache( url_or_file_path, cache_dir=self.GRADIO_CACHE ) except FileNotFoundError: # This can happen if when using gr.load() and the file is on a remote Space # but the file is not the `value` of the component. For example, if the file # is the `avatar_image` of the `Chatbot` component. In this case, we skip # copying the file to the cache and just use the remote file path. return url_or_file_path else: temp_file_path = url_or_file_path self.temp_files.add(temp_file_path) return temp_file_path
Moves a file or downloads a file from a URL to a block's cache directory, adds it to the block's temp_files, and returns the path to the file in cache. This ensures that the file is accessible to the Block and can be served to users. This async version of the function is used when it is called within a FastAPI route, as it is non-blocking.
async_move_resource_to_block_cache
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
def move_resource_to_block_cache( self, url_or_file_path: str | Path | None ) -> str | None: """Moves a file or downloads a file from a url to a block's cache directory, adds to to the block's temp_files, and returns the path to the file in cache. This ensures that the file is accessible to the Block and can be served to users. This sync version of the function is used when this is being called outside of a FastAPI route, e.g. when examples are being cached. """ if url_or_file_path is None: return None if isinstance(url_or_file_path, Path): url_or_file_path = str(url_or_file_path) if client_utils.is_http_url_like(url_or_file_path): temp_file_path = processing_utils.save_url_to_cache( url_or_file_path, cache_dir=self.GRADIO_CACHE ) self.temp_files.add(temp_file_path) else: url_or_file_path = str(utils.abspath(url_or_file_path)) if not utils.is_in_or_equal(url_or_file_path, self.GRADIO_CACHE): try: temp_file_path = processing_utils.save_file_to_cache( url_or_file_path, cache_dir=self.GRADIO_CACHE ) except FileNotFoundError: # This can happen if when using gr.load() and the file is on a remote Space # but the file is not the `value` of the component. For example, if the file # is the `avatar_image` of the `Chatbot` component. In this case, we skip # copying the file to the cache and just use the remote file path. return url_or_file_path else: temp_file_path = url_or_file_path self.temp_files.add(temp_file_path) return temp_file_path
Moves a file or downloads a file from a URL to a block's cache directory, adds it to the block's temp_files, and returns the path to the file in cache. This ensures that the file is accessible to the Block and can be served to users. This sync version of the function is used when it is called outside of a FastAPI route, e.g. when examples are being cached.
move_resource_to_block_cache
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
def serve_static_file( self, url_or_file_path: str | Path | dict | None ) -> dict | None: """If a file is a local file, moves it to the block's cache directory and returns a FileData-type dictionary corresponding to the file. If the file is a URL, returns a FileData-type dictionary corresponding to the URL. This ensures that the file is accessible in the frontend and can be served to users. Examples: >>> block.serve_static_file("https://gradio.app/logo.png") -> {"path": "https://gradio.app/logo.png", "url": "https://gradio.app/logo.png"} >>> block.serve_static_file("logo.png") -> {"path": "logo.png", "url": "/file=logo.png"} >>> block.serve_static_file({"path": "logo.png", "url": "/file=logo.png"}) -> {"path": "logo.png", "url": "/file=logo.png"} """ if url_or_file_path is None: return None if isinstance(url_or_file_path, dict): return url_or_file_path if isinstance(url_or_file_path, Path): url_or_file_path = str(url_or_file_path) if client_utils.is_http_url_like(url_or_file_path): return FileData(path=url_or_file_path, url=url_or_file_path).model_dump() else: data = {"path": url_or_file_path, "meta": {"_type": "gradio.FileData"}} try: return processing_utils.move_files_to_cache(data, self) except AttributeError: # Can be raised if this function is called before the Block is fully initialized. return data
If a file is a local file, moves it to the block's cache directory and returns a FileData-type dictionary corresponding to the file. If the file is a URL, returns a FileData-type dictionary corresponding to the URL. This ensures that the file is accessible in the frontend and can be served to users. Examples: >>> block.serve_static_file("https://gradio.app/logo.png") -> {"path": "https://gradio.app/logo.png", "url": "https://gradio.app/logo.png"} >>> block.serve_static_file("logo.png") -> {"path": "logo.png", "url": "/file=logo.png"} >>> block.serve_static_file({"path": "logo.png", "url": "/file=logo.png"}) -> {"path": "logo.png", "url": "/file=logo.png"}
serve_static_file
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
def __init__( self, elem_id: str | None = None, elem_classes: list[str] | str | None = None, visible: bool = True, render: bool = True, ): """ Parameters: elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles. visible: If False, this will be hidden but included in the Blocks config file (its visibility can later be updated). render: If False, this will not be included in the Blocks config file at all. """ self.children: list[Block] = [] Block.__init__( self, elem_id=elem_id, elem_classes=elem_classes, visible=visible, render=render, )
Parameters: elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles. elem_classes: An optional string or list of strings that are assigned as the class of this component in the HTML DOM. Can be used for targeting CSS styles. visible: If False, this will be hidden but included in the Blocks config file (its visibility can later be updated). render: If False, this will not be included in the Blocks config file at all.
__init__
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
def postprocess(self, y): """ Any postprocessing needed to be performed on a block context. """ return y
Any postprocessing needed to be performed on a block context.
postprocess
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
def postprocess_update_dict( block: Component | BlockContext, update_dict: dict, postprocess: bool = True ): """ Converts a dictionary of updates into a format that can be sent to the frontend to update the component. E.g. {"value": "2", "visible": True, "invalid_arg": "hello"} Into -> {"__type__": "update", "value": 2.0, "visible": True} Parameters: block: The Block that is being updated with this update dictionary. update_dict: The original update dictionary postprocess: Whether to postprocess the "value" key of the update dictionary. """ value = update_dict.pop("value", components._Keywords.NO_VALUE) update_dict = {k: getattr(block, k) for k in update_dict if hasattr(block, k)} if value is not components._Keywords.NO_VALUE: if postprocess: update_dict["value"] = block.postprocess(value) if isinstance(update_dict["value"], (GradioModel, GradioRootModel)): update_dict["value"] = update_dict["value"].model_dump() else: update_dict["value"] = value update_dict["__type__"] = "update" return update_dict
Converts a dictionary of updates into a format that can be sent to the frontend to update the component. E.g. {"value": "2", "visible": True, "invalid_arg": "hello"} Into -> {"__type__": "update", "value": 2.0, "visible": True} Parameters: block: The Block that is being updated with this update dictionary. update_dict: The original update dictionary postprocess: Whether to postprocess the "value" key of the update dictionary.
postprocess_update_dict
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
def convert_component_dict_to_list( outputs_ids: list[int], predictions: dict ) -> list | dict: """ Converts a dictionary of component updates into a list of updates in the order of the outputs_ids and including every output component. Leaves other types of dictionaries unchanged. E.g. {"textbox": "hello", "number": {"__type__": "generic_update", "value": "2"}} Into -> ["hello", {"__type__": "generic_update"}, {"__type__": "generic_update", "value": "2"}] """ keys_are_blocks = [isinstance(key, Block) for key in predictions] if all(keys_are_blocks): reordered_predictions = [skip() for _ in outputs_ids] for component, value in predictions.items(): if component._id not in outputs_ids: raise ValueError( f"Returned component {component} not specified as output of function." ) output_index = outputs_ids.index(component._id) reordered_predictions[output_index] = value predictions = utils.resolve_singleton(reordered_predictions) elif any(keys_are_blocks): raise ValueError( "Returned dictionary included some keys as Components. Either all keys must be Components to assign Component values, or return a List of values to assign output values in order." ) return predictions
Converts a dictionary of component updates into a list of updates in the order of the outputs_ids and including every output component. Leaves other types of dictionaries unchanged. E.g. {"textbox": "hello", "number": {"__type__": "generic_update", "value": "2"}} Into -> ["hello", {"__type__": "generic_update"}, {"__type__": "generic_update", "value": "2"}]
convert_component_dict_to_list
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
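This conversion is what lets event handlers return a dict keyed by output components instead of a positional list; a minimal public-API sketch:

import gradio as gr

with gr.Blocks() as demo:
    name = gr.Textbox(label="Name")
    greeting = gr.Textbox(label="Greeting")
    length = gr.Number(label="Length")

    def greet(n):
        # Returning a dict keyed by components; outputs missing from the dict are skipped.
        return {greeting: f"Hello, {n}!", length: len(n)}

    name.submit(greet, inputs=name, outputs=[greeting, length])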
def set_event_trigger( self, targets: Sequence[EventListenerMethod], fn: Callable | None, inputs: ( Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None ), outputs: ( Component | BlockContext | Sequence[Component | BlockContext] | Set[Component | BlockContext] | None ), preprocess: bool = True, postprocess: bool = True, scroll_to_output: bool = False, show_progress: Literal["full", "minimal", "hidden"] = "full", show_progress_on: Component | Sequence[Component] | None = None, api_name: str | None | Literal[False] = None, js: str | Literal[True] | None = None, no_target: bool = False, queue: bool = True, batch: bool = False, max_batch_size: int = 4, cancels: list[int] | None = None, collects_event_data: bool | None = None, trigger_after: int | None = None, trigger_only_on_success: bool = False, trigger_mode: Literal["once", "multiple", "always_last"] | None = "once", concurrency_limit: int | None | Literal["default"] = "default", concurrency_id: str | None = None, show_api: bool = True, renderable: Renderable | None = None, is_cancel_function: bool = False, connection: Literal["stream", "sse"] = "sse", time_limit: float | None = None, stream_every: float = 0.5, like_user_message: bool = False, event_specific_args: list[str] | None = None, js_implementation: str | None = None, ) -> tuple[BlockFunction, int]: """ Adds an event to the component's dependencies. Parameters: targets: a list of EventListenerMethod objects that define the event trigger fn: the function to run when the event is triggered inputs: the list of input components whose values will be passed to the function outputs: the list of output components whose values will be updated by the function preprocess: whether to run the preprocess methods of the input components before running the function postprocess: whether to run the postprocess methods of the output components after running the function scroll_to_output: whether to scroll to output of dependency on trigger show_progress: how to show the progress animation while event is running: "full" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, "minimal" only shows the runtime display, "hidden" shows no progress animation at all show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components. api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event. js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components no_target: if True, sets "targets" to [], used for the Blocks.load() event and .then() events queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app. batch: whether this function takes in a batch of inputs max_batch_size: the maximum batch size to send to the function cancels: a list of other events to cancel when this event is triggered. 
For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. collects_event_data: whether to collect event data for this event trigger_after: if set, this event will be triggered after 'trigger_after' function index trigger_only_on_success: if True, this event will only be triggered if the previous event was successful (only applies if `trigger_after` is set) trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete. concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `queue()`, which itself is 1 by default). concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit. show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False. is_cancel_function: whether this event cancels another running event. connection: The connection format, either "sse" or "stream". time_limit: The time limit for the function to run. Parameter only used for the `.stream()` event. stream_every: The latency (in seconds) at which stream chunks are sent to the backend. Defaults to 0.5 seconds. Parameter only used for the `.stream()` event. Returns: dependency information, dependency index """ # Support for singular parameter _targets = [ ( target.block._id if not no_target and target.block else None, target.event_name, ) for target in targets ] if isinstance(inputs, Set): inputs_as_dict = True inputs = sorted(inputs, key=lambda x: x._id) else: inputs_as_dict = False if inputs is None: inputs = [] elif not isinstance(inputs, Sequence): inputs = [inputs] if isinstance(outputs, Set): outputs = sorted(outputs, key=lambda x: x._id) elif outputs is None: outputs = [] elif not isinstance(outputs, Sequence): outputs = [outputs] if show_progress_on and not isinstance(show_progress_on, Sequence): show_progress_on = [show_progress_on] if fn is not None and not cancels: check_function_inputs_match(fn, inputs, inputs_as_dict) if len(_targets) and trigger_mode is None: if _targets[0][1] in ["change", "key_up"]: trigger_mode = "always_last" elif _targets[0][1] in ["stream"]: trigger_mode = "multiple" if trigger_mode is None: trigger_mode = "once" elif trigger_mode not in ["once", "multiple", "always_last"]: raise ValueError( f"Invalid value for parameter `trigger_mode`: {trigger_mode}. 
Please choose from: {['once', 'multiple', 'always_last']}" ) fn_to_analyze = renderable.fn if renderable else fn _, progress_index, event_data_index = ( special_args(fn_to_analyze) if fn_to_analyze else (None, None, None) ) # If api_name is None or empty string, use the function name if api_name is None or isinstance(api_name, str) and api_name.strip() == "": if fn is not None: if not hasattr(fn, "__name__"): if hasattr(fn, "__class__") and hasattr(fn.__class__, "__name__"): name = fn.__class__.__name__ else: name = "unnamed" else: name = fn.__name__ api_name = "".join( [s for s in name if s not in set(string.punctuation) - {"-", "_"}] ) elif js is not None: api_name = "js_fn" show_api = False else: api_name = "unnamed" show_api = False if api_name is not False: api_name = utils.append_unique_suffix( api_name, [ fn.api_name for fn in self.fns.values() if isinstance(fn.api_name, str) ], ) else: show_api = False # The `show_api` parameter is False if: (1) the user explicitly sets it (2) the user sets `api_name` to False # or (3) the user sets `fn` to None (there's no backend function) if collects_event_data is None: collects_event_data = event_data_index is not None rendered_in = LocalContext.renderable.get() if js is True and inputs: raise ValueError( "Cannot create event: events with js=True cannot have inputs." ) block_fn = BlockFunction( fn, inputs, outputs, preprocess, postprocess, _id=self.fn_id, inputs_as_dict=inputs_as_dict, targets=_targets, batch=batch, max_batch_size=max_batch_size, concurrency_limit=concurrency_limit, concurrency_id=concurrency_id, tracks_progress=progress_index is not None, api_name=api_name, js=js, show_progress=show_progress, show_progress_on=show_progress_on, cancels=cancels, collects_event_data=collects_event_data, trigger_after=trigger_after, trigger_only_on_success=trigger_only_on_success, trigger_mode=trigger_mode, queue=queue, scroll_to_output=scroll_to_output, show_api=show_api, renderable=renderable, rendered_in=rendered_in, is_cancel_function=is_cancel_function, connection=connection, time_limit=time_limit, stream_every=stream_every, like_user_message=like_user_message, event_specific_args=event_specific_args, page=self.root_block.current_page, js_implementation=js_implementation, ) self.fns[self.fn_id] = block_fn self.fn_id += 1 return block_fn, block_fn._id
Adds an event to the component's dependencies. Parameters: targets: a list of EventListenerMethod objects that define the event trigger fn: the function to run when the event is triggered inputs: the list of input components whose values will be passed to the function outputs: the list of output components whose values will be updated by the function preprocess: whether to run the preprocess methods of the input components before running the function postprocess: whether to run the postprocess methods of the output components after running the function scroll_to_output: whether to scroll to output of dependency on trigger show_progress: how to show the progress animation while event is running: "full" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, "minimal" only shows the runtime display, "hidden" shows no progress animation at all show_progress_on: Component or list of components to show the progress animation on. If None, will show the progress animation on all of the output components. api_name: defines how the endpoint appears in the API docs. Can be a string, None, or False. If set to a string, the endpoint will be exposed in the API docs with the given name. If None (default), the name of the function will be used as the API endpoint. If False, the endpoint will not be exposed in the API docs and downstream apps (including those that `gr.load` this app) will not be able to use this event. js: Optional frontend js method to run before running 'fn'. Input arguments for js method are values of 'inputs' and 'outputs', return should be a list of values for output components no_target: if True, sets "targets" to [], used for the Blocks.load() event and .then() events queue: If True, will place the request on the queue, if the queue has been enabled. If False, will not put this event on the queue, even if the queue has been enabled. If None, will use the queue setting of the gradio app. batch: whether this function takes in a batch of inputs max_batch_size: the maximum batch size to send to the function cancels: a list of other events to cancel when this event is triggered. For example, setting cancels=[click_event] will cancel the click_event, where click_event is the return value of another components .click method. collects_event_data: whether to collect event data for this event trigger_after: if set, this event will be triggered after 'trigger_after' function index trigger_only_on_success: if True, this event will only be triggered if the previous event was successful (only applies if `trigger_after` is set) trigger_mode: If "once" (default for all events except `.change()`) would not allow any submissions while an event is pending. If set to "multiple", unlimited submissions are allowed while pending, and "always_last" (default for `.change()` and `.key_up()` events) would allow a second submission after the pending event is complete. concurrency_limit: If set, this is the maximum number of this event that can be running simultaneously. Can be set to None to mean no concurrency_limit (any number of this event can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `queue()`, which itself is 1 by default). concurrency_id: If set, this is the id of the concurrency group. Events with the same concurrency_id will be limited by the lowest set concurrency_limit. 
show_api: whether to show this event in the "view API" page of the Gradio app, or in the ".view_api()" method of the Gradio clients. Unlike setting api_name to False, setting show_api to False will still allow downstream apps as well as the Clients to use this event. If fn is None, show_api will automatically be set to False. is_cancel_function: whether this event cancels another running event. connection: The connection format, either "sse" or "stream". time_limit: The time limit for the function to run. Parameter only used for the `.stream()` event. stream_every: The latency (in seconds) at which stream chunks are sent to the backend. Defaults to 0.5 seconds. Parameter only used for the `.stream()` event. Returns: dependency information, dependency index
set_event_trigger
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
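Most of those parameters surface through the public event-listener methods; a hedged sketch of how a few of them are typically passed:

import gradio as gr

with gr.Blocks() as demo:
    box = gr.Textbox()
    out = gr.Textbox()
    run = gr.Button("Run")
    stop = gr.Button("Stop")

    click_event = run.click(
        lambda s: s.upper(),
        inputs=box,
        outputs=out,
        api_name="uppercase",       # endpoint name in the API docs
        concurrency_limit=2,        # at most two of this event running at once
        show_progress="minimal",
    )
    stop.click(None, cancels=[click_event])  # forwarded to the `cancels` parameter above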
def attach_load_events(self, rendered_in: Renderable | None = None): """Add a load event for every component whose initial value requires a function call to set.""" for component in self.blocks.values(): if rendered_in is not None and component.rendered_in != rendered_in: continue if ( isinstance(component, components.Component) and component.load_event_to_attach ): load_fn, triggers, inputs = component.load_event_to_attach has_target = len(triggers) > 0 triggers += [(self.root_block, "load")] # Use set_event_trigger to avoid ambiguity between load class/instance method dep = self.set_event_trigger( [EventListenerMethod(*trigger) for trigger in triggers], load_fn, inputs, component, no_target=not has_target, show_progress="hidden" if has_target else "full", )[0] component.load_event = dep.get_config()
Add a load event for every component whose initial value requires a function call to set.
attach_load_events
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
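A component gets a load_event_to_attach, for example, when its value is a callable; the sketch below shows that public pattern (the lambda is illustrative).

import gradio as gr
import datetime

with gr.Blocks() as demo:
    # A callable value registers a load event, which attach_load_events
    # later wires to the app's "load" trigger.
    gr.Textbox(value=lambda: datetime.datetime.now().isoformat(), label="Loaded at")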
def __init__( self, theme: Theme | str | None = None, analytics_enabled: bool | None = None, mode: str = "blocks", title: str = "Gradio", css: str | None = None, css_paths: str | Path | Sequence[str | Path] | None = None, js: str | Literal[True] | None = None, head: str | None = None, head_paths: str | Path | Sequence[str | Path] | None = None, fill_height: bool = False, fill_width: bool = False, delete_cache: tuple[int, int] | None = None, **kwargs, ): """ Parameters: theme: A Theme object or a string representing a theme. If a string, will look for a built-in theme with that name (e.g. "soft" or "default"), or will attempt to load a theme from the Hugging Face Hub (e.g. "gradio/monochrome"). If None, will use the Default theme. analytics_enabled: Whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable or default to True. mode: A human-friendly name for the kind of Blocks or Interface being created. Used internally for analytics. title: The tab title to display when this is opened in a browser window. css: Custom css as a code string. This css will be included in the demo webpage. css_paths: Custom css as a pathlib.Path to a css file or a list of such paths. This css files will be read, concatenated, and included in the demo webpage. If the `css` parameter is also set, the css from `css` will be included first. js: Custom js as a code string. The custom js should be in the form of a single js function. This function will automatically be executed when the page loads. For more flexibility, use the head parameter to insert js inside <script> tags. head: Custom html code to insert into the head of the demo webpage. This can be used to add custom meta tags, multiple scripts, stylesheets, etc. to the page. head_paths: Custom html code as a pathlib.Path to a html file or a list of such paths. This html files will be read, concatenated, and included in the head of the demo webpage. If the `head` parameter is also set, the html from `head` will be included first. fill_height: Whether to vertically expand top-level child components to the height of the window. If True, expansion occurs when the scale value of the child components >= 1. fill_width: Whether to horizontally expand to fill container fully. If False, centers and constrains app to a maximum width. Only applies if this is the outermost `Blocks` in your Gradio app. delete_cache: A tuple corresponding [frequency, age] both expressed in number of seconds. Every `frequency` seconds, the temporary files created by this Blocks instance will be deleted if more than `age` seconds have passed since the file was created. For example, setting this to (86400, 86400) will delete temporary files every day. The cache will be deleted entirely when the server restarts. If None, no cache deletion will occur. """ self.limiter = None if theme is None: theme = DefaultTheme() elif isinstance(theme, str): if theme.lower() in BUILT_IN_THEMES: theme = BUILT_IN_THEMES[theme.lower()] else: try: theme = Theme.from_hub(theme) except Exception as e: warnings.warn(f"Cannot load {theme}. 
Caught Exception: {str(e)}") theme = DefaultTheme() if not isinstance(theme, Theme): warnings.warn("Theme should be a class loaded from gradio.themes") theme = DefaultTheme() self.theme: Theme = theme self.theme_css = theme._get_theme_css() self.stylesheets = theme._stylesheets theme_hasher = hashlib.sha256() theme_hasher.update(self.theme_css.encode("utf-8")) self.theme_hash = theme_hasher.hexdigest() self.encrypt = False self.share = False self.enable_queue = True self.max_threads = 40 self.pending_streams = defaultdict(dict) self.pending_diff_streams = defaultdict(dict) self.show_error = True self.fill_height = fill_height self.fill_width = fill_width self.delete_cache = delete_cache self.extra_startup_events: list[Callable[..., Coroutine[Any, Any, Any]]] = [] self.css = css or "" css_paths = utils.none_or_singleton_to_list(css_paths) for css_path in css_paths or []: with open(css_path, encoding="utf-8") as css_file: self.css += "\n" + css_file.read() self.js = js or "" self.head = head or "" head_paths = utils.none_or_singleton_to_list(head_paths) for head_path in head_paths or []: with open(head_path, encoding="utf-8") as head_file: self.head += "\n" + head_file.read() self.renderables: list[Renderable] = [] self.state_holder: StateHolder self.custom_mount_path: str | None = None self.pwa = False # For analytics_enabled and allow_flagging: (1) first check for # parameter, (2) check for env variable, (3) default to True/"manual" self.analytics_enabled = ( analytics_enabled if analytics_enabled is not None else analytics.analytics_enabled() ) if self.analytics_enabled: if not wasm_utils.IS_WASM: t = threading.Thread(target=analytics.version_check) t.start() else: os.environ["HF_HUB_DISABLE_TELEMETRY"] = "True" self.enable_monitoring: bool | None = None self.default_config = BlocksConfig(self) super().__init__(render=False, **kwargs) self.mode = mode self.is_running = False self.local_url = None self.share_url = None self.width = None self.height = None self.api_open = utils.get_space() is None self.space_id = utils.get_space() self.favicon_path = None self.auth = None self.dev_mode = bool(os.getenv("GRADIO_WATCH_DIRS", "")) self.app_id = random.getrandbits(64) self.upload_file_set = set() self.temp_file_sets = [self.upload_file_set] self.title = title self.show_api = not wasm_utils.IS_WASM # Only used when an Interface is loaded from a config self.predict = None self.input_components = None self.output_components = None self.__name__ = None # type: ignore self.api_mode = None self.progress_tracking = None self.ssl_verify = True self.allowed_paths = [] self.blocked_paths = [] self.root_path = os.environ.get("GRADIO_ROOT_PATH", "") self.proxy_urls = set() self.pages: list[tuple[str, str]] = [("", "Home")] self.current_page = "" if self.analytics_enabled: is_custom_theme = not any( self.theme.to_dict() == built_in_theme.to_dict() for built_in_theme in BUILT_IN_THEMES.values() ) data = { "mode": self.mode, "custom_css": self.css is not None, "theme": self.theme.name, "is_custom_theme": is_custom_theme, "version": get_package_version(), } analytics.initiated_analytics(data) self.queue()
Parameters: theme: A Theme object or a string representing a theme. If a string, will look for a built-in theme with that name (e.g. "soft" or "default"), or will attempt to load a theme from the Hugging Face Hub (e.g. "gradio/monochrome"). If None, will use the Default theme. analytics_enabled: Whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable or default to True. mode: A human-friendly name for the kind of Blocks or Interface being created. Used internally for analytics. title: The tab title to display when this is opened in a browser window. css: Custom css as a code string. This css will be included in the demo webpage. css_paths: Custom css as a pathlib.Path to a css file or a list of such paths. This css files will be read, concatenated, and included in the demo webpage. If the `css` parameter is also set, the css from `css` will be included first. js: Custom js as a code string. The custom js should be in the form of a single js function. This function will automatically be executed when the page loads. For more flexibility, use the head parameter to insert js inside <script> tags. head: Custom html code to insert into the head of the demo webpage. This can be used to add custom meta tags, multiple scripts, stylesheets, etc. to the page. head_paths: Custom html code as a pathlib.Path to a html file or a list of such paths. This html files will be read, concatenated, and included in the head of the demo webpage. If the `head` parameter is also set, the html from `head` will be included first. fill_height: Whether to vertically expand top-level child components to the height of the window. If True, expansion occurs when the scale value of the child components >= 1. fill_width: Whether to horizontally expand to fill container fully. If False, centers and constrains app to a maximum width. Only applies if this is the outermost `Blocks` in your Gradio app. delete_cache: A tuple corresponding [frequency, age] both expressed in number of seconds. Every `frequency` seconds, the temporary files created by this Blocks instance will be deleted if more than `age` seconds have passed since the file was created. For example, setting this to (86400, 86400) will delete temporary files every day. The cache will be deleted entirely when the server restarts. If None, no cache deletion will occur.
__init__
python
gradio-app/gradio
gradio/blocks.py
https://github.com/gradio-app/gradio/blob/master/gradio/blocks.py
Apache-2.0
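Finally, a minimal Blocks app exercising a few of the constructor options documented above; the theme name and title are arbitrary choices.

import gradio as gr

with gr.Blocks(theme="soft", title="Demo", fill_height=True) as demo:
    gr.Markdown("Hello from a themed Blocks app")

# demo.launch()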