Columns (with value ranges):
index: int64, 0 to 731k
package: string, 2 to 98 characters
name: string, 1 to 76 characters
docstring: string, 0 to 281k characters
code: string, 4 to 1.07M characters
signature: string, 2 to 42.8k characters
3,624
mcwiki.page
from_markup
null
def from_markup(markup: str) -> "PageSection": html = BeautifulSoup(markup, "html.parser") content = html.find("div", "mw-parser-output").extract() return PageSection(content)
(markup: str) -> mcwiki.page.PageSection
3,625
mcwiki.page
load
null
def load(page: str) -> "PageSection": if not page.startswith("http"): page = BASE_URL + re.sub(r"[\s_]+", "_", page) return from_markup(requests.get(page).text)
(page: str) -> mcwiki.page.PageSection
3,626
mcwiki.page
load_file
null
def load_file(filepath: Union[str, Path]) -> "PageSection": return from_markup(Path(filepath).read_text())
(filepath: Union[str, pathlib.Path]) -> mcwiki.page.PageSection
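A minimal usage sketch for the three mcwiki.page loaders above; the page title, file name, and markup string are illustrative, and it is assumed the functions are importable from mcwiki.page as the package column suggests:

from mcwiki.page import from_markup, load, load_file

# Fetch a page by title: spaces are normalized to underscores and appended to BASE_URL.
section = load("Data Pack")

# Parse a locally saved copy of a page instead of hitting the network.
local_section = load_file("data_pack.html")

# Or hand raw markup straight to the parser; it extracts the "mw-parser-output" div.
raw_section = from_markup('<div class="mw-parser-output"><p>hello</p></div>')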
3,631
meshio._mesh
CellBlock
null
class CellBlock: def __init__( self, cell_type: str, data: list | np.ndarray, tags: list[str] | None = None, ): self.type = cell_type self.data = data if cell_type.startswith("polyhedron"): self.dim = 3 else: self.data = np.asarray(self.data) self.dim = topological_dimension[cell_type] self.tags = [] if tags is None else tags def __repr__(self): items = [ "meshio CellBlock", f"type: {self.type}", f"num cells: {len(self.data)}", f"tags: {self.tags}", ] return "<" + ", ".join(items) + ">" def __len__(self): return len(self.data)
(cell_type: 'str', data: 'list | np.ndarray', tags: 'list[str] | None' = None)
3,632
meshio._mesh
__init__
null
def __init__( self, cell_type: str, data: list | np.ndarray, tags: list[str] | None = None, ): self.type = cell_type self.data = data if cell_type.startswith("polyhedron"): self.dim = 3 else: self.data = np.asarray(self.data) self.dim = topological_dimension[cell_type] self.tags = [] if tags is None else tags
(self, cell_type: str, data: list | numpy.ndarray, tags: Optional[list[str]] = None)
3,633
meshio._mesh
__len__
null
def __len__(self): return len(self.data)
(self)
3,634
meshio._mesh
__repr__
null
def __repr__(self): items = [ "meshio CellBlock", f"type: {self.type}", f"num cells: {len(self.data)}", f"tags: {self.tags}", ] return "<" + ", ".join(items) + ">"
(self)
3,635
meshio._mesh
Mesh
null
class Mesh: def __init__( self, points: ArrayLike, cells: dict[str, ArrayLike] | list[tuple[str, ArrayLike] | CellBlock], point_data: dict[str, ArrayLike] | None = None, cell_data: dict[str, list[ArrayLike]] | None = None, field_data=None, point_sets: dict[str, ArrayLike] | None = None, cell_sets: dict[str, list[ArrayLike]] | None = None, gmsh_periodic=None, info=None, ): self.points = np.asarray(points) if isinstance(cells, dict): # Let's not deprecate this for now. # warn( # "cell dictionaries are deprecated, use list of tuples, e.g., " # '[("triangle", [[0, 1, 2], ...])]', # DeprecationWarning, # ) # old dict, deprecated # # convert dict to list of tuples cells = list(cells.items()) self.cells = [] for cell_block in cells: if isinstance(cell_block, tuple): cell_type, data = cell_block cell_block = CellBlock( cell_type, # polyhedron data cannot be converted to numpy arrays # because the sublists don't all have the same length data if cell_type.startswith("polyhedron") else np.asarray(data), ) self.cells.append(cell_block) self.point_data = {} if point_data is None else point_data self.cell_data = {} if cell_data is None else cell_data self.field_data = {} if field_data is None else field_data self.point_sets = {} if point_sets is None else point_sets self.cell_sets = {} if cell_sets is None else cell_sets self.gmsh_periodic = gmsh_periodic self.info = info # assert point data consistency and convert to numpy arrays for key, item in self.point_data.items(): self.point_data[key] = np.asarray(item) if len(self.point_data[key]) != len(self.points): raise ValueError( f"len(points) = {len(self.points)}, " f'but len(point_data["{key}"]) = {len(self.point_data[key])}' ) # assert cell data consistency and convert to numpy arrays for key, data in self.cell_data.items(): if len(data) != len(cells): raise ValueError( f"Incompatible cell data '{key}'. " f"{len(cells)} cell blocks, but '{key}' has {len(data)} blocks." ) for k in range(len(data)): data[k] = np.asarray(data[k]) if len(data[k]) != len(self.cells[k]): raise ValueError( "Incompatible cell data. " + f"Cell block {k} ('{self.cells[k].type}') " + f"has length {len(self.cells[k])}, but " + f"corresponding cell data item has length {len(data[k])}." 
) def __repr__(self): lines = ["<meshio mesh object>", f" Number of points: {len(self.points)}"] special_cells = [ "polygon", "polyhedron", "VTK_LAGRANGE_CURVE", "VTK_LAGRANGE_TRIANGLE", "VTK_LAGRANGE_QUADRILATERAL", "VTK_LAGRANGE_TETRAHEDRON", "VTK_LAGRANGE_HEXAHEDRON", "VTK_LAGRANGE_WEDGE", "VTK_LAGRANGE_PYRAMID", ] if len(self.cells) > 0: lines.append(" Number of cells:") for cell_block in self.cells: string = cell_block.type if cell_block.type in special_cells: string += f"({cell_block.data.shape[1]})" lines.append(f" {string}: {len(cell_block)}") else: lines.append(" No cells.") if self.point_sets: names = ", ".join(self.point_sets.keys()) lines.append(f" Point sets: {names}") if self.cell_sets: names = ", ".join(self.cell_sets.keys()) lines.append(f" Cell sets: {names}") if self.point_data: names = ", ".join(self.point_data.keys()) lines.append(f" Point data: {names}") if self.cell_data: names = ", ".join(self.cell_data.keys()) lines.append(f" Cell data: {names}") if self.field_data: names = ", ".join(self.field_data.keys()) lines.append(f" Field data: {names}") return "\n".join(lines) def copy(self): return copy.deepcopy(self) def write(self, path_or_buf, file_format: str | None = None, **kwargs): # avoid circular import from ._helpers import write write(path_or_buf, self, file_format, **kwargs) def get_cells_type(self, cell_type: str): if not any(c.type == cell_type for c in self.cells): return np.empty((0, num_nodes_per_cell[cell_type]), dtype=int) return np.concatenate([c.data for c in self.cells if c.type == cell_type]) def get_cell_data(self, name: str, cell_type: str): return np.concatenate( [d for c, d in zip(self.cells, self.cell_data[name]) if c.type == cell_type] ) @property def cells_dict(self): cells_dict = {} for cell_block in self.cells: if cell_block.type not in cells_dict: cells_dict[cell_block.type] = [] cells_dict[cell_block.type].append(cell_block.data) # concatenate for key, value in cells_dict.items(): cells_dict[key] = np.concatenate(value) return cells_dict @property def cell_data_dict(self): cell_data_dict = {} for key, value_list in self.cell_data.items(): cell_data_dict[key] = {} for value, cell_block in zip(value_list, self.cells): if cell_block.type not in cell_data_dict[key]: cell_data_dict[key][cell_block.type] = [] cell_data_dict[key][cell_block.type].append(value) for cell_type, val in cell_data_dict[key].items(): cell_data_dict[key][cell_type] = np.concatenate(val) return cell_data_dict @property def cell_sets_dict(self): sets_dict = {} for key, member_list in self.cell_sets.items(): sets_dict[key] = {} offsets = {} for members, cells in zip(member_list, self.cells): if members is None: continue if cells.type in offsets: offset = offsets[cells.type] offsets[cells.type] += cells.data.shape[0] else: offset = 0 offsets[cells.type] = cells.data.shape[0] if cells.type in sets_dict[key]: sets_dict[key][cells.type].append(members + offset) else: sets_dict[key][cells.type] = [members + offset] return { key: { cell_type: np.concatenate(members) for cell_type, members in sets.items() if sum(map(np.size, members)) } for key, sets in sets_dict.items() } @classmethod def read(cls, path_or_buf, file_format=None): # avoid circular import from ._helpers import read # 2021-02-21 warn("meshio.Mesh.read is deprecated, use meshio.read instead") return read(path_or_buf, file_format) def cell_sets_to_data(self, data_name: str | None = None): # If possible, convert cell sets to integer cell data. This is possible if all # cells appear exactly in one group. 
default_value = -1 if len(self.cell_sets) > 0: intfun = [] for k, c in enumerate(zip(*self.cell_sets.values())): # Go for -1 as the default value. (NaN is not int.) arr = np.full(len(self.cells[k]), default_value, dtype=int) for i, cc in enumerate(c): if cc is None: continue arr[cc] = i intfun.append(arr) for item in intfun: num_default = np.sum(item == default_value) if num_default > 0: warn( f"{num_default} cells are not part of any cell set. " f"Using default value {default_value}." ) break if data_name is None: data_name = "-".join(self.cell_sets.keys()) self.cell_data[data_name] = intfun self.cell_sets = {} def point_sets_to_data(self, join_char: str = "-") -> None: # now for the point sets # Go for -1 as the default value. (NaN is not int.) default_value = -1 if len(self.point_sets) > 0: intfun = np.full(len(self.points), default_value, dtype=int) for i, cc in enumerate(self.point_sets.values()): intfun[cc] = i if np.any(intfun == default_value): warn( "Not all points are part of a point set. " f"Using default value {default_value}." ) data_name = join_char.join(self.point_sets.keys()) self.point_data[data_name] = intfun self.point_sets = {} # This used to be int_data_to_sets(), converting _all_ cell and point data. # This is not useful in many cases, as one usually only wants one # particular data array (e.g., "MaterialIDs") converted to sets. def cell_data_to_sets(self, key: str): """Convert point_data to cell_sets.""" data = self.cell_data[key] # handle all int and uint data if not all(v.dtype.kind in ["i", "u"] for v in data): raise RuntimeError(f"cell_data['{key}'] is not int data.") tags = np.unique(np.concatenate(data)) # try and get the names by splitting the key along "-" (this is how # sets_to_int_data() forms the key) names = key.split("-") # remove duplicates and preserve order # <https://stackoverflow.com/a/7961390/353337>: names = list(dict.fromkeys(names)) if len(names) != len(tags): # alternative names names = [f"set-{key}-{tag}" for tag in tags] # TODO there's probably a better way besides np.where, something from # np.unique or np.sort for name, tag in zip(names, tags): self.cell_sets[name] = [np.where(d == tag)[0] for d in data] # remove the cell data del self.cell_data[key] def point_data_to_sets(self, key: str): """Convert point_data to point_sets.""" data = self.point_data[key] # handle all int and uint data if not all(v.dtype.kind in ["i", "u"] for v in data): raise RuntimeError(f"point_data['{key}'] is not int data.") tags = np.unique(data) # try and get the names by splitting the key along "-" (this is how # sets_to_int_data() forms the key names = key.split("-") # remove duplicates and preserve order # <https://stackoverflow.com/a/7961390/353337>: names = list(dict.fromkeys(names)) if len(names) != len(tags): # alternative names names = [f"set-key-{tag}" for tag in tags] # TODO there's probably a better way besides np.where, something from # np.unique or np.sort for name, tag in zip(names, tags): self.point_sets[name] = np.where(data == tag)[0] # remove the cell data del self.point_data[key]
(points: 'ArrayLike', cells: 'dict[str, ArrayLike] | list[tuple[str, ArrayLike] | CellBlock]', point_data: 'dict[str, ArrayLike] | None' = None, cell_data: 'dict[str, list[ArrayLike]] | None' = None, field_data=None, point_sets: 'dict[str, ArrayLike] | None' = None, cell_sets: 'dict[str, list[ArrayLike]] | None' = None, gmsh_periodic=None, info=None)
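A short construction sketch based on the Mesh signature above; the point coordinates, triangle connectivity, and data names are made-up sample values:

import numpy as np
import meshio

# Four points in the plane and two triangles referencing them by index.
points = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
cells = [("triangle", np.array([[0, 1, 2], [0, 2, 3]]))]

# point_data must match len(points); cell_data holds one array per cell block,
# matching the consistency checks performed in __init__.
mesh = meshio.Mesh(
    points,
    cells,
    point_data={"temperature": np.array([0.0, 1.0, 2.0, 3.0])},
    cell_data={"quality": [np.array([1.0, 1.0])]},
)
print(mesh)  # rendered by the __repr__ shown below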
3,636
meshio._mesh
__init__
null
def __init__( self, points: ArrayLike, cells: dict[str, ArrayLike] | list[tuple[str, ArrayLike] | CellBlock], point_data: dict[str, ArrayLike] | None = None, cell_data: dict[str, list[ArrayLike]] | None = None, field_data=None, point_sets: dict[str, ArrayLike] | None = None, cell_sets: dict[str, list[ArrayLike]] | None = None, gmsh_periodic=None, info=None, ): self.points = np.asarray(points) if isinstance(cells, dict): # Let's not deprecate this for now. # warn( # "cell dictionaries are deprecated, use list of tuples, e.g., " # '[("triangle", [[0, 1, 2], ...])]', # DeprecationWarning, # ) # old dict, deprecated # # convert dict to list of tuples cells = list(cells.items()) self.cells = [] for cell_block in cells: if isinstance(cell_block, tuple): cell_type, data = cell_block cell_block = CellBlock( cell_type, # polyhedron data cannot be converted to numpy arrays # because the sublists don't all have the same length data if cell_type.startswith("polyhedron") else np.asarray(data), ) self.cells.append(cell_block) self.point_data = {} if point_data is None else point_data self.cell_data = {} if cell_data is None else cell_data self.field_data = {} if field_data is None else field_data self.point_sets = {} if point_sets is None else point_sets self.cell_sets = {} if cell_sets is None else cell_sets self.gmsh_periodic = gmsh_periodic self.info = info # assert point data consistency and convert to numpy arrays for key, item in self.point_data.items(): self.point_data[key] = np.asarray(item) if len(self.point_data[key]) != len(self.points): raise ValueError( f"len(points) = {len(self.points)}, " f'but len(point_data["{key}"]) = {len(self.point_data[key])}' ) # assert cell data consistency and convert to numpy arrays for key, data in self.cell_data.items(): if len(data) != len(cells): raise ValueError( f"Incompatible cell data '{key}'. " f"{len(cells)} cell blocks, but '{key}' has {len(data)} blocks." ) for k in range(len(data)): data[k] = np.asarray(data[k]) if len(data[k]) != len(self.cells[k]): raise ValueError( "Incompatible cell data. " + f"Cell block {k} ('{self.cells[k].type}') " + f"has length {len(self.cells[k])}, but " + f"corresponding cell data item has length {len(data[k])}." )
(self, points: Union[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]], numpy._typing._nested_sequence._NestedSequence[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]]], bool, int, float, complex, str, bytes, numpy._typing._nested_sequence._NestedSequence[Union[bool, int, float, complex, str, bytes]]], cells: dict[str, typing.Union[numpy._typing._array_like._SupportsArray[numpy.dtype[typing.Any]], numpy._typing._nested_sequence._NestedSequence[numpy._typing._array_like._SupportsArray[numpy.dtype[typing.Any]]], bool, int, float, complex, str, bytes, numpy._typing._nested_sequence._NestedSequence[typing.Union[bool, int, float, complex, str, bytes]]]] | list[tuple[str, typing.Union[numpy._typing._array_like._SupportsArray[numpy.dtype[typing.Any]], numpy._typing._nested_sequence._NestedSequence[numpy._typing._array_like._SupportsArray[numpy.dtype[typing.Any]]], bool, int, float, complex, str, bytes, numpy._typing._nested_sequence._NestedSequence[typing.Union[bool, int, float, complex, str, bytes]]]] | meshio._mesh.CellBlock], point_data: Optional[dict[str, Union[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]], numpy._typing._nested_sequence._NestedSequence[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]]], bool, int, float, complex, str, bytes, numpy._typing._nested_sequence._NestedSequence[Union[bool, int, float, complex, str, bytes]]]]] = None, cell_data: Optional[dict[str, list[Union[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]], numpy._typing._nested_sequence._NestedSequence[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]]], bool, int, float, complex, str, bytes, numpy._typing._nested_sequence._NestedSequence[Union[bool, int, float, complex, str, bytes]]]]]] = None, field_data=None, point_sets: Optional[dict[str, Union[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]], numpy._typing._nested_sequence._NestedSequence[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]]], bool, int, float, complex, str, bytes, numpy._typing._nested_sequence._NestedSequence[Union[bool, int, float, complex, str, bytes]]]]] = None, cell_sets: Optional[dict[str, list[Union[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]], numpy._typing._nested_sequence._NestedSequence[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]]], bool, int, float, complex, str, bytes, numpy._typing._nested_sequence._NestedSequence[Union[bool, int, float, complex, str, bytes]]]]]] = None, gmsh_periodic=None, info=None)
3,637
meshio._mesh
__repr__
null
def __repr__(self): lines = ["<meshio mesh object>", f" Number of points: {len(self.points)}"] special_cells = [ "polygon", "polyhedron", "VTK_LAGRANGE_CURVE", "VTK_LAGRANGE_TRIANGLE", "VTK_LAGRANGE_QUADRILATERAL", "VTK_LAGRANGE_TETRAHEDRON", "VTK_LAGRANGE_HEXAHEDRON", "VTK_LAGRANGE_WEDGE", "VTK_LAGRANGE_PYRAMID", ] if len(self.cells) > 0: lines.append(" Number of cells:") for cell_block in self.cells: string = cell_block.type if cell_block.type in special_cells: string += f"({cell_block.data.shape[1]})" lines.append(f" {string}: {len(cell_block)}") else: lines.append(" No cells.") if self.point_sets: names = ", ".join(self.point_sets.keys()) lines.append(f" Point sets: {names}") if self.cell_sets: names = ", ".join(self.cell_sets.keys()) lines.append(f" Cell sets: {names}") if self.point_data: names = ", ".join(self.point_data.keys()) lines.append(f" Point data: {names}") if self.cell_data: names = ", ".join(self.cell_data.keys()) lines.append(f" Cell data: {names}") if self.field_data: names = ", ".join(self.field_data.keys()) lines.append(f" Field data: {names}") return "\n".join(lines)
(self)
3,638
meshio._mesh
cell_data_to_sets
Convert cell_data to cell_sets.
def cell_data_to_sets(self, key: str): """Convert cell_data to cell_sets.""" data = self.cell_data[key] # handle all int and uint data if not all(v.dtype.kind in ["i", "u"] for v in data): raise RuntimeError(f"cell_data['{key}'] is not int data.") tags = np.unique(np.concatenate(data)) # try and get the names by splitting the key along "-" (this is how # sets_to_int_data() forms the key) names = key.split("-") # remove duplicates and preserve order # <https://stackoverflow.com/a/7961390/353337>: names = list(dict.fromkeys(names)) if len(names) != len(tags): # alternative names names = [f"set-{key}-{tag}" for tag in tags] # TODO there's probably a better way besides np.where, something from # np.unique or np.sort for name, tag in zip(names, tags): self.cell_sets[name] = [np.where(d == tag)[0] for d in data] # remove the cell data del self.cell_data[key]
(self, key: str)
3,639
meshio._mesh
cell_sets_to_data
null
def cell_sets_to_data(self, data_name: str | None = None): # If possible, convert cell sets to integer cell data. This is possible if all # cells appear exactly in one group. default_value = -1 if len(self.cell_sets) > 0: intfun = [] for k, c in enumerate(zip(*self.cell_sets.values())): # Go for -1 as the default value. (NaN is not int.) arr = np.full(len(self.cells[k]), default_value, dtype=int) for i, cc in enumerate(c): if cc is None: continue arr[cc] = i intfun.append(arr) for item in intfun: num_default = np.sum(item == default_value) if num_default > 0: warn( f"{num_default} cells are not part of any cell set. " f"Using default value {default_value}." ) break if data_name is None: data_name = "-".join(self.cell_sets.keys()) self.cell_data[data_name] = intfun self.cell_sets = {}
(self, data_name: Optional[str] = None)
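Building on the method body above, a sketch of collapsing named cell sets into one integer cell-data array; the mesh and the set names are illustrative:

import numpy as np
import meshio

points = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
cells = [("triangle", np.array([[0, 1, 2], [0, 2, 3]]))]

# One list entry per cell block; each entry lists the cell indices in the set.
mesh = meshio.Mesh(
    points,
    cells,
    cell_sets={"left": [np.array([0])], "right": [np.array([1])]},
)

mesh.cell_sets_to_data()
# The sets are replaced by integer cell data keyed "left-right":
# cell 0 gets value 0 ("left") and cell 1 gets value 1 ("right").
print(mesh.cell_data)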
3,640
meshio._mesh
copy
null
def copy(self): return copy.deepcopy(self)
(self)
3,641
meshio._mesh
get_cell_data
null
def get_cell_data(self, name: str, cell_type: str): return np.concatenate( [d for c, d in zip(self.cells, self.cell_data[name]) if c.type == cell_type] )
(self, name: str, cell_type: str)
3,642
meshio._mesh
get_cells_type
null
def get_cells_type(self, cell_type: str): if not any(c.type == cell_type for c in self.cells): return np.empty((0, num_nodes_per_cell[cell_type]), dtype=int) return np.concatenate([c.data for c in self.cells if c.type == cell_type])
(self, cell_type: str)
3,643
meshio._mesh
point_data_to_sets
Convert point_data to point_sets.
def point_data_to_sets(self, key: str): """Convert point_data to point_sets.""" data = self.point_data[key] # handle all int and uint data if not all(v.dtype.kind in ["i", "u"] for v in data): raise RuntimeError(f"point_data['{key}'] is not int data.") tags = np.unique(data) # try and get the names by splitting the key along "-" (this is how # sets_to_int_data() forms the key) names = key.split("-") # remove duplicates and preserve order # <https://stackoverflow.com/a/7961390/353337>: names = list(dict.fromkeys(names)) if len(names) != len(tags): # alternative names names = [f"set-{key}-{tag}" for tag in tags] # TODO there's probably a better way besides np.where, something from # np.unique or np.sort for name, tag in zip(names, tags): self.point_sets[name] = np.where(data == tag)[0] # remove the point data del self.point_data[key]
(self, key: str)
3,644
meshio._mesh
point_sets_to_data
null
def point_sets_to_data(self, join_char: str = "-") -> None: # now for the point sets # Go for -1 as the default value. (NaN is not int.) default_value = -1 if len(self.point_sets) > 0: intfun = np.full(len(self.points), default_value, dtype=int) for i, cc in enumerate(self.point_sets.values()): intfun[cc] = i if np.any(intfun == default_value): warn( "Not all points are part of a point set. " f"Using default value {default_value}." ) data_name = join_char.join(self.point_sets.keys()) self.point_data[data_name] = intfun self.point_sets = {}
(self, join_char: str = '-') -> NoneType
3,645
meshio._mesh
write
null
def write(self, path_or_buf, file_format: str | None = None, **kwargs): # avoid circular import from ._helpers import write write(path_or_buf, self, file_format, **kwargs)
(self, path_or_buf, file_format: Optional[str] = None, **kwargs)
3,646
meshio._exceptions
ReadError
null
class ReadError(Exception): pass
null
3,647
meshio._exceptions
WriteError
null
class WriteError(Exception): pass
null
3,660
meshio._helpers
deregister_format
null
def deregister_format(format_name: str): for value in extension_to_filetypes.values(): if format_name in value: value.remove(format_name) if format_name in reader_map: reader_map.pop(format_name) if format_name in _writer_map: _writer_map.pop(format_name)
(format_name: str)
3,677
meshio._helpers
read
Reads an unstructured mesh with added data. :param filename: The file or PathLike to read from. :type filename: str :returns mesh{2,3}d: The mesh data.
def read(filename, file_format: str | None = None): """Reads an unstructured mesh with added data. :param filename: The file or PathLike to read from. :type filename: str :returns mesh{2,3}d: The mesh data. """ if is_buffer(filename, "r"): return _read_buffer(filename, file_format) return _read_file(Path(filename), file_format)
(filename, file_format: Optional[str] = None)
3,678
meshio._helpers
register_format
null
def register_format( format_name: str, extensions: list[str], reader, writer_map ) -> None: for ext in extensions: if ext not in extension_to_filetypes: extension_to_filetypes[ext] = [] extension_to_filetypes[ext].append(format_name) if reader is not None: reader_map[format_name] = reader _writer_map.update(writer_map)
(format_name: str, extensions: list[str], reader, writer_map) -> NoneType
3,688
meshio._helpers
write
Writes mesh together with data to a file. :param filename: File to write to. :type filename: str :param point_data: Named additional point data to write to the file. :type point_data: dict
def write(filename, mesh: Mesh, file_format: str | None = None, **kwargs): """Writes mesh together with data to a file. :params filename: File to write to. :type filename: str :params point_data: Named additional point data to write to the file. :type point_data: dict """ if is_buffer(filename, "r"): if file_format is None: raise WriteError("File format must be supplied if `filename` is a buffer") if file_format == "tetgen": raise WriteError( "tetgen format is spread across multiple files, and so cannot be written to a buffer" ) else: path = Path(filename) if not file_format: # deduce possible file formats from extension file_formats = _filetypes_from_path(path) # just take the first one file_format = file_formats[0] try: writer = _writer_map[file_format] except KeyError: formats = sorted(list(_writer_map.keys())) raise WriteError(f"Unknown format '{file_format}'. Pick one of {formats}") # check cells for sanity for cell_block in mesh.cells: key = cell_block.type value = cell_block.data if key in num_nodes_per_cell: if value.shape[1] != num_nodes_per_cell[key]: raise WriteError( f"Unexpected cells array shape {value.shape} for {key} cells. " + f"Expected shape [:, {num_nodes_per_cell[key]}]." ) else: # we allow custom keys <https://github.com/nschloe/meshio/issues/501> and # cannot check those pass # Write return writer(filename, mesh, **kwargs)
(filename, mesh: meshio._mesh.Mesh, file_format: Optional[str] = None, **kwargs)
3,689
meshio._helpers
write_points_cells
null
def write_points_cells( filename, points: ArrayLike, cells: dict[str, ArrayLike] | list[tuple[str, ArrayLike] | CellBlock], point_data: dict[str, ArrayLike] | None = None, cell_data: dict[str, list[ArrayLike]] | None = None, field_data=None, point_sets: dict[str, ArrayLike] | None = None, cell_sets: dict[str, list[ArrayLike]] | None = None, file_format: str | None = None, **kwargs, ): points = np.asarray(points) mesh = Mesh( points, cells, point_data=point_data, cell_data=cell_data, field_data=field_data, point_sets=point_sets, cell_sets=cell_sets, ) mesh.write(filename, file_format=file_format, **kwargs)
(filename, points: Union[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]], numpy._typing._nested_sequence._NestedSequence[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]]], bool, int, float, complex, str, bytes, numpy._typing._nested_sequence._NestedSequence[Union[bool, int, float, complex, str, bytes]]], cells: dict[str, typing.Union[numpy._typing._array_like._SupportsArray[numpy.dtype[typing.Any]], numpy._typing._nested_sequence._NestedSequence[numpy._typing._array_like._SupportsArray[numpy.dtype[typing.Any]]], bool, int, float, complex, str, bytes, numpy._typing._nested_sequence._NestedSequence[typing.Union[bool, int, float, complex, str, bytes]]]] | list[tuple[str, typing.Union[numpy._typing._array_like._SupportsArray[numpy.dtype[typing.Any]], numpy._typing._nested_sequence._NestedSequence[numpy._typing._array_like._SupportsArray[numpy.dtype[typing.Any]]], bool, int, float, complex, str, bytes, numpy._typing._nested_sequence._NestedSequence[typing.Union[bool, int, float, complex, str, bytes]]]] | meshio._mesh.CellBlock], point_data: Optional[dict[str, Union[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]], numpy._typing._nested_sequence._NestedSequence[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]]], bool, int, float, complex, str, bytes, numpy._typing._nested_sequence._NestedSequence[Union[bool, int, float, complex, str, bytes]]]]] = None, cell_data: Optional[dict[str, list[Union[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]], numpy._typing._nested_sequence._NestedSequence[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]]], bool, int, float, complex, str, bytes, numpy._typing._nested_sequence._NestedSequence[Union[bool, int, float, complex, str, bytes]]]]]] = None, field_data=None, point_sets: Optional[dict[str, Union[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]], numpy._typing._nested_sequence._NestedSequence[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]]], bool, int, float, complex, str, bytes, numpy._typing._nested_sequence._NestedSequence[Union[bool, int, float, complex, str, bytes]]]]] = None, cell_sets: Optional[dict[str, list[Union[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]], numpy._typing._nested_sequence._NestedSequence[numpy._typing._array_like._SupportsArray[numpy.dtype[Any]]], bool, int, float, complex, str, bytes, numpy._typing._nested_sequence._NestedSequence[Union[bool, int, float, complex, str, bytes]]]]]] = None, file_format: Optional[str] = None, **kwargs)
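A small round-trip sketch combining write_points_cells and the read helper documented above; the file name and arrays are illustrative:

import numpy as np
import meshio

points = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
cells = [("triangle", np.array([[0, 1, 2]]))]

# Builds a Mesh internally and writes it; the format is deduced from the extension.
meshio.write_points_cells("single_triangle.vtu", points, cells)

# Read it back with the module-level reader.
mesh = meshio.read("single_triangle.vtu")
print(mesh)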
3,694
logging_tree.format
printout
Print a tree of loggers, given a `Node` from `logging_tree.nodes`. If no `node` argument is provided, then the entire tree of currently active `logging` loggers is printed out.
def printout(node=None): """Print a tree of loggers, given a `Node` from `logging_tree.nodes`. If no `node` argument is provided, then the entire tree of currently active `logging` loggers is printed out. """ print(build_description(node)[:-1])
(node=None)
3,695
logging_tree.nodes
tree
Return a tree of tuples representing the logger layout. Each tuple looks like ``('logger-name', <Logger>, [...])`` where the third element is a list of zero or more child tuples that share the same layout.
def tree(): """Return a tree of tuples representing the logger layout. Each tuple looks like ``('logger-name', <Logger>, [...])`` where the third element is a list of zero or more child tuples that share the same layout. """ root = ('', logging.root, []) nodes = {} items = list(logging.root.manager.loggerDict.items()) # for Python 2 and 3 items.sort() for name, logger in items: nodes[name] = node = (name, logger, []) i = name.rfind('.', 0, len(name) - 1) # same formula used in `logging` if i == -1: parent = root else: parent = nodes[name[:i]] parent[2].append(node) return root
()
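A minimal sketch tying the two logging_tree entries above together; the logger names are arbitrary examples and the imports follow the module paths shown in the records:

import logging
from logging_tree.format import printout
from logging_tree.nodes import tree

# Create a couple of loggers so the hierarchy has something to show.
logging.getLogger("app")
logging.getLogger("app.db")

# tree() returns nested ('name', <Logger>, [children]) tuples rooted at the root logger.
name, root_logger, children = tree()
print(name == "", len(children))

# printout() renders the same hierarchy as text on stdout.
printout()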
3,696
mllabreva
list
null
def list(): print("1.linear regression\n2.kmeans\n3.knn\n4.housePriceLinear regression\n5.decision tree\n6.svm\n7.perceptron\n8.nbc\n9.comparison\n10.tensor flow")
()
3,697
mllabreva
make
null
def make(int1): if (int1==1): print("import numpy as np \nimport pandas as pd\nimport pandas as pd\ndf = pd.read_csv('Salary.csv')\nprint(df.to_string())\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nX = df.iloc[:, :-1].values # Features => Years of experience => Independent Variable\ny = df.iloc[:, -1].values # Target => Salary => Dependent Variable\nX\nfrom sklearn.model_selection import train_test_split\nimport sklearn.metrics as sm\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\nfrom sklearn.linear_model import LinearRegression\nmodel = LinearRegression()\nmodel.fit(X_train, y_train)\npredictions = model.predict(X_test)\nimport seaborn as sns\nsns.distplot(predictions-y_test)\nplt.scatter(X_train, y_train, color='red')\nplt.plot(X_train, model.predict(X_train))\nr_sq = model.score(X_train, y_train)\nprint('coefficient of determination:', r_sq)\nprint('intercept:', model.intercept_)\nprint('slope:', model.coef_) \ny_pred = model.predict(X_train)\nprint('y='+str(float(model.coef_))+'X+'+str(float(model.intercept_)))\n") elif(int1==2): print("import pandas as pd\ndf = pd.read_csv('wine-clustering.csv')\ndf.head()\ndf.describe().T\ndf.info()\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.pairplot(df)\nfrom sklearn.cluster import KMeans\nselected_features = df[['OD280', 'Alcohol']]\nkmeans_obj = KMeans(n_clusters=3, random_state=42) \nkmeans_obj.fit(selected_features)\ny_kmeans = kmeans_obj.fit_predict(selected_features)\nprint(y_kmeans)\ncenters = kmeans_obj.cluster_centers_\nprint(centers)\nsns.scatterplot(x = selected_features['OD280'], y = selected_features['Alcohol'], hue=kmeans_obj.labels_)\nplt.scatter(kmeans_obj.cluster_centers_[:, 0], kmeans_obj.cluster_centers_[:, 1], s=200, c='red')\n") elif(int1==3): print("import numpy as np\nimport pandas as pd\ndata = pd.read_csv('breast-cancer-wisconsin-data_data.csv')\ndata.head()\ndata.columns\ndata = data.drop(['id', 'Unnamed: 32'], axis = 1)\ndata.shape\ndata.describe()\ndata.info()\ndata.columns\nX = data.loc[:, ['radius_mean', 'texture_mean', 'perimeter_mean','area_mean', 'smoothness_mean', 'compactness_mean', 'concavity_mean','concave points_mean', 'symmetry_mean', 'fractal_dimension_mean','radius_se', 'texture_se', 'perimeter_se', 'area_se', 'smoothness_se','compactness_se', 'concavity_se', 'concave points_se', 'symmetry_se','fractal_dimension_se', 'radius_worst', 'texture_worst','perimeter_worst', 'area_worst', 'smoothness_worst','compactness_worst', 'concavity_worst', 'concave points_worst','symmetry_worst', 'fractal_dimension_worst']]\ny = data.loc[:, 'diagnosis']\nX.head()\ny.head()\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\nfrom sklearn.neighbors import KNeighborsClassifier\nknn_cfr = KNeighborsClassifier(n_neighbors=3)\nknn_cfr.fit(X_train, y_train)\ny_pred = knn_cfr.predict(X_test)\nfrom sklearn.metrics import accuracy_score\naccuracy_score(y_test, y_pred)\n") elif(int1==4): print("import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\ndata = pd.read_csv('kc_house_data.csv')\ndata.head()\ndata = data.drop(['id', 'date'], axis = 1)\ndata.head()\ndata.describe()\ndata['bedrooms'].value_counts().plot(kind='bar')\nplt.title('number of Bedroom')\nplt.xlabel('Bedrooms')\nplt.ylabel('Count')\nsns.despine()\nplt.figure(figsize=(10,10))\nsns.jointplot(x=data.lat.values, y=data.long.values, 
height=10)\nplt.ylabel('Longitude',fontsize=12)\nplt.xlabel('Latitude',fontsize=12)\nsns.despine()\nplt.show()\nplt.scatter(data.price,data.sqft_living)\nplt.title('Price vs Square Feet')\nplt.scatter(data.price,data.long)\nplt.title('Price vs Location of the area')\nplt.scatter(data.price,data.lat)\nplt.xlabel('Price')\nplt.ylabel('Latitude')\nplt.title('Latitude vs Price')\nplt.scatter(data.bedrooms,data.price)\nplt.title('Bedroom and Price ')\nplt.xlabel('Bedrooms')\nplt.ylabel('Price')\nsns.despine()\nplt.show()\nplt.scatter((data['sqft_living']+data['sqft_basement']),data['price'])\nplt.scatter(data.waterfront,data.price)\nplt.title('Waterfront vs Price ( 0= no waterfront)')\ny = data['price']\nX = data.drop(['price'],axis=1)\nfrom sklearn.model_selection import train_test_split\nx_train , x_test , y_train , y_test = train_test_split(X , y , test_size = 0.10,random_state =2)\nfrom sklearn.linear_model import LinearRegression\nreg = LinearRegression()\nreg.fit(x_train,y_train)\nreg.score(x_test,y_test)\n") elif(int1==5): print("import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\ndf = pd.read_csv('adult.csv')\ndf.head()\ndf.columns\ndf.shape\ndf.isin(['?']).sum()\ndf['workclass'] = df['workclass'].replace('?', np.nan)\ndf['occupation'] = df['occupation'].replace('?', np.nan)\ndf['native-country'] = df['native-country'].replace('?', np.nan)\ndf.isin(['?']).sum()\ndf.isnull().sum()\ndf.dropna(how='any', inplace=True)\nprint(f'There are {df.duplicated().sum()} duplicate values')\ndf = df.drop_duplicates()\ndf.shape\ndf.columns\ndf.drop(['fnlwgt','educational-num','marital-status','relationship', 'race',], axis = 1, inplace = True)\ndf.columns\nX = df.loc[:,['age', 'workclass', 'education', 'occupation', 'gender', 'capital-gain','capital-loss', 'hours-per-week', 'native-country']]\ny = df.loc[:,'income']\nX.head()\ny.head()\nfrom sklearn.preprocessing import LabelEncoder\ny = LabelEncoder().fit_transform(y)\ny = pd.DataFrame(y)\ny.head()\nnumeric_features = X.select_dtypes('number')\ncategorical_features = X.select_dtypes('object')\ncategorical_features\nnumeric_features\nconverted_categorical_features = pd.get_dummies(categorical_features)\nconverted_categorical_features.shape\nall_features = [converted_categorical_features, numeric_features]\nnewX = pd.concat(all_features,axis=1, join='inner')\nnewX.shape\nnewX.columns\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(newX, y, test_size=0.33, random_state=42)\nfrom sklearn.tree import DecisionTreeClassifier\nclf = DecisionTreeClassifier(max_depth=5)\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\ny_test.shape\ny_pred.shape\npredictions_df = pd.DataFrame()\npredictions_df['precdicted_salary_class'] = y_pred\npredictions_df['actual_salary_class'] = y_test[0].values\npredictions_df\nfrom sklearn.metrics import accuracy_score\nprint(accuracy_score(y_pred,y_test))\nfrom sklearn.tree import plot_tree\nimport matplotlib.pyplot as plt\nplt.figure(figsize=(14,14))\nplot_tree(clf, fontsize=10, filled=True)\nplt.title('Decision tree trained on the selected features')\nplt.show()\n") elif(int1==6): print("import numpy as np\nimport pandas as pd\ndf = pd.read_csv('smaller_adult.csv')\ndf.head()\ndf.columns\ndf.shape\ndf.info()\ndf.describe()\ndf.isin(['?']).sum()\ndf.columns\ndf['workclass'] = df['workclass'].replace('?', np.nan)\ndf['occupation'] = df['occupation'].replace('?', np.nan)\ndf.isin(['?']).sum()\ndf.isnull().sum()\ndf.dropna(how='any', 
inplace=True)\nprint(f'There are {df.duplicated().sum()} duplicate values')\ndf = df.drop_duplicates()\ndf.shape\ndf.columns\nX = df.loc[:,['age', 'workclass', 'educational-num', 'occupation', 'gender', 'hours-per-week']]\ny = df.loc[:,'income']\nfrom sklearn.preprocessing import LabelEncoder\ny = LabelEncoder().fit_transform(y)\ny = pd.DataFrame(y)\ny.head()\nnumeric_features = X.select_dtypes('number')\ncategorical_features = X.select_dtypes('object')\ncategorical_features\nnumeric_features\nconverted_categorical_features = pd.get_dummies(categorical_features)\nconverted_categorical_features.shape\nall_features = [converted_categorical_features, numeric_features]\nnewX = pd.concat(all_features,axis=1, join='inner')\nnewX.shape\nnewX.columns\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(newX, y, test_size=0.33, random_state=42)\nfrom sklearn.svm import SVC\nclf = SVC(kernel='linear', gamma = 'auto')\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\npredictions_df = pd.DataFrame()\npredictions_df['precdicted_salary_class'] = y_pred\npredictions_df['actual_salary_class'] = y_test[0].values\npredictions_df\nfrom sklearn.metrics import accuracy_score\nprint(accuracy_score(y_pred,y_test))\n") elif(int1==7): print("import numpy as np\nimport pandas as pd\ndf = pd.read_csv('breast-cancer-wisconsin-data_data.csv')\ndf.head()\ndf.shape\ndf = df.drop(['id', 'Unnamed: 32'], axis = 1)\ndf.columns\ndf.describe()\ndf.info()\nX = df.loc[:, ['radius_mean', 'texture_mean', 'perimeter_mean','area_mean', 'smoothness_mean', 'compactness_mean', 'concavity_mean','concave points_mean', 'symmetry_mean', 'fractal_dimension_mean','radius_se', 'texture_se', 'perimeter_se', 'area_se', 'smoothness_se','compactness_se', 'concavity_se', 'concave points_se', 'symmetry_se','fractal_dimension_se', 'radius_worst', 'texture_worst','perimeter_worst', 'area_worst', 'smoothness_worst','compactness_worst', 'concavity_worst', 'concave points_worst','symmetry_worst', 'fractal_dimension_worst']]\ny = df.loc[:, 'diagnosis']\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\nfrom sklearn.neural_network import MLPClassifier\nclf = MLPClassifier(random_state=1, max_iter=300).fit(X_train, y_train)\ny_pred = clf.predict(X_test)\ny_pred\nfrom sklearn.metrics import accuracy_score\naccuracy_score(y_test, y_pred)\n") elif(int1==8): print("from sklearn.datasets import load_iris\nfrom sklearn.datasets import load_diabetes\nfrom sklearn import metrics\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\niris = load_iris()\nX = iris.data\ny = iris.target\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=1)\ngnb = GaussianNB()\ngnb.fit(X_train, y_train)\ny_pred = gnb.predict(X_test)\nfrom sklearn import metrics\nprint('Gaussian Naive Bayes model accuracy(in %):', metrics.accuracy_score(y_test, y_pred)*100)\n") elif(int1==9): print("import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.naive_bayes import GaussianNB\nnames = ['K-Nearest Neighbors','Linear SVM','Decision 
Tree','Multilayer Perceptron','Gaussian Naive Bayes']\nclassifiers = [KNeighborsClassifier(3),SVC(kernel='linear', C=0.025),DecisionTreeClassifier(max_depth=5),MLPClassifier(alpha=1, max_iter=1000),GaussianNB(),]\ndf = pd.read_csv('Iris.csv')\ndf.head()\ndf = df.drop('Id', axis = 1)\ndf.head()\nX= df.iloc[:, :-1]\nX.head()\ny = df.iloc[:, -1]\ny.head()\nfrom sklearn.preprocessing import LabelEncoder\ny = LabelEncoder().fit_transform(y)\ny = pd.DataFrame(y)\ny.head()\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\nfor name, clf in zip(names, classifiers):\n clf.fit(X_train, y_train.values.ravel())\n score = clf.score(X_test, y_test)\n print('Classifier Name: ', name, 'Score: ', score)\n") elif(int1==10): print("import pandas as pd\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom tensorflow.keras import Sequential\nfrom tensorflow.keras.layers import Dense\ndf = pd.read_csv('BankNote_Authentication.csv')\ndf.head()\nX = df.values[:, :-1]\ny = df.values[:, -1]\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)\nmodel = Sequential()\nmodel.add(Dense(7, input_shape=(X.shape[1],)))\nmodel.add(Dense(1, activation = 'sigmoid'))\nmodel.compile(optimizer='adam',loss='binary_crossentropy')\nmodel.fit(X_train, y_train, epochs=30, batch_size=32)\ny_pred = model.predict(X_test)\ny_pred\ny_pred = (y_pred>0.5).flatten().astype(int)\ny_pred\nprint(accuracy_score(y_test, y_pred))\n")
(int1)
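Both mllabreva helpers above only print to stdout, so usage is a two-liner (sketch, assuming the module is imported under its package name):

import mllabreva

mllabreva.list()   # show the numbered menu of available lab programs
mllabreva.make(1)  # print the source of the linear-regression example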
3,698
springserve
API
Get the raw API object. This is rarely used directly by a client of this library, but it is used as an internal function.
def API(reauth=False): """ Get the raw API object. This is rarely used directly by a client of this library, but it is used as an internal function """ global _API, _ACCOUNT, _CONFIG_OVERRIDE, _TOKEN_OVERRIDE if _API is None or reauth: _msg.debug("authenticating to springserve") try: if _ACCOUNT: _API = _lnk("springserve.{}".format(_ACCOUNT)) elif _CONFIG_OVERRIDE: _API = SpringServeAPI(**_CONFIG_OVERRIDE) elif _TOKEN_OVERRIDE: _API = SpringServeAPITokenOverride(**_TOKEN_OVERRIDE) else: try: _API = _lnk("springserve.{}".format("__default__")) except: # this is to keep backwards compatibility _API = _lnk.springserve _API.headers.update({'springserve-sdk': __version__}) except Exception as e: raise Exception("""Error authenticating: check your link.config to make sure your username, password and url are correct: {}""".format(e)) return _API
(reauth=False)
3,699
requests_toolbelt.multipart.encoder
MultipartEncoder
The ``MultipartEncoder`` object is a generic interface to the engine that will create a ``multipart/form-data`` body for you. The basic usage is: .. code-block:: python import requests from requests_toolbelt import MultipartEncoder encoder = MultipartEncoder({'field': 'value', 'other_field': 'other_value'}) r = requests.post('https://httpbin.org/post', data=encoder, headers={'Content-Type': encoder.content_type}) If you do not need to take advantage of streaming the post body, you can also do: .. code-block:: python r = requests.post('https://httpbin.org/post', data=encoder.to_string(), headers={'Content-Type': encoder.content_type}) If you want the encoder to use a specific order, you can use an OrderedDict or more simply, a list of tuples: .. code-block:: python encoder = MultipartEncoder([('field', 'value'), ('other_field', 'other_value')]) .. versionchanged:: 0.4.0 You can also provide tuples as part values as you would provide them to requests' ``files`` parameter. .. code-block:: python encoder = MultipartEncoder({ 'field': ('file_name', b'{"a": "b"}', 'application/json', {'X-My-Header': 'my-value'}) ]) .. warning:: This object will end up directly in :mod:`httplib`. Currently, :mod:`httplib` has a hard-coded read size of **8192 bytes**. This means that it will loop until the file has been read and your upload could take a while. This is **not** a bug in requests. A feature is being considered for this object to allow you, the user, to specify what size should be returned on a read. If you have opinions on this, please weigh in on `this issue`_. .. _this issue: https://github.com/requests/toolbelt/issues/75
class MultipartEncoder(object): """ The ``MultipartEncoder`` object is a generic interface to the engine that will create a ``multipart/form-data`` body for you. The basic usage is: .. code-block:: python import requests from requests_toolbelt import MultipartEncoder encoder = MultipartEncoder({'field': 'value', 'other_field': 'other_value'}) r = requests.post('https://httpbin.org/post', data=encoder, headers={'Content-Type': encoder.content_type}) If you do not need to take advantage of streaming the post body, you can also do: .. code-block:: python r = requests.post('https://httpbin.org/post', data=encoder.to_string(), headers={'Content-Type': encoder.content_type}) If you want the encoder to use a specific order, you can use an OrderedDict or more simply, a list of tuples: .. code-block:: python encoder = MultipartEncoder([('field', 'value'), ('other_field', 'other_value')]) .. versionchanged:: 0.4.0 You can also provide tuples as part values as you would provide them to requests' ``files`` parameter. .. code-block:: python encoder = MultipartEncoder({ 'field': ('file_name', b'{"a": "b"}', 'application/json', {'X-My-Header': 'my-value'}) ]) .. warning:: This object will end up directly in :mod:`httplib`. Currently, :mod:`httplib` has a hard-coded read size of **8192 bytes**. This means that it will loop until the file has been read and your upload could take a while. This is **not** a bug in requests. A feature is being considered for this object to allow you, the user, to specify what size should be returned on a read. If you have opinions on this, please weigh in on `this issue`_. .. _this issue: https://github.com/requests/toolbelt/issues/75 """ def __init__(self, fields, boundary=None, encoding='utf-8'): #: Boundary value either passed in by the user or created self.boundary_value = boundary or uuid4().hex # Computed boundary self.boundary = '--{}'.format(self.boundary_value) #: Encoding of the data being passed in self.encoding = encoding # Pre-encoded boundary self._encoded_boundary = b''.join([ encode_with(self.boundary, self.encoding), encode_with('\r\n', self.encoding) ]) #: Fields provided by the user self.fields = fields #: Whether or not the encoder is finished self.finished = False #: Pre-computed parts of the upload self.parts = [] # Pre-computed parts iterator self._iter_parts = iter([]) # The part we're currently working with self._current_part = None # Cached computation of the body's length self._len = None # Our buffer self._buffer = CustomBytesIO(encoding=encoding) # Pre-compute each part's headers self._prepare_parts() # Load boundary into buffer self._write_boundary() @property def len(self): """Length of the multipart/form-data body. requests will first attempt to get the length of the body by calling ``len(body)`` and then by checking for the ``len`` attribute. On 32-bit systems, the ``__len__`` method cannot return anything larger than an integer (in C) can hold. If the total size of the body is even slightly larger than 4GB users will see an OverflowError. This manifested itself in `bug #80`_. As such, we now calculate the length lazily as a property. .. _bug #80: https://github.com/requests/toolbelt/issues/80 """ # If _len isn't already calculated, calculate, return, and set it return self._len or self._calculate_length() def __repr__(self): return '<MultipartEncoder: {!r}>'.format(self.fields) def _calculate_length(self): """ This uses the parts to calculate the length of the body. This returns the calculated length so __len__ can be lazy. 
""" boundary_len = len(self.boundary) # Length of --{boundary} # boundary length + header length + body length + len('\r\n') * 2 self._len = sum( (boundary_len + total_len(p) + 4) for p in self.parts ) + boundary_len + 4 return self._len def _calculate_load_amount(self, read_size): """This calculates how many bytes need to be added to the buffer. When a consumer read's ``x`` from the buffer, there are two cases to satisfy: 1. Enough data in the buffer to return the requested amount 2. Not enough data This function uses the amount of unread bytes in the buffer and determines how much the Encoder has to load before it can return the requested amount of bytes. :param int read_size: the number of bytes the consumer requests :returns: int -- the number of bytes that must be loaded into the buffer before the read can be satisfied. This will be strictly non-negative """ amount = read_size - total_len(self._buffer) return amount if amount > 0 else 0 def _load(self, amount): """Load ``amount`` number of bytes into the buffer.""" self._buffer.smart_truncate() part = self._current_part or self._next_part() while amount == -1 or amount > 0: written = 0 if part and not part.bytes_left_to_write(): written += self._write(b'\r\n') written += self._write_boundary() part = self._next_part() if not part: written += self._write_closing_boundary() self.finished = True break written += part.write_to(self._buffer, amount) if amount != -1: amount -= written def _next_part(self): try: p = self._current_part = next(self._iter_parts) except StopIteration: p = None return p def _iter_fields(self): _fields = self.fields if hasattr(self.fields, 'items'): _fields = list(self.fields.items()) for k, v in _fields: file_name = None file_type = None file_headers = None if isinstance(v, (list, tuple)): if len(v) == 2: file_name, file_pointer = v elif len(v) == 3: file_name, file_pointer, file_type = v else: file_name, file_pointer, file_type, file_headers = v else: file_pointer = v field = fields.RequestField(name=k, data=file_pointer, filename=file_name, headers=file_headers) field.make_multipart(content_type=file_type) yield field def _prepare_parts(self): """This uses the fields provided by the user and creates Part objects. It populates the `parts` attribute and uses that to create a generator for iteration. """ enc = self.encoding self.parts = [Part.from_field(f, enc) for f in self._iter_fields()] self._iter_parts = iter(self.parts) def _write(self, bytes_to_write): """Write the bytes to the end of the buffer. :param bytes bytes_to_write: byte-string (or bytearray) to append to the buffer :returns: int -- the number of bytes written """ return self._buffer.append(bytes_to_write) def _write_boundary(self): """Write the boundary to the end of the buffer.""" return self._write(self._encoded_boundary) def _write_closing_boundary(self): """Write the bytes necessary to finish a multipart/form-data body.""" with reset(self._buffer): self._buffer.seek(-2, 2) self._buffer.write(b'--\r\n') return 2 def _write_headers(self, headers): """Write the current part's headers to the buffer.""" return self._write(encode_with(headers, self.encoding)) @property def content_type(self): return str( 'multipart/form-data; boundary={}'.format(self.boundary_value) ) def to_string(self): """Return the entirety of the data in the encoder. .. note:: This simply reads all of the data it can. If you have started streaming or reading data from the encoder, this method will only return whatever data is left in the encoder. .. 
note:: This method affects the internal state of the encoder. Calling this method will exhaust the encoder. :returns: the multipart message :rtype: bytes """ return self.read() def read(self, size=-1): """Read data from the streaming encoder. :param int size: (optional), If provided, ``read`` will return exactly that many bytes. If it is not provided, it will return the remaining bytes. :returns: bytes """ if self.finished: return self._buffer.read(size) bytes_to_load = size if bytes_to_load != -1 and bytes_to_load is not None: bytes_to_load = self._calculate_load_amount(int(size)) self._load(bytes_to_load) return self._buffer.read(size)
(fields, boundary=None, encoding='utf-8')
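A streaming file-upload sketch building on the class docstring above; the URL and file name are placeholders:

import requests
from requests_toolbelt import MultipartEncoder

# A file part is a (filename, file object, content type) tuple, exactly the
# 3-tuple shape that _iter_fields() below unpacks.
encoder = MultipartEncoder(fields={
    "description": "monthly report",
    "file": ("report.csv", open("report.csv", "rb"), "text/csv"),
})

# Passing the encoder as `data` streams the body; the boundary is carried in content_type.
response = requests.post(
    "https://httpbin.org/post",
    data=encoder,
    headers={"Content-Type": encoder.content_type},
)
print(response.status_code)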
3,700
requests_toolbelt.multipart.encoder
__init__
null
def __init__(self, fields, boundary=None, encoding='utf-8'): #: Boundary value either passed in by the user or created self.boundary_value = boundary or uuid4().hex # Computed boundary self.boundary = '--{}'.format(self.boundary_value) #: Encoding of the data being passed in self.encoding = encoding # Pre-encoded boundary self._encoded_boundary = b''.join([ encode_with(self.boundary, self.encoding), encode_with('\r\n', self.encoding) ]) #: Fields provided by the user self.fields = fields #: Whether or not the encoder is finished self.finished = False #: Pre-computed parts of the upload self.parts = [] # Pre-computed parts iterator self._iter_parts = iter([]) # The part we're currently working with self._current_part = None # Cached computation of the body's length self._len = None # Our buffer self._buffer = CustomBytesIO(encoding=encoding) # Pre-compute each part's headers self._prepare_parts() # Load boundary into buffer self._write_boundary()
(self, fields, boundary=None, encoding='utf-8')
3,701
requests_toolbelt.multipart.encoder
__repr__
null
def __repr__(self): return '<MultipartEncoder: {!r}>'.format(self.fields)
(self)
3,702
requests_toolbelt.multipart.encoder
_calculate_length
This uses the parts to calculate the length of the body. This returns the calculated length so __len__ can be lazy.
def _calculate_length(self): """ This uses the parts to calculate the length of the body. This returns the calculated length so __len__ can be lazy. """ boundary_len = len(self.boundary) # Length of --{boundary} # boundary length + header length + body length + len('\r\n') * 2 self._len = sum( (boundary_len + total_len(p) + 4) for p in self.parts ) + boundary_len + 4 return self._len
(self)
3,703
requests_toolbelt.multipart.encoder
_calculate_load_amount
This calculates how many bytes need to be added to the buffer. When a consumer reads ``x`` from the buffer, there are two cases to satisfy: 1. Enough data in the buffer to return the requested amount 2. Not enough data This function uses the amount of unread bytes in the buffer and determines how much the Encoder has to load before it can return the requested amount of bytes. :param int read_size: the number of bytes the consumer requests :returns: int -- the number of bytes that must be loaded into the buffer before the read can be satisfied. This will be strictly non-negative
def _calculate_load_amount(self, read_size): """This calculates how many bytes need to be added to the buffer. When a consumer reads ``x`` from the buffer, there are two cases to satisfy: 1. Enough data in the buffer to return the requested amount 2. Not enough data This function uses the amount of unread bytes in the buffer and determines how much the Encoder has to load before it can return the requested amount of bytes. :param int read_size: the number of bytes the consumer requests :returns: int -- the number of bytes that must be loaded into the buffer before the read can be satisfied. This will be strictly non-negative """ amount = read_size - total_len(self._buffer) return amount if amount > 0 else 0
(self, read_size)
3,704
requests_toolbelt.multipart.encoder
_iter_fields
null
def _iter_fields(self): _fields = self.fields if hasattr(self.fields, 'items'): _fields = list(self.fields.items()) for k, v in _fields: file_name = None file_type = None file_headers = None if isinstance(v, (list, tuple)): if len(v) == 2: file_name, file_pointer = v elif len(v) == 3: file_name, file_pointer, file_type = v else: file_name, file_pointer, file_type, file_headers = v else: file_pointer = v field = fields.RequestField(name=k, data=file_pointer, filename=file_name, headers=file_headers) field.make_multipart(content_type=file_type) yield field
(self)
3,705
requests_toolbelt.multipart.encoder
_load
Load ``amount`` number of bytes into the buffer.
def _load(self, amount): """Load ``amount`` number of bytes into the buffer.""" self._buffer.smart_truncate() part = self._current_part or self._next_part() while amount == -1 or amount > 0: written = 0 if part and not part.bytes_left_to_write(): written += self._write(b'\r\n') written += self._write_boundary() part = self._next_part() if not part: written += self._write_closing_boundary() self.finished = True break written += part.write_to(self._buffer, amount) if amount != -1: amount -= written
(self, amount)
3,706
requests_toolbelt.multipart.encoder
_next_part
null
def _next_part(self): try: p = self._current_part = next(self._iter_parts) except StopIteration: p = None return p
(self)
3,707
requests_toolbelt.multipart.encoder
_prepare_parts
This uses the fields provided by the user and creates Part objects. It populates the `parts` attribute and uses that to create a generator for iteration.
def _prepare_parts(self): """This uses the fields provided by the user and creates Part objects. It populates the `parts` attribute and uses that to create a generator for iteration. """ enc = self.encoding self.parts = [Part.from_field(f, enc) for f in self._iter_fields()] self._iter_parts = iter(self.parts)
(self)
3,708
requests_toolbelt.multipart.encoder
_write
Write the bytes to the end of the buffer. :param bytes bytes_to_write: byte-string (or bytearray) to append to the buffer :returns: int -- the number of bytes written
def _write(self, bytes_to_write):
    """Write the bytes to the end of the buffer.

    :param bytes bytes_to_write: byte-string (or bytearray) to append to
        the buffer
    :returns: int -- the number of bytes written
    """
    return self._buffer.append(bytes_to_write)
(self, bytes_to_write)
3,709
requests_toolbelt.multipart.encoder
_write_boundary
Write the boundary to the end of the buffer.
def _write_boundary(self):
    """Write the boundary to the end of the buffer."""
    return self._write(self._encoded_boundary)
(self)
3,710
requests_toolbelt.multipart.encoder
_write_closing_boundary
Write the bytes necessary to finish a multipart/form-data body.
def _write_closing_boundary(self):
    """Write the bytes necessary to finish a multipart/form-data body."""
    with reset(self._buffer):
        self._buffer.seek(-2, 2)
        self._buffer.write(b'--\r\n')
    return 2
(self)
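For orientation, this is roughly what the tail of the encoded body looks like once the closing boundary has been written (an illustrative sketch; 'boundary' stands in for the real generated boundary string): the trailing '\r\n' after the final boundary is overwritten so that the body ends with '--boundary--\r\n', as multipart/form-data requires.

# Illustrative layout only; the real boundary is a generated hex string.
body_tail = (
    b'value of the last part\r\n'
    b'--boundary--\r\n'   # closing delimiter produced by _write_closing_boundary
)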
3,711
requests_toolbelt.multipart.encoder
_write_headers
Write the current part's headers to the buffer.
def _write_headers(self, headers):
    """Write the current part's headers to the buffer."""
    return self._write(encode_with(headers, self.encoding))
(self, headers)
3,712
requests_toolbelt.multipart.encoder
read
Read data from the streaming encoder.

:param int size: (optional), If provided, ``read`` will return exactly
    that many bytes. If it is not provided, it will return the remaining
    bytes.
:returns: bytes
def read(self, size=-1):
    """Read data from the streaming encoder.

    :param int size: (optional), If provided, ``read`` will return exactly
        that many bytes. If it is not provided, it will return the
        remaining bytes.
    :returns: bytes
    """
    if self.finished:
        return self._buffer.read(size)

    bytes_to_load = size
    if bytes_to_load != -1 and bytes_to_load is not None:
        bytes_to_load = self._calculate_load_amount(int(size))

    self._load(bytes_to_load)
    return self._buffer.read(size)
(self, size=-1)
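A minimal streaming-upload sketch built on the encoder's read() behaviour (the URL, field names, and file path are placeholders): requests pulls data from the encoder in chunks via read(), so the multipart body is generated lazily instead of being built in memory up front.

import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder

encoder = MultipartEncoder(
    fields={
        "name": "example",
        # (filename, file object, content type)
        "file": ("report.csv", open("report.csv", "rb"), "text/csv"),
    }
)

# requests streams the encoder by calling read() repeatedly, so the whole
# file never has to be held in memory at once.
response = requests.post(
    "https://example.com/upload",                      # placeholder URL
    data=encoder,
    headers={"Content-Type": encoder.content_type},
)
print(response.status_code)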
3,713
requests_toolbelt.multipart.encoder
to_string
Return the entirety of the data in the encoder.

.. note::

    This simply reads all of the data it can. If you have started
    streaming or reading data from the encoder, this method will only
    return whatever data is left in the encoder.

.. note::

    This method affects the internal state of the encoder. Calling this
    method will exhaust the encoder.

:returns: the multipart message
:rtype: bytes
def to_string(self):
    """Return the entirety of the data in the encoder.

    .. note::

        This simply reads all of the data it can. If you have started
        streaming or reading data from the encoder, this method will only
        return whatever data is left in the encoder.

    .. note::

        This method affects the internal state of the encoder. Calling
        this method will exhaust the encoder.

    :returns: the multipart message
    :rtype: bytes
    """
    return self.read()
(self)
3,714
link.wrappers.springservewrappers
SpringAuth
Does the authentication for Spring requests.
class SpringAuth(AuthBase):
    """
    Does the authentication for Spring requests.
    """
    def __init__(self, token):
        # setup any auth-related data here
        self.token = token

    def __call__(self, r):
        # modify and return the request
        r.headers['Authorization'] = self.token
        return r
(token)
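A small usage sketch (the token value and URL are placeholders): because SpringAuth derives from requests' AuthBase, it can be attached to a session as its ``auth`` handler, and it will then inject the token into the Authorization header of every outgoing request.

import requests

session = requests.Session()
session.auth = SpringAuth("my-api-token")                       # hypothetical token
resp = session.get("https://example.com/api/v0/supply_tags")    # placeholder URL
print(resp.status_code)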
3,715
link.wrappers.springservewrappers
__call__
null
def __call__(self, r):
    # modify and return the request
    r.headers['Authorization'] = self.token
    return r
(self, r)
3,716
link.wrappers.springservewrappers
__init__
null
def __init__(self, token):
    # setup any auth-related data here
    self.token = token
(self, token)
3,717
link.wrappers.springservewrappers
SpringServeAPI
Wrap the Spring API
class SpringServeAPI(APIRequestWrapper):
    """
    Wrap the Spring API
    """
    headers = {"Content-Type": "application/json"}

    def __init__(self, wrap_name=None, base_url=None, user=None,
                 password=None):
        self._token = None
        super(SpringServeAPI, self).__init__(
            wrap_name=wrap_name,
            base_url=base_url,
            user=user,
            password=password,
            response_wrapper=SpringServeAPIResponseWrapper,
        )

    def authenticate(self):
        """
        Write a custom auth property where we grab the auth token and put
        it in the headers
        """
        # it's weird i have to do this here, but the code makes this not simple
        auth_json = {'email': self.user, 'password': self.password}
        # send a post with no auth. prevents an infinite loop
        auth_response = self.post('/auth', data=json.dumps(auth_json),
                                  auth=None)
        _token = auth_response.json['token']
        self._token = _token
        self._wrapped.auth = SpringAuth(_token)

    @property
    def token(self):
        """
        Returns the token from the api to tell us that we have been
        logged in
        """
        if not self._token:
            self._token = self.authenticate().token
        return self._token
(wrap_name=None, base_url=None, user=None, password=None)
3,718
link.link
__call__
When you call this Callable it will run the command. The command can
either be a string to run on the shell or a function to run in python

Right now it only supports string commands for the shell
def __call__(self, command=None, wait=True):
    """
    When you call this Callable it will run the command. The command can
    either be a string to run on the shell or a function to run in python

    Right now it only supports string commands for the shell
    """
    cmd = command or self.command
    if cmd:
        p = Popen(cmd, shell=True)
        if wait:
            p.wait()
        return p
(self, command=None, wait=True)
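The behaviour described above boils down to the following standalone sketch (not part of link itself): run the command string through the shell and optionally block until it exits.

from subprocess import Popen

def run(command, wait=True):
    p = Popen(command, shell=True)   # run the string through the shell
    if wait:
        p.wait()                     # block until the command finishes
    return p

proc = run("echo hello")
print(proc.returncode)               # 0 once the command has exited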
3,719
link.link
__getattr__
wrap a special object if it exists
def __getattr__(self, name):
    """
    wrap a special object if it exists
    """
    # first look if the Wrapper object itself has it
    try:
        return self.__getattribute__(name)
    except AttributeError as e:
        pass

    if self._wrapped is not None:
        # if it has a getattr then try that out otherwise go to getattribute
        # TODO: Deeply understand __getattr__ vs __getattribute__.
        # this might not be correct
        try:
            return self._wrapped.__getattr__(name)
        except AttributeError as e:
            try:
                return self._wrapped.__getattribute__(name)
            except AttributeError as e:
                raise AttributeError("No Such Attribute in wrapper %s" % name)

    # then it is trying to unpickle itself and there is no setstate
    # TODO: Clean this up, it's crazy and any changes cause bugs
    if name == '__setstate__':
        raise AttributeError("No such attribute found %s" % name)

    # call the wrapper to create a new one
    wrapper = '%s.%s' % (self.wrap_name, name)
    if self.wrap_name:
        return lnk(wrapper)

    raise AttributeError("No such attribute found %s" % name)
(self, name)
3,720
link.link
__getitem__
null
def __getitem__(self, name):
    return self.__getattr__(name)
(self, name)
3,721
link.wrappers.springservewrappers
__init__
null
def __init__(self, wrap_name=None, base_url=None, user=None, password=None):
    self._token = None
    super(SpringServeAPI, self).__init__(
        wrap_name=wrap_name,
        base_url=base_url,
        user=user,
        password=password,
        response_wrapper=SpringServeAPIResponseWrapper,
    )
(self, wrap_name=None, base_url=None, user=None, password=None)
3,722
link.wrappers.springservewrappers
authenticate
Write a custom auth property where we grab the auth token and put it in the headers
def authenticate(self):
    """
    Write a custom auth property where we grab the auth token and put it
    in the headers
    """
    # it's weird i have to do this here, but the code makes this not simple
    auth_json = {'email': self.user, 'password': self.password}
    # send a post with no auth. prevents an infinite loop
    auth_response = self.post('/auth', data=json.dumps(auth_json), auth=None)
    _token = auth_response.json['token']
    self._token = _token
    self._wrapped.auth = SpringAuth(_token)
(self)
3,723
link.wrappers.apiwrappers
clear_session
clears the session and reauths
def clear_session(self):
    """
    clears the session and reauths
    """
    sess = requests.session()
    sess.headers = self.headers
    self._wrapped = sess
    self._wrapped = self.authenticate()
(self)
3,724
link.link
config
null
def config(self):
    return self.__link_config__
(self)
3,725
link.wrappers.apiwrappers
delete
Make a delete call
def delete(self, url_params='', data='', **kwargs):
    """
    Make a delete call
    """
    return self.request('delete', url_params=url_params, data=data, **kwargs)
(self, url_params='', data='', **kwargs)
3,726
link.wrappers.apiwrappers
get
Make a get call
def get(self, url_params='', **kwargs):
    """
    Make a get call
    """
    return self.request('get', url_params=url_params, **kwargs)
(self, url_params='', **kwargs)
3,727
link.wrappers.apiwrappers
patch
Make a patch call
def patch(self, url_params='', data='', **kwargs):
    """
    Make a patch call
    """
    return self.request('patch', url_params=url_params, data=data, **kwargs)
(self, url_params='', data='', **kwargs)
3,728
link.wrappers.apiwrappers
post
Make a post call
def post(self, url_params='', data='', **kwargs):
    """
    Make a post call
    """
    return self.request('post', url_params=url_params, data=data, **kwargs)
(self, url_params='', data='', **kwargs)
3,729
link.wrappers.apiwrappers
put
Make a put call
def put(self, url_params='', data='', **kwargs):
    """
    Make a put call
    """
    return self.request('put', url_params=url_params, data=data, **kwargs)
(self, url_params='', data='', **kwargs)
3,730
link.wrappers.apiwrappers
request
Make a request. This is taken care of by the request decorator
def request(self, method='get', url_params='', data='', allow_reauth=True,
            **kwargs):
    """
    Make a request. This is taken care of by the request decorator
    """
    if isinstance(url_params, dict):
        # tricky, if it's a dictionary turn it into a & delimited key=value
        url_params = '&'.join(['%s=%s' % (key, value)
                               for key, value in url_params.items()])

    full_url = self.base_url + url_params
    # turn the string method into a function name
    _method = self._wrapped.__getattribute__(method)
    resp = self.response_wrapper(
        response=_method(full_url, data=data, **kwargs))

    # if you get a no auth then retry the auth
    if allow_reauth and resp.noauth():
        self.authenticate()
        return self.request(method, url_params, data, allow_reauth=False,
                            **kwargs)

    return resp
(self, method='get', url_params='', data='', allow_reauth=True, **kwargs)
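To make the dict handling above concrete (a standalone sketch; the keys and values are made up): a dict of query parameters is flattened into an '&'-delimited key=value string before being appended to the base URL. Note that the values are not URL-encoded.

url_params = {'page': 1, 'per_page': 50}
query = '&'.join('%s=%s' % (key, value) for key, value in url_params.items())
print(query)   # page=1&per_page=50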
3,731
link.wrappers.apiwrappers
set_session_headers
null
def set_session_headers(self, name, value):
    self._wrapped.headers[name] = value
(self, name, value)
3,732
springserve
SpringServeAPITokenOverride
Used to inject a token instead of having the SDK do auth
class SpringServeAPITokenOverride(SpringServeAPI):
    """
    Used to inject a token instead of having the SDK do auth
    """
    def __init__(self, base_url=None, user=None, password=None, token=None):
        self._token = token
        super(SpringServeAPI, self).__init__(base_url=base_url, user=user,
                                             password=password)

    def authenticate(self):
        self._wrapped.auth = SpringAuth(self._token)
(base_url=None, user=None, password=None, token=None)
3,736
springserve
__init__
null
def __init__(self, base_url=None, user=None, password=None, token=None):
    self._token = token
    super(SpringServeAPI, self).__init__(base_url=base_url, user=user,
                                         password=password)
(self, base_url=None, user=None, password=None, token=None)
3,737
springserve
authenticate
null
def authenticate(self):
    self._wrapped.auth = SpringAuth(self._token)
(self)
3,747
springserve
VDAuthError
null
class VDAuthError(Exception):
    pass
null
3,748
springserve._account
_AccountAPI
null
class _AccountAPI(_VDAPIService):
    __API__ = "accounts"
()
3,749
springserve
__init__
null
def __init__(self):
    self.account_id = None
(self)
3,750
springserve._decorators
wrapped
null
def raw_response_retry(api_call, limit=4, sleep_duration=5, backoff_factor=2):
    """
    Decorator for SpringServe API to handle retries (with exponential
    backoff) in the case of a rate-limit or 5XX error.

    Sleep duration and backoff factor control wait time between successive
    failures, e.g. sleep_duration 3 and backoff_factor 2 means
    sleep 3s, 6s, 12s, 24s

    :param int limit: Max number of retry attempts
    :param int sleep_duration: Initial sleep time
    :param float/int backoff_factor: Factor to increase sleep between
        successive retries.
    """
    def wrapped(*args, **kwargs):
        sleeps = sleep_duration
        num_attempts = 0

        while num_attempts < limit:
            # make the API call
            resp = api_call(*args, **kwargs)

            aws_check = (
                # make sure it's the link response object
                isinstance(resp, SpringServeAPIResponseWrapper) and
                # HTTP status codes that are valid for retries
                resp.status_code >= 500 and resp.status_code < 600 and
                # content matches one of our error messages - note that ELB
                # error messages will not be JSON (they are HTML strings) so
                # cannot check resp.json attribute, as this will not always
                # be valid
                is_resp_in_elb_error_messages(resp)
            )
            rack_attack_check = (
                isinstance(resp, SpringServeAPIResponseWrapper) and
                resp.status_code == RACK_ATTACK_STATUS_CODE and
                resp.content == RACK_ATTACK_MESSAGE
            )

            if aws_check or rack_attack_check:
                _msg.warn("Encountered rate-limit (attempt {}), "
                          "sleeping".format(num_attempts))
                num_attempts += 1
                time.sleep(sleeps)
                sleeps *= backoff_factor
            # call was either successful, or an error outside of the purview
            # of this handler
            else:
                return resp

        # We've hit max retry attempts, return anyways
        return resp

    return wrapped
(*args, **kwargs)
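A hypothetical usage sketch of the decorator (the wrapped function is a placeholder): with the defaults limit=4, sleep_duration=5 and backoff_factor=2, a persistently rate-limited call sleeps 5s, 10s, 20s and 40s between attempts before giving up and returning the last response.

@raw_response_retry
def fetch(api_client):
    # placeholder: any callable returning a SpringServeAPIResponseWrapper,
    # so the status-code and content checks above can apply
    return api_client.get('supply_tags')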
3,752
springserve
_raw_bulk_delete
null
def _raw_bulk_delete(self, data, path_param="", reauth=False, files=None,
                     **query_params):
    params = _format_params(query_params)

    if not files:
        return API(reauth=reauth).delete(
            _format_url(self.endpoint, path_param),
            params=params,
            data=_json.dumps(data)
        )

    m = MultipartEncoder(fields=files)
    return API(reauth=reauth).delete(
        _format_url(self.endpoint, path_param),
        params=params,
        headers={'Content-Type': m.content_type},
        data=m
    )
(self, data, path_param='', reauth=False, files=None, **query_params)
3,753
springserve
build_response
null
def build_response(self, api_response, path_params, query_params, payload=''):
    is_ok = api_response.ok

    if not is_ok and api_response.status_code == 401:
        raise VDAuthError("Need to Re-Auth")

    if api_response.status_code == 204:
        # this means empty
        resp_json = {}
    else:
        try:
            resp_json = api_response.json
        except:
            resp_json = {"error": "error parsing json response"}

    if isinstance(resp_json, list):
        # wrap it in a multi container
        return self.__RESPONSES_OBJECT__(self, resp_json, path_params,
                                         query_params,
                                         self.__RESPONSE_OBJECT__, is_ok,
                                         payload, self.account_id)

    return self.__RESPONSE_OBJECT__(self, resp_json, path_params,
                                    query_params, is_ok, payload,
                                    self.account_id)
(self, api_response, path_params, query_params, payload='')
3,754
springserve
bulk_delete
Delete an object.
def bulk_delete(self, data, path_param="", reauth=False, files=None,
                **query_params):
    """
    Delete an object.
    """
    global API

    try:
        return self.build_response(
            self._raw_bulk_delete(data, path_param=path_param, reauth=reauth,
                                  files=files, **query_params),
            path_param,
            query_params
        )
    except VDAuthError as e:
        # we only retry if we are redo'n on an auto reauth
        if not reauth:
            _msg.info("Reauthing and then retry")
            return self.bulk_delete(data, path_param, reauth=True,
                                    files=files, **query_params)
        # means that we had already tried a reauth and it failed
        raise e
(self, data, path_param='', reauth=False, files=None, **query_params)
3,755
springserve
delete
null
def delete(self, path_param="", reauth=False, **query_params):
    global API

    try:
        params = _format_params(query_params)
        return self.build_response(
            API(reauth=reauth).delete(
                _format_url(self.endpoint, path_param),
                params=params,
            ),
            path_param,
            query_params
        )
    except VDAuthError as e:
        # we only retry if we are redo'n on an auto reauth
        if not reauth:
            _msg.info("Reauthing and then retry")
            return self.delete(path_param, reauth=True, **query_params)
        # means that we had already tried a reauth and it failed
        raise e
(self, path_param='', reauth=False, **query_params)
3,756
springserve
get
Make a get request to this api service. Allows you to pass in arbitrary
query parameters.

Examples::

    # get all supply_tags
    tags = springserve.supply_tags.get()
    for tag in tags:
        print tag.id, tag.name

    # get one supply tag
    tag = springserve.supply_tag.get(1)
    print tag.id, tag.name

    # get by many ids
    tags = springserve.supply_tags.get(ids=[1,2,3])

    # get users that are account_contacts (ie, using query string
    # params)
    users = springserve.users.get(account_contact=True)
def get(self, path_param=None, reauth=False, **query_params):
    """
    Make a get request to this api service. Allows you to pass in
    arbitrary query parameters.

    Examples::

        # get all supply_tags
        tags = springserve.supply_tags.get()
        for tag in tags:
            print tag.id, tag.name

        # get one supply tag
        tag = springserve.supply_tag.get(1)
        print tag.id, tag.name

        # get by many ids
        tags = springserve.supply_tags.get(ids=[1,2,3])

        # get users that are account_contacts (ie, using query string
        # params)
        users = springserve.users.get(account_contact=True)
    """
    global API

    try:
        return self.build_response(
            self.get_raw(path_param, reauth=reauth, **query_params),
            path_param,
            query_params
        )
    except VDAuthError as e:
        # we only retry if we are redo'n on an auto reauth
        if not reauth:
            _msg.info("Reauthing and then retry")
            return self.get(path_param, reauth=True, **query_params)
        raise e
(self, path_param=None, reauth=False, **query_params)
3,758
springserve
new
Create a new object. You need to pass in the required fields as a
dictionary. For instance::

    resp = springserve.domain_lists.new({'name':'My Domain List'})
    print resp.ok
def new(self, data, path_param="", reauth=False, **query_params):
    """
    Create a new object. You need to pass in the required fields as a
    dictionary. For instance::

        resp = springserve.domain_lists.new({'name':'My Domain List'})
        print resp.ok
    """
    return self.post(data, path_param, reauth, **query_params)
(self, data, path_param='', reauth=False, **query_params)
3,759
springserve
post
null
def post(self, data, path_param="", files=None, reauth=False, **query_params):
    global API

    try:
        return self.build_response(
            self._post_raw(data, path_param, reauth=reauth, files=files,
                           **query_params),
            path_param,
            query_params,
            payload=data
        )
    except VDAuthError as e:
        # we only retry if we are redo'n on an auto reauth
        if not reauth:
            _msg.info("Reauthing and then retry")
            return self.post(data, path_param, reauth=True, files=files,
                             **query_params)
        # means that we had already tried a reauth and it failed
        raise e
(self, data, path_param='', files=None, reauth=False, **query_params)
3,760
springserve
put
null
def put(self, path_param, data, reauth=False, **query_params):
    global API

    try:
        return self.build_response(
            self._put_raw(path_param, data, reauth=reauth, **query_params),
            path_param,
            query_params,
            payload=data
        )
    except VDAuthError as e:
        # we only retry if we are redo'n on an auto reauth
        if not reauth:
            _msg.info("Reauthing and then retry")
            return self.put(path_param, data, reauth=True, **query_params)
        raise e
(self, path_param, data, reauth=False, **query_params)
3,761
springserve._common
_AdvertiserDomainListAPI
null
class _AdvertiserDomainListAPI(_VDAPIService):
    __API__ = "advertiser_domain_lists"
    __RESPONSE_OBJECT__ = _AdvertiserDomainListResponse
()
3,774
springserve._common
_AppBundleListAPI
null
class _AppBundleListAPI(_VDAPIService):
    __API__ = "app_bundle_lists"
    __RESPONSE_OBJECT__ = _AppBundleListResponse
()
3,787
springserve._common
_AppNameListAPI
null
class _AppNameListAPI(_VDAPIService):
    __API__ = "app_name_lists"
    __RESPONSE_OBJECT__ = _AppNameListResponse
()
3,800
springserve._demand
_AudioCreativeAPI
null
class _AudioCreativeAPI(_VDAPIService):
    __API__ = "audio_creatives"
()
3,813
springserve._common
_BillAPI
null
class _BillAPI(_VDAPIService):
    __API__ = "bills"
    __RESPONSE_OBJECT__ = _BillResponse

    def bulk_sync(self, ids, reauth=False, **query_params):
        query_params['ids'] = ','.join(str(x) for x in ids)
        return self.get('bulk_sync', reauth, **query_params)
()
3,820
springserve._common
bulk_sync
null
def bulk_sync(self, ids, reauth=False, **query_params):
    query_params['ids'] = ','.join(str(x) for x in ids)
    return self.get('bulk_sync', reauth, **query_params)
(self, ids, reauth=False, **query_params)
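The ids handling above is just a comma-join that ends up as a single query-string parameter (illustrative values):

ids = [101, 102, 103]
print(','.join(str(x) for x in ids))   # '101,102,103' -> sent as ids=101,102,103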
3,827
springserve._demand
_CampaignAPI
null
class _CampaignAPI(_VDAPIService):
    __API__ = "campaigns"
()
3,840
springserve._common
_ChannelIdListAPI
null
class _ChannelIdListAPI(_VDAPIService):
    __API__ = "channel_id_lists"
    __RESPONSE_OBJECT__ = _ChannelIdListResponse
()
3,853
springserve._demand
_ConnectedDemandAPI
null
class _ConnectedDemandAPI(_VDAPIService):
    __RESPONSE_OBJECT__ = _DemandTagResponse
    __API__ = "connected_demand"
()
3,866
springserve._supply
_ConnectedSupplyAPI
null
class _ConnectedSupplyAPI(_VDAPIService):
    __API__ = "connected_supply"
()
3,879
springserve._demand
_CreativeAPI
null
class _CreativeAPI(_VDAPIService):
    __API__ = "creatives"
()
3,892
springserve._common
_DealIdListAPI
null
class _DealIdListAPI(_VDAPIService):
    __API__ = "deal_id_lists"
    __RESPONSE_OBJECT__ = _DealIdListResponse
()
3,905
springserve._demand
_DemandLabelAPI
null
class _DemandLabelAPI(_VDAPIService):
    __API__ = "demand_labels"
()
3,918
springserve._demand
_DemandPartnerAPI
null
class _DemandPartnerAPI(_VDAPIService):
    __API__ = "demand_partners"
()
3,931
springserve._demand
_DemandTagAPI
null
class _DemandTagAPI(_VDAPIService):
    __RESPONSE_OBJECT__ = _DemandTagResponse
    __API__ = "demand_tags"
()
3,944
springserve._direct_connect
_DirectConnectionAPI
null
class _DirectConnectionAPI(_VDAPIService):
    __API__ = "direct_connections"
()
3,957
springserve._common
_DomainListAPI
null
class _DomainListAPI(_VDAPIService):
    __API__ = "domain_lists"
    __RESPONSE_OBJECT__ = _DomainListResponse
()
3,970
springserve._common
_IpListAPI
null
class _IpListAPI(_VDAPIService):
    __API__ = "ip_lists"
    __RESPONSE_OBJECT__ = _IpListResponse
()
3,983
springserve._common
_KeyAPI
null
class _KeyAPI(_VDAPIService):
    __API__ = "keys"
    __RESPONSE_OBJECT__ = _KeyResponse
()
3,996
springserve._object_change_messages
_ObjectChangeMessagesAPI
null
class _ObjectChangeMessagesAPI(_VDAPIService):
    __API__ = "object_change_messages"
()
4,009
springserve._common
_ParameterListAPI
null
class _ParameterListAPI(_VDAPIService):
    __API__ = "parameter_lists"
    __RESPONSE_OBJECT__ = _ParameterListResponse
()
4,022
springserve._common
_PlacementIdListAPI
null
class _PlacementIdListAPI(_VDAPIService):
    __API__ = "placement_id_lists"
    __RESPONSE_OBJECT__ = _PlacementIdListResponse
()