docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Read DFA accepted states from flex compiled file Args: None Returns: list: The list of accepted states
def _read_accept_states(self):
    """Read the DFA accepted states from a flex-compiled C file.

    Scans ``self.outfile`` for the ``yy_accept`` table declaration,
    collects the table entries, and reports which states are accepting.

    Returns:
        list: Indices of the accepted states (empty list if the table
        was not found).
    """
    states = []
    regex = re.compile('[ \t\n\r:,]+')
    found = False       # yy_accept declaration has been seen
    in_table = False    # between the opening "{" and the closing "} ;"
    mapping = []        # raw yy_accept entries (as strings)
    with open(self.outfile) as flex_file:
        for cur_line in flex_file:
            # Both older ("yyconst") and newer ("const") flex output
            # styles declare the table this way.
            if (cur_line[0:37] == "static yyconst flex_int16_t yy_accept"
                    or cur_line[0:35] == "static const flex_int16_t yy_accept"):
                found = True
                continue
            if not found:
                continue
            if not in_table and cur_line[0:5] == "    {":
                mapping.append(0)  # there is always a zero there
                in_table = True
                continue
            if in_table:
                if cur_line[0:7] != "    } ;":
                    # Drop all whitespace, then split on separators.
                    cur_line = "".join(cur_line.split())
                    if cur_line == '':
                        continue
                    if cur_line[-1] == ',':
                        cur_line = cur_line[:-1]
                    mapping += regex.split(cur_line)
                else:
                    cleared = [int(entry) for entry in mapping]
                    max_value = max(cleared)
                    # Accepting states carry a value strictly between
                    # 0 and max_value - 1.
                    for idx, value in enumerate(cleared):
                        if 0 < value < max_value - 1:
                            states.append(idx)
                    return states
    return []
913,102
This function adds a sink state in the total states Args: states (list): The current states Returns: None
def _add_sink_state(self, states):
    """Append a sink state (all 128 character transitions are -1).

    Args:
        states (list): The transition table to extend in place.

    Returns:
        None
    """
    states.append([-1] * 128)
913,104
This function creates the delta transition Args: startState (int): Initial state of automaton Returns: int, func: A number indicating the total states, and the delta function
def _create_delta(self):
    """Build the DFA delta function from the flex transition tables.

    Returns:
        int, func: The total number of states (including the added
        sink state), and the delta transition function.
    """
    transitions = self._read_transitions()
    state_count = len(transitions)
    self._add_sink_state(transitions)
    null_transitions = self._read_null_transitions()

    def delta(current_state, character):
        # The empty string selects the null (epsilon-like) transition.
        if character == '':
            return null_transitions[current_state]
        target = transitions[current_state][ord(character)]
        # Non-positive entries mean "no transition": go to the sink.
        return target if target > 0 else state_count

    return state_count + 1, delta
913,105
Convert text values into integer values. Args: value (str or int): The value to coerce. Raises: TypeError: If the value is not an int or string. ValueError: If the value is not int or an acceptable value. Returns: int: The integer value represented.
def coerce(self, value):
    """Convert text values into integer values.

    Args:
        value (str or int): The value to coerce.

    Raises:
        TypeError: If the value is not an int or string.
        ValueError: If the value is not int or an acceptable value.

    Returns:
        int: The integer value represented.
    """
    # Already an integer type: hand it back untouched. The compat.long
    # check stays separate so it is only evaluated when needed.
    if isinstance(value, int):
        return value
    if isinstance(value, compat.long):
        return value
    return int(value)
913,115
find an instance Create a new instance and populate it with data stored if it exists. Args: instance_id (str): UUID of the instance Returns: AtlasServiceInstance.Instance: An instance
def find(self, instance_id):
    """Find an instance.

    Create a new instance and populate it with stored data if any exists.

    Args:
        instance_id (str): UUID of the instance.

    Returns:
        AtlasServiceInstance.Instance: An instance.
    """
    found = AtlasServiceInstance.Instance(instance_id, self.backend)
    self.backend.storage.populate(found)
    return found
913,598
Create the instance Args: instance (AtlasServiceInstance.Instance): Existing or New instance parameters (dict): Parameters for the instance existing (bool): Create an instance on an existing Atlas cluster Returns: ProvisionedServiceSpec: Status Raises: ErrInstanceAlreadyExists: If instance exists but with different parameters ErrClusterNotFound: Cluster does not exist
def create(self, instance, parameters, existing):
    """Create the instance.

    Args:
        instance (AtlasServiceInstance.Instance): Existing or new instance.
        parameters (dict): Parameters for the instance.
        existing (bool): Create an instance on an existing Atlas cluster.

    Returns:
        ProvisionedServiceSpec: Status.

    Raises:
        ErrInstanceAlreadyExists: If the instance exists with different parameters.
        ErrClusterNotFound: The cluster does not exist.
    """
    if instance.isProvisioned():
        if instance.parameters == parameters:
            # Identical parameters: idempotent call, nothing to do.
            return ProvisionedServiceSpec(ProvisionState.IDENTICAL_ALREADY_EXISTS,
                                          "",
                                          "duplicate")
        # Same instance id but different parameters.
        raise ErrInstanceAlreadyExists()

    instance.parameters = parameters

    if existing and not self.backend.atlas.Clusters.is_existing_cluster(
            instance.parameters[self.backend.config.PARAMETER_CLUSTER]):
        # We were asked to use an existing cluster that is not available!
        raise ErrClusterNotFound(
            instance.parameters[self.backend.config.PARAMETER_CLUSTER])
    elif not existing:
        # Creating a new cluster is unsupported; AtlasBroker.provision
        # should have raised ErrPlanUnsupported before reaching here.
        raise NotImplementedError()

    stored = self.backend.storage.store(instance)

    # Provision done.
    return ProvisionedServiceSpec(ProvisionState.SUCCESSFUL_CREATED,
                                  "",
                                  str(stored))
913,599
Delete the instance Args: instance (AtlasServiceInstance.Instance): an existing instance Returns: DeprovisionServiceSpec: Status
def delete(self, instance):
    """Delete the instance.

    Args:
        instance (AtlasServiceInstance.Instance): An existing instance.

    Returns:
        DeprovisionServiceSpec: Status.
    """
    # TODO: Really drop the database based on a policy set in
    # `instance.parameters`. We would need:
    # - a policy parameter on the instance
    #   (eg: policy-on-delete : retain|drop => default to retain)
    # - a check that the database name `instance.get_dbname()` is not in
    #   use by another instance (shared database)
    # - credentials on the Atlas cluster `instance.get_cluster()` to
    #   drop the database
    self.backend.storage.remove(instance)
    return DeprovisionServiceSpec(False, "done")
913,600
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel used to create the service stubs.
    """
    # Health-check RPC.
    self.Ping = channel.unary_unary(
        '/processor.Processor/Ping',
        request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        response_deserializer=processor__pb2.Status.FromString,
    )
    # Main processing RPC.
    self.Process = channel.unary_unary(
        '/processor.Processor/Process',
        request_serializer=message__pb2.Message.SerializeToString,
        response_deserializer=message__pb2.Message.FromString,
    )
913,738
Create a bitarray object that stores its components by reference. Args: *components: Any number of bitarray instances to store in this composition.
def __init__(self, component1=None, component2=None):
    """Create a composite bitarray that stores its components by reference.

    Args:
        component1: A CompositeBitarray or raw bitarray component.
        component2: Optional second component appended after the first.

    Raises:
        ProteusDataJoinError: If the two components cannot be joined.
    """
    # Normalize: if only component2 was given, treat it as component1.
    if component1 is None and component2 is not None:
        component1 = component2
        component2 = None

    self._llhead = None
    self._lltail = None

    # Initialize from the first component.
    if isinstance(component1, CompositeBitarray):
        self._llhead = component1._llhead
        self._lltail = component1._lltail
        self._offset = component1._offset
        self._tailbitsused = component1._tailbitsused
        self._length = len(component1)
    else:
        self._llhead = self._lltail = _DLLNode(component1)
        self._offset = 0
        self._tailbitsused = len(component1)
        self._length = self._tailbitsused

    if component2 is not None:
        oldtail = self._lltail
        if isinstance(component2, CompositeBitarray):
            if self._lltail is component2._llhead:
                # The two composites share a boundary node; they must
                # meet exactly at the seam.
                if self._tail_end != component2._offset:
                    raise ProteusDataJoinError()
                if component2._is_single_llnode:
                    self._tailbitsused += component2._tailbitsused
                else:
                    self._tailbitsused = component2._tailbitsused
                    self._lltail = component2._lltail
                self._length += len(component2)
            elif self._lltail.next is component2._llhead and \
                    self._tailoffset == 0 and \
                    component2._offset == 0:
                # Already adjacent in the list; just extend the view.
                self._lltail = component2._lltail
                self._tailbitsused = component2._tailbitsused
                self._length += len(component2)
            elif component2._llhead.prev is not None or \
                    self._lltail.next is not None or \
                    component2._offset or self._tailoffset or \
                    self._llhead is component2._lltail:
                # Will not catch everything. Good enough to prevent most
                # accidents. A 'perfect' version would require walking
                # the whole tree. No way.
                raise ProteusDataJoinError()
            else:
                self._length += len(component2)
                self._lltail.next = component2._llhead
                self._lltail = component2._lltail
                self._tailbitsused = component2._tailbitsused
        else:
            if self._tailoffset or self._lltail.next is not None:
                raise ProteusDataJoinError()
            self._tailbitsused = len(component2)
            self._length += self._tailbitsused
            node = _DLLNode(component2)
            node.prev = self._lltail
            self._lltail = node

        # WHEN IT IS OK TO MERGE:
        # oldtail can merge right if (oldtail is not head or offset is 0)
        # and (oldtail.next is not tail or tailbitsused is len of node)
        # and data is combinable. Merging continues back from the seam
        # until the left node is incompatible; if merging with the tail
        # node, the tail node must be fully used.
        if oldtail is not self._llhead or self._offset == 0:
            self._do_merge(oldtail)
913,832
Get the number of bits in the array with the specified value. Args: val: A boolean value to check against the array's value. Returns: An integer of the number of bits in the array equal to val.
def count(self, val=True):
    """Get the number of bits in the array with the specified value.

    Args:
        val: A boolean value to check against the array's value.

    Returns:
        An integer of the number of bits in the array equal to val.
    """
    total = 0
    for component in self._iter_components():
        total += component.count(val)
    return total
913,841
Populate Query mongo to get information about the obj if it exists Args: obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding Raises: ErrStorageTypeUnsupported: Type unsupported. ErrStorageMongoConnection: Error during MongoDB communication.
def populate(self, obj):
    """Populate: query MongoDB for information about ``obj`` if it exists.

    Args:
        obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance):
            Instance or binding to populate; ``obj.parameters`` and
            ``obj.provisioned`` are updated in place.

    Raises:
        ErrStorageTypeUnsupported: Type unsupported.
        ErrStorageMongoConnection: Error during MongoDB communication.
    """
    # Build the query for the concrete type (Instance vs Binding).
    if type(obj) is AtlasServiceInstance.Instance:
        query = {"instance_id": obj.instance_id, "binding_id": {"$exists": False}}
    elif type(obj) is AtlasServiceBinding.Binding:
        query = {"binding_id": obj.binding_id, "instance_id": obj.instance.instance_id}
    else:
        raise ErrStorageTypeUnsupported(type(obj))

    # Narrowed from a bare `except:` so that SystemExit/KeyboardInterrupt
    # are not swallowed; the original cause is chained for debugging.
    try:
        result = self.broker.find_one(query)
    except Exception as err:
        raise ErrStorageMongoConnection("Populate Instance or Binding") from err

    if result is not None:
        obj.parameters = result["parameters"]
        # Flag the obj as provisioned.
        obj.provisioned = True
    else:
        # New object.
        obj.provisioned = False
914,453
Store Store an object into the MongoDB storage for caching Args: obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding Returns: ObjectId: MongoDB _id Raises: ErrStorageMongoConnection: Error during MongoDB communication. ErrStorageTypeUnsupported: Type unsupported. ErrStorageStore : Failed to store the binding or instance.
def store(self, obj):
    """Store an object into the MongoDB storage for caching.

    Args:
        obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance):
            Instance or binding.

    Returns:
        ObjectId: MongoDB ``_id`` of the inserted document.

    Raises:
        ErrStorageMongoConnection: Error during MongoDB communication.
        ErrStorageTypeUnsupported: Type unsupported.
        ErrStorageStore: Failed to store the binding or instance.
    """
    # Build the document for the concrete type (Instance vs Binding).
    if type(obj) is AtlasServiceInstance.Instance:
        query = {"instance_id": obj.instance_id,
                 "database": obj.get_dbname(),
                 "cluster": obj.get_cluster(),
                 "parameters": obj.parameters}
    elif type(obj) is AtlasServiceBinding.Binding:
        query = {"binding_id": obj.binding_id,
                 "parameters": obj.parameters,
                 "instance_id": obj.instance.instance_id}
    else:
        raise ErrStorageTypeUnsupported(type(obj))

    # Narrowed from a bare `except:`; the original cause is chained.
    try:
        result = self.broker.insert_one(query)
    except Exception as err:
        raise ErrStorageMongoConnection("Store Instance or Binding") from err

    if result is not None:
        # Flag the obj as provisioned.
        obj.provisioned = True
        return result.inserted_id

    raise ErrStorageStore()
914,454
Remove Remove an object from the MongoDB storage for caching Args: obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance): instance or binding Raises: ErrStorageTypeUnsupported: Type unsupported.
def remove(self, obj):
    """Remove an object from the MongoDB storage for caching.

    Args:
        obj (AtlasServiceBinding.Binding or AtlasServiceInstance.Instance):
            Instance or binding.

    Raises:
        ErrStorageTypeUnsupported: Type unsupported.
    """
    obj_type = type(obj)
    if obj_type is AtlasServiceInstance.Instance:
        self.remove_instance(obj)
    elif obj_type is AtlasServiceBinding.Binding:
        self.remove_binding(obj)
    else:
        raise ErrStorageTypeUnsupported(obj_type)
914,455
Remove an instance Remove an object from the MongoDB storage for caching Args: instance (AtlasServiceInstance.Instance): instance Raises: ErrStorageMongoConnection: Error during MongoDB communication. ErrStorageRemoveInstance: Failed to remove the instance.
def remove_instance(self, instance):
    """Remove an instance from the MongoDB storage for caching.

    Args:
        instance (AtlasServiceInstance.Instance): Instance.

    Raises:
        ErrStorageMongoConnection: Error during MongoDB communication.
        ErrStorageRemoveInstance: Failed to remove the instance.
    """
    # Instance documents are those without a binding_id.
    query = {"instance_id": instance.instance_id,
             "binding_id": {"$exists": False}}

    # Narrowed from a bare `except:`; the original cause is chained.
    try:
        result = self.broker.delete_one(query)
    except Exception as err:
        raise ErrStorageMongoConnection("Remove Instance") from err

    if result is not None and result.deleted_count == 1:
        instance.provisioned = False
    else:
        raise ErrStorageRemoveInstance(instance.instance_id)
914,456
Remove a binding Remove an object from the MongoDB storage for caching Args: binding (AtlasServiceBinding.Binding): binding Raises: ErrStorageMongoConnection: Error during MongoDB communication. ErrStorageRemoveBinding: Failed to remove the binding
def remove_binding(self, binding):
    """Remove a binding from the MongoDB storage for caching.

    Args:
        binding (AtlasServiceBinding.Binding): Binding.

    Raises:
        ErrStorageMongoConnection: Error during MongoDB communication.
        ErrStorageRemoveBinding: Failed to remove the binding.
    """
    query = {"binding_id": binding.binding_id,
             "instance_id": binding.instance.instance_id}

    # Narrowed from a bare `except:`; the original cause is chained.
    try:
        result = self.broker.delete_one(query)
    except Exception as err:
        raise ErrStorageMongoConnection("Remove Binding") from err

    if result is not None and result.deleted_count == 1:
        binding.provisioned = False
    else:
        raise ErrStorageRemoveBinding(binding.binding_id)
914,457
Create a new JTAGScanChain to track and control a real chain. Args: controller: The CableDriver that this ScanChain will control. device_initializer: A callable that can map a (JTAGScanChain, Bitarray) to an instance of a JTAGDevice (Allows custom classes to be used). ignore_jtag_enabled: A boolean on if errors should be ignored when JTA is already enabled on the controller. debug: A boolean to enable extra debug printing.
def __init__(self, controller,
             device_initializer=lambda sc, idcode: JTAGDevice(sc, idcode),
             ignore_jtag_enabled=False, debug=False,
             collect_compiler_artifacts=False,
             collect_compiler_merge_artifacts=False,
             print_statistics=False):
    """Create a new JTAGScanChain to track and control a real chain.

    Args:
        controller: The CableDriver that this ScanChain will control.
        device_initializer: A callable mapping (JTAGScanChain, idcode
            bitarray) to a JTAGDevice instance (allows custom classes).
        ignore_jtag_enabled: If True, ignore errors when JTAG is
            already enabled on the controller.
        debug: Enable extra debug printing.
        collect_compiler_artifacts: Keep intermediate compiler output.
        collect_compiler_merge_artifacts: Keep compiler merge output.
        print_statistics: Print compiler statistics.
    """
    self._debug = debug
    self._collect_compiler_artifacts = collect_compiler_artifacts
    self._collect_compiler_merge_artifacts = collect_compiler_merge_artifacts
    self._print_statistics = print_statistics
    self._fitted_lv1_prim_cache = {}
    self._devices = []
    self._hasinit = False
    self._sm = JTAGStateMachine()
    self._ignore_jtag_enabled = ignore_jtag_enabled
    self._desired_speed = None
    self.initialize_device_from_id = device_initializer
    self.get_descriptor_for_idcode = \
        jtagDeviceDescription.get_descriptor_for_idcode

    if isinstance(controller, InaccessibleController):
        raise DevicePermissionDeniedError()
    self._controller = controller  # This might necessitate a factory
    self._controller._scanchain = self
    self._command_queue = CommandQueue(self)

    # Register the built-in primitives, split by whether they target a
    # specific device or the whole chain.
    default_prims = {RunInstruction, TransitionTAP, RWReg, RWDR, RWIR,
                     Sleep, RWDevDR, RWDevIR}
    self._chain_primitives = {}
    self._device_primitives = {}
    self._lv1_chain_primitives = []
    for prim in default_prims:
        assert issubclass(prim, Primitive)
        if issubclass(prim, DeviceTarget):
            self._device_primitives[prim._function_name] = prim
        else:
            self._chain_primitives[prim._function_name] = prim

    # Register the controller-provided primitives the same way, also
    # tracking level-1 primitives separately.
    for prim in self._controller._primitives:
        if not issubclass(prim, Primitive):
            raise Exception("Registered Controller Prim has "
                            "unknown type. (%s)" % prim)
        if issubclass(prim, DeviceTarget):
            self._device_primitives[prim._function_name] = prim
        else:
            self._chain_primitives[prim._function_name] = prim
        if issubclass(prim, Level1Primitive):
            self._lv1_chain_primitives.append(prim)

    for func_name, prim in self._chain_primitives.items():
        if not self._gen_prim_adder(prim):
            raise Exception("Failed adding primitive %s, "
                            "primitive with name %s "
                            "already exists on scanchain" %
                            (prim, prim._function_name))
914,522
Create a new CommandQueue to manage, compile, and run Primitives. Args: chain: A JTAGScanChain instance that this queue will be associated with.
def __init__(self, chain):
    """Create a new CommandQueue to manage, compile, and run Primitives.

    Args:
        chain: The JTAGScanChain instance this queue is associated with.
    """
    self._chain = chain
    self._fsm = JTAGStateMachine()
    self.queue = []
914,559
Initialize a configuration with a series of namespaces. Args: **namespaces: Each keyword should be a Namespace object which will be added to the configuration file. Raises: TypeError: If an entry is not a Namespace object. ValueError: If the namespace is already registered.
def __init__(self, **namespaces):
    """Initialize a configuration with a series of namespaces.

    Args:
        **namespaces: Each keyword should be a Namespace object which
            will be added to the configuration file.

    Raises:
        TypeError: If an entry is not a Namespace object.
        ValueError: If the namespace is already registered.
    """
    super(Configuration, self).__init__()
    for name, namespace in compat.iteritems(namespaces):
        self.register(name, namespace)
914,734
Register a new namespace with the Configuration object. Args: name (str): The name of the section/namespace. namespace (namespace.Namespace): The Namespace object to store. Raises: TypeError: If the namespace is not a Namespace object. ValueError: If the namespace is already registered.
def register(self, name, namespace):
    """Register a new namespace with the Configuration object.

    Args:
        name (str): The name of the section/namespace.
        namespace (namespace.Namespace): The Namespace object to store.

    Raises:
        TypeError: If the namespace is not a Namespace object.
        ValueError: If the namespace is already registered.
    """
    # Duplicate check runs first so re-registration always raises
    # ValueError, even for an invalid replacement object.
    if name in self._NAMESPACES:
        raise ValueError("Namespace {0} already exists.".format(name))
    if not isinstance(namespace, ns.Namespace):
        raise TypeError("Namespaces must be of type Namespace.")
    self._NAMESPACES[name] = namespace
914,735
Initialize the option with some basic metadata. Args: description (str, optional): A human readable description of what the option represents. default (optional): The default value to use if unset. required (bool, optional): Whether or not the value must be set.
def __init__(self, description=None, default=None, required=False):
    """Initialize the option with some basic metadata.

    Args:
        description (str, optional): A human readable description of
            what the option represents.
        default (optional): The default value to use if unset.
        required (bool, optional): Whether or not the value must be set.
    """
    self.__doc__ = description
    self._required = bool(required)
    self._default = default
    # The current value starts out as the default.
    self._value = default
914,878
Generate an example file based on the given Configuration object. Args: config (confpy.core.configuration.Configuration): The configuration object on which to base the example. ext (str): The file extension to render. Choices: JSON and INI. Returns: str: The text of the example file.
def generate_example(config, ext='json'):
    """Generate an example file based on the given Configuration object.

    Args:
        config (confpy.core.configuration.Configuration): The
            configuration object on which to base the example.
        ext (str): The file extension to render. Choices: JSON and INI.

    Returns:
        str: The text of the example file.
    """
    template_name = 'example.{0}'.format(ext.lower())
    return ENV.get_template(template_name).render(config=config)
914,913
Add newlines at strategic places in code string for printing. Args: s: str, piece of code. If not str, will attempt to convert to str. Returns: str
def make_code_readable(s):
    """Add newlines at strategic places in a code string for printing.

    Commas and braces outside of string literals are expanded with
    newlines; characters inside single- or double-quoted literals
    (including backslash-escaped characters) pass through unchanged.

    Args:
        s: str, piece of code. If not str, will attempt to convert to str.

    Returns:
        str
    """
    s = s if isinstance(s, str) else str(s)

    MAP = {",": ",\n", "{": "{\n    ", "}": "\n}"}

    ll = []
    flag_single = False      # inside a '...' literal
    flag_double = False      # inside a "..." literal
    flag_backslash = False   # previous char was an escaping backslash
    for ch in s:
        if flag_backslash:
            # BUGFIX: escaped characters used to be silently dropped
            # from the output; keep them, but do not let them toggle
            # the quote flags.
            flag_backslash = False
            ll.append(ch)
            continue
        if ch == "\\":
            flag_backslash = True
            ll.append(ch)
            continue

        # Track entering/leaving string literals.
        if flag_single:
            if ch == "'":
                flag_single = False
        elif not flag_double and ch == "'":
            flag_single = True
        if flag_double:
            if ch == '"':
                flag_double = False
        elif not flag_single and ch == '"':
            flag_double = True

        if flag_single or flag_double:
            ll.append(ch)
        else:
            ll.append(MAP.get(ch, ch))
    return "".join(ll)
915,277
For given args, return k_agg from searching some k_range. Parameters ---------- data : array k_range : array nll : function args : Returns -------- :float Minimum k_agg
def _solve_k_from_mu(data, k_array, nll, *args):
    """For the given args, return the k_agg from k_array minimizing nll.

    Parameters
    ----------
    data : array
    k_array : array
        Candidate k_agg values to evaluate.
    nll : function
        Negative log-likelihood function ``nll(data, k, *args)``.
    args :
        Extra arguments forwarded to ``nll``.

    Returns
    -------
    float
        Minimum k_agg.
    """
    # TODO: See if a root finder like fminbound would work with Decimal
    # used in logpmf method (will this work with arrays?)
    nll_values = np.array([nll(data, k, *args) for k in k_array])
    return k_array[np.argmin(nll_values)]
915,337
Dumps object contents into file on disk. Args: filename (optional): defaults to self.filename. If passed, self.filename will be updated to filename.
def save_as(self, filename=None):
    """Dump object contents into a file on disk.

    Args:
        filename (optional): Defaults to ``self.filename``, then to
            ``self.default_filename``. On success, ``self.filename``
            is updated to the name actually used.

    Raises:
        RuntimeError: If no filename could be determined.
    """
    target = filename if filename is not None else self.filename
    if target is None:
        target = self.default_filename
    if target is None:
        raise RuntimeError("Class '{}' has no default filename".format(
            self.__class__.__name__))
    self._do_save_as(target)
    self.filename = target
916,034
"Are you sure you want to exit" question dialog. If flag_changed, shows question dialog. If answer is not yes, calls evt.ignore() Arguments: flag_changed evt -- QCloseEvent instance parent=None -- parent form, used to centralize the question dialog at title -- title for question dialog msg -- text of question dialog Returns True or False. True means: "yes, I want to exit"
def are_you_sure(flag_changed, evt, parent=None, title="File has been changed",
                 msg="Are you sure you want to exit?"):
    """"Are you sure you want to exit" question dialog.

    If flag_changed, shows a question dialog; if the answer is not yes,
    calls ``evt.ignore()``.

    Arguments:
        flag_changed: whether there are unsaved changes.
        evt: QCloseEvent instance.
        parent: parent form, used to center the question dialog.
        title: title for the question dialog.
        msg: text of the question dialog.
    """
    if not flag_changed:
        return
    answer = QMessageBox.question(parent, title, msg,
                                  QMessageBox.Yes | QMessageBox.No,
                                  QMessageBox.Yes)
    if answer != QMessageBox.Yes:
        evt.ignore()
916,136
Shows parameters editor modal form. Arguments: obj: object to extract attribute values from, or a dict-like attrs: list of attribute names title: toolTips:
def show_edit_form(obj, attrs=None, title="", toolTips=None):
    """Show the parameters-editor modal form.

    Arguments:
        obj: Object to extract attribute values from, or a dict-like.
        attrs: List of attribute names; if None, taken from obj.keys().
        title: Window title for the editor form.
        toolTips: Optional list of tooltips, parallel to attrs.

    Returns:
        tuple: (dialog result code, XParametersEditor form instance).

    Raises:
        RuntimeError: If attrs is None and obj has no keys() method.
    """
    if attrs is None:
        if hasattr(obj, "keys"):
            attrs = list(obj.keys())
        else:
            raise RuntimeError("attrs is None and cannot determine it from obj")

    specs = []
    for i, name in enumerate(attrs):
        # Tries as attribute, then as key.
        # (Replaced obj.__getattribute__(name) with the idiomatic getattr.)
        try:
            value = getattr(obj, name)
        except AttributeError:
            value = obj[name]
        if value is None:
            value = ""  # None becomes str

        spec = {"value": value}
        if toolTips is not None:
            # Both capitalizations are populated, as some consumers use
            # "toolTip" and others "tooltip".
            spec["toolTip"] = toolTips[i]
            spec["tooltip"] = toolTips[i]
        specs.append((name, spec))

    form = XParametersEditor(specs=specs, title=title)
    r = form.exec_()
    return r, form
916,138
Places window in top left corner of screen. Arguments: window -- a QWidget width=None -- window width, in case you want to change it (if not passed, not changed) height=None -- window height, in case you want to change it (if not passed, not changed)
def place_left_top(window, width=None, height=None):
    """Place window in the top left corner of the screen.

    Arguments:
        window -- a QWidget
        width=None -- window width, in case you want to change it
            (if not passed, not changed)
        height=None -- window height, in case you want to change it
            (if not passed, not changed)
    """
    new_width = window.width() if width is None else width
    new_height = window.height() if height is None else height
    window.setGeometry(_DESKTOP_OFFSET_LEFT, _DESKTOP_OFFSET_TOP,
                       new_width, new_height)
916,139
Snaps window to left of desktop. Arguments: window -- a QWidget width=None -- window width, in case you want to change it (if not passed, not changed)
def snap_left(window, width=None):
    """Snap window to the left edge of the desktop.

    Arguments:
        window -- a QWidget
        width=None -- window width, in case you want to change it
            (if not passed, not changed)
    """
    effective_width = width or window.width()
    screen = QApplication.desktop().screenGeometry()
    window.setGeometry(_DESKTOP_OFFSET_LEFT, _DESKTOP_OFFSET_TOP,
                       effective_width, screen.height())
916,141
Snaps window to right of desktop. Arguments: window -- a QWidget width=None -- window width, in case you want to change it (if not passed, not changed)
def snap_right(window, width=None):
    """Snap window to the right edge of the desktop.

    Arguments:
        window -- a QWidget
        width=None -- window width, in case you want to change it
            (if not passed, not changed)
    """
    effective_width = width or window.width()
    screen = QApplication.desktop().screenGeometry()
    window.setGeometry(screen.width() - effective_width, _DESKTOP_OFFSET_TOP,
                       effective_width, screen.height())
916,142
Sets a checkbox's "checked" property + signal blocking + value tolerance Args: w: QCheckBox instance value: something that can be converted to a bool
def set_checkbox_value(w, value):
    """Set a checkbox's "checked" property with signal blocking.

    Args:
        w: QCheckBox instance.
        value: Something that can be converted to a bool.
    """
    previous = w.blockSignals(True)
    try:
        w.setChecked(bool(value))
    finally:
        # Restore whatever blocking state the widget had before.
        w.blockSignals(previous)
916,149
Cuts spectrum given a wavelength interval, leaving the original intact Args: sp: Spectrum instance l0: initial wavelength lf: final wavelength Returns: Spectrum: cut spectrum
def cut_spectrum(sp, l0, lf):
    """Cut spectrum to a wavelength interval, leaving the original intact.

    Args:
        sp: Spectrum instance.
        l0: Initial wavelength.
        lf: Final wavelength.

    Returns:
        Spectrum: Cut spectrum (a deep copy of ``sp``).

    Raises:
        ValueError: If ``l0 >= lf``.
    """
    if l0 >= lf:
        raise ValueError("l0 must be lower than lf")
    # Indexes of the x samples nearest to each interval endpoint.
    start = np.argmin(np.abs(sp.x - l0))
    stop = np.argmin(np.abs(sp.x - lf))
    result = copy.deepcopy(sp)
    result.x = result.x[start:stop]
    result.y = result.y[start:stop]
    return result
916,201
Copies attribute from module object to self. Raises if object not of expected class Args: module: module object varname: variable name cls: expected class of variable attrname: attribute name of self. Falls back to varname
def _copy_attr(self, module, varname, cls, attrname=None):
    """Copy an attribute from a module object to self.

    Raises if the object is not of the expected class.

    Args:
        module: Module object.
        varname: Variable name.
        cls: Expected class of the variable.
        attrname: Attribute name of self. Falls back to varname.
    """
    if not hasattr(module, varname):
        raise RuntimeError("Variable '{}' not found".format(varname))
    obj = getattr(module, varname)
    if not isinstance(obj, cls):
        raise RuntimeError(
            "Expecting fobj to be a {}, not a '{}'".format(
                cls.__name__, obj.__class__.__name__))
    target_name = attrname if attrname is not None else varname
    setattr(self, target_name, obj)
916,364
Finds index of nearest value in array. Args: array: numpy array value: Returns: int http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
def index_nearest(array, value):
    """Find the index of the nearest value in an array.

    Args:
        array: numpy array.
        value: Value to locate.

    Returns:
        int

    http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array
    """
    distances = np.abs(array - value)
    return distances.argmin()
916,378
Returns index of x in a, or -1 if x not in a. Arguments: a -- ordered numeric sequence x -- element to search within a lo -- lowest index to consider in search* hi -- highest index to consider in search* *bisect.bisect_left capability that we don't need to loose.
def BSearch(a, x, lo=0, hi=None):
    """Return the index of x in a, or -1 if x is not in a.

    Arguments:
        a -- ordered numeric sequence
        x -- element to search within a
        lo -- lowest index to consider in search
        hi -- highest index to consider in search
    """
    if len(a) == 0:
        return -1
    if hi is None:
        hi = len(a)
    pos = bisect_left(a, x, lo, hi)
    if pos != hi and a[pos] == x:
        return pos
    return -1
916,379
Returns index of a that is closest to x. Arguments: a -- ordered numeric sequence x -- element to search within a lo -- lowest index to consider in search* hi -- highest index to consider in search* *bisect.bisect_left capability that we don't need to loose.
def BSearchRound(a, x, lo=0, hi=None):
    """Return the index of the element of a that is closest to x.

    Arguments:
        a -- ordered numeric sequence
        x -- element to search within a
        lo -- lowest index to consider in search
        hi -- highest index to consider in search
    """
    if len(a) == 0:
        return -1
    if hi is None:
        hi = len(a)
    pos = bisect_left(a, x, lo, hi)
    if pos >= hi:
        return hi - 1
    if a[pos] == x or pos == lo:
        return pos
    # Pick whichever neighbor is nearer to x (ties go left).
    left_gap = x - a[pos - 1]
    right_gap = a[pos] - x
    return pos - 1 if left_gap <= right_gap else pos
916,380
Returns lowest i such as a[i] >= x, or -1 if x > all elements in a So, if x is in between two elements in a, this function will return the index of the higher element, hence "Ceil". Arguments: a -- ordered numeric sequence x -- element to search within a lo -- lowest index to consider in search hi -- highest index to consider in search
def BSearchCeil(a, x, lo=0, hi=None):
    """Return the lowest i such that a[i] >= x, or -1 if x > all elements.

    If x is between two elements of a, this returns the index of the
    higher element, hence "Ceil".

    Arguments:
        a -- ordered numeric sequence
        x -- element to search within a
        lo -- lowest index to consider in search
        hi -- highest index to consider in search
    """
    if len(a) == 0:
        return -1
    if hi is None:
        hi = len(a)
    pos = bisect_left(a, x, lo, hi)
    if pos < hi:
        return pos
    return -1
916,381
Returns highest i such as a[i] <= x, or -1 if x < all elements in a So, if x is in between two elements in a, this function will return the index of the lower element, hence "Floor". Arguments: a -- ordered numeric sequence x -- element to search within a lo -- lowest index to consider in search hi -- highest index to consider in search
def BSearchFloor(a, x, lo=0, hi=None):
    """Return the highest i such that a[i] <= x, or -1 if x < all elements.

    If x is between two elements of a, this returns the index of the
    lower element, hence "Floor".

    Arguments:
        a -- ordered numeric sequence
        x -- element to search within a
        lo -- lowest index to consider in search
        hi -- highest index to consider in search
    """
    if len(a) == 0:
        return -1
    if hi is None:
        hi = len(a)
    pos = bisect_left(a, x, lo, hi)
    if pos >= hi:
        return pos - 1
    if a[pos] == x:
        return pos
    if pos > lo:
        return pos - 1
    return -1
916,382
Finds row matching specific field value Args: **kwargs: (**only one argument accepted**) fieldname=value, e.g., formula="OH" Returns: list element or None
def find(self, **kwargs):
    """Find the first row matching a specific field value.

    Args:
        **kwargs: (**only one argument accepted**) fieldname=value,
            e.g., formula="OH".

    Returns:
        The matching row, or None if no row matches.

    Raises:
        ValueError: If not exactly one keyword argument is given.
    """
    if len(kwargs) != 1:
        raise ValueError("One and only one keyword argument accepted")
    (key, wanted), = kwargs.items()
    for row in self.values():
        if row[key] == wanted:
            return row
    return None
916,801
Initialization. Arguments: graph (Graph): The graph that owns this node. index (int): The unique index of the node in the graph. name (str): The name of the node. external_id (Optional[str]): The external ID of the node.
def __init__(self, graph: "Graph", index: int, name: str, external_id: Optional[str] = None):
    """Initialization.

    Arguments:
        graph (Graph): The graph that owns this node.
        index (int): The unique index of the node in the graph.
        name (str): The name of the node.
        external_id (Optional[str]): The external ID of the node.
    """
    EventDispatcher.__init__(self)

    self._are_neighbors_loaded: bool = False
    self._graph: "Graph" = graph
    self._index: int = index
    # BUGFIX: was `Dict[(int, int), Edge]` — annotations on attribute
    # targets ARE evaluated at runtime, and a tuple is not a valid
    # typing parameter; use a proper Tuple subscript instead.
    self._neighbors: Dict[Tuple[int, int], "Edge"] = {}

    self.are_neighbors_cached: bool = False
    self.name: str = name
    # External IDs are stored stripped of surrounding whitespace.
    self.external_id: Optional[str] = external_id.strip() if external_id is not None else None
916,978
Adds a new neighbor to the node. Arguments: edge (Edge): The edge that would connect this node with its neighbor.
def add_neighbor(self, edge: "Edge") -> None:
    """Add a new neighbor to the node.

    Arguments:
        edge (Edge): The edge that would connect this node with its neighbor.

    Raises:
        ValueError: If the edge does not have this node as an endpoint.
    """
    if edge is None or (edge.source != self and edge.target != self):
        return

    # The neighbor is whichever endpoint is not this node.
    if edge.source == self:
        other: "Node" = edge.target
    elif edge.target == self:
        other = edge.source
    else:
        raise ValueError("Tried to add a neighbor with an invalid edge.")

    # BUGFIX: was `Tuple(int, int)` (a call); the correct annotation
    # syntax is a subscript.
    edge_key: Tuple[int, int] = edge.key

    # The graph is considered undirected, check neighbor existence accordingly.
    if self._neighbors.get(edge_key) or self._neighbors.get((edge_key[1], edge_key[0])):
        return  # The neighbor is already added.

    self._neighbors[edge_key] = edge
    self.dispatch_event(NeighborAddedEvent(other))
916,980
Initialization. Arguments: source (Node): The source node of the edge. target (Node): The target node of the edge. weight (float): The weight of the edge.
def __init__(self, source: Node, target: Node, weight: float = 1):
    """Initialization.

    Arguments:
        source (Node): The source node of the edge.
        target (Node): The target node of the edge.
        weight (float): The weight of the edge; must be a positive number.

    Raises:
        TypeError: If either endpoint is not a Node, or the weight is
            not a positive number.
        ValueError: If source and target are the same node (loop edge).
    """
    if not isinstance(source, Node):
        raise TypeError("Invalid source node: {}".format(source))
    if not isinstance(target, Node):
        raise TypeError("Invalid target node: {}".format(target))
    if (not isinstance(weight, float) and not isinstance(weight, int)) or weight <= 0:
        raise TypeError("Invalid edge weight: {}".format(weight))
    if source.index == target.index:
        raise ValueError("Creating a loop edge is not allowed.")

    self._source: Node = source
    self._target: Node = target
    self._weight: float = weight

    # Register this edge on both endpoints.
    source.add_neighbor(self)
    target.add_neighbor(self)
916,983
Initialization. Arguments: graph (Graph): The graph the node list belongs to.
def __init__(self, graph: "Graph"): self._graph: Graph = graph self._nodes: Dict[int, Node] = {} self._node_name_map: Dict[str, Node] = {}
916,985
Returns the node corresponding to the given key. If the given key is an integer, then the node with the given index will be returned. If the given key is a string, then the node with the given name will be returned. Arguments: key (Union[int, str]): The key that identifies the node to return. Raises: IndexError: If the index is invalid or out of range.
def __getitem__(self, key: Union[int, str]) -> "Node":
    """Return the node corresponding to the given key.

    An integer key selects the node with that index; a string key
    selects the node with that name.

    Arguments:
        key (Union[int, str]): The key that identifies the node to return.

    Raises:
        IndexError: If the key is invalid or out of range.
    """
    if isinstance(key, int):
        node = self._nodes.get(key)
    elif isinstance(key, str):
        node = self._node_name_map.get(key)
    else:
        node = None
    if node is None:
        raise IndexError("Invalid key.")
    return node
916,986
Adds a new node to the graph if it doesn't exist. Arguments: node_name (str): The name of the node to add. external_id (Optional[str]): The external ID of the node.
def add_node_by_name(self, node_name: str, external_id: Optional[str] = None) -> None:
    """Add a new node to the graph if it doesn't exist.

    Arguments:
        node_name (str): The name of the node to add.
        external_id (Optional[str]): The external ID of the node.
    """
    if node_name is None:
        return
    node_name = node_name.strip()
    if not node_name:
        return

    # Only create the node if it is not already known.
    if self.get_node_by_name(node_name, external_id=external_id) is None:
        self._internal_add_node(node_name=node_name,
                                external_id=external_id,
                                are_neighbors_cached=False,
                                add_to_cache=True)
916,987
Returns the node with the given index if such a node currently exists in the node list. Arguments: index (int): The index of the queried node. Returns: The node with the given index if such a node currently exists in the node list, `None` otherwise.
def get_node(self, index: int) -> Optional["Node"]:
    """Return the node with the given index if it exists in the node list.

    Arguments:
        index (int): The index of the queried node.

    Returns:
        The node with the given index, or `None` if no such node exists.
    """
    try:
        return self._nodes[index]
    except KeyError:
        return None
916,988
Adds a node with the given name to the graph without checking whether it already exists or not. Arguments: node_name (str): The name of the node to add. external_id (Optional[str]): The external ID of the node. are_neighbors_cached (bool): Whether the neighbors of the node have already been cached. add_to_cache (bool): Whether the node should also be created in the local cache.
def _internal_add_node(self, node_name: str, external_id: Optional[str] = None,
                       are_neighbors_cached: bool = False,
                       add_to_cache: bool = False) -> None:
    """Add a node with the given name without checking for duplicates.

    Arguments:
        node_name (str): The name of the node to add.
        external_id (Optional[str]): The external ID of the node.
        are_neighbors_cached (bool): Whether the neighbors of the node
            have already been cached.
        add_to_cache (bool): Whether the node should also be created in
            the local cache.
    """
    index: int = len(self)
    node: "Node" = self._create_node(index, node_name, external_id)
    node.are_neighbors_cached = are_neighbors_cached
    self._nodes[index] = node
    self._node_name_map[node_name] = node

    if not add_to_cache:
        return

    # Mirror the node into the local cache database if it is missing.
    db: "GraphDatabaseInterface" = self._graph.database
    db_node: "DBNode" = db.Node.find_by_name(node.name)
    if db_node is None:
        db_node = db.Node(node.name, node.external_id)
        db_node.are_neighbors_cached = False
        db.session.add(db_node)
        db.session.commit()
916,990
Initialization. Arguments: graph (Graph): The graph the edge list belongs to.
def __init__(self, graph: "Graph"): self._graph: "Graph" = graph self._edges: Dict[(int, int), Edge] = {}
916,991
Returns the edge corresponding to the given key. If the given key is a tuple of nodes or node indexes, then the edge connecting the two nodes will be returned if such an edge exists. If the given key is a tuple of node names, then the edge connecting the corresponding nodes will be returned if such an edge exists. Arguments: key (Union[Tuple[int, int], Tuple[str, str], Tuple[Node, Node]]): The key identifying the edge to return.
def __getitem__(self, key: Union[Tuple[int, int], Tuple[str, str], Tuple[Node, Node]]) -> Optional[Edge]:
    """Return the edge corresponding to the given key.

    A pair of nodes or node indexes selects the edge connecting the two
    nodes; a pair of node names selects the edge connecting the
    correspondingly named nodes.

    Arguments:
        key: The key identifying the edge to return.

    Raises:
        ValueError: If the key is not a homogeneous pair of nodes,
            ints, or strings.
    """
    first, second = key[0], key[1]
    if isinstance(first, Node) and isinstance(second, Node):
        return self.get_edge(first, second)
    if isinstance(first, int) and isinstance(second, int):
        return self.get_edge_by_index(first, second)
    if isinstance(first, str) and isinstance(second, str):
        return self.get_edge_by_name(first, second)
    raise ValueError("Invalid edge key: {}".format(key))
916,992
Adds an edge to the edge list that will connect the specified nodes. Arguments: source (Node): The source node of the edge. target (Node): The target node of the edge. weight (float): The weight of the created edge. save_to_cache (bool): Whether the edge should be saved to the local database.
def add_edge(self, source: Node, target: Node, weight: float = 1, save_to_cache: bool = True) -> None:
    """Add an edge connecting the specified nodes, optionally persisting it.

    Self-loops and already-present edges (in either direction) are silently ignored.

    Arguments:
        source (Node): The source node of the edge.
        target (Node): The target node of the edge.
        weight (float): The weight of the created edge.
        save_to_cache (bool): Whether the edge should be saved to the local database.

    Raises:
        TypeError: If source or target is not a Node instance.
    """
    if not isinstance(source, Node):
        raise TypeError("Invalid source: expected Node instance, got {}.".format(source))
    if not isinstance(target, Node):
        raise TypeError("Invalid target: expected Node instance, got {}.".format(target))
    # get_edge_by_index checks both directions, so duplicates are rejected
    # regardless of argument order.
    if source.index == target.index or\
       self.get_edge_by_index(source.index, target.index) is not None:
        return
    self._edges[(source.index, target.index)] = Edge(source, target, weight)
    if save_to_cache:
        should_commit: bool = False
        database: GraphDatabaseInterface = self._graph.database
        db_edge: DBEdge = database.Edge.find_by_name(source.name, target.name)
        if db_edge is None:
            database.session.add(database.Edge(source.name, target.name, weight))
            should_commit = True
        elif db_edge.weight != weight:
            # Keep the cached weight in sync with the in-memory edge.
            db_edge.weight = weight
            should_commit = True
        if should_commit:
            database.session.commit()
916,994
Returns the edge connecting the given nodes if such an edge exists. Arguments: source (Node): One of the endpoints of the queried edge. target (Node): The other endpoint of the queried edge. Returns: Returns the edge connecting the given nodes or `None` if no such edge exists.
def get_edge(self, source: Node, target: Node) -> Optional[Edge]:
    """Return the edge connecting the given nodes, or `None` if no such edge exists.

    Arguments:
        source (Node): One endpoint of the queried edge.
        target (Node): The other endpoint of the queried edge.
    """
    # Delegate to the index-based lookup, which is direction-agnostic.
    return self.get_edge_by_index(source.index, target.index)
916,995
Returns the edge connecting the nodes with the specified indices if such an edge exists. Arguments: source_index (int): The index of one of the endpoints of queried edge. target_index (int): The index of the other endpoint of the queried edge. Returns: The edge connecting the nodes with the specified indices or `None` if no such node exists.
def get_edge_by_index(self, source_index: int, target_index: int) -> Optional[Edge]:
    """Return the edge between the nodes with the given indices, or `None`.

    The lookup is direction-agnostic: both (source, target) and
    (target, source) key orders are tried.

    Arguments:
        source_index (int): Index of one endpoint.
        target_index (int): Index of the other endpoint.
    """
    forward = self._edges.get((source_index, target_index))
    if forward is not None:
        return forward
    return self._edges.get((target_index, source_index))
916,996
Returns the edge connecting the nodes with the specified names if such an edge exists. Arguments: source_name (str): The name of one of the endpoints of queried edge. target_name (str): The name of the other endpoint of the queried edge. Returns: The edge connecting the nodes with the specified names or `None` if no such node exists.
def get_edge_by_name(self, source_name: str, target_name: str) -> Optional[Edge]:
    """Return the edge between the nodes with the given names, or `None`.

    Returns `None` as soon as either name fails to resolve to a node.

    Arguments:
        source_name (str): Name of one endpoint.
        target_name (str): Name of the other endpoint.
    """
    node_list: NodeList = self._graph.nodes
    endpoints = []
    for name in (source_name, target_name):
        node = node_list.get_node_by_name(name)
        if node is None:
            return None
        endpoints.append(node)
    return self.get_edge_by_index(endpoints[0].index, endpoints[1].index)
916,997
Initialization. Arguments: database (GraphDatabaseInterface): The database interface the graph is using.
def __init__(self, database: GraphDatabaseInterface):
    """Initialize the graph with empty node and edge lists.

    Arguments:
        database (GraphDatabaseInterface): The database interface the graph is using.
    """
    # NOTE(review): "_create_edge_ist" looks like a typo for "_create_edge_list",
    # but it must match the factory method's actual name elsewhere in the
    # project — verify before renaming either side.
    self._edges: EdgeList = self._create_edge_ist()
    self._nodes: NodeList = self._create_node_list()
    self.database: GraphDatabaseInterface = database
916,998
Adds the node with the given name to the graph. Arguments: node_name (str): The name of the node to add to the graph. external_id (Optional[str]): The external ID of the node.
def add_node(self, node_name: str, external_id: Optional[str] = None) -> None:
    """Add a node with the given name (and optional external ID) to the graph.

    Arguments:
        node_name (str): The name of the node to add to the graph.
        external_id (Optional[str]): The external ID of the node.
    """
    # The node list handles index assignment and caching.
    self._nodes.add_node_by_name(node_name, external_id)
917,001
Initializations. Arguments: neighbor (Node): The neighbor that was added to the node.
def __init__(self, neighbor: Node):
    """Create a NEIGHBOR_ADDED event carrying the newly added neighbor.

    Arguments:
        neighbor (Node): The neighbor that was added to the node.
    """
    super(NeighborAddedEvent, self).__init__(NeighborAddedEvent.NEIGHBOR_ADDED)
    self._neighbor: Node = neighbor
917,003
Returns a random person name Arguments: num_surnames -- number of surnames
def random_name(num_surnames=2):
    """Return a random person name assembled from the module-level name tables.

    A prefix and a suffix are each attached with their configured probability;
    one forename and `num_surnames` surnames are always included.

    Arguments:
        num_surnames -- number of surnames to append
    """
    parts = []
    # Optional prefix (probability _PROB_PREF).
    if random.random() < _PROB_PREF:
        parts.append(random.choice(_prefixes))
    # Forename — random.choice is the idiomatic (and equivalent) form of
    # seq[random.randint(0, len(seq) - 1)].
    parts.append(random.choice(_forenames))
    # Surnames
    for _ in range(num_surnames):
        parts.append(random.choice(_surnames))
    # Optional suffix (probability _PROB_SUFF).
    if random.random() < _PROB_SUFF:
        parts.append(random.choice(_suffixes))
    return " ".join(parts)
917,186
Traces a dashed line below string Args: s: string char: character used to draw the underline indents: number of leading indenting spaces Returns: list >>> print("\\n".join(format_underline("Life of João da Silva", "^", 2)))   Life of João da Silva   ^^^^^^^^^^^^^^^^^^^^^
def format_underline(s, char="=", indents=0):
    """Return the string plus an underline of `char`, both indented.

    Args:
        s: string to underline
        char: character used to draw the underline
        indents: number of leading indenting spaces

    Returns:
        list: [indented string, indented underline]
    """
    pad = " " * indents
    return [pad + s, pad + char * len(s)]
917,418
Encloses string in format text Args: s: string format: string starting with "text", "markdown", or "rest" indents: number of leading indenting spaces Returns: list >>> print("\\n".join(format_h1("Header 1", indents=10))) Header 1 ======== >>> print("\\n".join(format_h1("Header 1", "markdown", 0))) # Header 1
def format_h1(s, format="text", indents=0):
    """Format a top-level heading in the requested output format.

    Args:
        s: heading text
        format: string starting with "text", "markdown", or "rest"
        indents: number of leading indenting spaces (not used for markdown/rest)

    Returns:
        list of strings (or None for an unrecognized format)
    """
    _CHAR = "="
    if format.startswith("markdown"):
        return ["# {}".format(s)]
    if format.startswith("rest"):
        # reST headings are never indented.
        return format_underline(s, _CHAR, 0)
    if format.startswith("text"):
        return format_underline(s, _CHAR, indents)
917,419
Asks a yes/no question Args: question: string **without** the question mark and without the options. Example: 'Create links' default: default option. Accepted values are 'Y', 'YES', 'N', 'NO' or lowercase versions of these valus (this argument is case-insensitive) Returns: bool: True if user answered Yes, False otherwise
def yesno(question, default=None):
    """Ask a yes/no question on stdin and return the answer as a bool.

    Args:
        question: string **without** the question mark and without the
            options. Example: 'Create links'
        default: default option. Accepted values are 'Y', 'YES', 'N', 'NO'
            (case-insensitive) or a bool; None means "no default" and the
            user must answer explicitly.

    Returns:
        bool: True if user answered Yes, False otherwise
    """
    if default is not None:
        if isinstance(default, bool):
            pass
        else:
            # Normalize a string default into a bool.
            default_ = default.upper()
            if default_ not in ('Y', 'YES', 'N', 'NO'):
                raise RuntimeError("Invalid default value: '{}'".format(default))
            default = default_ in ('Y', 'YES')
    while True:
        # The default option (if any) is shown capitalized in the prompt.
        ans = input("{} ({}/{})? ".format(question,
                                         "Y" if default == True else "y",
                                         "N" if default == False else "n")).upper()
        if ans == "" and default is not None:
            # Empty answer accepts the default.
            ret = default
            break
        elif ans in ("N", "NO"):
            ret = False
            break
        elif ans in ("Y", "YES"):
            ret = True
            break
        # Any other answer: loop and ask again.
    return ret
917,424
Text menu. Arguments: title -- menu title, to appear at the top options -- sequence of strings cancel_label='Cancel' -- label to show at last "zero" option flag_allow_empty=0 -- Whether to allow empty option flag_cancel=True -- whether there is a "0 - Cancel" option ch="." -- character to use to draw frame around title Returns: option -- an integer: None; 0-Back/Cancel/etc; 1, 2, ... Adapted from irootlab menu.m
def menu(title, options, cancel_label="Cancel", flag_allow_empty=False, flag_cancel=True, ch='.'):
    """Display a text menu and return the chosen option number.

    Arguments:
      title -- menu title, to appear at the top
      options -- sequence of strings
      cancel_label='Cancel' -- label to show at last "zero" option
      flag_allow_empty=False -- whether to allow an empty answer
      flag_cancel=True -- whether there is a "0 - Cancel" option
      ch="." -- character to use to draw frame around title

    Returns:
      option -- an integer: None; 0-Back/Cancel/etc; 1, 2, ...
    """
    num_options, flag_ok = len(options), 0
    option = None  # result
    min_allowed = 0 if flag_cancel else 1  # minimum option value allowed (if option not empty)
    while True:
        print("")
        for line in format_box(title, ch):
            print(" "+line)
        for i, s in enumerate(options):
            print((" {0:d} - {1!s}".format(i+1, s)))
        if flag_cancel:
            print((" 0 - << (*{0!s}*)".format(cancel_label)))
        try:
            s_option = input('? ')
        except KeyboardInterrupt:
            raise
        except:
            # NOTE(review): bare except silently swallows any other input error
            # and falls through with s_option possibly unset — verify intent.
            print("")
        # Inner loop: validate the answer, re-prompting up to 10 times.
        n_try = 0
        while True:
            if n_try >= 10:
                print('You are messing up!')
                break
            if len(s_option) == 0 and flag_allow_empty:
                flag_ok = True
                break
            try:
                option = int(s_option)
                if min_allowed <= option <= num_options:
                    flag_ok = True
                    break
            except ValueError:
                print("Invalid integer value!")
            # Reached on non-integer or out-of-range input.
            print(("Invalid option, range is [{0:d}, {1:d}]!".format(0 if flag_cancel else 1, num_options)))
            n_try += 1
            s_option = input("? ")
        if flag_ok:
            break
    return option
917,425
Creates MarkDown table. Returns list of strings Arguments: data -- [(cell00, cell01, ...), (cell10, cell11, ...), ...] headers -- sequence of strings: (header0, header1, ...)
def markdown_table(data, headers):
    """Create a MarkDown table. Returns a list of strings (one per line).

    Arguments:
      data -- [(cell00, cell01, ...), (cell10, cell11, ...), ...]
      headers -- sequence of strings: (header0, header1, ...)
    """
    # Column width: widest cell in each column, then at least the header width.
    maxx = [max([len(x) for x in column]) for column in zip(*data)]
    maxx = [max(ll) for ll in zip(maxx, [len(x) for x in headers])]
    mask = " | ".join(["%-{0:d}s".format(n) for n in maxx])

    # tuple(...) is required: printf-style "%" formatting treats a list as a
    # single value, so list arguments (allowed by the docstring) would raise
    # TypeError without it.
    ret = [mask % tuple(headers)]
    ret.append(" | ".join(["-"*n for n in maxx]))
    for line in data:
        ret.append(mask % tuple(line))
    return ret
917,430
Creates reStructuredText table (grid format), allowing for multiline cells Arguments: data -- [((cell000, cell001, ...), (cell010, cell011, ...), ...), ...] headers -- sequence of strings: (header0, header1, ...) **Note** Tolerant to non-strings **Note** Cells may or may not be multiline >>> rest_table([["Eric", "Idle"], ["Graham", "Chapman"], ["Terry", "Gilliam"]], ["Name", "Surname"])
def rest_table(data, headers):
    """Create a reStructuredText table (grid format), allowing for multiline cells.

    Arguments:
      data -- [((cell000, cell001, ...), (cell010, cell011, ...), ...), ...]
      headers -- sequence of strings: (header0, header1, ...)

    **Note** Tolerant to non-strings (cells are str()-converted)

    **Note** Cells may or may not be multiline
    """
    num_cols = len(headers)
    # expand_multirow_data() flattens multiline cells into extra physical rows
    # and reports how many lines each logical row occupies.
    new_data, row_heights = expand_multirow_data(data)
    new_data = [[str(x) for x in row] for row in new_data]
    col_widths = [max([len(x) for x in col]) for col in zip(*new_data)]
    # Each column must be at least as wide as its header.
    col_widths = [max(cw, len(s)) for cw, s in zip(col_widths, headers)]
    if any([x == 0 for x in col_widths]):
        raise RuntimeError("Column widths ({}) has at least one zero".format(col_widths))
    num_lines = sum(row_heights)  # line != row (rows are multiline)
    # horizontal lines: "-" between body rows, "=" under the header
    hl0 = "+"+"+".join(["-"*(n+2) for n in col_widths])+"+"
    hl1 = "+"+"+".join(["="*(n+2) for n in col_widths])+"+"
    frmtd = ["{0:{1}}".format(x, width) for x, width in zip(headers, col_widths)]
    ret = [hl0, "| "+" | ".join(frmtd)+" |", hl1]
    i0 = 0
    # Emit each logical row as row_height physical lines, separated by hl0.
    for i, row_height in enumerate(row_heights):
        if i > 0:
            ret.append(hl0)
        for incr in range(row_height):
            frmtd = ["{0:{1}}".format(x, width) for x, width in zip(new_data[i0+incr], col_widths)]
            ret.append("| "+" | ".join(frmtd)+" |")
        i0 += row_height
    ret.append(hl0)
    return ret
917,432
Deactivate the node identified by node_id. Deactivates the node corresponding to node_id, which means that it can never be the output of a nearest_point query. Note: The node is not removed from the tree, its data is steel available. Args: node_id (int): The node identifier (given to the user after its insertion).
def deactivate(self, node_id):
    """Deactivate the node identified by node_id.

    A deactivated node can never be the output of a nearest_point query.
    The node is not removed from the tree; its data stays available.

    Args:
        node_id (int): The node identifier (returned when it was inserted).
    """
    # Nodes are immutable records, so replace the entry with an inactive copy.
    self.node_list[node_id] = self.node_list[node_id]._replace(active=False)
917,464
Insert a new node in the tree. Args: point (:obj:`tuple` of float or int): Stores the position of the node. data (:obj, optional): The information stored by the node. Returns: int: The identifier of the new node. Example: >>> tree = Tree(4, 800) >>> point = (3, 7) >>> data = {'name': Fresnel, 'label': blue, 'speed': 98.2} >>> node_id = tree.insert(point, data)
def insert(self, point, data=None):
    """Insert a new node in the kd-tree and return its identifier.

    Args:
        point (:obj:`tuple` of float or int): Position of the node;
            its length must equal the tree dimensionality ``self.k``.
        data (:obj, optional): The information stored by the node.

    Returns:
        int: The identifier of the new node.
    """
    assert len(point) == self.k

    if self.size == 0:
        # First node: it owns the whole (possibly unbounded) region.
        # NOTE(review): the k rows created here alias the same inner list;
        # descendants copy a row before mutating it below, but verify that
        # new_node() never mutates region rows in place.
        if self.region is None:
            self.region = [[-math.inf, math.inf]] * self.k
        axis = 0
        return self.new_node(point, self.region, axis, data)

    # Iteratively descends to one leaf
    current_id = 0
    while True:
        parent_node = self.node_list[current_id]
        axis = parent_node.axis
        # Smaller coordinate on the split axis goes left, otherwise right.
        if point[axis] < parent_node.point[axis]:
            next_id, left = parent_node.left, True
        else:
            next_id, left = parent_node.right, False

        if next_id is None:
            break

        current_id = next_id

    # Get the region delimited by the parent node (shallow copy plus a
    # copy of the row that will be narrowed).
    region = parent_node.region[:]
    region[axis] = parent_node.region[axis][:]

    # Limit to the child's region
    limit = parent_node.point[axis]

    # Update reference to the new node and narrow the child's region on
    # the split axis.
    if left:
        self.node_list[current_id] = parent_node._replace(left=self.size)
        region[axis][1] = limit
    else:
        self.node_list[current_id] = parent_node._replace(right=self.size)
        region[axis][0] = limit

    # The child cycles to the next split axis.
    return self.new_node(point, region, (axis + 1) % self.k, data)
917,465
Initialization. Arguments: graph (IGraph): The igraph `Graph` object to wrap.
def __init__(self, graph: IGraph):
    """Wrap an existing igraph graph, backed by an in-memory database interface.

    Arguments:
        graph (IGraph): The igraph `Graph` object to wrap.

    Raises:
        ValueError: If `graph` is not an IGraph instance.
    """
    super(IGraphWrapper, self).__init__(self._create_memory_database_interface())
    # NOTE(review): the type check runs after the base initializer has already
    # executed — consider whether validation should happen first.
    if not isinstance(graph, IGraph):
        raise ValueError("Invalid graph instance provided to IGraphWrapper")
    self._wrapped_graph: IGraph = graph
917,557
Initialization. Arguments: graph (IGraphWrapper): The graph that owns this node. index (int): The unique index of the node in the graph. name (str): The name of the node. external_id (Optional[str]): The external ID of the node.
def __init__(self, graph: IGraphWrapper, index: int, name: str, external_id: Optional[str] = None):
    """Initialize the node and resolve its vertex in the wrapped igraph graph.

    Arguments:
        graph (IGraphWrapper): The graph that owns this node.
        index (int): The unique index of the node in the graph.
        name (str): The name of the node.
        external_id (Optional[str]): The external ID of the node.

    Raises:
        ValueError: If the wrapped igraph graph has no matching vertex.
    """
    super(IGraphNode, self).__init__(graph, index, name, external_id)

    vertex: IGraphVertex = None
    try:
        # Look the vertex up by name first.
        vertex = graph.wrapped_graph.vs.find(name)
    except ValueError:
        # Fallback: treat the name as a numeric vertex index.
        # NOTE(review): int(name) may raise ValueError/IndexError here rather
        # than producing None, so the check below may be unreachable — verify.
        vertex = graph.wrapped_graph.vs[int(name)]

    if vertex is None:
        raise ValueError("The wrapped igraph graph doesn't have a vertex with the given name.")

    self._igraph_index: int = vertex.index
917,560
Returns a new `IGraphNode` instance with the given index and name. Arguments: index (int): The index of the node to create. name (str): The name of the node to create. external_id (Optional[str]): The external ID of the node.
def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> IGraphNode:
    """Return a new `IGraphNode` with the given index, name and external ID.

    Arguments:
        index (int): The index of the node to create.
        name (str): The name of the node to create.
        external_id (Optional[str]): The external ID of the node.
    """
    node = IGraphNode(graph=self._graph, index=index, name=name, external_id=external_id)
    return node
917,562
Resize an image to the 'best fit' width & height, maintaining the scale of the image, so a 500x500 image sized to 300x400 will actually be scaled to 300x300. Params: image: ImageFieldFile to be resized (i.e. model.image_field) new_width & new_height: desired maximums for resizing Returns: the url to the new image and the new width & height (http://path-to-new-image, 300, 300)
def resize(self, image_field, new_width=None, new_height=None):
    """Resize an image to a 'best fit' width & height, maintaining scale.

    A 500x500 image sized to 300x400 will actually be scaled to 300x300.
    Images are never scaled up.

    Params:
        image_field: ImageFieldFile to be resized (i.e. model.image_field)
        new_width & new_height: desired maximums for resizing

    Returns:
        tuple: (url to the new image, new width, new height),
        e.g. (http://path-to-new-image, 300, 300). Width/height are 0 when
        the source file could not be opened.
    """
    if isinstance(image_field, ImageFieldFile) and \
            image_field.field.width_field and \
            image_field.field.height_field:
        # use model fields (avoids opening the file)
        current_width = getattr(image_field.instance, image_field.field.width_field)
        current_height = getattr(image_field.instance, image_field.field.height_field)
    else:
        # use PIL to read the actual dimensions
        try:
            file_obj = storage.default_storage.open(image_field.name, 'rb')
            img_obj = Image.open(file_obj)
            current_width, current_height = img_obj.size
        except IOError:
            # File missing/unreadable: report the original URL with zero size.
            return (image_field.url, 0, 0)
    # determine if resizing needs to be done (will not scale up)
    if current_width < new_width:
        if not new_height or current_height < new_height:
            return (image_field.url, current_width, current_height)
    # calculate ratios
    new_width, new_height = scale(current_width, current_height, new_width, new_height)
    # use the image_processor defined in the settings, or PIL by default
    return self._meta.image_processor.resize(image_field, new_width, new_height)
917,632
Generate directory listing HTML Arguments: FS (FS): filesystem object to read files from filepath (str): path to generate directory listings for Keyword Arguments: list_dir (callable: list[str]): list file names in a directory isdir (callable: bool): os.path.isdir Yields: str: lines of an HTML table
def generate_dirlist_html(FS, filepath):
    """Generate directory listing HTML.

    Arguments:
        FS (FS): filesystem object to read files from
        filepath (str): path to generate directory listings for

    Keyword Arguments:
        list_dir (callable: list[str]): list file names in a directory
        isdir (callable: bool): os.path.isdir

    Yields:
        str: lines of an HTML table
    """
    # Local import: the cgi module (and cgi.escape) was removed in Python 3.13;
    # html.escape(..., quote=False) escapes &, <, > exactly like cgi.escape did.
    from html import escape

    yield '<table class="dirlist">'
    if filepath == '/':
        filepath = ''
    for name in FS.listdir(filepath):
        full_path = pathjoin(filepath, name)
        if FS.isdir(full_path):
            full_path = full_path + '/'
        yield u'<tr><td><a href="{0}">{0}</a></td></tr>'.format(
            escape(full_path))  # TODO XXX: href is not URL-quoted
    yield '</table>'
917,771
Dispatches the given event. It is the duty of this method to set the target of the dispatched event by calling `event.set_target(self)`. Args: event (Event): The event to dispatch. Must not be `None`. Raises: TypeError: If the event is `None` or its type is incorrect.
def dispatch_event(self, event: "Event") -> None:
    """Dispatch the given event to the listeners registered for its type.

    Sets the target of the dispatched event (via `event.set_target(self)`)
    unless it already has one — it may be a redispatched event.

    Args:
        event (Event): The event to dispatch. Must not be `None`.
    """
    # Set the target of the event if it doesn't have one already. It could happen that
    # we are simply redispatching an event.
    if event.target is None:
        event.set_target(self)

    # No listeners registered for this event type: nothing to do.
    listeners: dict[types.MethodType, bool] = self._registered_listeners.get(event.type)
    if listeners is None:
        return

    for listener in listeners:
        listener(event)
918,031
Constructor. Args: event_type (str): The type - string identifier - of the event. Must not be `None` or empty string.
def __init__(self, event_type: str):
    """Create an event of the given type.

    Args:
        event_type (str): The type - string identifier - of the event.
            Must not be `None` or empty string.

    Raises:
        TypeError: If `event_type` is not a non-empty string.
    """
    if not isinstance(event_type, str) or event_type == "":
        raise TypeError("Invalid event type: {}".format(event_type))

    self._event_type: str = event_type
    # The dispatcher is assigned later via set_target(); None until dispatched.
    self._target: EventDispatcherBase = None
918,033
This method should be called by the event dispatcher that dispatches this event to set its target property. Args: target (EventDispatcherBase): The event dispatcher that will dispatch this event. Raises: PermissionError: If the target property of the event has already been set. TypeError: If `target` is not an `EventDispatcherBase` instance.
def set_target(self, target: EventDispatcherBase) -> None:
    """Set the dispatcher that dispatches this event (write-once).

    Should be called by the event dispatcher that dispatches this event.

    Args:
        target (EventDispatcherBase): The event dispatcher that will dispatch this event.

    Raises:
        PermissionError: If the target property of the event has already been set.
        TypeError: If `target` is not an `EventDispatcherBase` instance.
    """
    # Order matters: an already-set target wins over a type error.
    if self._target is not None:
        raise PermissionError("The target property already has a valid value.")

    if not isinstance(target, EventDispatcherBase):
        raise TypeError("Invalid target type: {}".format(target))

    self._target = target
918,034
Extract some meta-data from files (actually mostly from their paths) and stores it in a DB. Arguments: :param file_path: File path. :param file_type: File type. :param is_copy: Indicate if this file is a copy. :param step_id: Step ID. :param db_conn: Database connection. :return:
def others2db(file_path, file_type, is_copy, step_id, db_conn):
    """Record file metadata (mostly path-derived) in the DataFile table.

    Creates the row if missing; otherwise updates only the fields for which a
    new, different, non-empty value was supplied, committing after each change.

    Arguments:
        :param file_path: File path.
        :param file_type: File type.
        :param is_copy: Indicate if this file is a copy.
        :param step_id: Step ID.
        :param db_conn: Database connection.
        :return:
    """
    logging.info("Processing '%s'" % file_path)

    df = db_conn.db_session.query(db_conn.DataFile).filter_by(path=file_path).one_or_none()

    if not df:
        df = db_conn.DataFile(
            path=file_path,
            type=file_type,
            is_copy=is_copy,
            processing_step_id=step_id
        )
        db_conn.db_session.merge(df)
        db_conn.db_session.commit()
    else:
        # "x not in [None, current]" means: update only when x was provided
        # and differs from the stored value.
        if file_type not in [None, '', df.type]:
            df.type = file_type
            db_conn.db_session.commit()
        if is_copy not in [None, df.is_copy]:
            df.is_copy = is_copy
            db_conn.db_session.commit()
        if step_id not in [None, df.processing_step_id]:
            df.processing_step_id = step_id
            db_conn.db_session.commit()
918,401
Create a WSGI application out of the given Minion app. Arguments: application (Application): a minion app request_class (callable): a class to use for constructing incoming requests out of the WSGI environment. It will be passed a single arg, the environ. By default, this is :class:`minion.request.WSGIRequest` if unprovided.
def create_app(application, request_class=Request):
    """Create a WSGI application out of the given Minion app.

    Arguments:
        application (Application): a minion app
        request_class (callable): a class to use for constructing incoming
            requests out of the WSGI environment. It will be passed a single
            arg, the environ. By default, this is the module's Request class.

    Returns:
        callable: a WSGI-compliant application callable.
    """
    def wsgi(environ, start_response):
        response = application.serve(
            request=request_class(environ),
            path=environ.get("PATH_INFO", ""),
        )
        start_response(
            response.status, [
                # WSGI expects one (name, value) pair per header; multiple
                # values are joined with commas.
                (name, b",".join(values))
                for name, values in response.headers.canonicalized()
            ],
        )
        return [response.content]
    return wsgi
918,517
Superficially cleans data, i.e. changing simple things about formatting. Parameters: df - DataFrame DataFrame to clean error_rate - float {0 <= error_rate <= 1}, default 0 Maximum amount of errors/inconsistencies caused explicitly by cleaning, expressed as a percentage of total dataframe rows (0 = 0%, .5 = 50%, etc.) Ex: na values from coercing a column of data to numeric
def clean(df,error_rate = 0): df = df.copy() # Change colnames basics.clean_colnames(df) # Eventually use a more advanced function to clean colnames print('Changed colnames to {}'.format(df.columns)) # Remove extra whitespace obj_col_list = df.select_dtypes(include = 'object').columns for col_name in obj_col_list: df[col_name] = basics.col_strip(df,col_name) print("Stripped extra whitespace from '{}'".format(col_name)) # Coerce columns if possible for col_name in obj_col_list: new_dtype = coerce_col(df,col_name,error_rate) if new_dtype is not None: print("Coerced '{}' to datatype '{}'".format(col_name, new_dtype)) # Scrub columns obj_col_list = df.select_dtypes(include = 'object').columns for col_name in obj_col_list: scrubf, scrubb = smart_scrub(df,col_name,1-error_rate) if scrubf is not None or scrubb is not None: print("Scrubbed '{}' from the front and '{}' from the back of column '{}'" \ .format(scrubf,scrubb,col_name)) # Coerice columns if possible for col_name in obj_col_list: new_dtype = coerce_col(df,col_name,error_rate) if new_dtype is not None: print("Coerced '{}' to datatype '{}'".format(col_name, new_dtype)) return df
918,559
Tries to convert any type of argument to datetime Args: arg: datetime, date, or str. If "?", will be converted to 1970-1-1. if 0 or "now", will be converted to datetime.datetime.now()
def to_datetime(arg):
    """Try to convert any type of argument to datetime.

    Args:
        arg: datetime, date, str, int or float.
            If "?", will be converted to 1970-1-1.
            If 0 or "now", will be converted to datetime.datetime.now().
            Other numbers are treated as timestamps.

    Returns:
        datetime.datetime

    Raises:
        TypeError: If arg is of an unsupported type.
    """
    if isinstance(arg, datetime.datetime):
        return arg
    elif arg == 0:
        # NOTE(review): this branch also catches False (False == 0) and 0.0,
        # and runs before the numeric-timestamp branch below — confirm intent.
        return datetime.datetime.now()
    elif isinstance(arg, str):
        if arg == "now":
            arg = datetime.datetime.now()
        elif arg == "?":
            arg = datetime.datetime(1970, 1, 1)
        else:
            arg = str2dt(arg)
    elif isinstance(arg, datetime.date):
        arg = date2datetime(arg)
    elif isinstance(arg, (int, float)):
        # Suppose it is a timestamp
        arg = ts2dt(arg)
    else:
        raise TypeError("Wrong type for argument 'arg': {}".format(arg.__class__.__name__))
    return arg
919,024
Returns a list of ExeInfo objects, which represent Python scripts within dir_ Args: dir_: string, path to directory flag_protected: whether or not to include files starting with a '_' Returns: list of ExeInfo objects The ExeInfo objects represent the ".py" files in directory dir_,
def get_exe_info(dir_, flag_protected=False): ret = [] # gets all scripts in script directory ff = glob.glob(os.path.join(dir_, "*.py")) # discards scripts whose file name starts with a "_" ff = [f for f in ff if flag_protected or not os.path.basename(f).startswith("_")] ff.sort() for f in ff: _, filename = os.path.split(f) flag_error = False flag_gui = None descr = "(no doc)" try: # Checks if it is a graphical application with open(f, "r") as h: flag_gui = "QApplication" in h.read() try: script_ = None script_ = import_module(f) # imp.load_source('script_', f) # module object except SystemExit: descr = "? (called sys.exit())" else: if script_.__doc__ is not None: descr = script_.__doc__.strip().split("\n")[0] # first line of docstring except Exception as e: flag_error = True descr = "*{0!s}*: {1!s}".format(e.__class__.__name__, str(e)) if len(descr) == 0: descr = "(no doc)" ret.append(ExeInfo(filename, descr, flag_error, flag_gui)) # Sorts command-line and graphical applications by name separately sisi_gra = [si for si in ret if si.flag_gui] sisi_cmd = [si for si in ret if not si.flag_gui] sisi_gra = sorted(sisi_gra, key=lambda x: x.filename) sisi_cmd = sorted(sisi_cmd, key=lambda x: x.filename) ret = sisi_cmd+sisi_gra return ret
919,535
Collects class names and docstrings in module for classes starting with prefix Arguments: module -- Python module prefix -- argument for str.startswith(); if not passed, does not filter base_class -- filters only descendants of this class flag_exclude_prefix -- whether or not to exclude prefix from class name in result Returns: [(classname0, signature, docstring0), ...]
def collect_doc(module, base_class=None, prefix="", flag_exclude_prefix=False):
    """Collect (name, signature, docstring) for selected members of a module.

    Only names listed in module.__all__ are considered.

    Arguments:
      module -- Python module
      prefix -- argument for str.startswith(); if not passed, does not filter
      base_class -- keep only descendants of this class (None: no filter)
      flag_exclude_prefix -- whether to strip the prefix from names in the result

    Returns:
      [(classname0, signature, docstring0), ...]
    """
    result = []
    for attrname in module.__all__:
        if prefix and not attrname.startswith(prefix):
            continue
        attr = module.__getattribute__(attrname)
        if base_class is not None and not issubclass(attr, base_class):
            continue
        reported_name = attrname[len(prefix):] if flag_exclude_prefix else attrname
        result.append((reported_name, inspect.signature(attr), attr.__doc__))
    return result
919,536
Returns a list with all classes in module that descend from parent Args: module: builtins.module superclass: a class Returns: list
def get_classes_in_module(module, superclass=object):
    """Return all classes in module that descend from superclass (excluded itself).

    Args:
        module: builtins.module
        superclass: a class

    Returns:
        list: classes in dir() order
    """
    found = []
    for classname in dir(module):
        attr = module.__getattribute__(classname)
        try:
            if issubclass(attr, superclass) and (attr != superclass):
                found.append(attr)
        except TypeError:
            # issubclass() raises for non-class attributes; skip them.
            pass
        except RuntimeError:
            # Some attributes raise on probing; skip them silently.
            pass
    return found
919,537
Figures out the names of the subpackages of a package Args: dir_: (str) path to package directory Source: http://stackoverflow.com/questions/832004/python-finding-all-packages-inside-a-package
def get_subpackages_names(dir_):
    """Return the sorted names of the subpackages of a package directory.

    A subdirectory counts as a package when it contains an __init__.py*
    file (source or compiled).

    Args:
        dir_: (str) path to package directory
    """
    def _is_package(entry):
        path = os.path.join(dir_, entry)
        return os.path.isdir(path) and glob.glob(os.path.join(path, '__init__.py*'))

    names = [entry for entry in os.listdir(dir_) if _is_package(entry)]
    names.sort()
    return names
919,539
All known File* classes Args: flag_leaf: returns only classes that do not have subclasses ("leaf" nodes as in a class tree graph)
def classes_file(flag_leaf=False):
    """Return all known File* classes, lazily initializing the registry.

    Args:
        flag_leaf: return only classes that do not have subclasses
            ("leaf" nodes as in a class tree graph)
    """
    # Lazy one-time setup of the module-level class registries.
    if __flag_first:
        __setup()

    if not flag_leaf:
        return _classes_file

    # A class is a leaf if it is not the direct base of any registered class.
    return [cls for cls in _classes_file if cls not in _classes_file_superclass]
919,664
Adds entries to _classes_* Args: m: module object that must contain the following sub-modules: datatypes, vis
def _collect_classes(m):
    """Add entries from module m to the module-level _classes_* registries.

    Args:
        m: module object that must contain the following sub-modules:
            datatypes, vis
    """
    from f311 import filetypes as ft
    from f311 import explorer as ex

    def _extend(classes, newclasses):
        """Append only classes not already present (preserves registration order)."""
        classes.extend([class_ for class_ in newclasses if class_ not in classes])
        # classes.extend(newclasses)

    file_classes = [class_ for class_ in a99.get_classes_in_module(m, ft.DataFile) if class_.flag_collect]

    # Classes to consider when attempts to load a text file (see load_any_file())
    _extend(_classes_txt, [class_ for class_ in file_classes if class_.flag_txt])
    # Classes to consider when attempts to load a binary file (see load_any_file())
    _extend(_classes_bin, [class_ for class_ in file_classes if not class_.flag_txt])
    # Adds Classes to consider when attempts to load a spectrum file (see load_spectrum())
    _extend(_classes_sp, [class_ for class_ in file_classes if issubclass(class_, ft.FileSpectrum)])
    # All kwown File* classes
    _extend(_classes_file, file_classes)
    # All kwnown Vis* classes
    _extend(_classes_vis, a99.get_classes_in_module(m, ex.Vis))

    # Records each class's direct base so leaf classes can be identified later.
    global _classes_file_superclass
    _classes_file_superclass = [cls.__bases__[0] for cls in _classes_file]
919,665
Scans COLLABORATORS_S packages for scripts, eventually filtering if arguments passed Args: pkgname_only: name of single package within COLLABORATORS_S flag_protected: include scripts starting with "_"? Returns: dictionary: {"packagename0": {"exeinfo": [ExeInfo00, ...], "description": description0}, ...}
def get_programs_dict(pkgname_only=None, flag_protected=False):
    """Scan COLLABORATORS_S packages for scripts, optionally filtering.

    Args:
        pkgname_only: name of single package within COLLABORATORS_S
        flag_protected: include scripts starting with "_"?

    Returns:
        dictionary: {"packagename0": {"exeinfo": [ExeInfo00, ...],
                     "description": description0}, ...}
    """
    ___ret = _get_programs_dict()
    # Restrict to a single package if requested.
    __ret = ___ret if pkgname_only is None else OrderedDict(((pkgname_only, ___ret[pkgname_only]),))
    if flag_protected:
        _ret = __ret
    else:
        # Deep-copy before filtering so the cached dict is not mutated.
        _ret = copy.deepcopy(__ret)
        for value in _ret.values():
            value["exeinfo"] = [exeinfo for exeinfo in value["exeinfo"] if
                                not exeinfo.filename.startswith("_")]

    # Removes packages that may have gone out of scripts after filtering
    # NOTE(review): "flag_protected is None" can never be true (the default is
    # the bool False), so the empty-package filter always runs — verify whether
    # "flag_protected" (truthiness) was intended.
    ret = _ret if pkgname_only is None and flag_protected is None else \
        OrderedDict(((key, value) for key, value in _ret.items() if len(value["exeinfo"]) > 0))

    return ret
919,668
Populates _params using specification Arguments: specs -- either: (a) list as [(name, {...}), ...] (see Parameter.FromSpec() for further information) (b) dictionary as {"name": value, ...}
def _FromSpecs(self, specs):
    """Populate the parameter list from a specification.

    Arguments:
      specs -- either:
        (a) list as [(name, {...}), ...] (see Parameter.FromSpec() for
            further information)
        (b) dictionary as {"name": value, ...}
    """
    # Normalize form (b) into form (a) before building Parameter objects.
    if isinstance(specs, dict):
        normalized = [(name, {"value": value}) for name, value in specs.items()]
    else:
        normalized = specs
    for spec in normalized:
        self.params.append(Parameter(spec))
919,821
Attempts to load file by trial-and-error using a given list of classes. Arguments: filename -- full path to file classes -- list of classes having a load() method Returns: DataFile object if loaded successfully, or None if not. Note: it will stop at the first successful load. Attention: this is not good if there is a bug in any of the file readers, because *all exceptions will be silenced!*
def load_with_classes(filename, classes):
    """Attempt to load a file by trial-and-error using a list of classes.

    Arguments:
      filename -- full path to file
      classes -- list of classes having a load() method

    Returns:
      DataFile object if loaded successfully, or None if not.

    Note: it will stop at the first successful load.

    Attention: this is not good if there is a bug in any of the file readers,
    because *all exceptions will be silenced!*
    """
    ok = False
    for class_ in classes:
        obj = class_()
        try:
            obj.load(filename)
            ok = True
        # # cannot let IOError through because pyfits raises IOError!!
        # except IOError:
        #     raise
        # # also cannot let OSError through because astropy.io.fits raises OSError!!
        # except OSError:
        #     raise
        except FileNotFoundError:
            # A missing file is a caller error, not a format mismatch.
            raise
        except Exception as e:  # (ValueError, NotImplementedError):
            # Note: for debugging, switch the below to True
            if a99.logging_level == logging.DEBUG:
                a99.get_python_logger().exception("Error trying with class \"{0!s}\"".format(
                    class_.__name__))
            pass
        if ok:
            break
    if ok:
        return obj
    return None
919,836
Reports available data types Args: editor_quote: character to enclose the name of the editor script between. flag_leaf: see tabulate_filetypes_rest() Returns: list: list of FileTypeInfo
def get_filetypes_info(editor_quote="`", flag_leaf=True):
    """Report available data file types.

    Args:
        editor_quote: character to enclose the name of the editor script between.
        flag_leaf: see tabulate_filetypes_rest()

    Returns:
        list: list of file-type info dicts, sorted by description.
    """
    NONE_REPL = ""
    import f311
    data = []  # [FileTypeInfo, ...]
    for attr in f311.classes_file(flag_leaf):
        description = a99.get_obj_doc0(attr)
        def_ = NONE_REPL if attr.default_filename is None else attr.default_filename
        ee = attr.editors
        if ee is None:
            ee = NONE_REPL
        else:
            # Example: "``mained.py``, ``x.py``"
            ee = ", ".join(["{0}{1}{0}".format(editor_quote, x, editor_quote) for x in ee])
        data.append({"description": description, "default_filename": def_,
                     "classname": attr.__name__, "editors": ee,
                     "class": attr, "txtbin": "text" if attr.flag_txt else "binary"})
    data.sort(key=lambda x: x["description"])
    return data
919,840
Returns connection to database. Tries to return existing connection, unless flag_force_new Args: flag_force_new: filename: Returns: sqlite3.Connection object **Note** this is a private method because you can get a connection to any file, so it has to be used in the right moment
def __get_conn(self, flag_force_new=False, filename=None):
    """Return a connection to the database.

    Reuses the existing open connection unless flag_force_new is set.

    Args:
        flag_force_new: open a fresh connection even if one is already open
        filename: database file; defaults to self.filename

    Returns:
        sqlite3.Connection object

    **Note** this is a private method because you can get a connection to
    any file, so it has to be used in the right moment
    """
    if not flag_force_new and self._conn_is_open():
        return self._conn
    # funny that __get_conn() calls _get_conn() but that's it
    conn = self._get_conn(self.filename if filename is None else filename)
    self._conn = conn
    return conn
920,149
Initialization. The graph requires a valid Spotify API client ID and key pair to work. Arguments: client_id (str): The Spotify API client ID to use. client_key (str): The Spotify API client secret key corresponding to the client ID. neighbor_count (int): The number of neighbors to load for any given node. database (Optional[GraphDatabaseInterface]): The database interface the graph is using.
def __init__(self, client_id: str, client_key: str, neighbor_count: int = 6, database: Optional[GraphDatabaseInterface] = None):
    """Initialize the graph; requires a valid Spotify API client ID/key pair.

    Arguments:
        client_id (str): The Spotify API client ID to use.
        client_key (str): The Spotify API client secret key corresponding to the client ID.
        neighbor_count (int): The number of neighbors to load for any given node.
        database (Optional[GraphDatabaseInterface]): The database interface the
            graph is using; a default SQLite-backed one is created when omitted.
    """
    if database is None:
        database = SpotifyArtistGraph.create_default_database()

    super(SpotifyArtistGraph, self).__init__(database)

    self._client: SpotifyClient = SpotifyClient(client_id, client_key)
    self._neighbor_count: int = neighbor_count
920,277
Creates and returns a default SQLAlchemy database interface to use. Arguments: reset (bool): Whether to reset the database if it happens to exist already.
def create_default_database(reset: bool = False) -> GraphDatabaseInterface:
    """Create and return a default SQLAlchemy database interface.

    Backed by a local SQLite file (SpotifyArtistGraph.db).

    Arguments:
        reset (bool): Whether to reset the database if it happens to exist already.
    """
    # Imports are local so SQLAlchemy is only required when the default
    # database is actually used.
    import sqlalchemy
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.pool import StaticPool

    Base = declarative_base()
    # StaticPool keeps a single connection, suitable for SQLite.
    engine = sqlalchemy.create_engine("sqlite:///SpotifyArtistGraph.db", poolclass=StaticPool)
    Session = sessionmaker(bind=engine)

    dbi: GraphDatabaseInterface = create_graph_database_interface(
        sqlalchemy, Session(), Base, sqlalchemy.orm.relationship
    )

    if reset:
        Base.metadata.drop_all(engine)

    Base.metadata.create_all(engine)

    return dbi
920,278
Initialization. Arguments: graph (Graph): The graph that owns this node. index (int): The unique index of the node in the graph. name (str): The name of the node. external_id (Optional[str]): The external ID of the node.
def __init__(self, graph: SpotifyArtistGraph, index: int, name: str, external_id: Optional[str] = None):
    """Initialize the artist node; an external (Spotify) ID is mandatory.

    Arguments:
        graph (Graph): The graph that owns this node.
        index (int): The unique index of the node in the graph.
        name (str): The name of the node.
        external_id (Optional[str]): The external ID of the node.

    Raises:
        SpotifyArtistGraphError: If external_id is None.
    """
    if external_id is None:
        raise SpotifyArtistGraphError(
            "{} must always have an external ID.".format(self.__class__.__name__))

    super(SpotifyArtistNode, self).__init__(graph, index, name, external_id)
920,280
Returns a new `SpotifyArtistNode` instance with the given index and name. Arguments: index (int): The index of the node to create. name (str): The name of the node to create. external_id (Optional[str]): The external ID of the node.
def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> SpotifyArtistNode:
    """Return a new `SpotifyArtistNode` with the given index and name.

    If no external ID is provided, it is resolved by searching the Spotify
    API for an artist whose name matches exactly.

    Arguments:
        index (int): The index of the node to create.
        name (str): The name of the node to create.
        external_id (Optional[str]): The external ID of the node.
    """
    if external_id is None:
        graph: SpotifyArtistGraph = self._graph
        items: List[NameExternalIDPair] = graph.client.search_artists_by_name(name)
        # Use the first exact name match, if any; otherwise external_id
        # remains None and the node constructor will reject it.
        for item in items:
            if item.name == name:
                external_id = item.external_id
                break

    return SpotifyArtistNode(graph=self._graph, index=index, name=name, external_id=external_id)
920,282
Initialization. Arguments: client_id (str): The Spotify API client ID to use. client_key (str): The Spotify API client secret key corresponding to the client ID.
def __init__(self, client_id: str, client_key: str):
    """Store the Spotify API credentials; no token is requested yet.

    Arguments:
        client_id (str): The Spotify API client ID to use.
        client_key (str): The Spotify API client secret key corresponding to the client ID.
    """
    self._client_id: str = client_id
    self._client_key: str = client_key
    # The access token is fetched lazily; an expiry of 0 forces a refresh.
    self._token_expires_at: float = 0
    self._token: Optional[Dict] = None
920,283
Initialization. Arguments: client_id (str): The Spotify API client ID to use. client_key (str): The Spotify API client secret key corresponding to the client ID.
def __init__(self, client_id: str, client_key: str):
    """Initialize the client.

    Arguments:
        client_id (str): The Spotify API client ID to use.
        client_key (str): The Spotify API client secret key corresponding to the client ID.
    """
    # Token handling (acquisition and refresh) is delegated to the wrapper.
    self._token: SpotifyClientTokenWrapper = SpotifyClientTokenWrapper(client_id, client_key)
920,286
Returns zero or more artist name - external ID pairs that match the specified artist name. Arguments: artist_name (str): The artist name to search in the Spotify API. limit (int): The maximum number of results to return. Returns: Zero or more artist name - external ID pairs. Raises: requests.HTTPError: If an HTTP error occurred during the request. SpotifyClientError: If an invalid item is found.
def search_artists_by_name(self, artist_name: str, limit: int = 5) -> List[NameExternalIDPair]:
    """Return zero or more artist name - external ID pairs matching the given artist name.

    Arguments:
        artist_name (str): The artist name to search in the Spotify API.
        limit (int): The maximum number of results to return.

    Returns:
        Zero or more artist name - external ID pairs.

    Raises:
        requests.HTTPError: If an HTTP error occurred during the request.
        SpotifyClientError: If an invalid item is found.
    """
    response: requests.Response = requests.get(
        self._API_URL_TEMPLATE.format("search"),
        params={"q": artist_name, "type": "artist", "limit": limit},
        headers={"Authorization": "Bearer {}".format(self._token.access_token)}
    )

    # TODO: handle API rate limiting
    response.raise_for_status()

    # An empty body means no results at all.
    if not response.text:
        return []

    pairs: List[NameExternalIDPair] = []
    for item in response.json()["artists"]["items"]:
        pair = NameExternalIDPair(item["name"].strip(), item["id"].strip())
        if not pair.name or not pair.external_id:
            raise SpotifyClientError("Name or ID is missing")
        pairs.append(pair)

    return pairs
920,287
Returns zero or more similar artists (in the form of artist name - external ID pairs) to the one corresponding to the given artist ID. Arguments: artist_id (str): The Spotify ID of the artist for whom similar artists are requested. Returns: Zero or more artist name - external ID pairs. Raises: requests.HTTPError: If an HTTP error occurred during the request. SpotifyClientError: If an invalid item is found.
def similar_artists(self, artist_id: str) -> List[NameExternalIDPair]:
    """Return zero or more artists similar to the one with the given artist ID.

    Arguments:
        artist_id (str): The Spotify ID of the artist for whom similar artists are requested.

    Returns:
        Zero or more artist name - external ID pairs.

    Raises:
        requests.HTTPError: If an HTTP error occurred during the request.
        SpotifyClientError: If an invalid item is found.
    """
    response: requests.Response = requests.get(
        self._API_URL_TEMPLATE.format("artists/{}/related-artists".format(artist_id)),
        headers={"Authorization": "Bearer {}".format(self._token.access_token)}
    )

    # TODO: handle API rate limiting
    response.raise_for_status()

    # An empty body means no results at all.
    if not response.text:
        return []

    result: List[NameExternalIDPair] = []
    for artist in response.json()["artists"]:
        # Strip values and reject empty strings (not just None), consistent
        # with search_artists_by_name().
        pair = NameExternalIDPair(artist["name"].strip(), artist["id"].strip())
        if not pair.name or not pair.external_id:
            raise SpotifyClientError("Name or ID is missing")
        result.append(pair)

    return result
920,288
Assembles a vector that spans several rows in a text file. Arguments: f -- file-like object n -- number of values expected r (optional) -- Index of last row read in file (to tell which file row in case of error) Returns: (list-of-strings, number-of-rows-read-from-file)
def multirow_str_vector(f, n, r=0):
    """Assemble a vector whose values span several rows of a text file.

    Arguments:
        f: file-like object
        n: number of values expected
        r (optional): index of the last row already read (used only to report
            the offending file row in the warning message)

    Returns:
        (values, n_rows): list of strings and number of file rows consumed
    """
    values = []
    rows_read = 0
    count = 0

    while True:
        row = str_vector(f)
        rows_read += 1
        remaining = n - count

        if len(row) > remaining:
            # Row carries more values than still expected: warn and truncate.
            a99.get_python_logger().warning(
                ('Reading multi-row vector: '
                 'row %d should have %d values (has %d)') %
                (r + rows_read, remaining, len(row)))
            values.extend(row[:remaining])
            count = n
        else:
            count += len(row)
            values.extend(row)

        if count == n:
            break

    return values, rows_read
920,529
Returns full path to specified module Args: *args: are added at the end of module path with os.path.join() module: Python module, defaults to a99 Returns: path string >>> get_path()
def get_path(*args, module=a99):
    """Return the absolute path to the specified module's directory, joined with *args*.

    Args:
        *args: path components appended at the end of the module path with os.path.join()
        module: Python module, defaults to a99

    Returns:
        path string

    >>> get_path()
    """
    module_dir = os.path.split(module.__file__)[0]
    return os.path.abspath(os.path.join(module_dir, *args))
920,534
Loads as many files as the number of pages Args: fobjs: [filename or DataFile obj, ...]
def load_many(self, fobjs=None):
    """Load as many files as there are pages.

    Args:
        fobjs: sequence of filenames and/or DataFile objects (a single object
            is also accepted); None entries and None itself are skipped.
    """
    if fobjs is None:
        return

    # Tolerance: wrap a single object into a list.
    if not hasattr(fobjs, "__iter__"):
        fobjs = [fobjs]

    for index, (fobj, _page) in enumerate(zip(fobjs, self.pages)):
        if fobj is None:
            continue
        if isinstance(fobj, ft.DataFile):
            self.load(fobj, index)
        elif isinstance(fobj, str):
            self.load_filename(fobj, index)
        else:
            raise TypeError("Invalid object of class '{}'".format(fobj.__class__.__name__))
920,558
Loads given DataFile object. **tolerant with None** Args: fobj: object of one of accepted classes index: tab index to load fobj into. If not passed, loads into current tab
def load(self, fobj, index=None):
    """Load the given DataFile object into a tab. **Tolerant with None.**

    Args:
        fobj: object of one of the tab's accepted classes, or None (no-op)
        index: tab index to load fobj into; defaults to the current tab
    """
    if index is None:
        index = self._get_tab_index()
    page = self.pages[index]

    if fobj is None:
        return

    accepted = tuple(page.clss_load)
    if not isinstance(fobj, accepted):
        raise RuntimeError('Object to load must be in {0!s} (not a {1!s})'.format(
            [x.__name__ for x in page.clss_load], fobj.__class__.__name__))

    page.editor.load(fobj)
    self._update_gui_text_tabs()
920,561
Loads file given filename Args: filename: index: tab index to load file into. If not passed, loads into current tab
def load_filename(self, filename, index=None):
    """Load a file given its filename.

    Args:
        filename: path of the file to load
        index: tab index to load the file into; defaults to the current tab
    """
    filename = str(filename)  # QString protection
    if index is None:
        index = self._get_tab_index()
    page = self.pages[index]

    # Set on purpose before the loading attempt, so a new load_dir is kept
    # even if loading fails.
    self.load_dir, _ = os.path.split(filename)

    clss = page.clss_load
    if len(clss) == 1:
        # Single handler class: load directly so any load error propagates
        # with its particular information.
        fobj = clss[0]()
        fobj.load(filename)
    else:
        # The multi-class alternative does not report per-class error details
        # when the file fails to load.
        fobj = f311.load_with_classes(filename, page.clss_load)
        if fobj is None:
            raise RuntimeError("Could not load '{0!s}'".format(filename))

    self.load(fobj, index)
920,562