def env_dictionary(self):
    none_to_str = lambda x: str(x) if x else ""
    return {"DOCKERSTACHE_{}".format(k.upper()): none_to_str(v)
            for k, v in six.iteritems(self)}
Convert the options passed to this script into an env var dictionary for pre and post scripts
def pre_script(self):
    if self['pre_script'] is None:
        return
    LOGGER.info("Executing pre script: {}".format(self['pre_script']))
    cmd = self['pre_script']
    execute_command(self.abs_input_dir(), cmd, self.env_dictionary())
    LOGGER.info("Pre Script completed")
execute the pre script if it is defined
def say_tmp_filepath(text=None, preference_program="festival"):
    filepath = shijian.tmp_filepath() + ".wav"
    say(text=text, preference_program=preference_program, filepath=filepath)
    return filepath
Say specified text to a temporary file and return the filepath.
def clacks_overhead(fn):
    @wraps(fn)
    def _wrapped(*args, **kw):
        response = fn(*args, **kw)
        response['X-Clacks-Overhead'] = 'GNU Terry Pratchett'
        return response
    return _wrapped
A Django view decorator that will add the `X-Clacks-Overhead` header.

Usage:

    @clacks_overhead
    def my_view(request):
        return my_response
def render(self, request, template, context):
    if self.allow_force_html and self.request.GET.get('html', False):
        html = get_template(template).render(context)
        return HttpResponse(html)
    else:
        response = HttpResponse(content_type='application/pdf')
        if self.prompt_download:
            response['Content-Disposition'] = 'attachment; filename="{}"' \
                .format(self.get_download_name())
        helpers.render_pdf(
            template=template,
            file_=response,
            url_fetcher=self.url_fetcher,
            context=context,
        )
        return response
Returns a response. By default, this will contain the rendered PDF, but if both ``allow_force_html`` is ``True`` and the querystring ``html=true`` was set, it will return plain HTML.
def _bfs_path_states(self, graph, start):
    pathstates = {}
    # maintain a queue of paths
    queue = []
    visited = []
    # push the first path into the queue
    queue.append([['', start]])
    while queue:
        # get the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1][1]
        # path found
        if node.stateid not in pathstates and node.stateid != len(list(graph.states)):
            pathstates[node.stateid] = ''.join(
                [mnode[0] for mnode in path])
        visited.append(node.stateid)
        # enumerate all adjacent nodes, construct a new path and push it
        # into the queue
        for arc in node.arcs:
            char = graph.isyms.find(arc.ilabel)
            next_state = graph[arc.nextstate]
            if next_state.stateid not in visited:
                new_path = list(path)
                new_path.append([char, next_state])
                queue.append(new_path)
    return pathstates
Find state access strings (DFA shortest paths for every state) using BFS

Args:
    graph (DFA): The DFA states
    start (int): The DFA initial state

Return:
    dict: A mapping from each state id to its access string
def _get_accepted(self, graph):
    accepted = []
    for state in graph.states:
        if state.final != TropicalWeight(float('inf')):
            accepted.append(state)
    return accepted
Find the accepted states

Args:
    graph (DFA): The DFA states

Return:
    list: Returns the list of the accepted states
def _object_set_to_state_list(self, objectset):
    state_list = []
    for state in list(objectset):
        state_list.append(state.stateid)
    return state_list
Args:
    objectset (list): A list of all the DFA states (as objects)

Return:
    list: A list of all the DFA states (as identifiers)
def _get_group_from_state(self, sid):
    for index, selectgroup in enumerate(self.groups):
        if sid in selectgroup:
            return index
Args:
    sid (int): The state identifier

Return:
    int: The group identifier that the state belongs to
def _reverse_to_source(self, target, group1):
    new_group = []
    for dst in group1:
        new_group += target[dst]
    return set(new_group)
Args:
    target (dict): A table containing the reverse transitions for each state
    group1 (list): A group of states

Return:
    set: A set of states for which there is a transition with the states of the group
def _partition_group(self, group):
    for (group1, group2, distinguish_string) in self.bookeeping:
        if group & group1 != set() and not group.issubset(group1):
            new_g1 = group & group1
            new_g2 = group - group1
            return (new_g1, new_g2, distinguish_string)
        if group & group2 != set() and not group.issubset(group2):
            new_g1 = group & group2
            new_g2 = group - group2
            return (new_g1, new_g2, distinguish_string)
    assert False, "Unmatched group partition"
Args:
    group (list): A group of states

Return:
    tuple: A set of two groups
def _init_smi(self, graph, access_strings_map):
    smi = []
    for selected_state in sorted(graph.states, key=attrgetter('initial'),
                                 reverse=True):
        # Initially gather all transitions of the state into a dictionary
        transitions_map = defaultdict(list)
        for character in self.alphabet:
            destination_state = self._delta(graph, selected_state, character)
            transitions_map[destination_state.stateid].append(character)

        chars_in_smi = []
        sorted_transitions = sorted(transitions_map.items(),
                                    key=lambda x: len(x[1]))

        if len(sorted_transitions) == 1:
            # A single symbol is enough; all other input strings will be
            # generalized by the guardgen algorithm
            chars_in_smi.append(self.alphabet[0])
        else:
            # Otherwise insert in smi_vector all transitions as explicit except
            # the one from the sink transition where we add just enough
            # explicit transitions to make sure that this state will be
            # selected as the sink state.
            #
            # If no transition has a clear advantage in terms of symbols then
            # just add all transitions in explicit form because it may be the
            # case the guardgen() will generalize in the wrong transition.
            for (_, char_list) in sorted_transitions[:-1]:
                chars_in_smi += char_list

            sink_chars = len(sorted_transitions[-2][1]) + 1
            chars_in_smi.extend(sorted_transitions[-1][1][:sink_chars])

        access_string = access_strings_map[selected_state.stateid]
        smi.extend([access_string + character for character in chars_in_smi])
    return smi
Args:
    graph (DFA): The DFA states
    access_strings_map (dict): A dict containing all the access strings for each state

Return:
    list: SMI transition table
def initialize(self, givengraph, sfa=False):
    sm_vector, smi_vector, em_vector = self._init_using_k_equivalence(
        givengraph, sfa)
    return sm_vector, smi_vector, em_vector
Args:
    givengraph (DFA): The DFA states
    sfa (bool): A boolean for choosing SFA

Return:
    list, list, list: sm_vector, smi_vector, em_vector initialization vectors
def push(self, message, message_type):
    super(Producer, self).send(message, message_type)
Send a reply message of the given type

Args:
    - message: the message to publish
    - message_type: the type of message being sent
def label_size(base, label_name=None, children=[], parents=[], dependencies=[]):
    label_name = label_name or base.key
    cl_children = children
    cl_parents = parents
    cl_dependencies = dependencies

    class LabelSize(Formatoption):
        __doc__ = """
        Set the size of the %s

        Possible types
        --------------
        %%(fontsizes)s

        See Also
        --------
        %s, %s, %s""" % (label_name, base.key, base.key + 'weight',
                         base.key + 'props')

        children = [base.key] + cl_children
        parent = [base.key + 'props'] + cl_parents
        dependencies = cl_dependencies
        group = 'labels'
        name = 'Font size of ' + (base.name or base.key)

        def update(self, value):
            for text in getattr(self, base.key).texts:
                text.set_size(value)

        def get_fmt_widget(self, parent, project):
            """Get a widget with the different font weights"""
            from psy_simple.widgets.texts import FontSizeWidget
            return FontSizeWidget(
                parent, self, next(iter(getattr(self, base.key).texts), None),
                base)

    return LabelSize(base.key + 'size')
Function that returns a Formatoption class for modifying the fontsize

This function returns a :class:`~psyplot.plotter.Formatoption` instance that
modifies the size of the given `base` formatoption

Parameters
----------
%(label_weight.parameters)s

Returns
-------
Formatoption
    The formatoption instance that modifies the fontsize of `base`

See Also
--------
label_weight, label_props, Figtitle, Title
def replace(self, s, data, attrs=None):
    # insert labels
    s = s.format(**self.rc['labels'])
    # replace attributes
    attrs = attrs or data.attrs
    if hasattr(getattr(data, 'psy', None), 'arr_name'):
        attrs = attrs.copy()
        attrs['arr_name'] = data.psy.arr_name
    s = safe_modulo(s, attrs)
    # replace datetime.datetime-like time information
    if isinstance(data, InteractiveList):
        data = data[0]
    tname = self.any_decoder.get_tname(
        next(self.plotter.iter_base_variables), data.coords)
    if tname is not None and tname in data.coords:
        time = data.coords[tname]
        if not time.values.ndim:
            try:  # assume a valid datetime.datetime instance
                s = pd.to_datetime(str(time.values[()])).strftime(s)
            except ValueError:
                pass
    if six.PY2:
        return s.decode('utf-8')
    return s
Replace the attributes of the plotter data in a string

%(replace_note)s

Parameters
----------
s: str
    String where the replacements shall be made
data: InteractiveBase
    Data object from which to use the coordinates and insert the coordinate
    and attribute information
attrs: dict
    Meta attributes that shall be used for replacements. If None, it will be
    gained from `data.attrs`

Returns
-------
str
    `s` with inserted information
def get_fig_data_attrs(self, delimiter=None):
    if self.project is not None:
        delimiter = next(filter(lambda d: d is not None, [
            delimiter, self.delimiter, self.rc['delimiter']]))
        figs = self.project.figs
        fig = self.ax.get_figure()
        if self.plotter._initialized and fig in figs:
            ret = figs[fig].joined_attrs(delimiter=delimiter,
                                         plot_data=True)
        else:
            ret = self.get_enhanced_attrs(self.plotter.plot_data)
            self.logger.debug(
                'Can not get the figure attributes because plot has not '
                'yet been initialized!')
        return ret
    else:
        return self.get_enhanced_attrs(self.plotter.plot_data)
Join the data attributes with other plotters in the project

This method joins the attributes of the :class:`~psyplot.InteractiveBase`
instances in the project that draw on the same figure as this instance does.

Parameters
----------
delimiter: str
    Specifies the delimiter with what the attributes are joined. If None,
    the :attr:`delimiter` attribute of this instance or (if the latter is
    also None), the rcParams['texts.delimiter'] item is used.

Returns
-------
dict
    A dictionary with all the meta attributes joined by the specified
    `delimiter`
def get_fmt_widget(self, parent, project):
    from psy_simple.widgets.texts import LabelWidget
    return LabelWidget(parent, self, project)
Create a combobox with the attributes
def clear_other_texts(self, remove=False):
    fig = self.ax.get_figure()
    # don't do anything if our figtitle is the only Text instance
    if len(fig.texts) == 1:
        return
    for i, text in enumerate(fig.texts):
        if text == self._text:
            continue
        if text.get_position() == self._text.get_position():
            if not remove:
                text.set_text('')
            else:
                del fig.texts[i]  # remove the Text instance from the figure
Make sure that no other text is at the same position as this one

This method clears all text instances in the figure that are at the same
position as the :attr:`_text` attribute

Parameters
----------
remove: bool
    If True, the Text instances are permanently deleted from the figure,
    otherwise their text is simply set to ''
def transform(self):
    ax = self.ax
    return {'axes': ax.transAxes,
            'fig': ax.get_figure().transFigure,
            'data': ax.transData}
Dictionary containing the relevant transformations
def _remove_texttuple(self, pos):
    for i, (old_x, old_y, s, old_cs, d) in enumerate(self.value):
        if (old_x, old_y, old_cs) == pos:
            self.value.pop(i)
            return
    raise ValueError("{0} not found!".format(pos))
Remove a texttuple from the value in the plotter

Parameters
----------
pos: tuple (x, y, cs)
    x and y are the x- and y-positions and cs the coordinate system
def _update_texttuple(self, x, y, s, cs, d):
    pos = (x, y, cs)
    for i, (old_x, old_y, old_s, old_cs, old_d) in enumerate(self.value):
        if (old_x, old_y, old_cs) == pos:
            self.value[i] = (old_x, old_y, s, old_cs, d)
            return
    raise ValueError("No text tuple found at {0}!".format(pos))
Update the text tuple at `x` and `y` with the given `s` and `d`
def share(self, fmto, **kwargs):
    kwargs.setdefault('texts_to_remove', self._texts_to_remove)
    super(Text, self).share(fmto, **kwargs)
Share the settings of this formatoption with other data objects

Parameters
----------
fmto: Formatoption
    The :class:`Formatoption` instance to share the attributes with
``**kwargs``
    Any other keyword argument that shall be passed to the update method of
    `fmto`

Notes
-----
The Text formatoption sets the 'texts_to_remove' keyword to the
:attr:`_texts_to_remove` attribute of this instance (if not already
specified in ``**kwargs``)
def save(self, *args, **kwargs):
    self.slug = uuslug(
        self.name,
        instance=self,
        max_length=100,
        separator='-',
        start_no=2
    )
    if not self.uid:
        self.uid = 'organization:{}'.format(self.slug)
    super(Organization, self).save(*args, **kwargs)
**uid**: :code:`organization:{slug}`
def replace_variables(self, source: str, variables: dict) -> str:
    try:
        replaced = re.sub(
            "{{(.*?)}}", lambda m: variables.get(m.group(1), ""), source
        )
    except TypeError:
        replaced = source
    return replaced
Replace {{variable-name}} with stored value.
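A minimal, self-contained sketch of the same substitution (only the regex logic from the method above is exercised; the wrapper function is illustrative):

# Standalone sketch of the {{variable}} replacement behaviour.
import re

def replace_variables(source, variables):
    return re.sub("{{(.*?)}}", lambda m: variables.get(m.group(1), ""), source)

print(replace_variables("Hello {{name}}!", {"name": "world"}))  # Hello world!
print(replace_variables("{{missing}} stays empty", {}))         # " stays empty"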
def preprocess_cell(
    self, cell: "NotebookNode", resources: dict, index: int
) -> Tuple["NotebookNode", dict]:
    if cell.cell_type == "markdown":
        variables = cell["metadata"].get("variables", {})
        if len(variables) > 0:
            cell.source = self.replace_variables(cell.source, variables)
            if resources.get("delete_pymarkdown", False):
                del cell.metadata["variables"]
    return cell, resources
Preprocess cell.

Parameters
----------
cell : NotebookNode cell
    Notebook cell being processed
resources : dictionary
    Additional resources used in the conversion process. Allows
    preprocessors to pass variables into the Jinja engine.
index : int
    Index of the cell being processed (see base.py)
def emit(self, ast_reg, ast_guide):
    ''' Default emit method: visit both ASTs and return the codegen '''
    if ast_reg:
        self.visit(ast_reg)
    codegen_reg = self.codegen
    self.codegen = self.cg_type()
    if ast_guide:
        self.visit(ast_guide)
    return (codegen_reg, self.codegen)
Default emit method: visit both ASTs and return the codegen
def index_dir(self, folder):
    folder_path = folder
    print('Indexing folder: ' + folder_path)
    nested_dir = {}
    folder = folder_path.rstrip(os.sep)
    start = folder.rfind(os.sep) + 1
    for root, dirs, files in os.walk(folder):
        folders = root[start:].split(os.sep)
        # subdir = dict.fromkeys(files)
        subdir = {}
        for f in files:
            # Create an entry for every markdown file
            if os.path.splitext(f)[1] == '.md':
                with open(os.path.abspath(os.path.join(root, f)),
                          encoding='utf-8') as fp:
                    try:
                        _, meta = self.mrk.extract_meta(fp.read())
                    except Exception:
                        print("Skipping indexing " + f +
                              "; Could not parse metadata")
                        meta = {'title': f}
                # Value of the entry (the key) is its metadata
                subdir[f] = meta
        parent = nested_dir
        for fold in folders[:-1]:
            parent = parent.get(fold)
        # Attach the config of all children nodes onto the parent
        parent[folders[-1]] = subdir
    return nested_dir
Creates a nested dictionary that represents the folder structure of folder. Also extracts metadata from all markdown posts and adds it to the dictionary.
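For intuition, a hypothetical result for a folder posts/ holding a.md and a subfolder notes/ with b.md (metadata values depend on each file's front matter):

# Hypothetical output shape for posts/{a.md, notes/{b.md}}:
# {
#     'posts': {
#         'a.md': {'title': 'My first post'},  # parsed metadata, or {'title': 'a.md'} on parse failure
#         'notes': {
#             'b.md': {'title': 'A note'},
#         },
#     }
# }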
def average_detections(detections, predictions, relative_prediction_threshold=0.25):
    # remove the predictions that are too low
    prediction_threshold = relative_prediction_threshold * max(predictions)
    detections, predictions = zip(*[[d, p] for d, p in zip(detections, predictions)
                                    if p >= prediction_threshold])

    # turn remaining predictions into weights
    s = sum(predictions)
    weights = [p / s for p in predictions]

    # compute weighted average of bounding boxes
    top = sum(w * b.topleft_f[0] for w, b in zip(weights, detections))
    left = sum(w * b.topleft_f[1] for w, b in zip(weights, detections))
    bottom = sum(w * b.bottomright_f[0] for w, b in zip(weights, detections))
    right = sum(w * b.bottomright_f[1] for w, b in zip(weights, detections))

    # compute the average prediction value
    value = sum(w * p for w, p in zip(weights, predictions))

    # return the average bounding box
    return BoundingBox((top, left), (bottom - top, right - left)), value
average_detections(detections, predictions, [relative_prediction_threshold]) -> bounding_box, prediction

Computes the weighted average of the given detections, where the weights are
computed based on the prediction values.

**Parameters:**

``detections`` : [:py:class:`BoundingBox`]
    The overlapping bounding boxes.

``predictions`` : [float]
    The predictions for the ``detections``.

``relative_prediction_threshold`` : float between 0 and 1
    Limits the bounding boxes to those that have a prediction value higher
    than ``relative_prediction_threshold * max(predictions)``

**Returns:**

``bounding_box`` : :py:class:`BoundingBox`
    The bounding box which has been merged from the detections

``prediction`` : float
    The prediction value of the bounding box, which is a weighted sum of the
    predictions with minimum overlap
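A usage sketch; the coordinates are illustrative and assume the (topleft, size) BoundingBox constructor used in the function above:

# Hedged usage sketch; detection values are illustrative.
detections = [BoundingBox((10, 10), (20, 20)), BoundingBox((12, 11), (20, 20))]
predictions = [0.9, 0.6]
box, value = average_detections(detections, predictions)
# `box` blends the two boxes, weighting the first more heavily (0.9 vs 0.6)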
def best_detection(detections, predictions, minimum_overlap=0.2,
                   relative_prediction_threshold=0.25):
    # remove all negative predictions since they harm the calculation of the weights
    detections = [detections[i] for i in range(len(detections)) if predictions[i] > 0]
    predictions = [predictions[i] for i in range(len(predictions)) if predictions[i] > 0]
    if not detections:
        raise ValueError("No detections with a prediction value > 0 have been found")

    # keep only the bounding boxes with the highest overlap
    detections, predictions = overlapping_detections(
        detections, numpy.array(predictions), minimum_overlap)

    return average_detections(detections, predictions, relative_prediction_threshold)
best_detection(detections, predictions, [minimum_overlap], [relative_prediction_threshold]) -> bounding_box, prediction

Computes the best detection for the given detections and according
predictions. This is achieved by computing a weighted sum of detections that
overlap with the best detection (the one with the highest prediction), where
the weights are based on the predictions. Only detections with according
prediction values > 0 are considered.

**Parameters:**

``detections`` : [:py:class:`BoundingBox`]
    The detected bounding boxes.

``predictions`` : [float]
    The predictions for the ``detections``.

``minimum_overlap`` : float between 0 and 1
    The minimum overlap (in terms of Jaccard :py:meth:`BoundingBox.similarity`)
    of bounding boxes with the best detection to be considered.

``relative_prediction_threshold`` : float between 0 and 1
    Limits the bounding boxes to those that have a prediction value higher
    than ``relative_prediction_threshold * max(predictions)``

**Returns:**

``bounding_box`` : :py:class:`BoundingBox`
    The bounding box which has been merged from the detections

``prediction`` : float
    The prediction value of the bounding box, which is a weighted sum of the
    predictions with minimum overlap
def cycles_created_by(callable):
    with restore_gc_state():
        gc.disable()
        gc.collect()
        gc.set_debug(gc.DEBUG_SAVEALL)
        callable()
        new_object_count = gc.collect()
        if new_object_count:
            objects = gc.garbage[-new_object_count:]
            del gc.garbage[-new_object_count:]
        else:
            objects = []
        return ObjectGraph(objects)
Return graph of cyclic garbage created by the given callable.

Return an :class:`~refcycle.object_graph.ObjectGraph` representing those
objects generated by the given callable that can't be collected by Python's
usual reference-count based garbage collection.

This includes objects that will eventually be collected by the cyclic
garbage collector, as well as genuinely unreachable objects that will never
be collected.

`callable` should be a callable that takes no arguments; its return value
(if any) will be ignored.
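A usage sketch, assuming cycles_created_by is importable as defined above:

# Hedged usage sketch for cycles_created_by.
def make_cycle():
    a = []
    a.append(a)  # self-referential list: reference counting alone can't free it

graph = cycles_created_by(make_cycle)
# `graph` now holds the cyclic garbage produced by make_cycle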
def garbage():
    with restore_gc_state():
        gc.disable()
        gc.set_debug(gc.DEBUG_SAVEALL)
        collected_count = gc.collect()
        if collected_count:
            objects = gc.garbage[-collected_count:]
            del gc.garbage[-collected_count:]
        else:
            objects = []
        return ObjectGraph(objects)
Collect garbage and return an :class:`~refcycle.object_graph.ObjectGraph`
based on collected garbage.

The collected elements are removed from ``gc.garbage``, but are still kept
alive by the references in the graph. Deleting the
:class:`~refcycle.object_graph.ObjectGraph` instance and doing another
``gc.collect`` will remove those objects for good.
def objects_reachable_from(obj):
    # Depth-first search.
    found = ObjectGraph.vertex_set()
    to_process = [obj]
    while to_process:
        obj = to_process.pop()
        found.add(obj)
        for referent in gc.get_referents(obj):
            if referent not in found:
                to_process.append(referent)
    return ObjectGraph(found)
Return graph of objects reachable from *obj* via ``gc.get_referents``.

Returns an :class:`~refcycle.object_graph.ObjectGraph` object holding all
objects reachable from the given one by following the output of
``gc.get_referents``.

Note that unlike the :func:`~refcycle.creators.snapshot` function, the
output graph may include non-gc-tracked objects.
def snapshot():
    all_objects = gc.get_objects()
    this_frame = inspect.currentframe()
    selected_objects = []
    for obj in all_objects:
        if obj is not this_frame:
            selected_objects.append(obj)
    graph = ObjectGraph(selected_objects)
    del this_frame, all_objects, selected_objects, obj
    return graph
Return the graph of all currently gc-tracked objects.

Excludes the returned :class:`~refcycle.object_graph.ObjectGraph` and
objects owned by it.

Note that a subsequent call to :func:`~refcycle.creators.snapshot` will
capture all of the objects owned by this snapshot. The
:meth:`~refcycle.object_graph.ObjectGraph.owned_objects` method may be
helpful when excluding these objects from consideration.
def extendMarkdown(self, md, md_globals):
    md.registerExtension(self)

    for processor in (self.preprocessors or []):
        md.preprocessors.add(processor.__name__.lower(), processor(md), '_end')

    for pattern in (self.inlinepatterns or []):
        md.inlinePatterns.add(pattern.__name__.lower(), pattern(md), '_end')

    for processor in (self.postprocessors or []):
        md.postprocessors.add(processor.__name__.lower(), processor(md), '_end')
Every extension requires an extendMarkdown method to tell the markdown renderer how to use the extension.
def run(
    paths,
    output=_I_STILL_HATE_EVERYTHING,
    recurse=core.flat,
    sort_by=None,
    ls=core.ls,
    stdout=stdout,
):
    if output is _I_STILL_HATE_EVERYTHING:
        output = core.columnized if stdout.isatty() else core.one_per_line

    if sort_by is None:
        if output == core.as_tree:
            def sort_by(thing):
                return (
                    thing.parent(),
                    thing.basename().lstrip(string.punctuation).lower(),
                )
        else:
            def sort_by(thing):
                return thing

    def _sort_by(thing):
        return not getattr(thing, "_always_sorts_first", False), sort_by(thing)

    contents = [
        path_and_children
        for path in paths or (project.from_path(FilePath(".")),)
        for path_and_children in recurse(path=path, ls=ls)
    ]
    for line in output(contents, sort_by=_sort_by):
        stdout.write(line)
        stdout.write("\n")
Project-oriented directory and file information lister.
def mkdir_p(path):
    '''
    Mimic `mkdir -p` since os module doesn't provide one.

    :param str path: directory to create
    '''
    assert isinstance(path, basestring), ("path must be a string but is %r" % path)
    try:
        os.makedirs(path)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise
Mimic `mkdir -p` since os module doesn't provide one.

:param str path: directory to create
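A quick usage sketch (the path is illustrative):

# Usage sketch for mkdir_p.
mkdir_p('build/artifacts/logs')
mkdir_p('build/artifacts/logs')  # second call is a no-op instead of raising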
def limit_chord_unlock_tasks(app):
    task = app.tasks['celery.chord_unlock']
    if task.max_retries is None:
        retries = getattr(app.conf, 'CHORD_UNLOCK_MAX_RETRIES', None)
        task.max_retries = retries
Set max_retries for chord.unlock tasks to avoid infinitely looping tasks. (see celery/celery#1700 or celery/celery#2725)
def setup_exchanges(app):
    with app.producer_or_acquire() as P:
        # Ensure all queues are noticed and configured with their
        # appropriate exchange.
        for q in app.amqp.queues.values():
            P.maybe_declare(q)
Set up the result exchange to route all tasks to the platform queue.
def setup_app(app, throw=True):
    success = True
    try:
        for func in SETUP_FUNCS:
            try:
                func(app)
            except Exception:
                success = False
                if throw:
                    raise
                else:
                    msg = "Failed to run setup function %r(app)"
                    logger.exception(msg, func.__name__)
    finally:
        setattr(app, 'is_set_up', success)
Ensure application is set up to expected configuration. This function is typically triggered by the worker_init signal, however it must be called manually by codebases that are run only as task producers or from within a Python shell.
def insert(self, item, priority):
    with self.lock:
        self_data = self.data
        rotate = self_data.rotate
        self_items = self.items
        maxlen = self._maxlen
        try:
            if priority <= self_data[-1][1]:
                self_data.append((item, priority))
            elif priority > self_data[0][1]:
                self_data.appendleft((item, priority))
            else:
                length = len(self_data) + 1
                mid = length // 2
                shift = 0
                while True:
                    if priority <= self_data[0][1]:
                        rotate(-mid)
                        shift += mid
                        mid //= 2
                        if mid == 0:
                            mid += 1
                    else:
                        rotate(mid)
                        shift -= mid
                        mid //= 2
                        if mid == 0:
                            mid += 1
                    if self_data[-1][1] >= priority > self_data[0][1]:
                        self_data.appendleft((item, priority))
                        # When returning to original position, never shift
                        # more than half length of DEPQ i.e. if length is
                        # 100 and we rotated -75, rotate -25, not 75
                        if shift > length // 2:
                            shift = length % shift
                            rotate(-shift)
                        else:
                            rotate(shift)
                        break
            try:
                self_items[item] += 1
            except TypeError:
                self_items[repr(item)] += 1
        except IndexError:
            self_data.append((item, priority))
            try:
                self_items[item] = 1
            except TypeError:
                self_items[repr(item)] = 1
        if maxlen is not None and maxlen < len(self_data):
            self._poplast()
Adds item to DEPQ with given priority by performing a binary search on the concurrently rotating deque. Amount rotated R of DEPQ of length n would be n <= R <= 3n/2. Performance: O(n)
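A brief usage sketch, assuming the surrounding DEPQ class (the interface matches the depq package; first() and last() are shown further below):

# Hedged usage sketch for insert().
depq = DEPQ()
depq.insert('task-a', 5)
depq.insert('task-b', 1)
depq.insert('task-c', 9)
# depq.first() -> 'task-c' (highest priority), depq.last() -> 'task-b'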
def addfirst(self, item, new_priority=None):
    with self.lock:
        self_data = self.data
        try:
            priority = self_data[0][1]
            if new_priority is not None:
                if new_priority < priority:
                    raise ValueError('Priority must be >= '
                                     'highest priority.')
                else:
                    priority = new_priority
        except IndexError:
            priority = 0 if new_priority is None else new_priority
        self_data.appendleft((item, priority))
        self_items = self.items
        maxlen = self._maxlen
        try:
            self_items[item] += 1
        except TypeError:
            self_items[repr(item)] += 1
        if maxlen is not None and maxlen < len(self_data):
            self._poplast()
Adds item to DEPQ as highest priority. The default starting priority is 0, the default new priority is self.high(). Performance: O(1)
def addlast(self, item, new_priority=None):
    with self.lock:
        self_data = self.data
        maxlen = self._maxlen
        if maxlen is not None and maxlen == len(self_data):
            return
        try:
            priority = self_data[-1][1]
            if new_priority is not None:
                if new_priority > priority:
                    raise ValueError('Priority must be <= '
                                     'lowest priority.')
                else:
                    priority = new_priority
        except IndexError:
            priority = 0 if new_priority is None else new_priority
        self_data.append((item, priority))
        self_items = self.items
        try:
            self_items[item] += 1
        except TypeError:
            self_items[repr(item)] += 1
Adds item to DEPQ as lowest priority. The default starting priority is 0, the default new priority is self.low(). Performance: O(1)
def popfirst(self):
    with self.lock:
        try:
            tup = self.data.popleft()
        except IndexError as ex:
            ex.args = ('DEPQ is already empty',)
            raise
        self_items = self.items
        try:
            self_items[tup[0]] -= 1
            if self_items[tup[0]] == 0:
                del self_items[tup[0]]
        except TypeError:
            r = repr(tup[0])
            self_items[r] -= 1
            if self_items[r] == 0:
                del self_items[r]
        return tup
Removes item with highest priority from DEPQ. Returns tuple(item, priority). Performance: O(1)
def _poplast(self):
    try:
        tup = self.data.pop()
    except IndexError as ex:
        ex.args = ('DEPQ is already empty',)
        raise
    self_items = self.items
    try:
        self_items[tup[0]] -= 1
        if self_items[tup[0]] == 0:
            del self_items[tup[0]]
    except TypeError:
        r = repr(tup[0])
        self_items[r] -= 1
        if self_items[r] == 0:
            del self_items[r]
    return tup
Removes the lowest-priority item without acquiring the lock; used during insert to enforce maxlen
def first(self):
    with self.lock:
        try:
            return self.data[0][0]
        except IndexError as ex:
            ex.args = ('DEPQ is empty',)
            raise
Gets item with highest priority. Performance: O(1)
def last(self):
    with self.lock:
        try:
            return self.data[-1][0]
        except IndexError as ex:
            ex.args = ('DEPQ is empty',)
            raise
Gets item with lowest priority. Performance: O(1)
def high(self):
    with self.lock:
        try:
            return self.data[0][1]
        except IndexError as ex:
            ex.args = ('DEPQ is empty',)
            raise
Gets highest priority. Performance: O(1)
def low(self):
    with self.lock:
        try:
            return self.data[-1][1]
        except IndexError as ex:
            ex.args = ('DEPQ is empty',)
            raise
Gets lowest priority. Performance: O(1)
def clear(self):
    with self.lock:
        self.data.clear()
        self.items.clear()
Empties DEPQ. Performance: O(1)
def set_maxlen(self, length):
    with self.lock:
        self._maxlen = length
        while len(self.data) > length:
            self._poplast()
Sets maxlen
def count(self, item):
    try:
        return self.items.get(item, 0)
    except TypeError:
        return self.items.get(repr(item), 0)
Returns number of occurrences of item in DEPQ. Performance: O(1)
def remove(self, item, count=1):
    with self.lock:
        try:
            count = int(count)
        except ValueError as ex:
            ex.args = ('{} cannot be represented as an '
                       'integer'.format(count),)
            raise
        except TypeError as ex:
            ex.args = ('{} cannot be represented as an '
                       'integer'.format(count),)
            raise

        removed = []
        self_items = self.items

        try:
            item_freq = self_items[item]
            item_repr = item
            if item_freq == 0:
                return removed
        except TypeError:
            item_freq = self_items[repr(item)]
            item_repr = repr(item)
            if item_freq == 0:
                return removed

        if count == -1:
            count = item_freq

        self_data = self.data
        rotate = self_data.rotate
        pop = self_data.pop
        counter = 0

        for i in range(len(self_data)):
            if count > counter and item == self_data[-1][0]:
                removed.append(pop())
                counter += 1
                continue
            rotate()

        if item_freq <= count:
            del self_items[item_repr]
        else:
            self_items[item_repr] -= count

        return removed
Removes occurrences of given item in ascending priority. Default number of removals is 1. Useful for tasks that no longer require completion, inactive clients, certain algorithms, etc. Returns a list of tuple(item, priority). Performance: O(n)
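Continuing the DEPQ sketch from above, a hedged example of how removal behaves:

# Hedged usage sketch for remove(); builds on the insert() example above.
depq = DEPQ()
for priority in (1, 2, 3):
    depq.insert('job', priority)
removed = depq.remove('job', count=2)
# removed -> [('job', 1), ('job', 2)]: occurrences leave in ascending priority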
def DatabaseEnabled(cls):
    if not issubclass(cls, Storable):
        raise ValueError(
            "%s is not a subclass of gludb.data.Storable" % repr(cls)
        )
    cls.ensure_table = classmethod(_ensure_table)
    cls.find_one = classmethod(_find_one)
    cls.find_all = classmethod(_find_all)
    cls.find_by_index = classmethod(_find_by_index)
    cls.save = _save
    cls.delete = _delete
    return cls
Gives persistence methods to classes with this annotation. All this really does is add some functions that forward to the mapped database class.
def _find_playlist(self):
    data = None
    if self.id:
        data = self.connection.get_item(
            'find_playlist_by_id', playlist_id=self.id)
    elif self.reference_id:
        data = self.connection.get_item(
            'find_playlist_by_reference_id', reference_id=self.reference_id)
    if data:
        self._load(data)
Internal method to populate the object given the ``id`` or ``reference_id`` that has been set in the constructor.
def _to_dict(self):
    data = {
        'name': self.name,
        'referenceId': self.reference_id,
        'shortDescription': self.short_description,
        'playlistType': self.type,
        'id': self.id}
    if self.videos:
        for video in self.videos:
            if video.id not in self.video_ids:
                self.video_ids.append(video.id)
    if self.video_ids:
        data['videoIds'] = self.video_ids
    # drop empty fields; iterate over a copy of the keys so the dict can be
    # mutated safely
    for key in list(data.keys()):
        if data[key] is None:
            data.pop(key)
    return data
Internal method that serializes object into a dictionary.
def _load(self, data):
    self.raw_data = data
    self.id = data['id']
    self.reference_id = data['referenceId']
    self.name = data['name']
    self.short_description = data['shortDescription']
    self.thumbnail_url = data['thumbnailURL']
    self.videos = []
    self.video_ids = data['videoIds']
    self.type = data['playlistType']
    for video in data.get('videos', []):
        self.videos.append(pybrightcove.video.Video(
            data=video, connection=self.connection))
Internal method that deserializes a ``pybrightcove.playlist.Playlist`` object.
def save(self):
    d = self._to_dict()
    if len(d.get('videoIds', [])) > 0:
        if not self.id:
            self.id = self.connection.post('create_playlist', playlist=d)
        else:
            data = self.connection.post('update_playlist', playlist=d)
            if data:
                self._load(data)
Create or update a playlist.
def delete(self, cascade=False):
    if self.id:
        self.connection.post('delete_playlist', playlist_id=self.id,
                             cascade=cascade)
        self.id = None
Deletes this playlist.
def find_all(connection=None, page_size=100, page_number=0,
             sort_by=DEFAULT_SORT_BY, sort_order=DEFAULT_SORT_ORDER):
    return pybrightcove.connection.ItemResultSet(
        "find_all_playlists", Playlist, connection, page_size, page_number,
        sort_by, sort_order)
List all playlists.
def find_by_ids(ids, connection=None, page_size=100, page_number=0,
                sort_by=DEFAULT_SORT_BY, sort_order=DEFAULT_SORT_ORDER):
    ids = ','.join([str(i) for i in ids])
    return pybrightcove.connection.ItemResultSet(
        'find_playlists_by_ids', Playlist, connection, page_size,
        page_number, sort_by, sort_order, playlist_ids=ids)
List playlists by specific IDs.
def find_by_reference_ids(reference_ids, connection=None, page_size=100,
                          page_number=0, sort_by=DEFAULT_SORT_BY,
                          sort_order=DEFAULT_SORT_ORDER):
    reference_ids = ','.join([str(i) for i in reference_ids])
    return pybrightcove.connection.ItemResultSet(
        "find_playlists_by_reference_ids", Playlist, connection, page_size,
        page_number, sort_by, sort_order, reference_ids=reference_ids)
List playlists by specific reference_ids.
def find_for_player_id(player_id, connection=None, page_size=100,
                       page_number=0, sort_by=DEFAULT_SORT_BY,
                       sort_order=DEFAULT_SORT_ORDER):
    return pybrightcove.connection.ItemResultSet(
        "find_playlists_for_player_id", Playlist, connection, page_size,
        page_number, sort_by, sort_order, player_id=player_id)
List playlists for a given player id.
def is_any_type_set(sett: Set[Type]) -> bool:
    return len(sett) == 1 and is_any_type(min(sett))
Helper method to check if a set of types is the {AnyObject} singleton

:param sett:
:return:
def get_validated_types(object_types: Set[Type], set_name: str) -> Set[Type]:
    check_var(object_types, var_types=set, var_name=set_name)
    res = {get_validated_type(typ, set_name + '[x]') for typ in object_types}
    if AnyObject in res and len(res) > 1:
        raise ValueError('The set of types contains \'object\'/\'Any\'/\'AnyObject\', '
                         'so no other type must be present in the set')
    else:
        return res
Utility to validate a set of types :
* None is not allowed as a whole or within the set,
* object and Any are converted into AnyObject
* if AnyObject is in the set, it must be the only element

:param object_types: the set of types to validate
:param set_name: a name used in exceptions if any
:return: the fixed set of types
def get_validated_type(object_type: Type[Any], name: str,
                       enforce_not_joker: bool = True) -> Type[Any]:
    if object_type is object or object_type is Any or object_type is AnyObject:
        return AnyObject
    else:
        # -- !! Do not check TypeVar or Union : this is already handled at higher levels --
        if object_type is JOKER:
            # optionally check if JOKER is allowed
            if enforce_not_joker:
                raise ValueError('JOKER is not allowed for object_type')
        else:
            # note: we dont check var earlier, since 'typing.Any' is not a subclass of type anymore
            check_var(object_type, var_types=type, var_name=name)
        return object_type
Utility to validate a type :
* None is not allowed,
* 'object', 'AnyObject' and 'Any' lead to the same 'AnyObject' type
* JOKER is either rejected (if enforce_not_joker is True, default) or accepted 'as is'

:param object_type: the type to validate
:param name: a name used in exceptions if any
:param enforce_not_joker: a boolean, set to False to tolerate JOKER types
:return: the fixed type
def get_options_for_id(options: Dict[str, Dict[str, Any]], identifier: str):
    check_var(options, var_types=dict, var_name='options')
    res = options[identifier] if identifier in options.keys() else dict()
    check_var(res, var_types=dict, var_name='options[' + identifier + ']')
    return res
Helper method, from the full options dict of dicts, to return either the
options related to this parser or an empty dictionary. It also performs all
the var type checks

:param options:
:param identifier:
:return:
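For intuition, a sketch of the options layout this helper expects (the identifiers are hypothetical):

# Hypothetical options map: one sub-dict per parser/converter id.
options = {
    'csv_parser': {'delimiter': ';'},
    'json_parser': {'strict': True},
}
# get_options_for_id(options, 'csv_parser')  -> {'delimiter': ';'}
# get_options_for_id(options, 'unknown_id')  -> {}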
def are_worth_chaining(left_converter, right_converter) -> bool:
    if not left_converter.can_chain:
        return False
    elif not is_any_type(left_converter.to_type) and is_any_type(right_converter.to_type):
        # we gain the capability to generate any type. So it is interesting.
        return True
    elif issubclass(left_converter.from_type, right_converter.to_type) \
            or issubclass(left_converter.to_type, right_converter.to_type) \
            or issubclass(left_converter.from_type, right_converter.from_type):
        # Not interesting : the outcome of the chain would be not better than one of the converters alone
        return False
    # Note: we dont say that chaining a generic converter with a converter is useless. Indeed it might unlock some
    # capabilities for the user (new file extensions, etc.) that would not be available with the generic parser
    # targetting to_type alone. For example parsing object A from its constructor then converting A to B might
    # sometimes be interesting, rather than parsing B from its constructor
    else:
        # interesting
        return True
Utility method to check if it makes sense to chain these two converters.
Returns True if it brings value to chain the first converter with the second
converter. To bring value,
* the second converter's input should not be a parent class of the first converter's input (in that case, it is always more interesting to use the second converter directly for any potential input)
* the second converter's output should not be a parent class of the first converter's input or output. Otherwise the chain does not even make any progress :)
* The first converter has to allow chaining (with converter.can_chain=True)

:param left_converter:
:param right_converter:
:return:
def can_be_appended_to(self, left_converter, strict: bool) -> bool:
    is_able_to_take_input = self.is_able_to_convert(
        strict, from_type=left_converter.to_type, to_type=JOKER)
    if left_converter.is_generic():
        return is_able_to_take_input \
            and left_converter.is_able_to_convert(
                strict, from_type=JOKER, to_type=self.from_type)
    else:
        return is_able_to_take_input
Utility method to check if this (self) converter can be appended after the
output of the provided converter. This method does not check if it makes
sense, it just checks if the output type of the left converter is compliant
with the input type of this converter. Compliant means:
* strict mode : type equality
* non-strict mode : output type of left_converter should be a subclass of input type of this converter

In addition, the custom function provided in constructor may be used to
reject conversion (see is_able_to_convert for details)

:param left_converter:
:param strict: a boolean indicating whether to perform strict type matching
:return:
def get_applicable_options(self, options: Dict[str, Dict[str, Any]]):
    return get_options_for_id(options, self.get_id_for_options())
Returns the options that are applicable to this particular converter, from
the full map of options. It first uses 'get_id_for_options()' to know the id
of this parser, and then simply extracts the contents of the options
corresponding to this id, or returns an empty dict().

:param options: a dictionary converter_id > options
:return:
def _convert(self, desired_type: Type[T], source_obj: S, logger: Logger,
             options: Dict[str, Dict[str, Any]]) -> T:
    pass
Implementing classes should implement this method to perform the conversion
itself

:param desired_type: the destination type of the conversion
:param source_obj: the source object that should be converted
:param logger: a logger to use if any is available, or None
:param options: additional options map. Implementing classes may use
    'self.get_applicable_options()' to get the options that are of interest
    for this converter.
:return:
def create_not_able_to_convert(source: S, converter: Converter, desired_type: Type[T]):
    base_msg = 'Converter ' + str(converter) + ' is not able to ingest source value \'' + str(source) + '\'' \
               ' of type \'' + get_pretty_type_str(type(source)) + '\' and/or convert it to type \'' \
               + get_pretty_type_str(desired_type) + '\'.'
    base_msg += ' This can happen in a chain when the previous step in the chain is generic and actually produced ' \
                ' an output of the wrong type/content'
    return ConversionException(base_msg)
Helper method provided because we actually can't put that in the
constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725

:param source:
:param converter:
:param desired_type:
:return:
def create(converter_func: ConversionMethod, caught: Exception):
    msg = 'Caught TypeError while calling conversion function \'' + str(converter_func.__name__) + '\'. ' \
          'Note that the conversion function signature should be \'' + conversion_method_example_signature_str \
          + '\' (unpacked options mode - default) or ' + multioptions_conversion_method_example_signature_str \
          + ' (unpack_options = False). ' \
          + 'Caught error message is : ' + caught.__class__.__name__ + ' : ' + str(caught)
    return CaughtTypeError(msg).with_traceback(caught.__traceback__)
Helper method provided because we actually can't put that in the
constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725

:param converter_func:
:param caught:
:return:
def _convert(self, desired_type: Type[T], source_obj: S, logger: Logger,
             options: Dict[str, Dict[str, Any]]) -> T:
    try:
        if self.unpack_options:
            opts = self.get_applicable_options(options)
            if self.function_args is not None:
                return self.conversion_method(desired_type, source_obj, logger,
                                              **self.function_args, **opts)
            else:
                return self.conversion_method(desired_type, source_obj, logger,
                                              **opts)
        else:
            if self.function_args is not None:
                return self.conversion_method(desired_type, source_obj, logger,
                                              options, **self.function_args)
            else:
                return self.conversion_method(desired_type, source_obj, logger,
                                              options)
    except TypeError as e:
        raise CaughtTypeError.create(self.conversion_method, e)
Delegates to the user-provided method. Passes the appropriate part of the
options according to the function name.

:param desired_type:
:param source_obj:
:param logger:
:param options:
:return:
def is_able_to_convert_detailed(self, strict: bool, from_type: Type[Any],
                                to_type: Type[Any]):
    # check if first and last converters are happy
    if not self._converters_list[0].is_able_to_convert(strict, from_type=from_type,
                                                       to_type=JOKER):
        return False, None, None
    elif not self._converters_list[-1].is_able_to_convert(strict, from_type=JOKER,
                                                          to_type=to_type):
        return False, None, None
    else:
        # behave as usual. This is probably useless but lets be sure.
        return super(ConversionChain, self).is_able_to_convert_detailed(
            strict, from_type, to_type)
Overrides the parent method to delegate the left check to the first (left)
converter of the chain and the right check to the last (right) converter of
the chain. This includes custom checking if they have any... see
Converter.is_able_to_convert for details

:param strict:
:param from_type:
:param to_type:
:return:
def remove_first(self, inplace: bool = False):
    if len(self._converters_list) > 1:
        if inplace:
            self._converters_list = self._converters_list[1:]
            # update the current source type
            self.from_type = self._converters_list[0].from_type
            return
        else:
            new = copy(self)
            new._converters_list = new._converters_list[1:]
            # update the current source type
            new.from_type = new._converters_list[0].from_type
            return new
    else:
        raise ValueError('cant remove first: would make it empty!')
Utility method to remove the first converter of this chain. If inplace is
True, this object is modified and None is returned. Otherwise, a copy is
returned

:param inplace: boolean indicating whether to modify this object (True) or return a copy (False)
:return: None or a copy with the first converter removed
def add_conversion_steps(self, converters: List[Converter], inplace: bool = False):
    check_var(converters, var_types=list, min_len=1)
    if inplace:
        for converter in converters:
            self.add_conversion_step(converter, inplace=True)
    else:
        new = copy(self)
        new.add_conversion_steps(converters, inplace=True)
        return new
Utility method to add converters to this chain. If inplace is True, this
object is modified and None is returned. Otherwise, a copy is returned

:param converters: the list of converters to add
:param inplace: boolean indicating whether to modify this object (True) or return a copy (False)
:return: None or a copy with the converters added
def add_conversion_step(self, converter: Converter[S, T], inplace: bool = False):
    # if the current chain is generic, raise an error
    if self.is_generic() and converter.is_generic():
        raise ValueError('Cannot chain this generic converter chain to the '
                         'provided converter : it is generic too!')
    # if the current chain is able to transform its input into a valid input for the new converter
    elif converter.can_be_appended_to(self, self.strict):
        if inplace:
            self._converters_list.append(converter)
            # update the current destination type
            self.to_type = converter.to_type
            return
        else:
            new = copy(self)
            new._converters_list.append(converter)
            # update the current destination type
            new.to_type = converter.to_type
            return new
    else:
        raise TypeError('Cannot register a converter on this conversion chain : source type \''
                        + get_pretty_type_str(converter.from_type)
                        + '\' is not compliant with current destination type of the chain : \''
                        + get_pretty_type_str(self.to_type) + '\' (this chain performs '
                        + ('' if self.strict else 'non-') + 'strict mode matching)')
Utility method to add a converter to this chain. If inplace is True, this
object is modified and None is returned. Otherwise, a copy is returned

:param converter: the converter to add
:param inplace: boolean indicating whether to modify this object (True) or return a copy (False)
:return: None or a copy with the converter added
def insert_conversion_steps_at_beginning(self, converters: List[Converter],
                                         inplace: bool = False):
    if inplace:
        for converter in reversed(converters):
            self.insert_conversion_step_at_beginning(converter, inplace=True)
        return
    else:
        new = copy(self)
        for converter in reversed(converters):
            # do inplace since it is a copy
            new.insert_conversion_step_at_beginning(converter, inplace=True)
        return new
Utility method to insert converters at the beginning of this chain. If
inplace is True, this object is modified and None is returned. Otherwise, a
copy is returned

:param converters: the list of converters to insert
:param inplace: boolean indicating whether to modify this object (True) or return a copy (False)
:return: None or a copy with the converters added
def _convert(self, desired_type: Type[T], obj: S, logger: Logger,
             options: Dict[str, Dict[str, Any]]) -> T:
    for converter in self._converters_list[:-1]:
        # convert into each converter's destination type
        obj = converter.convert(converter.to_type, obj, logger, options)
    # the last converter in the chain should convert to desired type
    return self._converters_list[-1].convert(desired_type, obj, logger, options)
Apply the converters of the chain in order to produce the desired result.
Only the last converter will see the 'desired type', the others will be
asked to produce their declared to_type.

:param desired_type:
:param obj:
:param logger:
:param options:
:return:
def are_worth_chaining(first_converter: Converter, second_converter: Converter) -> bool:
    if isinstance(first_converter, ConversionChain):
        if isinstance(second_converter, ConversionChain):
            # BOTH are chains
            for sec_conv in second_converter._converters_list:
                for fir_conv in first_converter._converters_list:
                    if not Converter.are_worth_chaining(fir_conv, sec_conv):
                        return False
        else:
            for fir_conv in first_converter._converters_list:
                if not Converter.are_worth_chaining(fir_conv, second_converter):
                    return False
    else:
        if isinstance(second_converter, ConversionChain):
            for sec_conv in second_converter._converters_list:
                if not Converter.are_worth_chaining(first_converter, sec_conv):
                    return False
        else:
            # Neither is a chain
            if not Converter.are_worth_chaining(first_converter, second_converter):
                return False
    # finally return True if nothing proved otherwise
    return True
This is a generalization of Converter.are_worth_chaining(), to support
ConversionChains.

:param first_converter:
:param second_converter:
:return:
def main(as_module=False):
    this_module = __package__
    args = sys.argv[1:]

    if as_module:
        if sys.version_info >= (2, 7):
            name = 'python -m ' + this_module.rsplit('.', 1)[0]
        else:
            name = 'python -m ' + this_module

        # This module is always executed as "python -m flask.run" and as such
        # we need to ensure that we restore the actual command line so that
        # the reloader can properly operate.
        sys.argv = ['-m', this_module] + sys.argv[1:]
    else:
        name = None

    cli.main(args=args, prog_name=name)
This is a copy/paste of flask.cli.main to instantiate our own group
def init_app(self, app, entry_point_group='invenio_queues.queues'):
    self.init_config(app)
    app.extensions['invenio-queues'] = _InvenioQueuesState(
        app,
        app.config['QUEUES_CONNECTION_POOL'],
        entry_point_group=entry_point_group
    )
    return app
Flask application initialization.
def parse_result(result):
    if result['Type'] == 'D':
        print """There is more than one answer for this. Try making your query\
 more specific. For example, if you want to learn about apple the company\
 and not apple the fruit, try something like apple inc or apple computers.
"""
    elif result['Type'] == 'A':
        print result['AbstractText']
        print '\nResults from DuckDuckGo'
    elif result['Type'] == 'C':
        for entry in result['RelatedTopics']:
            print entry['Text']
            print "\n"
    else:
        print "I do not know how to process this query at the moment."
parse_result(json result) -- print the web query according to the type of result from duckduckgo.
def query(string):
    url = "https://api.duckduckgo.com/?q="
    formating = "&format=json"
    query_string = url + '+'.join(string) + formating
    try:
        result = json.loads(requests.get(query_string).text)
    except:
        print "I'm sorry! Something went wrong. Maybe we could try again later."
        return
    parse_result(result)
query(user string) -- make http request to duckduckgo api, to get result in json format, then call parse_result.
def listens_to(name, sender=None, weak=True):
    def decorator(f):
        if sender:
            return signal(name).connect(f, sender=sender, weak=weak)
        return signal(name).connect(f, weak=weak)
    return decorator
Listens to a named signal
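A usage sketch, assuming blinker-style named signals (the signal function used above); the signal and sender names are illustrative:

# Hedged usage sketch for the listens_to decorator.
@listens_to('user-created')
def on_user_created(sender, **extra):
    print('user created by', sender)

signal('user-created').send('registration-form')  # triggers on_user_created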
def LoadInstallations(counter):
    process = subprocess.Popen(["pip", "list", "--format=json"],
                               stdout=subprocess.PIPE)
    output, _ = process.communicate()
    installations = json.loads(output)
    for i in installations:
        counter.labels(i["name"], i["version"]).inc()
Load installed packages and export the version map. This function may be called multiple times, but the counters will be increased each time. Since Prometheus counters are never decreased, the aggregated results will not make sense.
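A usage sketch with a prometheus_client Counter carrying name/version labels; the metric name and label names here are assumptions, not part of the original code:

# Hedged usage sketch; metric and label names are illustrative.
from prometheus_client import Counter

installations_counter = Counter(
    'python_package_installations', 'Installed pip packages',
    ['name', 'version'])
LoadInstallations(installations_counter)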
def RESTrequest(*args, **kwargs):
    verbose = kwargs.get('verbose', False)
    force_download = kwargs.get('force', False)
    save = kwargs.get('save', True)
    # so you can copy paste from kegg
    args = list(chain.from_iterable(a.split('/') for a in args))
    args = [a for a in args if a]
    request = 'http://rest.kegg.jp/' + "/".join(args)
    print_verbose(verbose, "requesting the page: " + request)
    filename = "KEGG_" + "_".join(args)
    try:
        if force_download:
            raise IOError()
        print_verbose(verbose, "loading the cached file " + filename)
        with open(filename, 'r') as f:
            data = pickle.load(f)
    except IOError:
        print_verbose(verbose, "downloading the library, it may take some time")
        import urllib2
        try:
            req = urllib2.urlopen(request)
            data = req.read()
            if save:
                with open(filename, 'w') as f:
                    print_verbose(verbose, "saving the file to " + filename)
                    pickle.dump(data, f)
        # clean the error stacktrace
        except urllib2.HTTPError as e:
            raise e
    return data
Return and save the blob of data that is returned from kegg without caring about the format
def command_help_long(self):
    indent = " " * 2  # replace with current_indent
    help = "Command must be one of:\n"
    for action_name in self.parser.valid_commands:
        help += "%s%-10s %-70s\n" % (
            indent, action_name,
            self.parser.commands[action_name].desc_short.capitalize())
    help += '\nSee \'%s help COMMAND\' for help and information on a command' \
            % self.parser.prog
    return help
Return command help for use in global parser usage string

@TODO update to support self.current_indent from formatter
def _print(self, helpstr, file=None):
    if file is None:
        file = sys.stdout
    encoding = self._get_encoding(file)
    file.write(helpstr.encode(encoding, "replace"))
.
def run(self):
    self.parser = MultioptOptionParser(
        usage="%prog <command> [options] [args]",
        prog=self.clsname,
        version=self.version,
        option_list=self.global_options,
        description=self.desc_short,
        commands=self.command_set,
        epilog=self.footer
    )

    try:
        self.options, self.args = self.parser.parse_args(self.argv)
    except Exception, e:
        print str(e)
        pass

    if len(self.args) < 1:
        self.parser.print_lax_help()
        return 2

    self.command = self.args.pop(0)
    showHelp = False
    if self.command == 'help':
        if len(self.args) < 1:
            self.parser.print_lax_help()
            return 2
        else:
            self.command = self.args.pop()
            showHelp = True

    if self.command not in self.valid_commands:
        self.parser.print_cmd_error(self.command)
        return 2

    self.command_set[self.command].set_cmdname(self.command)

    subcmd_parser = self.command_set[self.command].get_parser(
        self.clsname, self.version, self.global_options)
    subcmd_options, subcmd_args = subcmd_parser.parse_args(self.args)

    if showHelp:
        subcmd_parser.print_help_long()
        return 1

    try:
        self.command_set[self.command].func(subcmd_options, *subcmd_args)
    except (CommandError, TypeError), e:
        # self.parser.print_exec_error(self.command, str(e))
        subcmd_parser.print_exec_error(self.command, str(e))
        print
        # @TODO show command help
        # self.parser.print_lax_help()
        return 2

    return 1
Run the multiopt parser
def list(self, community=None, hostfilter=None, host=None):
    return self.send.snmp_list(community, hostfilter, host)
Returns a list of SNMP information for a community, hostfilter or host

:param community: A specific SNMP community string to list
:param hostfilter: Valid hostfilter or None
:param host: t_hosts.id or t_hosts.f_ipaddr
:return: [ [ record_id, ipaddr, hostname, community, access, version ] ... ]
def add(self, host=None, f_community=None, f_access=None, f_version=None):
    return self.send.snmp_add(host, f_community, f_access, f_version)
Add an SNMP community string to a host

:param host: t_hosts.id or t_hosts.f_ipaddr
:param f_community: Community string to add
:param f_access: READ or WRITE
:param f_version: v1, v2c or v3
:return: (True/False, t_snmp.id/Error string)
def delete_collection(db_name, collection_name, host='localhost', port=27017):
    client = MongoClient("mongodb://%s:%d" % (host, port))
    client[db_name].drop_collection(collection_name)
Almost exclusively for testing.
def ensure_table(self, cls):
    coll_name = cls.get_table_name()
    try:
        db = self.mongo_client.get_default_database()
        db.create_collection(coll_name)
    except CollectionInvalid:
        pass  # Expected if collection already exists

    # Make sure we have indexes
    coll = self.get_collection(coll_name)
    for idx_name in cls.index_names():
        coll.ensure_index(idx_name)
Required functionality.
def find_one(self, cls, id):
    one = self._find(cls, {"_id": id})
    if not one:
        return None
    return one[0]
Required functionality.
def find_by_index(self, cls, index_name, value):
    return self._find(cls, {index_name: str(value)})
Required functionality.
def save(self, obj):
    if not obj.id:
        obj.id = uuid()

    stored_data = {
        '_id': obj.id,
        'value': json.loads(obj.to_data())
    }

    index_vals = obj.indexes() or {}
    for key in obj.__class__.index_names() or []:
        val = index_vals.get(key, '')
        stored_data[key] = str(val)

    coll = self.get_collection(obj.__class__.get_table_name())
    coll.update({"_id": obj.id}, stored_data, upsert=True)
Required functionality.
def delete(self, obj):
    del_id = obj.get_id()
    if not del_id:
        return

    coll = self.get_collection(obj.__class__.get_table_name())
    coll.delete_one({"_id": del_id})
Required functionality.
def _check_1st_line(line, **kwargs):
    components = kwargs.get("components", ())
    max_first_line = kwargs.get("max_first_line", 50)

    errors = []
    lineno = 1
    if len(line) > max_first_line:
        errors.append(("M190", lineno, max_first_line, len(line)))

    if line.endswith("."):
        errors.append(("M191", lineno))

    if ':' not in line:
        errors.append(("M110", lineno))
    else:
        component, msg = line.split(':', 1)
        if component not in components:
            errors.append(("M111", lineno, component))

    return errors
First line check.

Check that the first line has a known component name followed by a colon
and then a short description of the commit.

:param line: first line
:type line: str
:param components: list of known component names
:type components: tuple
:param max_first_line: maximum length of the first line
:type max_first_line: int
:return: errors as in (code, line number, *args)
:rtype: list
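A usage sketch of the returned error tuples (the component names are illustrative):

# Hedged usage sketch: a first line with an unknown component and a trailing period.
errors = _check_1st_line("unknown: Fix the frobnicator.",
                         components=("docs", "tests"), max_first_line=50)
# errors -> [('M191', 1), ('M111', 1, 'unknown')]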