code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def set_rinput(self, name, input_type, value):
    """Add an R input to be used in the next API call.

    :param name: key under which the value is stored
    :param input_type: R variable type name
    :param value: value to pass
    :return: (True, "Ok") on success, (False, message) on validation failure
    """
    # isinstance replaces the non-idiomatic `type(x) != str` comparison.
    if not isinstance(name, str):
        return False, "Name must be string"
    if not isinstance(input_type, str):
        return False, "Input type must be string"
    self.r_inputs[name] = {"type": input_type, "value": value}
    return True, "Ok"
Add rinput to be used in next api call :param name: key :param input_type: variable type :param value: value :return: True/False, message
def set_routput(self, routput):
    """Register an R output name to be requested in the next API call.

    :param routput: key of the R object to return
    :return: (True, "Ok") on success, (False, message) otherwise
    """
    if type(routput) is not str:
        return False, "Routput must be string"
    self.r_outputs.append(routput)
    return True, "Ok"
Add routput to be used in next api call :param routput: key :return: True/False, message
def call_api(self, url, data, files=None, print_response=True):
    """Call the DeployR API endpoint and return its result.

    :param url: endpoint path, appended to self.HOST
    :param data: POST form fields (a ``format: json`` key is forced)
    :param files: optional file uploads; default is None instead of a shared
        mutable ``{}`` (fixes the mutable-default-argument bug)
    :param print_response: echo status code and pretty JSON to stdout
    :return: (False, message) on argument validation failure, otherwise
        (HTTP status code, decoded JSON) or (500, {"error": ...}) on a
        connection error
    """
    if not isinstance(url, str):
        return False, "Url must be string"
    if not isinstance(data, dict):
        return False, "Data must be a dict"
    if files is None:
        files = {}
    if not isinstance(files, dict):
        return False, "Files must be a dict"
    if not isinstance(print_response, bool):
        return False, "Print_response must be boolean"
    url = self.HOST + url
    data["format"] = "json"
    cookies = {"JSESSIONID": self.JSESSIONID}
    # Add rinputs to post data
    if self.r_inputs:
        data["inputs"] = json.dumps(self.r_inputs)
    # Add routputs to post data
    if self.r_outputs:
        data["robjects"] = ",".join(self.r_outputs)
    try:
        response = requests.post(url, data=data, files=files, cookies=cookies)
    except requests.exceptions.RequestException as exception:
        # Clear the queued R data even on failure so the next call starts clean.
        self.clear_rdata()
        return 500, {"error": str(exception)}
    status_code = response.status_code
    # Print log only if required (print() is valid on both Python 2 and 3 here).
    if print_response:
        print(status_code)
        print(DeployRConnection.pretty_json(response.json()))
    # remove rinputs and routputs
    self.clear_rdata()
    return status_code, response.json()
call api with given parameters and returns its result :param url: end point :param data: post data :param files: files if needed :param print_response: print log if required :return: status code, response
def is_resource_class_terminal_attribute(rc, attr_name):
    """Check whether `attr_name` names a terminal attribute of the
    registered resource `rc`."""
    kind = get_resource_class_attribute(rc, attr_name).kind
    return kind == RESOURCE_ATTRIBUTE_KINDS.TERMINAL
Checks if the given attribute name is a terminal attribute of the given registered resource.
def is_resource_class_member_attribute(rc, attr_name):
    """Check whether `attr_name` names a member attribute of the
    registered resource `rc`."""
    kind = get_resource_class_attribute(rc, attr_name).kind
    return kind == RESOURCE_ATTRIBUTE_KINDS.MEMBER
Checks if the given attribute name is a member attribute of the given registered resource.
def is_resource_class_collection_attribute(rc, attr_name):
    """Check whether `attr_name` names a collection attribute of the
    registered resource `rc`."""
    kind = get_resource_class_attribute(rc, attr_name).kind
    return kind == RESOURCE_ATTRIBUTE_KINDS.COLLECTION
Checks if the given attribute name is a collection attribute of the given registered resource.
def is_resource_class_resource_attribute(rc, attr_name):
    """Checks if the given attribute name is a resource attribute (i.e.,
    either a member or a collection attribute) of the given registered
    resource.
    """
    attr = get_resource_class_attribute(rc, attr_name)
    # Bug fix: the attribute's *kind* must be compared against TERMINAL;
    # the original compared the attribute object itself, which is always
    # unequal to the kind constant and so always returned True.
    return attr.kind != RESOURCE_ATTRIBUTE_KINDS.TERMINAL
Checks if the given attribute name is a resource attribute (i.e., either a member or a collection attribute) of the given registered resource.
def get_resource_class_terminal_attribute_iterator(rc):
    """Iterate over all terminal attributes of the registered resource `rc`."""
    wanted_kind = RESOURCE_ATTRIBUTE_KINDS.TERMINAL
    for candidate in itervalues_(rc.__everest_attributes__):
        if candidate.kind == wanted_kind:
            yield candidate
Returns an iterator over all terminal attributes in the given registered resource.
def get_resource_class_relationship_attribute_iterator(rc):
    """Iterate over all non-terminal (member/collection) attributes of the
    registered resource `rc`."""
    terminal = RESOURCE_ATTRIBUTE_KINDS.TERMINAL
    for candidate in itervalues_(rc.__everest_attributes__):
        if candidate.kind != terminal:
            yield candidate
Returns an iterator over all non-terminal (i.e., member and collection) attributes in the given registered resource.
def get_resource_class_member_attribute_iterator(rc):
    """Iterate over all member attributes of the registered resource `rc`."""
    wanted_kind = RESOURCE_ATTRIBUTE_KINDS.MEMBER
    for candidate in itervalues_(rc.__everest_attributes__):
        if candidate.kind == wanted_kind:
            yield candidate
Returns an iterator over all member attributes in the given registered resource.
def get_resource_class_collection_attribute_iterator(rc):
    """Iterate over all collection attributes of the registered resource `rc`."""
    wanted_kind = RESOURCE_ATTRIBUTE_KINDS.COLLECTION
    for candidate in itervalues_(rc.__everest_attributes__):
        if candidate.kind == wanted_kind:
            yield candidate
Returns an iterator over all collection attributes in the given registered resource.
def get_joke():
    """Return a random Ron Swanson quote string, or None if the HTTP
    request does not succeed.

    Removed the dead ``jokes = []`` assignment that was immediately
    overwritten by the parsed response.
    """
    page = requests.get("http://ron-swanson-quotes.herokuapp.com/v2/quotes")
    if page.status_code == 200:
        # The endpoint returns a JSON list with a single quote string.
        jokes = json.loads(page.content.decode(page.encoding))
        return '"' + jokes[0] + '" - Ron Swanson'
    return None
Return a Ron Swanson quote. Returns None if unable to retrieve a quote.
def window(iterable, n=2, cast=tuple):
    """Pass a running window of size `n` along the given iterable.

    :param iterable: source sequence/iterator
    :param n: window size
    :param cast: callable applied to the window before yielding (tuple by default)
    :raises ValueError: if the iterable has fewer than `n` elements

    Bug fix: the original primed the deque with ``next(it)`` inside a
    generator expression; on Python 3.7+ (PEP 479) a short iterable raised
    RuntimeError instead of reaching the ValueError.  islice is safe.
    """
    it = iter(iterable)
    win = deque(islice(it, n), maxlen=n)
    if len(win) < n:
        raise ValueError('Window size was greater than iterable length')
    yield cast(win)
    append = win.append
    for e in it:
        append(e)
        yield cast(win)
This function passes a running window along the length of the given iterable. By default, the return value is a tuple, but the cast parameter can be used to change the final result.
def combinations_with_replacement(iterable, r):
    """Depth-first alternative to itertools.combinations_with_replacement
    that also emits orderings where earlier items repeat.

    Yields r-length tuples, exploring a LIFO stack of partial sequences.
    """
    pending = [[item] for item in iterable]
    while pending:
        partial = pending.pop()
        if len(partial) == r:
            yield tuple(partial)
        else:
            pending.extend(partial + [item] for item in iterable)
This function acts as a replacement for the itertools.combinations_with_replacement function. The original does not replace items that come earlier in the provided iterator.
def main():
    """Command line interface for the ``update-dotdee`` program.

    Parses short/long options, builds the execution context, and updates
    the single file given as the only positional argument.  Exits with
    status 1 on argument errors or unexpected exceptions.
    """
    # Initialize logging to the terminal and system log.
    coloredlogs.install(syslog=True)
    # Parse the command line arguments.
    context_opts = {}
    program_opts = {}
    try:
        options, arguments = getopt.getopt(sys.argv[1:], 'fur:vqh', [
            'force', 'use-sudo', 'remote-host=', 'verbose', 'quiet', 'help',
        ])
        for option, value in options:
            if option in ('-f', '--force'):
                program_opts['force'] = True
            elif option in ('-u', '--use-sudo'):
                context_opts['sudo'] = True
            elif option in ('-r', '--remote-host'):
                context_opts['ssh_alias'] = value
            elif option in ('-v', '--verbose'):
                coloredlogs.increase_verbosity()
            elif option in ('-q', '--quiet'):
                coloredlogs.decrease_verbosity()
            elif option in ('-h', '--help'):
                usage(__doc__)
                sys.exit(0)
            else:
                # Programming error... getopt already rejects unknown options.
                assert False, "Unhandled option!"
        # No arguments at all is treated as a request for usage, not an error.
        if not arguments:
            usage(__doc__)
            sys.exit(0)
        if len(arguments) != 1:
            raise Exception("Expected a filename as the first and only argument!")
        program_opts['filename'] = arguments[0]
    except Exception as e:
        warning("Error: %s", e)
        sys.exit(1)
    # Run the program.
    try:
        # Initialize the execution context (local, sudo, or remote via SSH).
        program_opts['context'] = create_context(**context_opts)
        # Initialize the program and update the file.
        UpdateDotDee(**program_opts).update_file()
    except Exception as e:
        logger.exception("Encountered unexpected exception, aborting!")
        sys.exit(1)
Command line interface for the ``update-dotdee`` program.
def add_days(self, days: int) -> datetime:
    """Advance the current value by `days` days and return it."""
    self.value += relativedelta(days=days)
    return self.value
Adds days
def add_months(self, value: int) -> datetime:
    """Advance the current value by `value` months and return it."""
    self.value += relativedelta(months=value)
    return self.value
Add a number of months to the given date
def from_date(self, value: date) -> datetime:
    """Initialize the current value from a plain `date` (time set to midnight)."""
    assert isinstance(value, date)
    self.value = datetime(value.year, value.month, value.day)
    return self.value
Initializes from the given date value
def from_iso_long_date(self, date_str: str) -> datetime:
    """Parse a long ISO datetime string (YYYY-MM-DDTHH:mm:ss) into the
    current value and return it."""
    assert isinstance(date_str, str)
    assert len(date_str) == 19
    parsed = datetime.strptime(date_str, ISO_LONG_FORMAT)
    self.value = parsed
    return parsed
Parse ISO date string (YYYY-MM-DDTHH:mm:ss)
def from_iso_date_string(self, date_str: str) -> datetime:
    """Parse an ISO date string (YYYY-MM-DD) into the current value and
    return it."""
    assert isinstance(date_str, str)
    parsed = datetime.strptime(date_str, ISO_DATE_FORMAT)
    self.value = parsed
    return parsed
Parse ISO date string (YYYY-MM-DD)
def get_day_name(self) -> str:
    """Return the weekday name of the current value.

    isoweekday() gives Monday=1..Sunday=7; calendar.day_name is 0-indexed
    from Monday, hence the -1.
    """
    return calendar.day_name[self.value.isoweekday() - 1]
Returns the day name
def to_iso_string(self) -> str:
    """Return the full ISO-8601 string for the current datetime value."""
    assert isinstance(self.value, datetime)
    return self.value.isoformat()
Returns full ISO string for the given date
def end_of_day(self) -> datetime:
    """Set the current value to 23:59:59 on the same day and return it.

    Note: microseconds are dropped (a fresh datetime is constructed).
    """
    current = self.value
    self.value = datetime(current.year, current.month, current.day, 23, 59, 59)
    return self.value
End of day
def end_of_month(self) -> datetime:
    """Set the current value to the last day of its month and return it.

    Computed as the first day of the following month minus one day; the
    time-of-day component is preserved.
    """
    first_of_next = (self.value + relativedelta(months=1)).replace(day=1)
    self.value = first_of_next - relativedelta(days=1)
    return self.value
Provides end of the month for the given date
def is_end_of_month(self) -> bool:
    """Check whether the current value falls on the last day of its month.

    Bug fix: the original computed end-of-month on a fresh ``Datum()``
    (i.e. relative to its default value, not this instance's date), so the
    comparison was against the wrong month.  Seed the helper with
    ``self.value`` first.
    """
    end_of_month = Datum()
    end_of_month.set_value(self.value)
    end_of_month.end_of_month()
    return self.value == end_of_month.value
Checks if the date is at the end of the month
def set_day(self, day: int) -> datetime:
    """Replace the day-of-month of the current value and return it."""
    updated = self.value.replace(day=day)
    self.value = updated
    return updated
Sets the day value
def set_value(self, value: datetime):
    """Set the current value.

    :param value: new value; must be a `datetime` (AssertionError otherwise).
    """
    assert isinstance(value, datetime)
    self.value = value
Sets the current value
def start_of_day(self) -> datetime:
    """Set the current value to midnight of the same day and return it."""
    v = self.value
    self.value = datetime(v.year, v.month, v.day)
    return self.value
Returns start of day
def subtract_days(self, days: int) -> datetime:
    """Move the current value back by `days` days and return it."""
    self.value -= relativedelta(days=days)
    return self.value
Subtracts a number of days from the given value
def subtract_weeks(self, weeks: int) -> datetime:
    """Move the current value back by `weeks` weeks and return it."""
    self.value -= timedelta(weeks=weeks)
    return self.value
Subtracts number of weeks from the current value
def subtract_months(self, months: int) -> datetime:
    """Move the current value back by `months` months and return it."""
    self.value -= relativedelta(months=months)
    return self.value
Subtracts a number of months from the current value
def to_short_time_string(self) -> str:
    """Return the time as HH:mm (24-hour, zero-padded)."""
    t = self.time
    return f"{t.hour:02}:{t.minute:02}"
Return the short time string (HH:mm) only
def to_long_time_string(self) -> str:
    """Return the time as HH:mm:ss (24-hour, zero-padded)."""
    t = self.time
    return f"{t.hour:02}:{t.minute:02}:{t.second:02}"
Return the long time string (HH:mm:ss) only
def to_iso_time_string(self) -> str:
    """Return the time as HH:mm:ss, reusing the short HH:mm form."""
    return f"{self.to_short_time_string()}:{self.time.second:02}"
Return the iso time string only
def to_datetime_string(self) -> str:
    """Return 'YYYY-MM-DD HH:mm:ss' — ISO date plus long time.

    Example: 2018-12-06 12:32:56
    """
    return f"{self.to_iso_date_string()} {self.to_long_time_string()}"
Returns a human-readable string representation with iso date and time Example: 2018-12-06 12:32:56
def to_long_datetime_string(self) -> str:
    """Return 'YYYY-MM-DD HH:mm' — ISO date plus short time.

    Example: 2018-12-06 12:34
    """
    return f"{self.to_iso_date_string()} {self.to_short_time_string()}"
Returns the long date/time string Example: 2018-12-06 12:34
def today(self) -> datetime:
    """Set the current value to today's date at midnight and return it."""
    now = datetime.today()
    self.value = datetime(now.year, now.month, now.day)
    return self.value
Returns today (date only) as datetime
def yesterday(self) -> datetime:
    """Set the current value to now minus one day (time-of-day kept,
    unlike `today`) and return it."""
    self.value = datetime.today() - timedelta(days=1)
    return self.value
Set the value to yesterday
def get_uuid_string(low=None, high=None, **x):
    """Build a standard hyphenated UUID string from the 'low' and 'high'
    halves of a UUID protobuf message.

    :param low: low long of the UUID (required)
    :param high: high long of the UUID (required)
    :return: hyphen-formatted UUID string, or None if either half is missing
    """
    if low is None or high is None:
        return None
    # NOTE(review): `x` (the kwargs dict) is deliberately reused here as the
    # concatenated hex string; parse_part (defined elsewhere) presumably
    # renders each long as a 16-char hex string — TODO confirm.
    x = ''.join([parse_part(low), parse_part(high)])
    # Slice into the 8-4-4-4-12 UUID groups.
    return '-'.join([x[:8], x[8:12], x[12:16], x[16:20], x[20:32]])
This method parses a UUID protobuf message type from its component 'high' and 'low' longs into a standard formatted UUID string Args: x (dict): containing keys, 'low' and 'high' corresponding to the UUID protobuf message type Returns: str: UUID formatted string
def get_versions_from_webpage(self):
    """Scrape version details from the record's Zenodo webpage (the
    information is not available via the REST API).

    :return: list of version dicts; for single-version records (no versions
        table on the page) a one-element list is synthesized from the
        record's own data.
    """
    res = requests.get('https://zenodo.org/record/'+self.data['conceptrecid'])
    soup = BeautifulSoup(res.text, 'html.parser')
    # Rows of the "Versions" metadata table in the page sidebar.
    version_rows = soup.select('.well.metadata > table.table tr')
    if len(version_rows) == 0:  # when only 1 version
        return [{
            'recid': self.data['id'],
            'name': '1',
            'doi': self.data['doi'],
            'date': self.data['created'],
            'original_version': self.original_version()
        }]
    # Rows with fewer than two cells are header/separator rows — skip them.
    return [self._row_to_version(row) for row in version_rows if len(row.select('td')) > 1]
Get version details from Zenodo webpage (it is not available in the REST api)
def search(self, search):
    """Search Zenodo records for the given string.

    :param search: string to search for
    :return: Record[] results
    """
    # Zenodo's search endpoint cannot handle '/' in query params.
    query = search.replace('/', ' ')
    return self._get_records({'q': query})
search Zenodo record for string `search` :param search: string to search :return: Record[] results
def qdict_get_list(qdict, k):
    """Return qdict.getlist(k) with falsy (blank) entries removed."""
    return [item for item in qdict.getlist(k) if item]
get list from QueryDict and remove blank entries from the list.
def request_get_next(request, default_next):
    """Resolve the 'next' URL from a request.

    Order of precedence: POST['next'], GET['next'], the HTTP referer,
    then `default_next`.
    """
    return (request.POST.get('next')
            or request.GET.get('next')
            or request.META.get('HTTP_REFERER')
            or default_next)
get next url from request. Order of precedence: POST.next, GET.next, HTTP_REFERER, default_next
def upload_progress(request):
    """AJAX view adapted from django-progressbarupload.

    Return the upload progress and total length values as JSON.

    Bug fix: `progress_id` and `logfilename` were left unbound (NameError)
    when the header was present in neither GET nor META; both now default
    to None and the log-file fallback is guarded.
    """
    progress_id = None
    if 'X-Progress-ID' in request.GET:
        progress_id = request.GET['X-Progress-ID']
    elif 'X-Progress-ID' in request.META:
        progress_id = request.META['X-Progress-ID']
    logfilename = None
    if 'logfilename' in request.GET:
        logfilename = request.GET['logfilename']
    elif 'logfilename' in request.META:
        logfilename = request.META['logfilename']
    cache_key = "%s_%s" % (request.META['REMOTE_ADDR'], progress_id)
    data = cache.get(cache_key)
    # Fall back to the log-file keyed entry when no per-client entry exists.
    if not data and logfilename:
        data = cache.get(logfilename.replace(' ', '_'))
    return HttpResponse(json.dumps(data))
AJAX view adapted from django-progressbarupload Return the upload progress and total length values
def set_color(self, fg=None, bg=None, intensify=False, target=sys.stdout):
    """Set foreground and background colors and intensity on `target`.

    Abstract: concrete terminal backends must override this.

    :raises NotImplementedError: always, in this base implementation.
    """
    raise NotImplementedError
Set foreground- and background colors and intensity.
def add(self, entity):
    """Adds the given entity to this cache.

    :param entity: Entity to add.
    :type entity: Object implementing
        :class:`everest.interfaces.IEntity`.
    :raises ValueError: If the ID of the entity to add is ``None``
        (unless the `allow_none_id` constructor argument was set) —
        raised by the __check_new validation, presumably; confirm there.
    """
    if self.__check_new(entity):
        self.__entities.append(entity)
Adds the given entity to this cache. :param entity: Entity to add. :type entity: Object implementing :class:`everest.interfaces.IEntity`. :raises ValueError: If the ID of the entity to add is ``None`` (unless the `allow_none_id` constructor argument was set).
def remove(self, entity):
    """Removes the given entity from this cache.

    :param entity: Entity to remove (must expose `id` and `slug`).
    :type entity: Object implementing
        :class:`everest.interfaces.IEntity`.
    :raises ValueError: If the entity is not in the entity list
        (raised by ``list.remove``).
    """
    # Stale map entries are tolerated: pop with a default never raises.
    self.__id_map.pop(entity.id, None)
    self.__slug_map.pop(entity.slug, None)
    self.__entities.remove(entity)
Removes the given entity from this cache. :param entity: Entity to remove. :type entity: Object implementing :class:`everest.interfaces.IEntity`. :raises KeyError: If the given entity is not in this cache. :raises ValueError: If the ID of the given entity is `None`.
def retrieve(self, filter_expression=None, order_expression=None,
             slice_key=None):
    """Retrieve an iterator over the cached entities, optionally filtered,
    ordered and sliced (applied in that sequence)."""
    result = iter(self.__entities)
    if filter_expression is not None:
        result = filter_expression(result)
    if order_expression is not None:
        # Ordering always involves a copy and conversion to a list, so
        # re-wrap the result in an iterator.
        result = iter(order_expression(result))
    if slice_key is not None:
        result = islice(result, slice_key.start, slice_key.stop)
    return result
Retrieve entities from this cache, possibly after filtering, ordering and slicing.
def extract_user_keywords_generator(twitter_lists_gen, lemmatizing="wordnet"):
    """Annotate users with keyword bags derived from their Twitter lists.

    :param twitter_lists_gen: generator yielding
        (user_twitter_id, twitter_lists) pairs.
    :param lemmatizing: one of "porter", "snowball" or "wordnet" —
        presumably forwarded to user_twitter_list_bag_of_words; confirm there.
    Yields (user_twitter_id, user_annotation) where user_annotation maps
    "bag_of_lemmas" to lemma multiplicities and "lemma_to_keywordbag" to
    per-lemma keyword counts.  Users with a None lists payload are skipped.
    """
    # Extract keywords serially, one user at a time.
    for user_twitter_id, twitter_lists_list in twitter_lists_gen:
        if twitter_lists_list is not None:
            # Unwrap the API envelope if the payload is {"lists": [...]}.
            if "lists" in twitter_lists_list.keys():
                twitter_lists_list = twitter_lists_list["lists"]
            bag_of_lemmas, lemma_to_keywordbag = user_twitter_list_bag_of_words(twitter_lists_list, lemmatizing)
            # Convert the nested default-dicts to plain dicts before yielding.
            for lemma, keywordbag in lemma_to_keywordbag.items():
                lemma_to_keywordbag[lemma] = dict(keywordbag)
            lemma_to_keywordbag = dict(lemma_to_keywordbag)
            user_annotation = dict()
            user_annotation["bag_of_lemmas"] = bag_of_lemmas
            user_annotation["lemma_to_keywordbag"] = lemma_to_keywordbag
            yield user_twitter_id, user_annotation
Based on the user-related lists I have downloaded, annotate the users. Inputs: - twitter_lists_gen: A python generator that yields a user Twitter id and a generator of Twitter lists. - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet". Yields: - user_twitter_id: A Twitter user id. - user_annotation: A python dictionary that contains two dicts: * bag_of_lemmas: Maps lemmas to multiplicity. * lemma_to_keywordbag: A python dictionary that maps stems/lemmas to original topic keywords.
def form_user_label_matrix(user_twitter_list_keywords_gen, id_to_node, max_number_of_labels): user_label_matrix, annotated_nodes, label_to_lemma, node_to_lemma_tokeywordbag = form_user_term_matrix(user_twitter_list_keywords_gen, id_to_node, None) # write_terms_and_frequencies("/home/georgerizos/Documents/term_matrix.txt", user_label_matrix, label_to_lemma) user_label_matrix, annotated_nodes, label_to_lemma = filter_user_term_matrix(user_label_matrix, annotated_nodes, label_to_lemma, max_number_of_labels) # write_terms_and_frequencies("/home/georgerizos/Documents/label_matrix.txt", user_label_matrix, label_to_lemma) lemma_to_keyword = form_lemma_tokeyword_map(annotated_nodes, node_to_lemma_tokeywordbag) return user_label_matrix, annotated_nodes, label_to_lemma, lemma_to_keyword
Forms the user-label matrix to be used in multi-label classification. Input: - user_twitter_list_keywords_gen: - id_to_node: A Twitter id to node map as a python dictionary. Outputs: - user_label_matrix: A user-to-label matrix in scipy sparse matrix format. - annotated_nodes: A numpy array containing graph nodes. - label_to_lemma: A python dictionary that maps a numerical label to a string topic lemma. - lemma_to_keyword: A python dictionary that maps a lemma to the original keyword.
def form_lemma_tokeyword_map(annotated_nodes, node_to_lemma_tokeywordbag):
    """Aggregate per-node keyword bags and map each lemma/stem to its most
    frequent original keyword.

    :param annotated_nodes: iterable of annotated (anonymized) nodes.
    :param node_to_lemma_tokeywordbag: node -> {lemma -> {keyword -> count}}.
    :return: dict mapping each lemma to its most popular keyword.
    """
    # Reduce the per-node lemma-to-keyword bags into one aggregate bag.
    merged = defaultdict(lambda: defaultdict(int))
    for node in annotated_nodes:
        for lemma, bag in node_to_lemma_tokeywordbag[node].items():
            for keyword, count in bag.items():
                merged[lemma][keyword] += count
    # Pick the highest-count keyword for each lemma.
    return {lemma: max(bag.items(), key=itemgetter(1))[0]
            for lemma, bag in merged.items()}
Forms the aggregated dictionary that maps lemmas/stems to the most popular topic keyword. Inputs: - annotated_nodes: A numpy array of anonymized nodes that are annotated. - node_to_lemma_tokeywordbag: A map from nodes to maps from lemmas to bags of keywords. Output: - lemma_to_keyword: A dictionary that maps lemmas to keywords.
def decide_which_users_to_annotate(centrality_vector, number_to_annotate, already_annotated, node_to_id):
    """Sort a centrality vector and return the Twitter user ids to annotate.

    :param centrality_vector: numpy-compatible vector of centrality values.
    :param number_to_annotate: maximum number of users to return.
    :param already_annotated: set of user ids already annotated (skipped).
    :param node_to_id: dict mapping graph nodes to Twitter user ids.
    :return: list of up to `number_to_annotate` user ids, by decreasing
        centrality.

    Bug fix: the original handled size-1 vectors with
    ``reversed_ind = reversed_ind.append(ind)`` — list.append returns None,
    so iteration raised TypeError.  ravel + reversed argsort handles every
    size uniformly.
    """
    centrality_vector = np.asarray(centrality_vector)
    # argsort is ascending; reverse for decreasing centrality.  ravel
    # flattens row/column vectors so a length-1 input works too.
    ordering = np.argsort(centrality_vector.ravel())[::-1]
    # Collect the ids of the most central, not-yet-annotated users.
    user_id_list = []
    for node in ordering:
        user_twitter_id = node_to_id[node]
        if user_twitter_id not in already_annotated:
            user_id_list.append(user_twitter_id)
            if len(user_id_list) >= number_to_annotate:
                break
    return user_id_list
Sorts a centrality vector and returns the Twitter user ids that are to be annotated. Inputs: - centrality_vector: A numpy array vector, that contains the centrality values for all users. - number_to_annotate: The number of users to annotate. - already_annotated: A python set of user twitter ids that have already been annotated. - node_to_id: A python dictionary that maps graph nodes to user twitter ids. Output: - user_id_list: A python list of Twitter user ids.
def on_demand_annotation(twitter_app_key, twitter_app_secret, user_twitter_id):
    """Leverage Twitter lists for on-demand annotation of popular users.

    Fetches (and prints) the lists the given user is a member of.

    :param twitter_app_key: Twitter application key.
    :param twitter_app_secret: Twitter application secret.
    :param user_twitter_id: the Twitter user to look up.
    :return: the raw list-membership response.
    TODO: full annotation pipeline is not implemented yet.
    """
    # Log into my application.
    twitter = login(twitter_app_key, twitter_app_secret)
    # 1000 is the per-request list-membership cap requested here.
    twitter_lists_list = twitter.get_list_memberships(user_id=user_twitter_id, count=1000)
    for twitter_list in twitter_lists_list:
        print(twitter_list)
    return twitter_lists_list
A service that leverages twitter lists for on-demand annotation of popular users. TODO: Do this.
def get_member_class(resource):
    """Return the member class registered for the given resource.

    A registered resource interface is looked up as a utility; a class or
    instance is looked up as an adapter.
    """
    registry = get_current_registry()
    if IInterface in provided_by(resource):
        return registry.getUtility(resource, name='member-class')
    return registry.getAdapter(resource, IMemberResource,
                               name='member-class')
Returns the registered member class for the given resource. :param resource: registered resource :type resource: class implementing or instance providing or subclass of a registered resource interface.
def get_collection_class(resource):
    """Return the collection resource class registered for the given marker
    interface or member resource class/instance.

    A registered resource interface is looked up as a utility; a class or
    instance is looked up as an adapter.
    """
    registry = get_current_registry()
    if IInterface in provided_by(resource):
        return registry.getUtility(resource, name='collection-class')
    return registry.getAdapter(resource, ICollectionResource,
                               name='collection-class')
Returns the registered collection resource class for the given marker interface or member resource class or instance. :param rc: registered resource :type rc: class implementing or instance providing or subclass of a registered resource interface.
def as_member(entity, parent=None):
    """Adapt a domain entity to a location-aware member resource.

    :param entity: domain object with a registered resource adapter.
    :param parent: optional parent collection resource to attach the new
        member to.
    :returns: object implementing
        :class:`everest.resources.interfaces.IMemberResource`.
    """
    registry = get_current_registry()
    member = registry.getAdapter(entity, IMemberResource)
    if parent is not None:
        member.__parent__ = parent # interface method pylint: disable=E1121
    return member
Adapts an object to a location aware member resource. :param entity: a domain object for which a resource adapter has been registered :type entity: an object implementing :class:`everest.entities.interfaces.IEntity` :param parent: optional parent collection resource to make the new member a child of :type parent: an object implementing :class:`everest.resources.interfaces.ICollectionResource` :returns: an object implementing :class:`everest.resources.interfaces.IMemberResource`
def get_resource_url(resource):
    """Return the URL for the given resource with the host part stripped."""
    parts = list(urlparse.urlparse(model_path(resource)))
    parts[1] = ""  # drop the netloc so only the path remains
    return urlparse.urlunparse(parts)
Returns the URL for the given resource.
def provides_resource(obj):
    """Check whether the given type or instance provides IResource.

    A type is probed via a bare instance (``object.__new__``) so that
    ``__init__`` side effects are skipped.
    """
    probe = object.__new__(obj) if isinstance(obj, type) else obj
    return IResource in provided_by(probe)
Checks if the given type or instance provides the :class:`everest.resources.interfaces.IResource` interface.
def provides_member_resource(obj):
    """Check whether the given type or instance provides IMemberResource."""
    probe = object.__new__(obj) if isinstance(obj, type) else obj
    return IMemberResource in provided_by(probe)
Checks if the given type or instance provides the :class:`everest.resources.interfaces.IMemberResource` interface.
def provides_collection_resource(obj):
    """Check whether the given type or instance provides ICollectionResource."""
    probe = object.__new__(obj) if isinstance(obj, type) else obj
    return ICollectionResource in provided_by(probe)
Checks if the given type or instance provides the :class:`everest.resources.interfaces.ICollectionResource` interface.
def get_registered_collection_resources():
    """Return a list of all registered collection resource classes."""
    registry = get_current_registry()
    return [utility.component
            for utility in registry.registeredUtilities()
            if utility.name == 'collection-class']
Returns a list of all registered collection resource classes.
def resource_to_url(resource, request=None, quote=False):
    """Convert the given resource to a URL.

    :param request: request supplying the host part of the URL; defaults to
        the current request.
    :param bool quote: if set, the returned URL is quoted.
    """
    if request is None:
        request = get_current_request()
    converter = get_current_registry().getAdapter(request,
                                                  IResourceUrlConverter)
    return converter.resource_to_url(resource, quote=quote)
Converts the given resource to a URL. :param request: Request object (required for the host name part of the URL). If this is not given, the current request is used. :param bool quote: If set, the URL returned will be quoted.
def url_to_resource(url, request=None):
    """Convert the given URL to a resource.

    :param request: request supplying the host part of the URL; defaults to
        the current request.
    """
    if request is None:
        request = get_current_request()
    converter = get_current_registry().getAdapter(request,
                                                  IResourceUrlConverter)
    return converter.url_to_resource(url)
Converts the given URL to a resource. :param request: Request object (required for the host name part of the URL). If this is not given, the current request is used.
def ber(tp, tn, fp, fn):
    """Balanced Error Rate in [0, 1].

    :param int tp: number of true positives
    :param int tn: number of true negatives
    :param int fp: number of false positives
    :param int fn: number of false negatives
    :rtype: float
    """
    false_positive_rate = fp / float(tn + fp)
    false_negative_rate = fn / float(fn + tp)
    return (false_positive_rate + false_negative_rate) / 2
Balanced Error Rate [0, 1] :param int tp: number of true positives :param int tn: number of true negatives :param int fp: number of false positives :param int fn: number of false negatives :rtype: float
def mcc(tp, tn, fp, fn):
    """Matthews Correlation Coefficient in [-1, 1]; 0 means guessing.

    :param int tp: number of true positives
    :param int tn: number of true negatives
    :param int fp: number of false positives
    :param int fn: number of false negatives
    :rtype: float
    """
    product = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
    # With non-negative counts, product == 0 iff some margin is empty; fall
    # back to a denominator of 1.0 by convention in that case.
    den = math.sqrt(product) if product != 0 else 1.0
    return (tp * tn - fp * fn) / den
Matthew's Correlation Coefficient [-1, 1] 0 = you're just guessing :param int tp: number of true positives :param int tn: number of true negatives :param int fp: number of false positives :param int fn: number of false negatives :rtype: float
def mse(exp, obs):
    """Mean Squared Error between expected and observed values.

    :param exp: expected values (list of float)
    :param obs: observed values (list of float), same length as `exp`
    """
    assert len(exp) == len(obs)
    diff = numpy.array(exp) - numpy.array(obs)
    return numpy.mean(diff ** 2)
Mean Squared Error :param exp: expected values :type exp: list of float :param obs: observed values :type obs: list of float
def mui(x, y):
    """Mutual information (in bits) between two equal-length sequences."""
    assert len(x) == len(y)
    n = len(x)
    p_x = Counter(x)
    p_y = Counter(y)
    p_xy = Counter(zip(x, y))
    total = 0
    for (a, b), joint in p_xy.items():
        total += joint * math.log((joint * n) / float(p_x[a] * p_y[b]), 2) / n
    return total
Mutual Information
def roc(clss, vals, reverse=False):
    """Receiver Operator Characteristic curve data.

    :param clss: known classes; 1 for a positive case, -1 for negative
    :param vals: classification probabilities etc., parallel to `clss`
    :param bool reverse: sort the values in descending instead of ascending order
    :return: numpy float32 array with one row per threshold, columns indexed
        by the module-level constants X, Y, A, T, M, B.
    """
    assert len(clss) == len(vals)
    global X, Y, A, T, M, B
    order = numpy.argsort(vals)
    if reverse:
        order = order[::-1]
    clss = numpy.array(clss)[order]
    vals = numpy.array(vals)[order]
    length = len(clss) + 1
    data = numpy.empty((length, 6), dtype=numpy.float32)
    # Bug fix: the original `data[0, X], data[0, Y], data[0, A] = 0` tried to
    # unpack the int 0 into three targets and raised TypeError on every call;
    # assign each cell explicitly.
    data[0, X] = data[0, Y] = data[0, A] = 0
    data[0, T] = vals[0]
    for i in range(length - 1):
        if clss[i] == 1:
            data[i + 1, X] = data[i, X]
            data[i + 1, Y] = data[i, Y] + 1
            data[i + 1, A] = data[i, A]
        else:
            data[i + 1, X] = data[i, X] + 1
            data[i + 1, Y] = data[i, Y]
            data[i + 1, A] = data[i, A] + data[i + 1, Y]
        data[i + 1, T] = vals[i]
    # Incorporate accuracy scores (MCC and BER) at each interior threshold.
    data[0, M] = 0
    for i in range(1, length - 1):
        fp = data[i, X]
        tp = data[i, Y]
        tn = data[-1, X] - fp
        fn = data[-1, Y] - tp
        data[i, M] = mcc(tp, tn, fp, fn)
        data[i, B] = ber(tp, tn, fp, fn)
    data[-1, M] = 0
    return data
Reciever Operator Characteristic :param clss: known classes. 1 if positive case, -1 if the negative case :type class: list of boolean :param vals: classification probabilites etc... :type vals: list of real numbers :param bool reverse: whether the values should be sorted in reverse order
def confusion_matrix(exp, obs):
    """Create a confusion matrix.

    The labels are sorted; in a True/False scenario true positives occur at
    (1, 1).  Rows are the expected class, columns the observed class.

    :param exp: expected values
    :param obs: observed values, same length as `exp`
    :rtype: tuple of square matrix and sorted labels
    """
    assert len(exp) == len(obs)
    labels = sorted(set(exp))
    matrix = numpy.zeros(shape=(len(labels), len(labels)))
    # Expected in the first dimension (rows), observed in the second (cols).
    for expected, observed in zip(exp, obs):
        matrix[labels.index(expected), labels.index(observed)] += 1
    return matrix, labels
Create a confusion matrix In each axis of the resulting confusion matrix the negative case is 0-index and the positive case 1-index. The labels get sorted, in a True/False scenario true positives will occur at (1,1). The first dimension (rows) of the resulting matrix is the expected class and the second dimension (columns) is the observed class. :param exp: expected values :type exp: list of float :param obs: observed values :type obs: list of float :rtype: tuple of square matrix and sorted labels
def confusion_performance(mat, fn):
    """Apply a performance function to a confusion matrix.

    :param mat: square confusion matrix (expected in rows, observed in columns)
    :param function fn: performance function taking (tp, tn, fp, fn)
    :return: a scalar for a 2x2 matrix, else a per-class numpy array
    :raises TypeError: if `mat` is not square or smaller than 2x2
    """
    if mat.shape[0] != mat.shape[1] or mat.shape < (2, 2):
        raise TypeError('{} is not a confusion matrix'.format(mat))
    elif mat.shape == (2, 2):
        # TP/TN/FP/FN are module-level index constants defined elsewhere —
        # presumably 2-d indices into the matrix; TODO confirm their values.
        return fn(mat[TP], mat[TN], mat[FP], mat[FN])
    res = numpy.empty(mat.shape[0])
    for i in range(len(res)):
        # NOTE(review): builtin sum(mat) on a 2-d array sums over rows and
        # yields an *array*, so the tn argument below is a vector, not a
        # scalar — verify whether mat.sum() (grand total) was intended.
        res[i] = fn(mat[i, i], # TP
                    sum(mat) - sum(mat[:, i]) - sum(mat[i, :]), # TN
                    sum(mat[:, i]) - mat[i, i], # FP
                    sum(mat[i, :]) - mat[i, i]) # FN
    return res
Apply a performance function to a confusion matrix :param mat: confusion matrix :type mat: square matrix :param function fn: performance function
def get_entity_class(resource):
    """Return the entity class registered for the given registered resource.

    :param resource: registered resource interface, class or instance.
    :return: entity class (implements `everest.entities.interfaces.IEntity`).
    """
    registry = get_current_registry()
    if IInterface in provided_by(resource):
        return registry.getUtility(resource, name='entity-class')
    return registry.getAdapter(resource, IEntity, name='entity-class')
Returns the entity class registered for the given registered resource. :param resource: registered resource :type collection: class implementing or instance providing a registered resource interface. :return: entity class (class implementing `everest.entities.interfaces.IEntity`)
def release(ctx, type_, repo, prebump=PREBUMP):
    """Make a new release.

    Bumps the version, renders the changelog, commits, tags, builds and
    uploads the distribution artifacts, then prebumps for development.

    :param ctx: invoke context used to run shell commands
    :param type_: release type key passed to _bump_release
    :param repo: twine repository name to upload to
    :param prebump: post-release bump type; must be one of REL_TYPES
    :raises ValueError: if `prebump` is not a known release type
    """
    if prebump not in REL_TYPES:
        # Bug fix: the message previously interpolated `type_` although the
        # value being validated is `prebump`.
        raise ValueError(f'{prebump} not in {REL_TYPES}')
    prebump = REL_TYPES.index(prebump)
    version = _read_version()
    version = _bump_release(version, type_)
    _write_version(version)
    # Needs to happen before Towncrier deletes fragment files.
    tag_content = _render_log()
    ctx.run('towncrier')
    ctx.run(f'git commit -am "Release {version}"')
    tag_content = tag_content.replace('"', '\\"')
    ctx.run(f'git tag -a {version} -m "Version {version}\n\n{tag_content}"')
    # No placeholders here, so a plain string (the f-prefix was redundant).
    ctx.run('python setup.py sdist bdist_wheel')
    dist_pattern = f'{PACKAGE_NAME.replace("-", "[-_]")}-*'
    artifacts = list(ROOT.joinpath('dist').glob(dist_pattern))
    filename_display = '\n'.join(f'  {a}' for a in artifacts)
    print(f'[release] Will upload:\n{filename_display}')
    try:
        input('[release] Release ready. ENTER to upload, CTRL-C to abort: ')
    except KeyboardInterrupt:
        print('\nAborted!')
        return
    arg_display = ' '.join(f'"{n}"' for n in artifacts)
    ctx.run(f'twine upload --repository="{repo}" {arg_display}')
    version = _prebump(version, prebump)
    _write_version(version)
    ctx.run(f'git commit -am "Prebump to {version}"')
Make a new release.
def install_board_with_programmer(mcu, programmer,
                                  f_cpu=16000000,
                                  core='arduino',
                                  replace_existing=False,
                                  ):
    """Install an Arduino board definition that uploads via a programmer.

    :param mcu: target microcontroller name
    :param programmer: programmer id, stored as 'upload.using'
    :param f_cpu: CPU frequency in Hz (default 16 MHz)
    :param core: Arduino core name
    :param replace_existing: overwrite a board with the same id if present
    """
    bunch = AutoBunch()
    # The board id and display name both encode mcu, frequency and programmer.
    board_id = '{mcu}_{f_cpu}_{programmer}'.format(f_cpu=f_cpu,
                                                   mcu=mcu,
                                                   programmer=programmer,
                                                   )
    bunch.name = '{mcu}@{f} Prog:{programmer}'.format(f=strfreq(f_cpu),
                                                      mcu=mcu,
                                                      programmer=programmer,
                                                      )
    bunch.upload.using = programmer
    bunch.build.mcu = mcu
    # boards.txt expects the frequency with a trailing 'L' literal suffix.
    bunch.build.f_cpu = str(f_cpu) + 'L'
    bunch.build.core = core
    install_board(board_id, bunch, replace_existing=replace_existing)
install board with programmer.
def get_branch(self):
    """Return the current git branch name with '/' replaced by '_'.

    On a detached HEAD, fall back to CI-provided environment variables
    (GIT_BRANCH, BRANCH_NAME, TRAVIS_BRANCH) and finally to "HEAD".
    """
    if not self.repo.head.is_detached:
        branch = str(self.repo.active_branch)
    else:
        branch = (os.getenv('GIT_BRANCH')
                  or os.getenv('BRANCH_NAME')
                  or os.getenv('TRAVIS_BRANCH')
                  or "HEAD")
    return branch.replace("/", "_")
:return:
def get_version(self):
    """Return the tag pointing at HEAD, or the full commit sha if HEAD is
    untagged."""
    head_commit = self.repo.commit()
    for tag in self.repo.tags:
        if tag.commit == head_commit:
            return tag
    return self.repo.rev_parse(str(head_commit))
:return:
def get_changed_files(self, first_sha, second_sha, exclude_paths=None):
    """List the files changed between two commits, skipping any whose path
    starts with one of `exclude_paths`.

    :param first_sha: base commit sha
    :param second_sha: target commit sha
    :param exclude_paths: optional list of path prefixes to ignore
    :return: list of changed file paths
    """
    exclude_paths = exclude_paths or []
    first_commit = self.repo.commit(first_sha)
    second_commit = self.repo.commit(second_sha)
    return [delta.a_path
            for delta in first_commit.diff(second_commit)
            if not any(delta.a_path.startswith(prefix)
                       for prefix in exclude_paths)]
:param first_sha: :param second_sha: :param exclude_paths: :return:
def print_all(self):
    """Print a formatted summary of branch, version and latest commit."""
    lines = [
        "\n\n# Git information ",
        "-------------------------------------------",
        " Branch :\t{0}".format(self.get_branch()),
        " Version:\t{0}".format(str(self.get_version())),
        " Summary:\t{0}".format(self.repo.commit().summary),
        "-------------------------------------------\n\n",
    ]
    print("\n".join(lines))
:return:
def logMsg(self, msg, printMsg=True):
    """Append a timestamped message to the log and optionally print it.

    :param msg: message text
    :param printMsg: also echo the message to stdout when True

    Fix: the Python-2-only ``print msg`` statement is replaced with the
    ``print(msg)`` call, which is valid on both Python 2 and 3.
    """
    time = datetime.datetime.now().strftime('%I:%M %p')
    self.log = '{0}\n{1} | {2}'.format(self.log, time, msg)
    if printMsg:
        print(msg)
    # Forward to ArcGIS's message stream when running inside a GP tool.
    if self.addLogsToArcpyMessages:
        from arcpy import AddMessage
        AddMessage(msg)
logs a message and prints it to the screen
def logGPMsg(self, printMsg=True):
    """Log the current arcpy geoprocessing messages (and optionally print
    them).

    :param printMsg: also echo to stdout when True
    """
    from arcpy import GetMessages
    msgs = GetMessages()
    try:
        self.logMsg(msgs, printMsg)
    except Exception:
        # Fix: narrowed the bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        self.logMsg('error getting arcpy message', printMsg)
logs the arcpy messages and prints them to the screen
def writeLogToFile(self):
    """Append the in-memory log to the log file, creating the log folder
    first if it does not exist."""
    if not os.path.exists(self.logFolder):
        os.mkdir(self.logFolder)
    with open(self.logFile, mode='a') as log_handle:
        log_handle.write('\n\n' + self.log)
writes the log to a file in the log folder
def logError(self):
    """Log a traceback of the current exception and return the formatted text.

    Pattern from
    http://webhelp.esri.com/arcgisdesktop/9.3/index.cfm?TopicName=Error_handling_with_Python
    """
    import traceback
    self.logMsg('ERROR!!!')
    formatted = traceback.format_exc()
    self.logMsg(formatted)
    return formatted
gets traceback info and logs it
def song(self):
    """:class:`Song` object of next song to play"""
    payload = {
        'weightModifierRange': [-9, 9],
        'seedArtists': {artist: 'p' for artist in self._artists},
        'tagID': self._radio,
        'recentArtists': self._recent_artists,
        'songQueueID': self._connection.session.queue,
        'secondaryArtistWeightModifier': 0.75,
        'country': self._connection.session.country,
        'seedArtistWeightRange': [110, 130],
        'songIDsAlreadySeen': self._songs_already_seen,
        'maxDuration': 1500,
        'minDuration': 60,
        'frowns': [],
    }
    header = self._connection.header('autoplayGetSong', 'jsqueue')
    raw = self._connection.request('autoplayGetSong', payload, header)[1]
    return Song(
        raw['SongID'], raw['SongName'],
        raw['ArtistID'], raw['ArtistName'],
        raw['AlbumID'], raw['AlbumName'],
        raw['CoverArtUrl'], None,
        raw['EstimateDuration'], None,
        self._connection,
    )
:class:`Song` object of next song to play
def export(self):
    """Return this radio's state as a plain dict for later re-creation."""
    return dict(
        artists=self._artists,
        radio=self._radio,
        recent_artists=self._recent_artists,
        songs_already_seen=self._songs_already_seen,
    )
Returns a dictionary with the radio station's state (seed artists, tag, recent artists, seen songs). Use the :meth:`from_export` method to recreate the radio object.
def get_random_giphy(phrase):
    """Return the URL of a random GIF related to *phrase*, if possible.

    :raises ValueError: if the search yields no results
    """
    with warnings.catch_warnings():
        # the giphypop client emits deprecation-style warnings; mute them
        warnings.simplefilter('ignore')
        client = giphypop.Giphy()
        matches = client.search_list(phrase=phrase, limit=100)
        if not matches:
            raise ValueError('There were no results for that phrase')
        return random.choice(matches).media_url
Return the URL of a random GIF related to the phrase, if possible
def handle_command_line():
    """Display an image for the phrase given on the command line, if possible."""
    phrase = ' '.join(sys.argv[1:]) or 'random'
    try:
        url = get_random_giphy(phrase)
    except ValueError:
        sys.stderr.write('Unable to find any GIFs for {!r}\n'.format(phrase))
        sys.exit(1)
    display(fetch_image(url))
Display an image for the phrase in sys.argv, if possible
def make_required_folders(self):
    """Create every folder declared in the config that does not yet exist."""
    required = (
        self.pending_folder,
        self.usb_incoming_folder,
        self.outgoing_folder,
        self.incoming_folder,
        self.archive_folder,
        self.tmp_folder,
        self.log_folder,
    )
    for path in required:
        if not os.path.exists(path):
            os.makedirs(path)
Makes all folders declared in the config if they do not exist.
def load(self, filename, offset):
    """Load HFS+ volume header information from *filename*.

    :param filename: path of the raw volume/image file
    :param offset: byte offset of the volume start within the file
    """
    try:
        self.offset = offset
        # 'with' guarantees the descriptor is closed even if seek/read
        # raises; the original closed it only on the success path
        with open(filename, 'rb') as fd:
            self.fd = fd
            # 1024 - temporary, need to find out actual volume header size
            fd.seek(self.offset + VOLUME_HEADER_OFFSET)
            data = fd.read(1024)
            self.vol_header = VolumeHeader(data)
    except IOError as e:
        print(e)
Loads HFS+ volume information
def get_interfaces(self):
    """Return a list of sham.network.interfaces.NetworkInterface
    describing all the interfaces this VM has."""
    results = []
    for node in self.xml.find('devices').iter('interface'):
        if_type = node.attrib['type']
        results.append(NetworkInterface(
            if_type,
            node.find('mac').attrib['address'],
            # the source attribute name matches the interface type
            node.find('source').attrib[if_type],
            node.find('model').attrib['type'],
        ))
    return results
Return a list of sham.network.interfaces.NetworkInterface describing all the interfaces this VM has
def get_disks(self):
    """Return all disks attached to this VM as sham.storage.volumes.Volume
    objects. Disk elements without a <source> child are skipped."""
    raw_volumes = []
    for disk in self.xml.iter('disk'):
        source = disk.find('source')
        if source is None:
            continue
        vol = self.domain.connect().storageVolLookupByPath(source.attrib['file'])
        raw_volumes.append(vol)
    return [Volume(v, StoragePool(v.storagePoolLookupByVolume()))
            for v in raw_volumes]
Return a list of all the Disks attached to this VM The disks are returned in a sham.storage.volumes.Volume object
def delete(self):
    """Undefine this VM, then wipe and delete every disk it owned."""
    # collect the disk list before the domain is gone
    attached = self.get_disks()
    self.domain.undefine()
    for volume in attached:
        volume.wipe()
        volume.delete()
Delete this VM, and remove all its disks
def shutdown(self, delete=False):
    """Forcibly stop this VM.

    :param delete: when True, also wipe and delete every attached disk
    :type delete: bool
    """
    # collect the disk list before the domain is stopped
    attached = self.get_disks()
    self.domain.destroy()
    if not delete:
        return
    for volume in attached:
        volume.wipe()
        volume.delete()
Shutdown this VM :param delete: Should we delete after shutting the VM down? :type delete: bool
def to_dict(self):
    """Return the values contained in this object as a dict."""
    return dict(
        domain_type=self.domain_type,
        max_memory=self.max_memory,
        current_memory=self.current_memory,
        num_cpus=self.num_cpus,
        running=self.is_running(),
        name=self.name,
    )
Return the values contained in this object as a dict
def guess_url_vcs(url):
    """Given a url, try to guess what kind of VCS it's for.

    :param url: repository url
    :return: 'git', 'svn', 'hg', or None if we can't make a good guess
    """
    parsed = urllib.parse.urlsplit(url)
    if parsed.scheme in ('git', 'svn'):
        return parsed.scheme
    elif parsed.path.endswith('.git'):
        return 'git'
    elif parsed.hostname == 'github.com':
        return 'git'
    # If it's an http url, we can try requesting it and guessing from the
    # contents.
    if parsed.scheme in ('http', 'https'):
        resp = requests.get(url)
        # headers.get('server') may be None when the header is absent;
        # calling .lower() on it directly raised AttributeError
        server = resp.headers.get('server') or ''
        if re.match('basehttp.*python.*', server.lower()):
            # It's the mercurial http server
            return 'hg'
    return None
Given a url, try to guess what kind of VCS it's for. Return None if we can't make a good guess.
def guess_folder_vcs(folder):
    """Given a local folder path, report which VCS its repo uses.

    :return: 'git', 'hg', or 'svn', or None if no marker folder is found
        (or the folder cannot be listed).
    """
    try:
        entries = os.listdir(folder)
    except OSError:
        return None
    for marker in ('.git', '.hg', '.svn'):
        if marker in entries:
            # drop the leading dot
            return marker[1:]
    return None
Given a path for a folder on the local filesystem, see what kind of vcs repo it is, if any.
def basename(url):
    """Return the folder name you'd get if you cloned *url* into the cwd."""
    cleaned = url.strip()               # tolerate stray surrounding whitespace
    cleaned = cleaned.partition('#')[0]  # drop any fragment
    if cleaned.endswith('/'):            # drop a single trailing slash
        cleaned = cleaned[:-1]
    last_segment = cleaned.rsplit('/', 1)[-1]
    # strip a trailing .git, matching what clone tools name the folder
    return re.sub(r'\.git$', '', last_segment)
Return the name of the folder that you'd get if you cloned 'url' into the current working directory.
def get_url(self):
    """Return the default upstream URL of the (already-cloned) local repo."""
    commands = {
        'hg': 'hg paths default',
        'git': 'git config --local --get remote.origin.url',
    }
    with chdir(self.folder):
        result = self.run(commands[self.vcs_type])
    return result.output.replace('\n', '')
Assuming that the repo has been cloned locally, get its default upstream URL.
def fburl(parser, token):
    """Template tag: build an absolute URL node for the given view.

    Usage: {% url path.to.some_view arg1,arg2,name1=value1 [as varname] %}

    :param parser: template parser, used to compile filter expressions
    :param token: the tag token whose contents are parsed
    :return: a URLNode carrying the view name, args, kwargs and optional
        'as' variable name
    """
    bits = token.contents.split(' ')
    if len(bits) < 2:
        raise template.TemplateSyntaxError("'%s' takes at least one argument"
                                           " (path to a view)" % bits[0])
    viewname = bits[1]
    args = []
    kwargs = {}
    asvar = None
    if len(bits) > 2:
        bits = iter(bits[2:])
        for bit in bits:
            if bit == 'as':
                # the token after 'as' names the context variable;
                # consuming it from the iterator then stopping
                asvar = bits.next()
                break
            else:
                # each bit may pack several comma-separated arguments
                for arg in bit.split(","):
                    if '=' in arg:
                        # keyword argument: name=value
                        k, v = arg.split('=', 1)
                        k = k.strip()
                        kwargs[k] = parser.compile_filter(v)
                    elif arg:
                        # positional argument (empty strings are skipped)
                        args.append(parser.compile_filter(arg))
    return URLNode(viewname, args, kwargs, asvar)
Returns an absolute URL matching given view with its parameters. This is a way to define links that aren't tied to a particular URL configuration:: {% url path.to.some_view arg1,arg2,name1=value1 %} The first argument is a path to a view. It can be an absolute python path or just ``app_name.view_name`` without the project name if the view is located inside the project. Other arguments are comma-separated values that will be filled in place of positional and keyword arguments in the URL. All arguments for the URL should be present. For example if you have a view ``app_name.client`` taking client's id and the corresponding line in a URLconf looks like this:: ('^client/(\d+)/$', 'app_name.client') and this app's URLconf is included into the project's URLconf under some path:: ('^clients/', include('project_name.app_name.urls')) then in a template you can create a link for a certain client like this:: {% url app_name.client client.id %} The URL will look like ``/clients/client/123/``.
def chdir(method):
    """Decorator executing *method* with the process cwd set to 'dir'."""
    @functools.wraps(method)
    def wrapper(self, dir, *args, **kw):
        stack = ChdirStack()
        stack.push(dir)
        try:
            return method(self, dir, *args, **kw)
        finally:
            # always restore the previous working directory
            stack.pop()
    return wrapper
Decorator executing method in directory 'dir'.
def push(self, dir):
    """Push the current working directory on the stack and chdir to *dir*.

    A falsy *dir* leaves the process in its current directory.
    """
    here = os.getcwd()
    self.stack.append(here)
    os.chdir(dir or here)
Push cwd on stack and change to 'dir'.
def pop(self):
    """Pop the last saved directory off the stack and chdir back to it.

    Does nothing when the stack is empty.
    """
    if not self.stack:
        return
    os.chdir(self.stack.pop())
Pop dir off stack and change to it.