Columns: code (string, lengths 52–7.75k) and docs (string, lengths 1–5.85k)
def minkowski_distance(x, y, p=2): from math import pow assert len(y) == len(x) assert len(x) >= 1 sum = 0 for i in range(len(x)): sum += abs(x[i] - y[i]) ** p return pow(sum, 1.0 / float(p))
Calculates the Minkowski distance between two points. :param x: the first point :param y: the second point :param p: the order of the Minkowski algorithm. If *p=1* it is equal to the Manhattan distance, if *p=2* it is equal to the Euclidean distance. The higher the order, the closer it converges to the Chebyshev distance, which has *p=infinity*.
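For example (values worked out from the definition above)::

    >>> minkowski_distance([0, 0], [3, 4], p=2)   # Euclidean distance
    5.0
    >>> minkowski_distance([0, 0], [3, 4], p=1)   # Manhattan distance
    7.0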
def magnitude(a): "calculates the magnitude of a vector" from math import sqrt sum = 0 for coord in a: sum += coord ** 2 return sqrt(sum)
calculates the magnitude of a vector
def dotproduct(a, b): "Calculates the dot product between two vectors" assert(len(a) == len(b)) out = 0 for i in range(len(a)): out += a[i] * b[i] return out
Calculates the dot product between two vectors
def centroid(data, method=median): "returns the central vector of a list of vectors" out = [] for i in range(len(data[0])): out.append(method([x[i] for x in data])) return tuple(out)
returns the central vector of a list of vectors
def display(self, depth=0): print(depth * " " + "[level %s]" % self.level) for item in self.items: if isinstance(item, Cluster): item.display(depth + 1) else: print(depth * " " + "%s" % item)
Pretty-prints this cluster. Useful for debugging.
def topology(self): left = self.items[0] right = self.items[1] if isinstance(left, Cluster): first = left.topology() else: first = left if isinstance(right, Cluster): second = right.topology() else: second = right return first, second
Returns the structure (topology) of the cluster as tuples. Output from cl.data:: [<Cluster@…(['CVS', <Cluster@…(['34.xls', <Cluster@…([<Cluster@…(['0.txt', <Cluster@…(['ChangeLog', 'ChangeLog.txt'])>])>, <Cluster@…(['20060730.py', <Cluster@…(['.cvsignore', <Cluster@…(['About.py', <Cluster@…(['.idlerc', '.pylint.d'])>])>])>])>])>])>])>] Corresponding output from cl.topo():: ('CVS', ('34.xls', (('0.txt', ('ChangeLog', 'ChangeLog.txt')), ('20060730.py', ('.cvsignore', ('About.py', ('.idlerc', '.pylint.d')))))))
def getlevel(self, threshold): left = self.items[0] right = self.items[1] # if this object itself is below the threshold value we only need to # return its contents as a list if self.level <= threshold: return [fullyflatten(self.items)] # if this cluster's level is higher than the threshold we will # investigate its left and right parts. Their level could be below the # threshold if isinstance(left, Cluster) and left.level <= threshold: if isinstance(right, Cluster): return [fullyflatten(left.items)] + right.getlevel(threshold) else: return [fullyflatten(left.items)] + [[right]] elif isinstance(right, Cluster) and right.level <= threshold: if isinstance(left, Cluster): return left.getlevel(threshold) + [fullyflatten(right.items)] else: return [[left]] + [fullyflatten(right.items)] # Alright. We covered the cases where one of the clusters was below # the threshold value. Now we'll deal with the clusters that are above # by recursively applying the previous cases. if isinstance(left, Cluster) and isinstance(right, Cluster): return left.getlevel(threshold) + right.getlevel(threshold) elif isinstance(left, Cluster): return left.getlevel(threshold) + [[right]] elif isinstance(right, Cluster): return [[left]] + right.getlevel(threshold) else: return [[left], [right]]
Retrieve all clusters up to a specific level threshold. This level threshold represents the maximum distance between two clusters. The lower you set this threshold, the more (and smaller) clusters you will receive; the higher you set it, the fewer (and bigger) clusters you will receive. :param threshold: The level threshold. .. note:: It is debatable whether the value passed into this method should really be as strongly linked to the real cluster levels as it is right now. The end-user will not know the range of this value unless they first inspect the top-level cluster. So you might argue that a value ranging from 0 to 1 would be a more useful approach; a sketch of that idea follows.
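A minimal sketch of that idea (hypothetical helper; it assumes ``cluster`` is the top-level ``Cluster`` instance and that its ``level`` attribute holds the largest merge distance, as ``getlevel`` itself does)::

    def getlevel_by_fraction(cluster, fraction):
        # map a 0..1 fraction of the top-level distance onto
        # getlevel's absolute threshold
        assert 0.0 <= fraction <= 1.0
        return cluster.getlevel(fraction * cluster.level)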
def jsmin(js, **kwargs): if not is_3: if cStringIO and not isinstance(js, unicode): # strings can use cStringIO for a 3x performance # improvement, but unicode (in python2) cannot klass = cStringIO.StringIO else: klass = StringIO.StringIO else: klass = io.StringIO ins = klass(js) outs = klass() JavascriptMinify(ins, outs, **kwargs).minify() return outs.getvalue()
Returns a minified version of the JavaScript string.
def cached(fun): _cache = {} @wraps(fun) def newfun(a, b, distance_function): frozen_a = frozenset(a) frozen_b = frozenset(b) if (frozen_a, frozen_b) not in _cache: result = fun(a, b, distance_function) _cache[(frozen_a, frozen_b)] = result return _cache[(frozen_a, frozen_b)] return newfun
Memoizing decorator for linkage functions. Parameters have been hardcoded (no ``*args``, ``**kwargs`` magic) because the way this is coded (interchangeably using sets and frozensets) only holds for this specific case; for other cases that is not necessarily guaranteed.
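A usage sketch (``closest_pair`` is a made-up linkage function; the only requirement is the ``(a, b, distance_function)`` signature that ``cached`` hardcodes, and ``from functools import wraps`` must be in scope for the decorator itself)::

    from functools import wraps  # used by the cached decorator above

    @cached
    def closest_pair(a, b, distance_function):
        # exhaustive single-linkage style distance, memoized per (a, b) pair
        return min(distance_function(x, y) for x in a for y in b)

    closest_pair({1, 2}, {7, 9}, lambda x, y: abs(x - y))   # computed: 5
    closest_pair({1, 2}, {7, 9}, lambda x, y: abs(x - y))   # served from the cache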
def single(a, b, distance_function): left_a, right_a = min(a), max(a) left_b, right_b = min(b), max(b) result = min(distance_function(left_a, right_b), distance_function(left_b, right_a)) return result
Given two collections ``a`` and ``b``, this will return the distance of the points which are closest together. ``distance_function`` is used to determine the distance between two elements. Example:: >>> single([1, 2], [3, 4], lambda x, y: abs(x-y)) 1 # (distance between 2 and 3)
def average(a, b, distance_function): distances = [distance_function(x, y) for x in a for y in b] return sum(distances) / len(distances)
Given two collections ``a`` and ``b``, this will return the mean of all distances. ``distance_function`` is used to determine the distance between two elements. Example:: >>> average([1, 2], [3, 100], lambda x, y: abs(x-y)) 50.0
def uclus(a, b, distance_function): distances = sorted([distance_function(x, y) for x in a for y in b]) midpoint, rest = len(distances) // 2, len(distances) % 2 if not rest: return sum(distances[midpoint-1:midpoint+1]) / 2 else: return distances[midpoint]
Given two collections ``a`` and ``b``, this will return the *median* of all distances. ``distance_function`` is used to determine the distance between two elements. Example:: >>> uclus([1, 2], [3, 100], lambda x, y: abs(x-y)) 50.0
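Side by side, the three linkage strategies give quite different values for the same input (results worked out by hand from the definitions above; shown with Python 3 division)::

    >>> dist = lambda x, y: abs(x - y)
    >>> single([1, 2], [3, 100], dist)    # closest pair: 2 and 3
    1
    >>> average([1, 2], [3, 100], dist)   # mean of 2, 99, 1, 98
    50.0
    >>> uclus([1, 2], [3, 100], dist)     # median of 1, 2, 98, 99
    50.0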
def _encapsulate_item_for_combinfunc(item): encapsulated_item = None if ( not hasattr(item, '__iter__') or isinstance(item, tuple) or isinstance(item, str) ): encapsulated_item = [item] else: encapsulated_item = item logging.debug( "item class:%s encapsulated as:%s ", item.__class__.__name__, encapsulated_item.__class__.__name__ ) return encapsulated_item
This function has been extracted in order to make Github issue #28 easier to investigate. It replaces the following two lines of code, which occur twice in method genmatrix, just before the invocation of combinfunc. if not hasattr(item, '__iter__') or isinstance(item, tuple): item = [item] Logging was added to the original two lines and shows that the outcome of this snippet has changed between Python2.7 and Python3.5. This logging showed that the difference in outcome consisted of the handling of the builtin str class, which was encapsulated into a list in Python2.7 but returned naked in Python3.5. Adding a test for this specific class to the set of conditions appears to give correct behaviour under both versions.
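The underlying difference is that ``str`` only gained ``__iter__`` in Python 3, which a quick check makes visible (illustrative doctest, not part of the original module; the last two calls show the helper's behaviour after the fix)::

    >>> hasattr('abc', '__iter__')                    # Python 2.7
    False
    >>> hasattr('abc', '__iter__')                    # Python 3.5+
    True
    >>> _encapsulate_item_for_combinfunc('abc')       # strings are wrapped again
    ['abc']
    >>> _encapsulate_item_for_combinfunc([1, 2])      # real collections pass through
    [1, 2]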
def worker(self): tasks_completed = 0 for task in iter(self.task_queue.get, 'STOP'): col_index, item, item2 = task if not hasattr(item, '__iter__') or isinstance(item, tuple): item = [item] if not hasattr(item2, '__iter__') or isinstance(item2, tuple): item2 = [item2] result = (col_index, self.combinfunc(item, item2)) self.done_queue.put(result) tasks_completed += 1 logger.info("Worker %s performed %s tasks", current_process().name, tasks_completed)
Multiprocessing task function run by worker processes
def validate(fname): validation = { "errors": [], "warnings": [] } for line in _process(fname): kind, message = _determine(line) if kind in validation: validation[kind].append(message) return validation
This function uses dciodvfy to generate a list of warnings and errors discovered within the DICOM file. :param fname: Location and filename of DICOM file.
def numpy(self): # load GDCM's image reading functionality image_reader = gdcm.ImageReader() image_reader.SetFileName(self.fname) if not image_reader.Read(): raise IOError("Could not read DICOM image") pixel_array = self._gdcm_to_numpy(image_reader.GetImage()) return pixel_array
Grabs image data and converts it to a numpy array
def _gdcm_to_numpy(self, image): gdcm_typemap = { gdcm.PixelFormat.INT8: numpy.int8, gdcm.PixelFormat.UINT8: numpy.uint8, gdcm.PixelFormat.UINT16: numpy.uint16, gdcm.PixelFormat.INT16: numpy.int16, gdcm.PixelFormat.UINT32: numpy.uint32, gdcm.PixelFormat.INT32: numpy.int32, gdcm.PixelFormat.FLOAT32: numpy.float32, gdcm.PixelFormat.FLOAT64: numpy.float64 } pixel_format = image.GetPixelFormat().GetScalarType() if pixel_format in gdcm_typemap: self.data_type = gdcm_typemap[pixel_format] else: raise KeyError("%s is not a supported pixel format" % pixel_format) #dimension = image.GetDimension(0), image.GetDimension(1) self.dimensions = image.GetDimension(1), image.GetDimension(0) gdcm_array = image.GetBuffer() # GDCM returns char* as type str. This converts it to type bytes if sys.version_info >= (3, 0): gdcm_array = gdcm_array.encode(sys.getfilesystemencoding(), "surrogateescape") # use float for accurate scaling dimensions = image.GetDimensions() result = numpy.frombuffer(gdcm_array, dtype=self.data_type).astype(float) if len(dimensions) == 3: # for cine (animations) there are 3 dims: x, y, number of frames result.shape = dimensions[2], dimensions[0], dimensions[1] else: result.shape = dimensions return result
Converts a GDCM image to a numpy array. :param image: GDCM.ImageReader.GetImage()
def save_as_plt(self, fname, pixel_array=None, vmin=None, vmax=None, cmap=None, format=None, origin=None): from matplotlib.backends.backend_agg \ import FigureCanvasAgg as FigureCanvas from matplotlib.figure import Figure from pylab import cm if pixel_array is None: pixel_array = self.numpy if cmap is None: cmap = cm.bone fig = Figure(figsize=pixel_array.shape[::-1], dpi=1, frameon=False) canvas = FigureCanvas(fig) fig.figimage(pixel_array, cmap=cmap, vmin=vmin, vmax=vmax, origin=origin) fig.savefig(fname, dpi=1, format=format) return True
This method saves the image from a numpy array using matplotlib :param fname: Location and name of the image file to be saved. :param pixel_array: Numpy pixel array, i.e. ``numpy()`` return value :param vmin: matplotlib vmin :param vmax: matplotlib vmax :param cmap: matplotlib color map :param format: matplotlib format :param origin: matplotlib origin This method will return True if successful
def save_as_pil(self, fname, pixel_array=None): if pixel_array is None: pixel_array = self.numpy from PIL import Image as pillow pil_image = pillow.fromarray(pixel_array.astype('uint8')) pil_image.save(fname) return True
This method saves the image from a numpy array using Pillow (PIL fork) :param fname: Location and name of the image file to be saved. :param pixel_array: Numpy pixel array, i.e. ``numpy()`` return value This method will return True if successful
def read(self): def ds(data_element): value = self._str_filter.ToStringPair(data_element.GetTag()) if value[1]: return DataElement(data_element, value[0].strip(), value[1].strip()) results = [data for data in self.walk(ds) if data is not None] return results
Returns a list of DataElement objects for all the data elements in the DICOM file.
def walk(self, fn): if not hasattr(fn, "__call__"): raise TypeError("""walk_dataset requires a function as its parameter""") dataset = self._dataset iterator = dataset.GetDES().begin() while (not iterator.equal(dataset.GetDES().end())): data_element = iterator.next() yield fn(data_element) header = self._header iterator = header.GetDES().begin() while (not iterator.equal(header.GetDES().end())): data_element = iterator.next() yield fn(data_element)
Loops through all data elements and allows a function to interact with each data element. Uses a generator to improve iteration. :param fn: Function that interacts with each DICOM element
def find(self, group=None, element=None, name=None, VR=None): results = self.read() if name is not None: def find_name(data_element): return data_element.name.lower() == name.lower() return filter(find_name, results) if group is not None: def find_group(data_element): return (data_element.tag['group'] == group or int(data_element.tag['group'], 16) == group) results = filter(find_group, results) if element is not None: def find_element(data_element): return (data_element.tag['element'] == element or int(data_element.tag['element'], 16) == element) results = filter(find_element, results) if VR is not None: def find_VR(data_element): return data_element.VR.lower() == VR.lower() results = filter(find_VR, results) return results
Searches for data elements in the DICOM file given the filters supplied to this method. :param group: Hex decimal for the group of a DICOM element e.g. 0x002 :param element: Hex decimal for the element value of a DICOM element e.g. 0x0010 :param name: Name of the DICOM element, e.g. "Modality" :param VR: Value Representation of the DICOM element, e.g. "PN"
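A usage sketch (``d`` stands for an instance of the DICOM wrapper class these methods belong to; the filters are the keyword arguments documented above)::

    modality = list(d.find(name="Modality"))
    transfer_syntax = list(d.find(group=0x0002, element=0x0010))   # (0002,0010) Transfer Syntax UID
    person_names = list(d.find(VR="PN"))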
def anonymize(self): self._anon_obj = gdcm.Anonymizer() self._anon_obj.SetFile(self._file) self._anon_obj.RemoveGroupLength() if self._anon_tags is None: self._anon_tags = get_anon_tags() for tag in self._anon_tags: cur_tag = tag['Tag'].replace("(", "") cur_tag = cur_tag.replace(")", "") name = tag["Attribute Name"].replace(" ", "").encode("utf8") group, element = cur_tag.split(",", 1) # TODO expand this 50xx, 60xx, gggg, eeee if ("xx" not in group and "gggg" not in group and "eeee" not in group): group = int(group, 16) element = int(element, 16) if self.find(group=group, element=element): self._anon_obj.Replace( gdcm.Tag(group, element), "Anon" + name) return self._anon_obj
According to PS 3.15-2008, basic application-level de-identification of a DICOM file requires replacing the values of a set of data elements.
def save_as(self, fname, obj=None): writer = gdcm.Writer() writer.SetFileName(fname) if obj is None and self._anon_obj: obj = self._anon_obj else: raise ValueError("Need DICOM object, e.g. obj=gdcm.Anonymizer()") writer.SetFile(obj.GetFile()) if not writer.Write(): raise IOError("Could not save DICOM file") return True
Save DICOM file given a GDCM DICOM object. Examples of a GDCM DICOM object: * gdcm.Writer() * gdcm.Reader() * gdcm.Anonymizer() :param fname: DICOM file name to be saved :param obj: DICOM object to be saved, if None, Anonymizer() is used
def image(self): if self._image is None: self._image = Image(self.fname) return self._image
Read the loaded DICOM image data
def VR(VR=None, description=None): value_repr = { "AE": "Application Entity", "AS": "Age String", "AT": "Attribute Tag", "CS": "Code String", "DA": "Date", "DS": "Decimal String", "DT": "Date/Time", "FL": "Floating Point Single (4 bytes)", "FD": "Floating Point Double (8 bytes)", "IS": "Integer String", "LO": "Long String", "LT": "Long Text", "OB": "Other Byte", "OF": "Other Float", "OW": "Other Word", "PN": "Person Name", "SH": "Short String", "SL": "Signed Long", "SQ": "Sequence of Items", "SS": "Signed Short", "ST": "Short Text", "TM": "Time", "UI": "Unique Identifier", "UL": "Unsigned Long", "UN": "Unknown", "US": "Unsigned Short", "UT": "Unlimited Text" } assert VR or description, "Either VR or description required to map VR" if VR is not None: VR = VR.upper() if VR in value_repr: return value_repr[VR] for key, value in value_repr.iteritems(): if description == value: return key return None
Value Representation (VR) <-> Description lookup. :param VR: Takes the VR and returns its description :param description: Take the description of a VR and returns the VR
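Lookups work in both directions, for example (based directly on the mapping above; note that the description-to-VR direction uses ``iteritems`` and therefore, as written, only runs under Python 2)::

    >>> VR(VR="PN")
    'Person Name'
    >>> VR(description="Unique Identifier")
    'UI'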
def repo_name(self): ds = [[x.repo_name] for x in self.repos] df = pd.DataFrame(ds, columns=['repository']) return df
Returns a DataFrame of the repo names present in this project directory :return: DataFrame
def coverage(self): df = pd.DataFrame(columns=['filename', 'lines_covered', 'total_lines', 'coverage', 'repository']) for repo in self.repos: try: cov = repo.coverage() cov['repository'] = repo.repo_name df = df.append(cov) except GitCommandError: print('Warning! Repo: %s seems to not have coverage' % (repo, )) df.reset_index() return df
Will return a DataFrame with coverage information (if available) for each repo in the project. If there is a .coverage file available, this will attempt to form a DataFrame with that information in it, which will contain the columns: * repository * filename * lines_covered * total_lines * coverage If it can't be found or parsed, an empty DataFrame of that form will be returned. :return: DataFrame
def file_change_rates(self, branch='master', limit=None, coverage=False, days=None, ignore_globs=None, include_globs=None): columns = ['unique_committers', 'abs_rate_of_change', 'net_rate_of_change', 'net_change', 'abs_change', 'edit_rate', 'repository'] if coverage: columns += ['lines_covered', 'total_lines', 'coverage'] df = pd.DataFrame(columns=columns) for repo in self.repos: try: fcr = repo.file_change_rates( branch=branch, limit=limit, coverage=coverage, days=days, ignore_globs=ignore_globs, include_globs=include_globs ) fcr['repository'] = repo.repo_name df = df.append(fcr) except GitCommandError: print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch)) df.reset_index() return df
This function will return a DataFrame containing some basic aggregations of the file change history data, and optionally test coverage data from a coverage.py ``.coverage`` file. The aim here is to identify files in the project which have abnormal edit rates, i.e. frequent changes without corresponding growth in file size. If a file has a high change rate and poor test coverage, then it is a great candidate for writing more tests. :param branch: (optional, default=master) the branch to return commits for :param limit: (optional, default=None) a maximum number of commits to return, None for no limit :param coverage: (optional, default=False) a bool for whether or not to attempt to join in coverage data. :param days: (optional, default=None) number of days to return if limit is None :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing :param include_globs: (optional, default=None) a list of globs to include, default of None includes everything. :return: DataFrame
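A usage sketch (hypothetical; ``project`` stands for an instance of the project-level class these methods are defined on)::

    rates = project.file_change_rates(branch='master', days=90, coverage=True,
                                      include_globs=['*.py'])
    # frequently edited files with little net growth and low coverage are the
    # candidates the docstring above describes
    hotspots = rates[(rates['edit_rate'] > rates['edit_rate'].median())
                     & (rates['coverage'] < 0.5)]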
def commit_history(self, branch, limit=None, days=None, ignore_globs=None, include_globs=None): if limit is not None: limit = int(limit / len(self.repo_dirs)) df = pd.DataFrame(columns=['author', 'committer', 'message', 'lines', 'insertions', 'deletions', 'net']) for repo in self.repos: try: ch = repo.commit_history(branch, limit=limit, days=days, ignore_globs=ignore_globs, include_globs=include_globs) ch['repository'] = repo.repo_name df = df.append(ch) except GitCommandError: print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch)) df.reset_index() return df
Returns a pandas DataFrame containing all of the commits for a given branch. The results from all repositories are appended to each other, resulting in one large data frame of size <limit>. If a limit is provided, it is divided by the number of repositories in the project directory to find out how many commits to pull from each project. Future implementations will use date ordering across all projects to get the true most recent N commits across the project. Included in that DataFrame will be the columns: * repository * date (index) * author * committer * message * lines * insertions * deletions * net :param branch: the branch to return commits for :param limit: (optional, default=None) a maximum number of commits to return, None for no limit :param days: (optional, default=None) number of days to return if limit is None :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing :param include_globs: (optional, default=None) a list of globs to include, default of None includes everything. :return: DataFrame
def file_detail(self, rev='HEAD', committer=True, ignore_globs=None, include_globs=None): df = None for repo in self.repos: try: if df is None: df = repo.file_detail(ignore_globs=ignore_globs, include_globs=include_globs, committer=committer, rev=rev) df['repository'] = repo.repo_name else: chunk = repo.file_detail(ignore_globs=ignore_globs, include_globs=include_globs, committer=committer, rev=rev) chunk['repository'] = repo.repo_name df = df.append(chunk) except GitCommandError: print('Warning! Repo: %s couldnt be inspected' % (repo, )) df = df.reset_index(level=1) df = df.set_index(['file', 'repository']) return df
Returns a table of all current files in the repos, with some high level information about each file (total LOC, file owner, extension, most recent edit date, etc.). :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing :param include_globs: (optional, default=None) a list of globs to include, default of None includes everything. :param committer: (optional, default=True) true if committer should be reported, false if author :return:
def branches(self): df = pd.DataFrame(columns=['repository', 'local', 'branch']) if _has_joblib: ds = Parallel(n_jobs=-1, backend='threading', verbose=0)( delayed(_branches_func) (x) for x in self.repos ) for d in ds: df = df.append(d) else: for repo in self.repos: try: df = df.append(_branches_func(repo)) except GitCommandError: print('Warning! Repo: %s couldn\'t be inspected' % (repo, )) df.reset_index() return df
Returns a data frame of all branches in origin. The DataFrame will have the columns: * repository * local * branch :returns: DataFrame
def revs(self, branch='master', limit=None, skip=None, num_datapoints=None): if limit is not None: limit = math.floor(float(limit) / len(self.repos)) if num_datapoints is not None: num_datapoints = math.floor(float(num_datapoints) / len(self.repos)) df = pd.DataFrame(columns=['repository', 'rev']) if _has_joblib: ds = Parallel(n_jobs=-1, backend='threading', verbose=0)( delayed(_revs_func) (x, branch, limit, skip, num_datapoints) for x in self.repos ) for d in ds: df = df.append(d) else: for repo in self.repos: try: revs = repo.revs(branch=branch, limit=limit, skip=skip, num_datapoints=num_datapoints) revs['repository'] = repo.repo_name df = df.append(revs) except GitCommandError: print('Warning! Repo: %s couldn\'t be inspected' % (repo, )) df.reset_index() return df
Returns a dataframe of all revision tags and their timestamps for each project. It will have the columns: * date * repository * rev :param branch: (optional, default 'master') the branch to work in :param limit: (optional, default None), the maximum number of revisions to return, None for no limit :param skip: (optional, default None), the number of revisions to skip. Ex: skip=2 returns every other revision, None for no skipping. :param num_datapoints: (optional, default=None) if limit and skip are none, and this isn't, then num_datapoints evenly spaced revs will be used :return: DataFrame
def repo_information(self): data = [[repo.git_dir, repo.repo.branches, repo.repo.bare, repo.repo.remotes, repo.repo.description, repo.repo.references, repo.repo.heads, repo.repo.submodules, repo.repo.tags, repo.repo.active_branch] for repo in self.repos] df = pd.DataFrame(data, columns=[ 'local_directory', 'branches', 'bare', 'remotes', 'description', 'references', 'heads', 'submodules', 'tags', 'active_branch' ]) return df
Returns a DataFrame with the properties of all repositories in the project directory. The returned DataFrame will have the columns: * local_directory * branches * bare * remotes * description * references * heads * submodules * tags * active_branch :return: DataFrame
def bus_factor(self, ignore_globs=None, include_globs=None, by='projectd'): if by == 'file': raise NotImplementedError('File-wise bus factor') elif by == 'projectd': blame = self.blame(ignore_globs=ignore_globs, include_globs=include_globs, by='repository') blame = blame.sort_values(by=['loc'], ascending=False) total = blame['loc'].sum() cumulative = 0 tc = 0 for idx in range(blame.shape[0]): cumulative += blame.ix[idx, 'loc'] tc += 1 if cumulative >= total / 2: break return pd.DataFrame([['projectd', tc]], columns=['projectd', 'bus factor']) elif by == 'repository': df = pd.DataFrame(columns=['repository', 'bus factor']) for repo in self.repos: try: df = df.append(repo.bus_factor(ignore_globs=include_globs, include_globs=include_globs, by=by)) except GitCommandError: print('Warning! Repo: %s couldn\'t be inspected' % (repo, )) df.reset_index() return df
An experimental heuristic for truck factor of a repository calculated by the current distribution of blame in the repository's primary branch. The factor is the fewest number of contributors whose contributions make up at least 50% of the codebase's LOC. :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing :param include_globs: (optional, default=None) a list of globs to include, default of None includes everything. :return:
def command(self): print('pynYNAB CSV import') args = self.parser.parse_args() verify_common_args(args) verify_csvimport(args.schema, args.accountname) client = clientfromkwargs(**args) delta = do_csvimport(args, client) client.push(expected_delta=delta)
Manually import a CSV into a nYNAB budget
def command(self): print('pynYNAB OFX import') args = self.parser.parse_args() verify_common_args(args) client = clientfromkwargs(**args) delta = do_ofximport(args.file, client) client.push(expected_delta=delta)
Manually import an OFX into a nYNAB budget
def default_listener(col_attr, default): @event.listens_for(col_attr, "init_scalar", retval=True, propagate=True) def init_scalar(target, value, dict_): if default.is_callable: # the callable of ColumnDefault always accepts a context argument value = default.arg(None) elif default.is_scalar: value = default.arg else: raise NotImplementedError( "Can't invoke pre-default for a SQL-level column default") dict_[col_attr.key] = value return value
Establish a default-setting listener.
def has_coverage(self): if os.path.exists(self.git_dir + os.sep + '.coverage'): try: with open(self.git_dir + os.sep + '.coverage', 'r') as f: blob = f.read() blob = blob.split('!')[2] json.loads(blob) return True except Exception: return False else: return False
Returns a boolean for whether a parseable .coverage file can be found in the repository :return: bool
def coverage(self): if not self.has_coverage(): return DataFrame(columns=['filename', 'lines_covered', 'total_lines', 'coverage']) with open(self.git_dir + os.sep + '.coverage', 'r') as f: blob = f.read() blob = blob.split('!')[2] cov = json.loads(blob) ds = [] for filename in cov['lines'].keys(): idx = 0 try: with open(filename, 'r') as f: for idx, _ in enumerate(f): pass except FileNotFoundError as e: if self.verbose: warnings.warn('Could not find file %s for coverage' % (filename, )) num_lines = idx + 1 try: short_filename = filename.split(self.git_dir + os.sep)[1] ds.append([short_filename, len(cov['lines'][filename]), num_lines]) except IndexError as e: if self.verbose: warnings.warn('Could not find file %s for coverage' % (filename, )) df = DataFrame(ds, columns=['filename', 'lines_covered', 'total_lines']) df['coverage'] = df['lines_covered'] / df['total_lines'] return df
If there is a .coverage file available, this will attempt to form a DataFrame with that information in it, which will contain the columns: * filename * lines_covered * total_lines * coverage If it can't be found or parsed, an empty DataFrame of that form will be returned. :return: DataFrame
def __check_extension(files, ignore_globs=None, include_globs=None): if include_globs is None or include_globs == []: include_globs = ['*'] out = {} for key in files.keys(): # count up the number of patterns in the ignore globs list that match if ignore_globs is not None: count_exclude = sum([1 if fnmatch.fnmatch(key, g) else 0 for g in ignore_globs]) else: count_exclude = 0 # count up the number of patterns in the include globs list that match count_include = sum([1 if fnmatch.fnmatch(key, g) else 0 for g in include_globs]) # if we have one vote or more to include and none to exclude, then we use the file. if count_include > 0 and count_exclude == 0: out[key] = files[key] return out
Internal method to filter a list of file changes by extension and ignore_dirs. :param files: :param ignore_globs: a list of globs to ignore (if none falls back to extensions and ignore_dir) :param include_globs: a list of globs to include (if none, includes all). :return: dict
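A worked example of the voting logic (file names and values are invented for illustration, and the double-underscore name is shown as a direct call, ignoring name-mangling)::

    files = {'src/app.py': 120, 'src/app_test.py': 40, 'README.md': 10}
    __check_extension(files, ignore_globs=['*_test.py'], include_globs=['*.py'])
    # -> {'src/app.py': 120}: it matches an include glob and no ignore glob;
    #    the test file is vetoed by the ignore glob, README.md matches no include glob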
def revs(self, branch='master', limit=None, skip=None, num_datapoints=None): if limit is None and skip is None and num_datapoints is not None: limit = sum(1 for _ in self.repo.iter_commits()) skip = int(float(limit) / num_datapoints) else: if limit is None: limit = sys.maxsize elif skip is not None: limit = limit * skip ds = [[x.committed_date, x.name_rev.split(' ')[0]] for x in self.repo.iter_commits(branch, max_count=limit)] df = DataFrame(ds, columns=['date', 'rev']) if skip is not None: if skip == 0: skip = 1 if df.shape[0] >= skip: df = df.ix[range(0, df.shape[0], skip)] df.reset_index() else: df = df.ix[[0]] df.reset_index() return df
Returns a dataframe of all revision tags and their timestamps. It will have the columns: * date * rev :param branch: (optional, default 'master') the branch to work in :param limit: (optional, default None), the maximum number of revisions to return, None for no limit :param skip: (optional, default None), the number of revisions to skip. Ex: skip=2 returns every other revision, None for no skipping. :param num_datapoints: (optional, default=None) if limit and skip are none, and this isn't, then num_datapoints evenly spaced revs will be used :return: DataFrame
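For example (assuming ``repo`` is an instance of the repository class these methods belong to): with 500 commits on ``master``, ``repo.revs(branch='master', num_datapoints=50)`` sets ``limit`` to 500 and ``skip`` to ``int(500 / 50) == 10``, so roughly every tenth revision is returned; ``repo.revs(branch='master', limit=20, skip=5)`` raises the effective ``max_count`` to ``20 * 5 == 100`` and then keeps every fifth of those revisions, again yielding about 20 rows.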
def branches(self): # first pull the local branches local_branches = self.repo.branches data = [[x.name, True] for x in list(local_branches)] # then the remotes remote_branches = self.repo.git.branch(all=True).split('\n') if sys.version_info.major == 2: remote_branches = set([x.split('/')[-1] for x in remote_branches if 'remotes' in x]) else: remote_branches = {x.split('/')[-1] for x in remote_branches if 'remotes' in x} data += [[x, False] for x in remote_branches] df = DataFrame(data, columns=['branch', 'local']) df['repository'] = self._repo_name() return df
Returns a data frame of all branches in origin. The DataFrame will have the columns: * repository * branch * local :returns: DataFrame
def tags(self): tags = self.repo.tags df = DataFrame([x.name for x in list(tags)], columns=['tag']) df['repository'] = self._repo_name() return df
Returns a data frame of all tags in origin. The DataFrame will have the columns: * repository * tag :returns: DataFrame
def _repo_name(self): if self._git_repo_name is not None: return self._git_repo_name else: reponame = self.repo.git_dir.split(os.sep)[-2] if reponame.strip() == '': return 'unknown_repo' return reponame
Returns the name of the repository, using the local directory name. :returns: str
def bus_factor(self, by='repository', ignore_globs=None, include_globs=None): if by == 'file': raise NotImplementedError('File-wise bus factor') blame = self.blame(include_globs=include_globs, ignore_globs=ignore_globs, by=by) blame = blame.sort_values(by=['loc'], ascending=False) total = blame['loc'].sum() cumulative = 0 tc = 0 for idx in range(blame.shape[0]): cumulative += blame.ix[idx, 'loc'] tc += 1 if cumulative >= total / 2: break return DataFrame([[self._repo_name(), tc]], columns=['repository', 'bus factor'])
An experimental heuristic for truck factor of a repository calculated by the current distribution of blame in the repository's primary branch. The factor is the fewest number of contributors whose contributions make up at least 50% of the codebase's LOC. :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing :param include_globs: (optional, default=None) a list of globs to include, default of None includes everything. :param by: (optional, default=repository) whether to group by repository or by file :return:
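As a worked example of the cumulative walk above: if blame attributes 500, 300, 150 and 50 LOC to four contributors (1000 LOC in total), the running total reaches the 500-LOC halfway mark after the first contributor alone, so the bus factor is 1; with a flatter split of 300, 300, 200 and 200 it takes two contributors (600 >= 500), giving a bus factor of 2.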
def file_owner(self, rev, filename, committer=True): try: if committer: cm = 'committer' else: cm = 'author' blame = self.repo.blame(rev, os.path.join(self.git_dir, filename)) blame = DataFrame([[getattr(x[0], cm).name, len(x[1])] for x in blame], columns=[cm, 'loc']).groupby(cm).agg( {'loc': np.sum}) if blame.shape[0] > 0: return blame['loc'].idxmax() else: return None except (GitCommandError, KeyError): if self.verbose: print('Couldn\'t Calculate File Owner for %s' % (rev,)) return None
Returns the owner (by majority blame) of a given file in a given rev. Returns the committer's name by default, or the author's if ``committer`` is False. :param rev: :param filename: :param committer:
def file_detail(self, include_globs=None, ignore_globs=None, rev='HEAD', committer=True): # first get the blame blame = self.blame( include_globs=include_globs, ignore_globs=ignore_globs, rev=rev, committer=committer, by='file' ) blame = blame.reset_index(level=1) blame = blame.reset_index(level=1) # reduce it to files and total LOC df = blame.reindex(columns=['file', 'loc']) df = df.groupby('file').agg({'loc': np.sum}) df = df.reset_index(level=1) # map in file owners df['file_owner'] = df['file'].map(lambda x: self.file_owner(rev, x, committer=committer)) # add extension (something like the language) df['ext'] = df['file'].map(lambda x: x.split('.')[-1]) # add in last edit date for the file df['last_edit_date'] = df['file'].map(self._file_last_edit) df['last_edit_date'] = to_datetime(df['last_edit_date']) df = df.set_index('file') return df
Returns a table of all current files in the repos, with some high level information about each file (total LOC, file owner, extension, most recent edit date, etc.). :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing :param include_globs: (optional, default=None) a list of globs to include, default of None includes everything. :param committer: (optional, default=True) true if committer should be reported, false if author :return:
def _decode(self, obj, context): return b''.join(map(int2byte, [c + 0x60 for c in bytearray(obj)])).decode("utf8")
Get the python representation of the obj
def update(self, instance, validated_data): model = self.Meta.model meta = self.Meta.model._meta original_virtual_fields = list(meta.virtual_fields) # copy if hasattr(model, '_hstore_virtual_fields'): # remove hstore virtual fields from meta for field in model._hstore_virtual_fields.values(): meta.virtual_fields.remove(field) instance = super(HStoreSerializer, self).update(instance, validated_data) if hasattr(model, '_hstore_virtual_fields'): # restore original virtual fields meta.virtual_fields = original_virtual_fields return instance
Temporarily remove hstore virtual fields, otherwise DRF considers them many-to-many.
def next_data(): "simulated data" t0 = time.time() lt = time.localtime(t0) tmin, tsec = lt[4],lt[5] u = np.random.random() v = np.random.random() x = np.sin( (u + tsec)/3.0) + tmin/30. + v/5.0 return t0, x
simulated data
def update_image(self, data): if 1 in data.shape: data = data.squeeze() if self.conf.contrast_level is not None: clevels = [self.conf.contrast_level, 100.0-self.conf.contrast_level] imin, imax = np.percentile(data, clevels) data = np.clip((data - imin)/(imax - imin + 1.e-8), 0, 1) self.axes.images[0].set_data(data) self.canvas.draw()
update image on panel, as quickly as possible
def add_highlight_area(self, mask, label=None, col=0): patch = mask * np.ones(mask.shape) * 0.9 cmap = self.conf.cmap[col] area = self.axes.contour(patch, cmap=cmap, levels=[0, 1]) self.conf.highlight_areas.append(area) col = None if hasattr(cmap, '_lut'): rgb = [int(i*240)^255 for i in cmap._lut[0][:3]] col = '#%02x%02x%02x' % (rgb[0], rgb[1], rgb[2]) if label is not None: def fmt(*args, **kws): return label self.axes.clabel(area, fontsize=9, fmt=fmt, colors=col, rightside_up=True) if col is not None: for l in area.collections: l.set_color(col) self.canvas.draw()
add a highlighted area -- outline an arbitrary shape -- as if drawn from a Lasso event. This takes a mask, which should be a boolean array of the same shape as the image.
def set_viewlimits(self, axes=None): if axes is None: axes = self.axes xmin, xmax, ymin, ymax = self.data_range if len(self.conf.zoom_lims) >1: zlims = self.conf.zoom_lims[-1] if axes in zlims: xmin, xmax, ymin, ymax = zlims[axes] xmin = max(self.data_range[0], xmin) xmax = min(self.data_range[1], xmax) ymin = max(self.data_range[2], ymin) ymax = min(self.data_range[3], ymax) if (xmax < self.data_range[0] or xmin > self.data_range[1] or ymax < self.data_range[2] or ymin > self.data_range[3] ): self.conf.zoom_lims.pop() return if abs(xmax-xmin) < 2: xmin = int(0.5*(xmax+xmin) - 1) xmax = xmin + 2 if abs(ymax-ymin) < 2: ymin = int(0.5*(ymax+ymin) - 1) ymax = ymin + 2 self.axes.set_xlim((xmin, xmax),emit=True) self.axes.set_ylim((ymin, ymax),emit=True) self.axes.update_datalim(((xmin, ymin), (xmax, ymax))) self.conf.datalimits = [xmin, xmax, ymin, ymax] self.redraw()
update xy limits of a plot
def BuildPanel(self): figsize = (1.0*self.size[0]/self.dpi, 1.0*self.size[1]/self.dpi) self.fig = Figure(figsize, dpi=self.dpi) self.axes = self.fig.add_axes([0.0, 0.0, 1.0, 1.0]) self.canvas = FigureCanvasWxAgg(self, -1, self.fig) self.fig.set_facecolor('#FFFFFD') self.conf.axes = self.axes self.conf.fig = self.fig self.conf.canvas= self.canvas # self.canvas.SetCursor(wx.StockCursor(wx.CURSOR_ARROW)) # This way of adding to sizer allows resizing sizer = wx.BoxSizer(wx.HORIZONTAL) sizer.Add(self.canvas, 1, wx.ALL|wx.GROW) self.SetSizer(sizer) self.Fit() self.addCanvasEvents()
builds basic GUI panel and popup menu
def calc_indices(self, shape): if len(shape) == 2: ny, nx = shape elif len(shape) == 3: ny, nx, nchan = shape inds = [] for iy in range(ny): inds.extend([(ix, iy) for ix in range(nx)]) self.conf.indices = np.array(inds)
calculates and stores the set of indices ix=[0, nx-1], iy=[0, ny-1] for data of shape (ny, nx)
def unzoom(self, event=None, set_bounds=True): lims = None if len(self.conf.zoom_lims) > 1: lims = self.conf.zoom_lims.pop() ax = self.axes if lims is None: # auto scale self.conf.zoom_lims = [None] xmin, xmax, ymin, ymax = self.data_range lims = {self.axes: [xmin, xmax, ymin, ymax]} self.set_viewlimits() self.canvas.draw()
zoom out 1 level, or to full data range
def zoom_leftup(self, event=None): if self.zoom_ini is None: return ini_x, ini_y, ini_xd, ini_yd = self.zoom_ini try: dx = abs(ini_x - event.x) dy = abs(ini_y - event.y) except: dx, dy = 0, 0 t0 = time.time() self.rbbox = None self.zoom_ini = None if (dx > 3) and (dy > 3) and (t0-self.mouse_uptime)>0.1: self.mouse_uptime = t0 zlims, tlims = {}, {} ax = self.axes xmin, xmax = ax.get_xlim() ymin, ymax = ax.get_ylim() zlims[ax] = [xmin, xmax, ymin, ymax] if len(self.conf.zoom_lims) == 0: self.conf.zoom_lims.append(zlims) ax_inv = ax.transData.inverted try: x1, y1 = ax_inv().transform((event.x, event.y)) except: x1, y1 = self.x_lastmove, self.y_lastmove try: x0, y0 = ax_inv().transform((ini_x, ini_y)) except: x0, y0 = ini_xd, ini_yd tlims[ax] = [int(round(min(x0, x1))), int(round(max(x0, x1))), int(round(min(y0, y1))), int(round(max(y0, y1)))] self.conf.zoom_lims.append(tlims) # now apply limits: self.set_viewlimits() if callable(self.zoom_callback): self.zoom_callback(wid=self.GetId(), limits=tlims[ax])
leftup event handler for zoom mode in images
def collect_directories(self, directories): directories = util.to_absolute_paths(directories) if not self.recursive: return self._remove_blacklisted(directories) recursive_dirs = set() for dir_ in directories: walk_iter = os.walk(dir_, followlinks=True) walk_iter = [w[0] for w in walk_iter] walk_iter = util.to_absolute_paths(walk_iter) walk_iter = self._remove_blacklisted(walk_iter) recursive_dirs.update(walk_iter) return recursive_dirs
Collects all the directories into a `set` object. If `self.recursive` is set to `True` this method will iterate through and return all of the directories and the subdirectories found from `directories` that are not blacklisted. If `self.recursive` is set to `False` this will return all the directories that are not blacklisted. `directories` may be either a single object or an iterable. Recommend passing in absolute paths instead of relative. `collect_directories` will attempt to convert `directories` to absolute paths if they are not already.
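A usage sketch (hypothetical; ``manager`` stands for an instance of the directory-manager class these methods belong to, and the paths are made up)::

    manager.add_directories(['plugins', '/opt/app/plugins'])
    manager.add_blacklisted_directories(['/opt/app/plugins/broken'])
    found = manager.collect_directories(manager.plugin_directories)
    # with recursive=True this walks every plugin directory, drops anything
    # blacklisted, and returns the surviving absolute paths as a set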
def add_directories(self, directories, except_blacklisted=True): directories = util.to_absolute_paths(directories) if except_blacklisted: directories = self._remove_blacklisted(directories) self.plugin_directories.update(directories)
Adds `directories` to the set of plugin directories. `directories` may be either a single object or an iterable. `directories` can be relative paths, but will be converted into absolute paths based on the current working directory. If `except_blacklisted` is `True` all `directories` in `self.blacklisted_directories` will be removed
def set_directories(self, directories, except_blacklisted=True): directories = util.to_absolute_paths(directories) if except_blacklisted: directories = self._remove_blacklisted(directories) self.plugin_directories = directories
Sets the plugin directories to `directories`. This will delete the previous state stored in `self.plugin_directories` in favor of the `directories` passed in. `directories` may be either a single object or an iterable. `directories` can contain relative paths but will be converted into absolute paths based on the current working directory. If `except_blacklisted` is `True` all `directories` in `self.blacklisted_directories` will be removed
def remove_directories(self, directories): directories = util.to_absolute_paths(directories) self.plugin_directories = util.remove_from_set(self.plugin_directories, directories)
Removes any `directories` from the set of plugin directories. `directories` may be a single object or an iterable. Recommend passing in all paths as absolute, but the method will attempt to convert all paths to absolute if they are not already based on the current working directory.
def add_blacklisted_directories(self, directories, remove_from_stored_directories=True): absolute_paths = util.to_absolute_paths(directories) self.blacklisted_directories.update(absolute_paths) if remove_from_stored_directories: plug_dirs = self.plugin_directories plug_dirs = util.remove_from_set(plug_dirs, directories)
Adds `directories` to be blacklisted. Blacklisted directories will not be returned or searched recursively when calling the `collect_directories` method. `directories` may be a single instance or an iterable. Recommend passing in absolute paths, but method will try to convert to absolute paths based on the current working directory. If `remove_from_stored_directories` is true, all `directories` will be removed from `self.plugin_directories`
def set_blacklisted_directories(self, directories, remove_from_stored_directories=True): absolute_paths = util.to_absolute_paths(directories) self.blacklisted_directories = absolute_paths if remove_from_stored_directories: plug_dirs = self.plugin_directories plug_dirs = util.remove_from_set(plug_dirs, directories)
Sets the `directories` to be blacklisted. Blacklisted directories will not be returned or searched recursively when calling `collect_directories`. This will replace the previously stored set of blacklisted paths. `directories` may be a single instance or an iterable. Recommend passing in absolute paths. Method will try to convert to absolute path based on current working directory.
def remove_blacklisted_directories(self, directories): directories = util.to_absolute_paths(directories) black_dirs = self.blacklisted_directories black_dirs = util.remove_from_set(black_dirs, directories)
Attempts to remove the `directories` from the set of blacklisted directories. If a particular directory is not found in the set of blacklisted directories, the method will continue on silently. `directories` may be a single instance or an iterable. Recommend passing in absolute paths. The method will try to convert a path to absolute using the current working directory if it is not one already.
def _remove_blacklisted(self, directories): directories = util.to_absolute_paths(directories) directories = util.remove_from_set(directories, self.blacklisted_directories) return directories
Attempts to remove the blacklisted directories from `directories` and then returns whatever is left in the set. Called from the `collect_directories` method.
def plot(self, x, y, **kw): return self.frame.plot(x,y,**kw)
plot x, y values (erasing old plot), for method options see PlotPanel.plot.
def oplot(self, x, y, **kw): return self.frame.oplot(x,y,**kw)
overplot x, y values (on top of old plot), for method options see PlotPanel.oplot
def imread(filename, *args, **kwargs): with TIFFfile(filename) as tif: return tif.asarray(*args, **kwargs)
Return image data from TIFF file as numpy array. The first image series is returned if no arguments are provided. Parameters ---------- key : int, slice, or sequence of page indices Defines which pages to return as array. series : int Defines which series of pages to return as array. Examples -------- >>> image = imread('test.tif', 0)
def read_bytes(fd, byte_order, dtype, count): return numpy.fromfile(fd, byte_order+dtype[-1], count).tostring()
Read tag data from file and return as byte string.
def read_numpy(fd, byte_order, dtype, count): return numpy.fromfile(fd, byte_order+dtype[-1], count)
Read tag data from file and return as numpy array.
def read_nih_image_header(fd, byte_order, dtype, count): fd.seek(12, 1) return {'version': struct.unpack(byte_order+'H', fd.read(2))[0]}
Read NIH_IMAGE_HEADER tag from file and return as dictionary.
def read_mm_header(fd, byte_order, dtype, count): return numpy.rec.fromfile(fd, MM_HEADER, 1, byteorder=byte_order)[0]
Read MM_HEADER tag from file and return as numpy.rec.array.
def read_mm_uic1(fd, byte_order, dtype, count): t = fd.read(8*count) t = struct.unpack('%s%iI' % (byte_order, 2*count), t) return dict((MM_TAG_IDS[k], v) for k, v in zip(t[::2], t[1::2]) if k in MM_TAG_IDS)
Read MM_UIC1 tag from file and return as dictionary.
def read_mm_uic2(fd, byte_order, dtype, count): result = {'number_planes': count} values = numpy.fromfile(fd, byte_order+'I', 6*count) result['z_distance'] = values[0::6] // values[1::6] #result['date_created'] = tuple(values[2::6]) #result['time_created'] = tuple(values[3::6]) #result['date_modified'] = tuple(values[4::6]) #result['time_modified'] = tuple(values[5::6]) return result
Read MM_UIC2 tag from file and return as dictionary.
def read_mm_uic3(fd, byte_order, dtype, count): t = numpy.fromfile(fd, byte_order+'I', 2*count) return {'wavelengths': t[0::2] // t[1::2]}
Read MM_UIC3 tag from file and return as dictionary.
def read_cz_lsm_info(fd, byte_order, dtype, count): result = numpy.rec.fromfile(fd, CZ_LSM_INFO, 1, byteorder=byte_order)[0] {50350412: '1.3', 67127628: '2.0'}[result.magic_number] # validation return result
Read CZ_LSM_INFO tag from file and return as numpy.rec.array.
def read_cz_lsm_time_stamps(fd, byte_order): size, count = struct.unpack(byte_order+'II', fd.read(8)) if size != (8 + 8 * count): raise ValueError("lsm_time_stamps block is too short") return struct.unpack(('%s%dd' % (byte_order, count)), fd.read(8*count))
Read LSM time stamps from file and return as list.
def read_cz_lsm_event_list(fd, byte_order): count = struct.unpack(byte_order+'II', fd.read(8))[1] events = [] while count > 0: esize, etime, etype = struct.unpack(byte_order+'IdI', fd.read(16)) etext = stripnull(fd.read(esize - 16)) events.append((etime, etype, etext)) count -= 1 return events
Read LSM events from file and return as list of (time, type, text).
def read_cz_lsm_scan_info(fd, byte_order): block = Record() blocks = [block] unpack = struct.unpack if 0x10000000 != struct.unpack(byte_order+"I", fd.read(4))[0]: raise ValueError("not a lsm_scan_info structure") fd.read(8) while True: entry, dtype, size = unpack(byte_order+"III", fd.read(12)) if dtype == 2: value = stripnull(fd.read(size)) elif dtype == 4: value = unpack(byte_order+"i", fd.read(4))[0] elif dtype == 5: value = unpack(byte_order+"d", fd.read(8))[0] else: value = 0 if entry in CZ_LSM_SCAN_INFO_ARRAYS: blocks.append(block) name = CZ_LSM_SCAN_INFO_ARRAYS[entry] newobj = [] setattr(block, name, newobj) block = newobj elif entry in CZ_LSM_SCAN_INFO_STRUCTS: blocks.append(block) newobj = Record() block.append(newobj) block = newobj elif entry in CZ_LSM_SCAN_INFO_ATTRIBUTES: name = CZ_LSM_SCAN_INFO_ATTRIBUTES[entry] setattr(block, name, value) elif entry == 0xffffffff: block = blocks.pop() else: setattr(block, "unknown_%x" % entry, value) if not blocks: break return block
Read LSM scan information from file and return as Record.
def _replace_by(module_function, warn=False): def decorate(func, module_function=module_function, warn=warn): sys.path.append(os.path.dirname(__file__)) try: module, function = module_function.split('.') func, oldfunc = getattr(__import__(module), function), func globals()['__old_' + func.__name__] = oldfunc except Exception: if warn: warnings.warn("failed to import %s" % module_function) sys.path.pop() return func return decorate
Try to replace the decorated function by module.function.
def decodepackbits(encoded): func = ord if sys.version[0] == '2' else lambda x: x result = [] i = 0 try: while True: n = func(encoded[i]) + 1 i += 1 if n < 129: result.extend(encoded[i:i+n]) i += n elif n > 129: result.extend(encoded[i:i+1] * (258-n)) i += 1 except IndexError: pass return b''.join(result) if sys.version[0] == '2' else bytes(result)
Decompress PackBits encoded byte string. PackBits is a simple byte-oriented run-length compression scheme.
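Two hand-decoded cases (standard PackBits semantics, which the loop above implements; byte-string reprs shown as Python 3 would print them)::

    >>> decodepackbits(b'\x02abc')    # 0x02 -> copy the next 3 bytes literally
    b'abc'
    >>> decodepackbits(b'\xfe\xaa')   # 0xfe (-2 signed) -> repeat next byte 3 times
    b'\xaa\xaa\xaa'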
def reorient(image, orientation): o = TIFF_ORIENTATIONS.get(orientation, orientation) if o == 'top_left': return image elif o == 'top_right': return image[..., ::-1, :] elif o == 'bottom_left': return image[..., ::-1, :, :] elif o == 'bottom_right': return image[..., ::-1, ::-1, :] elif o == 'left_top': return numpy.swapaxes(image, -3, -2) elif o == 'right_top': return numpy.swapaxes(image, -3, -2)[..., ::-1, :] elif o == 'left_bottom': return numpy.swapaxes(image, -3, -2)[..., ::-1, :, :] elif o == 'right_bottom': return numpy.swapaxes(image, -3, -2)[..., ::-1, ::-1, :]
Return reoriented view of image array. Parameters ---------- image : numpy array Non-squeezed output of asarray() functions. Axes -3 and -2 must be image length and width respectively. orientation : int or str One of TIFF_ORIENTATIONS keys or values.
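A small check of the axis handling (illustrative; axes -3 and -2 are image length and width, as stated above)::

    >>> import numpy
    >>> img = numpy.arange(6).reshape(1, 2, 3, 1)    # (depth, length, width, samples)
    >>> reorient(img, 'top_right')[0, :, :, 0]       # width axis flipped
    array([[2, 1, 0],
           [5, 4, 3]])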
def stripnull(string): i = string.find(b'\x00') return string if (i < 0) else string[:i]
Return string truncated at first null character.
def datetime_from_timestamp(n, epoch=datetime.datetime.fromordinal(693594)): return epoch + datetime.timedelta(n)
Return datetime object from timestamp in Excel serial format. Examples -------- >>> datetime_from_timestamp(40237.029999999795) datetime.datetime(2010, 2, 28, 0, 43, 11, 999982)
def close(self): if not hasattr(self, '_tiffs'): return for tif in self._tiffs.values(): if tif._fd: tif._fd.close() tif._fd = None
Close open file handle(s).
def _fromfile(self): self._fd.seek(0) try: self.byte_order = {b'II': '<', b'MM': '>'}[self._fd.read(2)] except KeyError: raise ValueError("not a valid TIFF file") version = struct.unpack(self.byte_order+'H', self._fd.read(2))[0] if version == 43: # BigTiff self.offset_size, zero = struct.unpack(self.byte_order+'HH', self._fd.read(4)) if zero or self.offset_size != 8: raise ValueError("not a valid BigTIFF file") elif version == 42: self.offset_size = 4 else: raise ValueError("not a TIFF file") self.pages = [] while True: try: page = TIFFpage(self) self.pages.append(page) except StopIteration: break if not self.pages: raise ValueError("empty TIFF file")
Read TIFF header and all page records from file.
def asarray(self, key=None, series=None): if key is None and series is None: series = 0 if series is not None: pages = self.series[series].pages else: pages = self.pages if key is None: pass elif isinstance(key, int): pages = [pages[key]] elif isinstance(key, slice): pages = pages[key] elif isinstance(key, collections.Iterable): pages = [pages[k] for k in key] else: raise TypeError('key must be an int, slice, or sequence') if len(pages) == 1: return pages[0].asarray() elif self.is_nih: result = numpy.vstack(p.asarray(colormapped=False, squeeze=False) for p in pages) if pages[0].is_palette: result = numpy.take(pages[0].color_map, result, axis=1) result = numpy.swapaxes(result, 0, 1) else: if self.is_ome and any(p is None for p in pages): firstpage = next(p for p in pages if p) nopage = numpy.zeros_like( firstpage.asarray()) result = numpy.vstack((p.asarray() if p else nopage) for p in pages) if key is None: try: result.shape = self.series[series].shape except ValueError: warnings.warn("failed to reshape %s to %s" % ( result.shape, self.series[series].shape)) result.shape = (-1,) + pages[0].shape else: result.shape = (-1,) + pages[0].shape return result
Return image data of multiple TIFF pages as numpy array. By default the first image series is returned. Parameters ---------- key : int, slice, or sequence of page indices Defines which pages to return as array. series : int Defines which series of pages to return as array.
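A usage sketch mirroring the module-level ``imread`` helper shown earlier (assuming ``test.tif`` exists)::

    with TIFFfile('test.tif') as tif:
        first_series = tif.asarray(series=0)                # whole first image series
        every_other = tif.asarray(key=slice(0, None, 2))    # pages 0, 2, 4, ...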
def _fromfile(self): fd = self.parent._fd byte_order = self.parent.byte_order offset_size = self.parent.offset_size fmt = {4: 'I', 8: 'Q'}[offset_size] offset = struct.unpack(byte_order + fmt, fd.read(offset_size))[0] if not offset: raise StopIteration() # read standard tags tags = self.tags fd.seek(offset) fmt, size = {4: ('H', 2), 8: ('Q', 8)}[offset_size] try: numtags = struct.unpack(byte_order + fmt, fd.read(size))[0] except Exception: warnings.warn("corrupted page list") raise StopIteration() for _ in range(numtags): tag = TIFFtag(self.parent) tags[tag.name] = tag # read LSM info subrecords if self.is_lsm: pos = fd.tell() for name, reader in CZ_LSM_INFO_READERS.items(): try: offset = self.cz_lsm_info["offset_"+name] except KeyError: continue if not offset: continue fd.seek(offset) try: setattr(self, "cz_lsm_"+name, reader(fd, byte_order)) except ValueError: pass fd.seek(pos)
Read TIFF IFD structure and its tags from file. File cursor must be at storage position of IFD offset and is left at offset to next IFD. Raises StopIteration if offset (first bytes read) is 0.
def _fromdata(self, code, dtype, count, value, name=None): self.code = int(code) self.name = name if name else str(code) self.dtype = TIFF_DATA_TYPES[dtype] self.count = int(count) self.value = value
Initialize instance from arguments.
def set_xylims(self, limits, axes=None): if self.panel is not None: self.panel.set_xylims(limits, axes=axes)
set the x and y view limits of the plot panel
def unzoom_all(self,event=None): if self.panel is not None: self.panel.unzoom_all(event=event)
zoom out full data range
def unzoom(self,event=None): if self.panel is not None: self.panel.unzoom(event=event)
zoom out 1 level, or to full data range
def set_xlabel(self,s): "set plot xlabel" if self.panel is not None: self.panel.set_xlabel(s) self.panel.canvas.draw()
set plot xlabel
def set_ylabel(self,s): "set plot ylabel" if self.panel is not None: self.panel.set_ylabel(s) self.panel.canvas.draw()
set plot ylabel
def save_figure(self,event=None, transparent=False, dpi=600): if self.panel is not None: self.panel.save_figure(event=event, transparent=transparent, dpi=dpi)
save figure image to file
def plot(self, x, y, panel='top', xlabel=None, **kws): panel = self.get_panel(panel) panel.plot(x, y, **kws) if xlabel is not None: self.xlabel = xlabel if self.xlabel is not None: self.panel_bot.set_xlabel(self.xlabel)
plot after clearing current plot
def unzoom_all(self, event=None): for p in (self.panel, self.panel_bot): p.conf.zoom_lims = [] p.conf.unzoom(full=True)
zoom out full data range
def unzoom(self, event=None, panel='top'): panel = self.get_panel(panel) panel.conf.unzoom(event=event) self.panel.set_viewlimits()
zoom out 1 level, or to full data range
def update_line(self, t, x, y, panel='top', **kws): panel = self.get_panel(panel) panel.update_line(t, x, y, **kws)
overwrite data for trace t