dirs that don't contain a valid index and aren't listed in the main index
def get_corrupted_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """dirs that don't contain a valid index and aren't listed in the main index"""
    corrupted = {}
    for snapshot in snapshots.iterator():
        link = snapshot.as_link()
        if is_corrupt(link):
            corrupted[link.link_dir] = link
    return corrupted
dirs that don't contain recognizable archive data and aren't listed in the main index
def get_unrecognized_folders(snapshots, out_dir: Path=OUTPUT_DIR) -> Dict[str, Optional[Link]]:
    """dirs that don't contain recognizable archive data and aren't listed in the main index"""
    unrecognized_folders: Dict[str, Optional[Link]] = {}

    for entry in (Path(out_dir) / ARCHIVE_DIR_NAME).iterdir():
        if entry.is_dir():
            index_exists = (entry / "index.json").exists()
            link = None
            try:
                link = parse_json_link_details(str(entry))
            except KeyError:
                # Try to fix index
                if index_exists:
                    try:
                        # Last attempt to repair the detail index
                        link_guessed = parse_json_link_details(str(entry), guess=True)
                        write_json_link_details(link_guessed, out_dir=str(entry))
                        link = parse_json_link_details(str(entry))
                    except Exception:
                        pass

            if index_exists and link is None:
                # index exists but it's corrupted or unparseable
                unrecognized_folders[str(entry)] = link
            elif not index_exists:
                # link details index doesn't exist and the folder isn't in the main index
                timestamp = entry.name
                if not snapshots.filter(timestamp=timestamp).exists():
                    unrecognized_folders[str(entry)] = link

    return unrecognized_folders
Parse Generic HTML for href tags and use only the url (support for title coming later)
def parse_generic_html_export(html_file: IO[str], root_url: Optional[str]=None, **_kwargs) -> Iterable[Link]:
    """Parse Generic HTML for href tags and use only the url (support for title coming later)"""

    html_file.seek(0)
    for line in html_file:
        parser = HrefParser()
        # example line
        # <li><a href="http://example.com/ time_added="1478739709" tags="tag1,tag2">example title</a></li>
        parser.feed(line)
        for url in parser.urls:
            if root_url:
                url_is_absolute = (url.lower().startswith('http://') or url.lower().startswith('https://'))
                # url = https://abc.com                       => True
                # url = /page.php?next=https://example.com    => False
                if not url_is_absolute:
                    # resolve it by joining it with root_url
                    relative_path = url
                    url = urljoin(root_url, relative_path)    # https://example.com/somepage.html + /home.html
                                                              # => https://example.com/home.html

                    # special case to handle bug around // handling, crucial for urls that contain sub-urls
                    # e.g. https://web.archive.org/web/https://example.com
                    if did_urljoin_misbehave(root_url, relative_path, url):
                        url = fix_urljoin_bug(url)

            for archivable_url in find_all_urls(url):
                yield Link(
                    url=htmldecode(archivable_url),
                    timestamp=str(datetime.now(timezone.utc).timestamp()),
                    title=None,
                    tags=None,
                    sources=[html_file.name],
                )
Handle urljoin edge case bug where multiple slashes get turned into a single slash: - https://github.com/python/cpython/issues/96015 - https://github.com/ArchiveBox/ArchiveBox/issues/1411 This workaround only fixes the most common case of a sub-URL inside an outer URL, e.g.: https://web.archive.org/web/https://example.com/some/inner/url But there are other valid URLs containing // that are not fixed by this workaround, e.g.: https://example.com/drives/C//some/file
def did_urljoin_misbehave(root_url: str, relative_path: str, final_url: str) -> bool:
    """
    Handle urljoin edge case bug where multiple slashes get turned into a single slash:
    - https://github.com/python/cpython/issues/96015
    - https://github.com/ArchiveBox/ArchiveBox/issues/1411

    This workaround only fixes the most common case of a sub-URL inside an outer URL, e.g.:
        https://web.archive.org/web/https://example.com/some/inner/url
    But there are other valid URLs containing // that are not fixed by this workaround, e.g.:
        https://example.com/drives/C//some/file
    """
    # if relative path is actually an absolute url, cut off its own scheme so we check the path component only
    relative_path = relative_path.lower()
    if relative_path.startswith('http://') or relative_path.startswith('https://'):
        relative_path = relative_path.split('://', 1)[-1]

    # TODO: properly fix all double // getting stripped by urljoin, not just ://
    original_path_had_suburl = '://' in relative_path
    original_root_had_suburl = '://' in root_url[8:]   # ignore first 8 chars because root always starts with https://
    final_joined_has_suburl = '://' in final_url[8:]   # ignore first 8 chars because final always starts with https://

    urljoin_broke_suburls = (
        (original_root_had_suburl or original_path_had_suburl)
        and not final_joined_has_suburl
    )
    return urljoin_broke_suburls
recursively replace broken suburls .../http:/... with .../http://... basically equivalent to this for 99.9% of cases: url = url.replace('/http:/', '/http://') url = url.replace('/https:/', '/https://') except this handles: other schemes besides http/https (e.g. https://example.com/link/git+ssh://github.com/example) other preceding separators besides / (e.g. https://example.com/login/?next=https://example.com/home) fixing multiple suburls recursively
def fix_urljoin_bug(url: str, nesting_limit=5):
    """
    recursively replace broken suburls .../http:/... with .../http://...

    basically equivalent to this for 99.9% of cases:
        url = url.replace('/http:/', '/http://')
        url = url.replace('/https:/', '/https://')
    except this handles:
        other schemes besides http/https     (e.g. https://example.com/link/git+ssh://github.com/example)
        other preceding separators besides / (e.g. https://example.com/login/?next=https://example.com/home)
        fixing multiple suburls recursively
    """
    input_url = url
    for _ in range(nesting_limit):
        url = re.sub(
            r'(?P<root>.+?)'                            # https://web.archive.org/web
            r'(?P<separator>[-=/_&+%$#@!*\(\\])'        # /
            r'(?P<subscheme>[a-zA-Z0-9+_-]{1,32}?):/'   # http:/
            r'(?P<suburl>[^/\\]+)',                     # example.com
            r"\1\2\3://\4",
            input_url,
            flags=re.IGNORECASE | re.UNICODE,           # must be passed as flags=, not as the positional count argument
        )
        if url == input_url:
            break   # nothing left to replace, all suburls are fixed
        input_url = url

    return url
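A quick, self-contained sketch of how the two helpers above are meant to compose (the URLs are made up, and the example assumes both functions are importable; the pre-collapsed `joined` value simply stands in for what a misbehaving urljoin can produce):

root_url = 'https://web.archive.org/web/'
relative_path = '/web/https://example.com/some/page'
joined = 'https://web.archive.org/web/https:/example.com/some/page'   # '//' already collapsed to '/'

if did_urljoin_misbehave(root_url, relative_path, joined):
    joined = fix_urljoin_bug(joined)

print(joined)   # https://web.archive.org/web/https://example.com/some/page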
Parse JSON-format bookmarks export files (produced by pinboard.in/export/, or wallabag)
def parse_generic_json_export(json_file: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse JSON-format bookmarks export files (produced by pinboard.in/export/, or wallabag)"""

    json_file.seek(0)
    links = json.load(json_file)
    if type(links) != list:
        raise Exception('JSON parser expects list of objects, maybe this is JSONL?')

    for link in links:
        if link:
            yield jsonObjectToLink(link, json_file.name)
Parse JSONL format bookmarks export files
def parse_generic_jsonl_export(json_file: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse JSONL format bookmarks export files"""

    json_file.seek(0)
    links = [parse_line(line) for line in json_file]

    for link in links:
        if link:
            yield jsonObjectToLink(link, json_file.name)
Parse RSS XML-format files into links
def parse_generic_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse RSS XML-format files into links"""

    rss_file.seek(0)
    feed = feedparser(rss_file.read())
    for item in feed.entries:
        url = item.link
        title = item.title
        time = mktime(item.updated_parsed)

        try:
            tags = ','.join(map(lambda tag: tag.term, item.tags))
        except AttributeError:
            tags = ''

        if url is None:
            # Yielding a Link with no URL will
            # crash on a URL validation assertion
            continue

        yield Link(
            url=htmldecode(url),
            timestamp=str(time),
            title=htmldecode(title) or None,
            tags=tags,
            sources=[rss_file.name],
        )
Parse links from a text file, ignoring other text
def parse_generic_txt_export(text_file: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse links from a text file, ignoring other text"""

    text_file.seek(0)
    for line in text_file.readlines():
        if not line.strip():
            continue

        # if the line is a local file path that resolves, then we can archive it
        try:
            if Path(line).exists():
                yield Link(
                    url=line,
                    timestamp=str(datetime.now(timezone.utc).timestamp()),
                    title=None,
                    tags=None,
                    sources=[text_file.name],
                )
        except (OSError, PermissionError):
            # nvm, not a valid path...
            pass

        # otherwise look for anything that looks like a URL in the line
        for url in find_all_urls(line):
            yield Link(
                url=htmldecode(url),
                timestamp=str(datetime.now(timezone.utc).timestamp()),
                title=None,
                tags=None,
                sources=[text_file.name],
            )
Parse Medium RSS feed files into links
def parse_medium_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse Medium RSS feed files into links"""

    rss_file.seek(0)
    root = ElementTree.parse(rss_file).getroot()
    items = root.find("channel").findall("item")                    # type: ignore
    for item in items:
        url = item.find("link").text                                # type: ignore
        title = item.find("title").text.strip()                     # type: ignore
        ts_str = item.find("pubDate").text                          # type: ignore
        time = datetime.strptime(ts_str, "%a, %d %b %Y %H:%M:%S %Z")  # type: ignore

        yield Link(
            url=htmldecode(url),
            timestamp=str(time.timestamp()),
            title=htmldecode(title) or None,
            tags=None,
            sources=[rss_file.name],
        )
Parse netscape-format bookmarks export files (produced by all browsers)
def parse_netscape_html_export(html_file: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse netscape-format bookmarks export files (produced by all browsers)"""

    html_file.seek(0)
    pattern = re.compile("<a href=\"(.+?)\" add_date=\"(\\d+)\"[^>]*>(.+)</a>", re.UNICODE | re.IGNORECASE)
    for line in html_file:
        # example line
        # <DT><A HREF="https://example.com/?q=1+2" ADD_DATE="1497562974" LAST_MODIFIED="1497562974" ICON_URI="https://example.com/favicon.ico" ICON="data:image/png;base64,...">example bookmark title</A>
        match = pattern.search(line)
        if match:
            url = match.group(1)
            time = datetime.fromtimestamp(float(match.group(2)))
            title = match.group(3).strip()

            yield Link(
                url=htmldecode(url),
                timestamp=str(time.timestamp()),
                title=htmldecode(title) or None,
                tags=None,
                sources=[html_file.name],
            )
Parse Pinboard RSS feed files into links
def parse_pinboard_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse Pinboard RSS feed files into links"""

    rss_file.seek(0)
    feed = feedparser(rss_file.read())
    for item in feed.entries:
        url = item.link
        # title will start with "[priv] " if pin was marked private. useful?
        title = item.title
        time = mktime(item.updated_parsed)

        # all tags are in one entry.tags with spaces in it. annoying!
        try:
            tags = item.tags[0].term.replace(' ', ',')
        except AttributeError:
            tags = ''

        if url is None:
            # Yielding a Link with no URL will
            # crash on a URL validation assertion
            continue

        yield Link(
            url=htmldecode(url),
            timestamp=str(time),
            title=htmldecode(title) or None,
            tags=htmldecode(tags) or None,
            sources=[rss_file.name],
        )
Parse bookmarks from the Pocket API
def parse_pocket_api_export(input_buffer: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse bookmarks from the Pocket API"""

    input_buffer.seek(0)
    pattern = re.compile(r"^pocket:\/\/(\w+)")
    for line in input_buffer:
        if should_parse_as_pocket_api(line):
            username = pattern.search(line).group(1)
            api = Pocket(POCKET_CONSUMER_KEY, POCKET_ACCESS_TOKENS[username])
            api.last_since = None

            for article in get_pocket_articles(api, since=read_since(username)):
                yield link_from_article(article, sources=[line])

            write_since(username, api.last_since)
Parse Pocket-format bookmarks export files (produced by getpocket.com/export/)
def parse_pocket_html_export(html_file: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse Pocket-format bookmarks export files (produced by getpocket.com/export/)"""

    html_file.seek(0)
    pattern = re.compile("^\\s*<li><a href=\"(.+)\" time_added=\"(\\d+)\" tags=\"(.*)\">(.+)</a></li>", re.UNICODE)
    for line in html_file:
        # example line
        # <li><a href="http://example.com/ time_added="1478739709" tags="tag1,tag2">example title</a></li>
        match = pattern.search(line)
        if match:
            url = match.group(1).replace('http://www.readability.com/read?url=', '')  # remove old readability prefixes to get original url
            time = datetime.fromtimestamp(float(match.group(2)))
            tags = match.group(3)
            title = match.group(4).replace(' — Readability', '').replace('http://www.readability.com/read?url=', '')

            yield Link(
                url=htmldecode(url),
                timestamp=str(time.timestamp()),
                title=htmldecode(title) or None,
                tags=tags or '',
                sources=[html_file.name],
            )
Parse bookmarks from the Readwise Reader API
def parse_readwise_reader_api_export(input_buffer: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse bookmarks from the Readwise Reader API"""

    input_buffer.seek(0)
    pattern = re.compile(r"^readwise-reader:\/\/(\w+)")
    for line in input_buffer:
        if should_parse_as_readwise_reader_api(line):
            username = pattern.search(line).group(1)
            api = ReadwiseReaderAPI(READWISE_READER_TOKENS[username], cursor=read_cursor(username))

            for article in get_readwise_reader_articles(api):
                yield link_from_article(article, sources=[line])

            if api.cursor:
                write_cursor(username, api.cursor)
Parse Shaarli-specific RSS XML-format files into links
def parse_shaarli_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse Shaarli-specific RSS XML-format files into links"""

    rss_file.seek(0)
    entries = rss_file.read().split('<entry>')[1:]
    for entry in entries:
        # example entry:
        # <entry>
        #   <title>Aktuelle Trojaner-Welle: Emotet lauert in gefälschten Rechnungsmails | heise online</title>
        #   <link href="https://www.heise.de/security/meldung/Aktuelle-Trojaner-Welle-Emotet-lauert-in-gefaelschten-Rechnungsmails-4291268.html" />
        #   <id>https://demo.shaarli.org/?cEV4vw</id>
        #   <published>2019-01-30T06:06:01+00:00</published>
        #   <updated>2019-01-30T06:06:01+00:00</updated>
        #   <content type="html" xml:lang="en"><![CDATA[<div class="markdown"><p>&#8212; <a href="https://demo.shaarli.org/?cEV4vw">Permalink</a></p></div>]]></content>
        # </entry>

        trailing_removed = entry.split('</entry>', 1)[0]
        leading_removed = trailing_removed.strip()
        rows = leading_removed.split('\n')

        def get_row(key):
            return [r.strip() for r in rows if r.strip().startswith('<{}'.format(key))][0]

        title = str_between(get_row('title'), '<title>', '</title>').strip()
        url = str_between(get_row('link'), '<link href="', '" />')
        ts_str = str_between(get_row('published'), '<published>', '</published>')
        time = datetime.strptime(ts_str, "%Y-%m-%dT%H:%M:%S%z")

        yield Link(
            url=htmldecode(url),
            timestamp=str(time.timestamp()),
            title=htmldecode(title) or None,
            tags=None,
            sources=[rss_file.name],
        )
Parse raw URLs from each line in a text file
def parse_url_list(text_file: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse raw URLs from each line in a text file"""

    text_file.seek(0)
    for line in text_file.readlines():
        url = line.strip()
        if (not url) or not re.findall(URL_REGEX, url):
            continue

        yield Link(
            url=url,
            timestamp=str(datetime.now(timezone.utc).timestamp()),
            title=None,
            tags=None,
            sources=[text_file.name],
        )
Parse Wallabag Atom files into links
def parse_wallabag_atom_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
    """Parse Wallabag Atom files into links"""

    rss_file.seek(0)
    entries = rss_file.read().split('<entry>')[1:]
    for entry in entries:
        # example entry:
        # <entry>
        #   <title><![CDATA[Orient Ray vs Mako: Is There Much Difference? - iknowwatches.com]]></title>
        #   <link rel="alternate" type="text/html"
        #         href="http://wallabag.drycat.fr/view/14041"/>
        #   <link rel="via">https://iknowwatches.com/orient-ray-vs-mako/</link>
        #   <id>wallabag:wallabag.drycat.fr:milosh:entry:14041</id>
        #   <updated>2020-10-18T09:14:02+02:00</updated>
        #   <published>2020-10-18T09:13:56+02:00</published>
        #   <category term="montres" label="montres" />
        #   <content type="html" xml:lang="en">
        # </entry>

        trailing_removed = entry.split('</entry>', 1)[0]
        leading_removed = trailing_removed.strip()
        splits_fixed = leading_removed.replace('"\n href="', '" href="')
        rows = splits_fixed.split('\n')

        def get_row(prefix):
            return [row.strip() for row in rows if row.strip().startswith('<{}'.format(prefix))][0]

        title = str_between(get_row('title'), '<title><![CDATA[', ']]></title>').strip()
        url_inside_link = str_between(get_row('link rel="via"'), '<link rel="via">', '</link>')
        url_inside_attr = str_between(get_row('link rel="via"'), 'href="', '"/>')
        ts_str = str_between(get_row('published'), '<published>', '</published>')
        time = datetime.strptime(ts_str, "%Y-%m-%dT%H:%M:%S%z")

        try:
            tags = str_between(get_row('category'), 'label="', '" />')
        except Exception:
            tags = None

        yield Link(
            url=htmldecode(url_inside_attr or url_inside_link),
            timestamp=str(time.timestamp()),
            title=htmldecode(title) or None,
            tags=tags or '',
            sources=[rss_file.name],
        )
parse a list of URLS without touching the filesystem
def parse_links_memory(urls: List[str], root_url: Optional[str]=None):
    """
    parse a list of URLS without touching the filesystem
    """
    timer = TimedProgress(TIMEOUT * 4)

    # urls = list(map(lambda x: x + "\n", urls))
    file = StringIO()
    file.writelines(urls)
    file.name = "io_string"
    links, parser = run_parser_functions(file, timer, root_url=root_url)
    timer.end()

    if parser is None:
        return [], 'Failed to parse'
    return links, parser
parse a list of URLs with their metadata from an RSS feed, bookmarks export, or text file
def parse_links(source_file: str, root_url: Optional[str]=None, parser: str="auto") -> Tuple[List[Link], str]:
    """parse a list of URLs with their metadata from an RSS feed, bookmarks export, or text file"""

    timer = TimedProgress(TIMEOUT * 4)
    with open(source_file, 'r', encoding='utf-8') as file:
        links, parser = run_parser_functions(file, timer, root_url=root_url, parser=parser)
        timer.end()

    if parser is None:
        return [], 'Failed to parse'
    return links, parser
download a given url's content into output/sources/domain-<timestamp>.txt
def save_file_as_source(path: str, timeout: int=TIMEOUT, filename: str='{ts}-{basename}.txt', out_dir: Path=OUTPUT_DIR) -> str:
    """download a given url's content into output/sources/domain-<timestamp>.txt"""

    ts = str(datetime.now(timezone.utc).timestamp()).split('.', 1)[0]
    source_path = str(OUTPUT_DIR / SOURCES_DIR_NAME / filename.format(basename=basename(path), ts=ts))

    if any(path.startswith(s) for s in ('http://', 'https://', 'ftp://')):
        # Source is a URL that needs to be downloaded
        print(f' > Downloading {path} contents')
        timer = TimedProgress(timeout, prefix=' ')
        try:
            raw_source_text = download_url(path, timeout=timeout)
            raw_source_text = htmldecode(raw_source_text)
            timer.end()
        except Exception as e:
            timer.end()
            print('{}[!] Failed to download {}{}\n'.format(
                ANSI['red'],
                path,
                ANSI['reset'],
            ))
            print('    ', e)
            raise e
    else:
        # Source is a path to a local file on the filesystem
        with open(path, 'r') as f:
            raw_source_text = f.read()

    atomic_write(source_path, raw_source_text)

    log_source_saved(source_file=source_path)

    return source_path
Takes the passed method out of the default methods list and returns that value
def test_ignore_methods():
    """
    Takes the passed method out of the default methods list and returns that value
    """
    ignored = ignore_methods(['title'])
    assert "title" not in ignored
https://github.com/ArchiveBox/ArchiveBox/issues/330 Unencoded content should not be rendered as it facilitates xss injections and breaks the layout.
def test_title_is_htmlencoded_in_index_html(tmp_path, process, disable_extractors_dict):
    """
    https://github.com/ArchiveBox/ArchiveBox/issues/330
    Unencoded content should not be rendered as it facilitates xss injections
    and breaks the layout.
    """
    subprocess.run(['archivebox', 'add', 'http://127.0.0.1:8080/static/title_with_html.com.html'],
                   capture_output=True, env=disable_extractors_dict)

    list_process = subprocess.run(["archivebox", "list", "--html"], capture_output=True)
    assert "<textarea>" not in list_process.stdout.decode("utf-8")
expand image dimension to add a channel dimension
def expand_channel_dim(img):
    """expand image dimension to add a channel dimension"""
    return np.expand_dims(img, 0)
transpose the image from height, width, channel -> channel, height, width (pytorch format)
def image_hwc_to_chw(img):
    """transpose the image from height, width, channel -> channel, height, width (pytorch format)"""
    return img.transpose((2, 0, 1))
revert image_hwc_to_chw function
def image_chw_to_hwc(img):
    """revert image_hwc_to_chw function"""
    return img.transpose((1, 2, 0))
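A quick round-trip sketch of the three image helpers above (numpy only; the shapes are illustrative, assuming the functions are importable):

import numpy as np

img_hwc = np.zeros((288, 384, 3), dtype=np.float32)   # (height, width, channel)
img_chw = image_hwc_to_chw(img_hwc)                   # -> (3, 288, 384), pytorch layout
restored = image_chw_to_hwc(img_chw)                  # -> (288, 384, 3) again

depth = np.zeros((288, 384), dtype=np.float32)        # single-channel image without a channel axis
depth_chw = expand_channel_dim(depth)                 # -> (1, 288, 384)

assert restored.shape == img_hwc.shape and depth_chw.shape == (1, 288, 384)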
Args: loader: TenFpsDataLoader Returns: world_pc: (N, 3) xyz in world coordinate system world_sem: (N, d) semantic for each point grid_size: float keep only one point in each (g_size, g_size, g_size) grid
def accumulate_wrapper(loader, grid_size=0.05):
    """
    Args:
        loader: TenFpsDataLoader
    Returns:
        world_pc: (N, 3)
            xyz in world coordinate system
        world_sem: (N, d)
            semantic for each point
        grid_size: float
            keep only one point in each (g_size, g_size, g_size) grid
    """
    world_pc, world_rgb, poses = np.zeros((0, 3)), np.zeros((0, 3)), []
    for i in range(len(loader)):
        frame = loader[i]
        print(f"{i}/{len(loader)}", frame["image_path"])
        image_path = frame["image_path"]
        pcd = frame["pcd"]  # in world coordinate
        pose = frame["pose"]
        rgb = frame["color"]

        world_pc = np.concatenate((world_pc, pcd), axis=0)
        world_rgb = np.concatenate((world_rgb, rgb), axis=0)

        choices = pc_utils.down_sample(world_pc, grid_size)
        world_pc = world_pc[choices]
        world_rgb = world_rgb[choices]

    return world_pc, world_rgb, poses
Args: points: (N, 3) boxes: (m, 8, 3) Returns: votes: (N, 4)
def get_votes(points, gt_boxes):
    """
    Args:
        points: (N, 3)
        boxes: (m, 8, 3)
    Returns:
        votes: (N, 4)
    """
    n_point = points.shape[0]
    point_votes = np.zeros((n_point, 4)).astype(np.float32)

    for obj_id in range(gt_boxes.shape[0]):
        tmp_box3d = np.expand_dims(gt_boxes[obj_id], 0)  # (8, 3)
        # (n_point, 1)
        mask_pts = points_in_boxes(points[:, :3], tmp_box3d)
        mask_pts = mask_pts.reshape((-1,))
        point_votes[mask_pts, 0] = 1.0
        obj_center = np.mean(tmp_box3d, axis=1)  # (1, 3)

        # get votes
        pc_roi = points[mask_pts, :3]
        tmp_votes = obj_center - pc_roi
        point_votes[mask_pts, 1:4] = tmp_votes

    return point_votes
Args: pose: np.array (4, 4) Returns: index: int (0, 1, 2, 3) for upright, left, upside-down and right
def decide_pose(pose):
    """
    Args:
        pose: np.array (4, 4)
    Returns:
        index: int (0, 1, 2, 3) for upright, left, upside-down and right
    """
    # pose style
    z_vec = pose[2, :3]
    z_orien = np.array(
        [
            [0.0, -1.0, 0.0],   # upright
            [-1.0, 0.0, 0.0],   # left
            [0.0, 1.0, 0.0],    # upside-down
            [1.0, 0.0, 0.0],    # right
        ]
    )
    corr = np.matmul(z_orien, z_vec)
    corr_max = np.argmax(corr)
    return corr_max
Args: im: (m, n)
def rotate_pose(im, rot_index):
    """
    Args:
        im: (m, n)
    """
    h, w, d = im.shape
    if d == 3:
        if rot_index == 0:
            new_im = im
        elif rot_index == 1:
            new_im = cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)
        elif rot_index == 2:
            new_im = cv2.rotate(im, cv2.ROTATE_180)
        elif rot_index == 3:
            new_im = cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)
    return new_im
Args: box: 8x3 Returns: size: [dx, dy, dz]
def get_size(box):
    """
    Args:
        box: 8x3
    Returns:
        size: [dx, dy, dz]
    """
    distance = scipy.spatial.distance.cdist(box[0:1, :], box[1:5, :])
    l = distance[0, 2]
    w = distance[0, 0]
    h = distance[0, 3]
    return [l, w, h]
Args: box: (8, 3) Returns: heading_angle: float
def get_heading_angle(box):
    """
    Args:
        box: (8, 3)
    Returns:
        heading_angle: float
    """
    a = box[0, 0] - box[1, 0]
    b = box[0, 1] - box[1, 1]
    heading_angle = np.arctan2(a, b)
    return heading_angle
Compute corners of a single box from rotation matrix Args: size: list of float [dx, dy, dz] center: np.array [x, y, z] rotmat: np.array (3, 3) Returns: corners: (8, 3)
def compute_box_3d(size, center, rotmat):
    """Compute corners of a single box from rotation matrix
    Args:
        size: list of float [dx, dy, dz]
        center: np.array [x, y, z]
        rotmat: np.array (3, 3)
    Returns:
        corners: (8, 3)
    """
    l, h, w = [i / 2 for i in size]
    center = np.reshape(center, (-1, 3))
    center = center.reshape(3)
    x_corners = [l, l, -l, -l, l, l, -l, -l]
    y_corners = [h, -h, -h, h, h, -h, -h, h]
    z_corners = [w, w, w, w, -w, -w, -w, -w]
    corners_3d = np.dot(
        np.transpose(rotmat), np.vstack([x_corners, y_corners, z_corners])
    )
    corners_3d[0, :] += center[0]
    corners_3d[1, :] += center[1]
    corners_3d[2, :] += center[2]
    return np.transpose(corners_3d)
7 -------- 4 /| /| 6 -------- 5 . | | | | . 3 -------- 0 |/ |/ 2 -------- 1 Args: corners: (N, 8, 3), vertex order shown in figure above Returns: boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading] with (x, y, z) is the box center (dx, dy, dz) as the box size and heading as the clockwise rotation angle
def corners_to_boxes(corners3d):
    """
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    Args:
        corners: (N, 8, 3), vertex order shown in figure above
    Returns:
        boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading]
            with (x, y, z) as the box center, (dx, dy, dz) as the box size,
            and heading as the clockwise rotation angle
    """
    boxes3d = np.zeros((corners3d.shape[0], 7))
    for i in range(corners3d.shape[0]):
        boxes3d[i, :3] = np.mean(corners3d[i, :, :], axis=0)
        boxes3d[i, 3:6] = get_size(corners3d[i, :, :])
        boxes3d[i, 6] = get_heading_angle(corners3d[i, :, :])
    return boxes3d
7 -------- 4 /| /| 6 -------- 5 . | | | | . 3 -------- 0 |/ |/ 2 -------- 1 Args: boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center Returns: corners: (N, 8, 3)
def boxes_to_corners_3d(boxes3d):
    """
        7 -------- 4
       /|         /|
      6 -------- 5 .
      | |        | |
      . 3 -------- 0
      |/         |/
      2 -------- 1
    Args:
        boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center
    Returns:
        corners: (N, 8, 3)
    """
    template = np.array(
        [[1, 1, -1], [1, -1, -1], [-1, -1, -1], [-1, 1, -1],
         [1, 1, 1], [1, -1, 1], [-1, -1, 1], [-1, 1, 1]]
    ) / 2.

    # corners3d: of shape (N, 3, 8)
    corners3d = np.tile(boxes3d[:, None, 3:6], (1, 8, 1)) * template[None, :, :]
    corners3d = rotate_points_along_z(corners3d.reshape(-1, 8, 3), boxes3d[:, 6]).reshape(
        -1, 8, 3
    )
    corners3d += boxes3d[:, None, 0:3]

    return corners3d
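A minimal round-trip sketch for the two conversions above (numpy only; it assumes boxes_to_corners_3d, corners_to_boxes, and their helpers get_size, get_heading_angle, and rotate_points_along_z from elsewhere in this file are importable, with scipy available for get_size):

import numpy as np

# one axis-aligned box: center (0, 0, 1), size (2, 1, 0.5), heading 0
boxes = np.array([[0.0, 0.0, 1.0, 2.0, 1.0, 0.5, 0.0]])

corners = boxes_to_corners_3d(boxes)     # (1, 8, 3), vertex order as in the diagrams above
recovered = corners_to_boxes(corners)    # back to (1, 7)

print(corners.shape)
print(np.round(recovered, 3))            # should recover the original box up to floating-point error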
Args: pc: np.array (n, 3+d) boxes: np.array (m, 8, 3) Returns: mask: np.array (n, m) of type bool
def points_in_boxes(points, boxes):
    """
    Args:
        pc: np.array (n, 3+d)
        boxes: np.array (m, 8, 3)
    Returns:
        mask: np.array (n, m) of type bool
    """
    if len(boxes) == 0:
        return np.zeros([points.shape[0], 1], dtype=bool)

    points = points[:, :3]  # get xyz

    # u = p6 - p5
    u = boxes[:, 6, :] - boxes[:, 5, :]  # (m, 3)
    # v = p6 - p7
    v = boxes[:, 6, :] - boxes[:, 7, :]  # (m, 3)
    # w = p6 - p2
    w = boxes[:, 6, :] - boxes[:, 2, :]  # (m, 3)

    # ux, vx, wx
    ux = np.matmul(points, u.T)  # (n, m)
    vx = np.matmul(points, v.T)
    wx = np.matmul(points, w.T)

    # up6, up5, vp6, vp7, wp6, wp2
    up6 = np.sum(u * boxes[:, 6, :], axis=1)
    up5 = np.sum(u * boxes[:, 5, :], axis=1)
    vp6 = np.sum(v * boxes[:, 6, :], axis=1)
    vp7 = np.sum(v * boxes[:, 7, :], axis=1)
    wp6 = np.sum(w * boxes[:, 6, :], axis=1)
    wp2 = np.sum(w * boxes[:, 2, :], axis=1)

    mask_u = np.logical_and(ux <= up6, ux >= up5)  # (1024, n)
    mask_v = np.logical_and(vx <= vp6, vx >= vp7)
    mask_w = np.logical_and(wx <= wp6, wx >= wp2)

    mask = mask_u & mask_v & mask_w  # (10240, n)
    return mask
Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
def poly_area(x, y):
    """Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates"""
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
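A quick sanity check of the shoelace formula above on a unit square (numpy only, assuming poly_area is importable):

import numpy as np

# unit square, vertices in counter-clockwise order
x = np.array([0.0, 1.0, 1.0, 0.0])
y = np.array([0.0, 0.0, 1.0, 1.0])

print(poly_area(x, y))   # 1.0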
Clip a polygon with another polygon. Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python Args: subjectPolygon: a list of (x,y) 2d points, any polygon. clipPolygon: a list of (x,y) 2d points, has to be *convex* Note: **points have to be counter-clockwise ordered** Return: a list of (x,y) vertex point for the intersection polygon.
def polygon_clip(subjectPolygon, clipPolygon):
    """Clip a polygon with another polygon.
    Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python

    Args:
        subjectPolygon: a list of (x,y) 2d points, any polygon.
        clipPolygon: a list of (x,y) 2d points, has to be *convex*
    Note:
        **points have to be counter-clockwise ordered**

    Return:
        a list of (x,y) vertex point for the intersection polygon.
    """
    def inside(p):
        return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])

    def computeIntersection():
        dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]
        dp = [s[0] - e[0], s[1] - e[1]]
        n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
        n2 = s[0] * e[1] - s[1] * e[0]
        n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
        return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]

    outputList = subjectPolygon
    cp1 = clipPolygon[-1]

    for clipVertex in clipPolygon:
        cp2 = clipVertex
        inputList = outputList
        outputList = []
        s = inputList[-1]

        for subjectVertex in inputList:
            e = subjectVertex
            if inside(e):
                if not inside(s):
                    outputList.append(computeIntersection())
                outputList.append(e)
            elif inside(s):
                outputList.append(computeIntersection())
            s = e
        cp1 = cp2
        if len(outputList) == 0:
            return None
    return (outputList)
Compute area of two convex hull's intersection area. p1,p2 are a list of (x,y) tuples of hull vertices. return a list of (x,y) for the intersection and its volume
def convex_hull_intersection(p1, p2):
    """Compute area of two convex hull's intersection area.
    p1, p2 are a list of (x,y) tuples of hull vertices.
    return a list of (x,y) for the intersection and its volume
    """
    inter_p = polygon_clip(p1, p2)
    if inter_p is not None:
        hull_inter = scipy.spatial.ConvexHull(inter_p)
        return inter_p, hull_inter.volume
    else:
        return None, 0.0
corners: (8,3) no assumption on axis direction
def box3d_vol(corners):
    ''' corners: (8,3) no assumption on axis direction '''
    a = np.sqrt(np.sum((corners[0, :] - corners[1, :]) ** 2))
    b = np.sqrt(np.sum((corners[1, :] - corners[2, :]) ** 2))
    c = np.sqrt(np.sum((corners[0, :] - corners[4, :]) ** 2))
    return a * b * c
Compute 3D bounding box IoU. Input: corners1: numpy array (8,3), assume up direction is negative Y corners2: numpy array (8,3), assume up direction is negative Y Output: iou: 3D bounding box IoU iou_2d: bird's eye view 2D bounding box IoU
def box3d_iou(corners1, corners2):
    ''' Compute 3D bounding box IoU.

    Input:
        corners1: numpy array (8,3), assume up direction is negative Y
        corners2: numpy array (8,3), assume up direction is negative Y
    Output:
        iou: 3D bounding box IoU
        iou_2d: bird's eye view 2D bounding box IoU
    '''
    # corner points are in counter clockwise order
    rect1 = [(corners1[i, 0], corners1[i, 1]) for i in range(3, -1, -1)]
    rect2 = [(corners2[i, 0], corners2[i, 1]) for i in range(3, -1, -1)]
    area1 = poly_area(np.array(rect1)[:, 0], np.array(rect1)[:, 1])
    area2 = poly_area(np.array(rect2)[:, 0], np.array(rect2)[:, 1])

    inter, inter_area = convex_hull_intersection(rect1, rect2)
    iou_2d = inter_area / (area1 + area2 - inter_area)

    ymax = min(corners1[:, 2].max(), corners2[:, 2].max())
    ymin = max(corners1[:, 2].min(), corners2[:, 2].min())
    inter_vol = inter_area * max(0.0, ymax - ymin)

    vol1 = box3d_vol(corners1)
    vol2 = box3d_vol(corners2)
    iou = inter_vol / (vol1 + vol2 - inter_vol)
    return iou
Generic functions to compute precision/recall for object detection for a single class. Args: pred: map of {img_id: [(bbox, score)]} where bbox is numpy array gt: map of {img_id: [bbox]} ovthresh: scalar, iou threshold use_07_metric: bool, if True use VOC07 11 point method get_iou_func: function handle for get_iou_func(box1, box2) classname: int Returns: rec: numpy array of length nd prec: numpy array of length nd ap: scalar, average precision confidence: numpy array, used to find precision and recall in offline processing given specific conf_threshold
def eval_det_cls(
    pred, gt, ovthresh=0.25, use_07_metric=False, get_iou_func=box3d_iou, classname="",
):
    """Generic functions to compute precision/recall for object detection for a single class.

    Args:
        pred: map of {img_id: [(bbox, score)]} where bbox is numpy array
        gt: map of {img_id: [bbox]}
        ovthresh: scalar, iou threshold
        use_07_metric: bool, if True use VOC07 11 point method
        get_iou_func: function handle for get_iou_func(box1, box2)
        classname: int
    Returns:
        rec: numpy array of length nd
        prec: numpy array of length nd
        ap: scalar, average precision
        confidence: numpy array, used to find precision and recall in offline processing
            given specific conf_threshold
    """
    tt = time.time()
    msg = "compute pr for single class of {}".format(classname)
    print(msg)

    class_recs = {}  # {img_id: {'bbox': bbox list, 'det': matched list}}
    npos = 0
    for img_id in gt.keys():
        bbox = np.array(gt[img_id])
        det = [False] * len(bbox)
        npos += len(bbox)  # total number of gt boxes. This is max number of possible correct predictions
        class_recs[img_id] = {"bbox": bbox, "det": det}
    # pad empty list to all other imgids
    for img_id in pred.keys():
        if img_id not in gt:
            class_recs[img_id] = {"bbox": np.array([]), "det": []}

    # construct dets
    image_ids = []
    confidence = []
    BB = []
    for img_id in pred.keys():
        for box, score in pred[img_id]:
            image_ids.append(img_id)
            confidence.append(score)
            BB.append(box)
    confidence = np.array(confidence)
    BB = np.array(BB)  # (nd, 4 or 8, 3 or 6)

    # sort by confidence
    sorted_ind = np.argsort(-confidence)
    sorted_scores = np.sort(-confidence)
    BB = BB[sorted_ind, ...]
    image_ids = [image_ids[x] for x in sorted_ind]

    # go down dets and mark TPs and FPs
    # num of predicted box instances. say 100 boxes, and their img_ids (may contain a lot of duplicates, just appended)
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        # global all img_ids
        R = class_recs[image_ids[d]]
        bb = BB[d, ...].astype(float)
        ovmax = -np.inf
        BBGT = R["bbox"].astype(float)

        if BBGT.size > 0:
            # compute overlaps
            for j in range(BBGT.shape[0]):
                iou = get_iou_func(bb, BBGT[j, ...])
                if iou > ovmax:
                    ovmax = iou
                    jmax = j

        # check where 0.05 box confidence is used, for such low thr, fn is our hard examples to collect
        # if high threshold like 0.9, (fp is hard negative) we would be collecting hard FPs, this is because when we
        # set a high conf_threshold of 0.9, only those predicted boxes with very high confidence gets to be evaluated
        # so they are very likely to be TPs, in such scenario, if there is still FP, this means we encounter a hard FP
        # or hard negative example
        # confidence threshold changed from 0.05 to 0.9 to get false negatives
        # text file: 14 X 2 numbers of text files
        # for each category, we have a FP list, and a FN list
        # for each table list, we have full file path of 34578274_box_78.npy / npz, space, number of FPs,
        # for each table list, ... FNs
        if ovmax > ovthresh:
            if not R["det"][jmax]:
                tp[d] = 1.0
                R["det"][jmax] = 1
            else:
                # even though the IoU is more than IoU threshold, but there is already a box earlier with higher
                # confidence that marked this gt box as having a TP detection, as ft[d] set to 1
                # img_id, no. of fp += 1
                fp[d] = 1.0
                # save_num_of_fp_per_img_id(classname, image_ids[d])
        else:
            # no gt box has IoU more than threshold
            fp[d] = 1.0
            # img_id, no. of fp += 1
            # save_num_of_fp_per_img_id(classname, image_ids[d])

    # compute precision recall
    tp_per_instance = tp.copy()
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos + 1e-4)
    # avoid divide by zero in case the first detection matches a difficult ground truth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)

    msg = "----------------------- time for evaluating model: {} seconds".format(
        int(time.time() - tt)
    )
    print(msg)

    return rec, prec, ap
ap = voc_ap(rec, prec, [use_07_metric]) Compute VOC AP given precision and recall. If use_07_metric is true, uses the VOC 07 11 point method (default:False).
def voc_ap(rec, prec, use_07_metric=False):
    """ap = voc_ap(rec, prec, [use_07_metric])
    Compute VOC AP given precision and recall.
    If use_07_metric is true, uses the VOC 07 11 point method (default:False).
    """
    if use_07_metric:
        # 11 point metric
        ap = 0.0
        for t in np.arange(0.0, 1.1, 0.1):
            if np.sum(rec >= t) == 0:
                p = 0
            else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.0
    else:
        # correct AP calculation
        # first append sentinel values at the end
        mrec = np.concatenate(([0.0], rec, [1.0]))
        mpre = np.concatenate(([0.0], prec, [0.0]))

        # compute the precision envelope
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])

        # to calculate area under PR curve, look for points
        # where X axis (recall) changes value
        i = np.where(mrec[1:] != mrec[:-1])[0]

        # and sum (\Delta recall) * prec
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap
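A small sanity check for voc_ap (numpy only; the rec/prec arrays are made up): with perfect precision up to recall 0.5 and nothing after, the area-under-curve method should give 0.5, while the 11-point method gives a slightly different value.

import numpy as np

rec = np.array([0.25, 0.5])    # cumulative recall after each detection
prec = np.array([1.0, 1.0])    # precision after each detection

print(voc_ap(rec, prec))                        # 0.5
print(voc_ap(rec, prec, use_07_metric=True))    # ~0.545 with the 11-point approximation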
Quantize point cloud by voxel_size Returns kept indices Args: all_points: np.array (n, 3) float voxel_sz: float Returns: indices: (m, ) int
def down_sample(point_cloud, voxel_sz):
    """Quantize point cloud by voxel_size
    Returns kept indices

    Args:
        all_points: np.array (n, 3) float
        voxel_sz: float
    Returns:
        indices: (m, ) int
    """
    coordinates = np.round(point_cloud / voxel_sz).astype(np.int32)
    _, indices = np.unique(coordinates, axis=0, return_index=True)
    return indices
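A quick illustration of the voxel downsampling above (numpy only, assuming down_sample is importable): two near-duplicate points fall into the same 5 cm voxel and collapse to one.

import numpy as np

pts = np.array([
    [0.00, 0.00, 0.00],
    [0.01, 0.00, 0.00],   # same 5 cm voxel as the first point
    [1.00, 1.00, 1.00],
])

keep = down_sample(pts, 0.05)
print(keep, pts[keep])    # two indices survive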
Euler rotation matrix with clockwise logic. Rotation Args: theta: list of float [theta_x, theta_y, theta_z] Returns: R: np.array (3, 3) rotation matrix of Rz*Ry*Rx
def eulerAnglesToRotationMatrix(theta):
    """Euler rotation matrix with clockwise logic.
    Rotation

    Args:
        theta: list of float [theta_x, theta_y, theta_z]
    Returns:
        R: np.array (3, 3) rotation matrix of Rz*Ry*Rx
    """
    R_x = np.array(
        [
            [1, 0, 0],
            [0, math.cos(theta[0]), -math.sin(theta[0])],
            [0, math.sin(theta[0]), math.cos(theta[0])],
        ]
    )

    R_y = np.array(
        [
            [math.cos(theta[1]), 0, math.sin(theta[1])],
            [0, 1, 0],
            [-math.sin(theta[1]), 0, math.cos(theta[1])],
        ]
    )

    R_z = np.array(
        [
            [math.cos(theta[2]), -math.sin(theta[2]), 0],
            [math.sin(theta[2]), math.cos(theta[2]), 0],
            [0, 0, 1],
        ]
    )

    R = np.dot(R_z, np.dot(R_y, R_x))
    return R
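A minimal check of the composition above (math and numpy only, assuming the function is importable): a 90 degree rotation about z should map the x-axis onto the y-axis.

import math
import numpy as np

R = eulerAnglesToRotationMatrix([0.0, 0.0, math.pi / 2])
print(np.round(R @ np.array([1.0, 0.0, 0.0]), 6))   # [0. 1. 0.]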
Generate pose matrix with z-dim as height Args: pose: np.array (4, 4) Returns: urc: (4, 4) urc_inv: (4, 4)
def upright_camera_relative_transform(pose):
    """Generate pose matrix with z-dim as height

    Args:
        pose: np.array (4, 4)
    Returns:
        urc: (4, 4)
        urc_inv: (4, 4)
    """
    # take viewing direction in camera local coordinates (which is simply unit vector along +z)
    view_dir_camera = np.asarray([0, 0, 1])
    R = pose[0:3, 0:3]
    t = pose[0:3, 3]

    # convert to world coordinates
    view_dir_world = np.dot(R, view_dir_camera)

    # compute heading
    view_dir_xy = view_dir_world[0:2]
    heading = math.atan2(view_dir_xy[1], view_dir_xy[0])

    # compute rotation around Z to align heading with +Y
    zRot = -heading + math.pi / 2

    # translation first, back to camera point
    urc_t = np.identity(4)
    urc_t[0:2, 3] = -1 * t[0:2]

    # compute rotation matrix
    urc_r = np.identity(4)
    urc_r[0:3, 0:3] = eulerAnglesToRotationMatrix([0, 0, zRot])

    urc = np.dot(urc_r, urc_t)
    urc_inv = np.linalg.inv(urc)

    return urc, urc_inv
Rotation points w.r.t. rotmat Args: pc: np.array (n, 3) rotmat: np.array (4, 4) Returns: pc: (n, 3)
def rotate_pc(pc, rotmat):
    """Rotation points w.r.t. rotmat
    Args:
        pc: np.array (n, 3)
        rotmat: np.array (4, 4)
    Returns:
        pc: (n, 3)
    """
    pc_4 = np.ones([pc.shape[0], 4])
    pc_4[:, 0:3] = pc
    pc_4 = np.dot(pc_4, np.transpose(rotmat))
    return pc_4[:, 0:3]
Rotation clockwise Args: points: np.array of np.array (B, N, 3 + C) or (N, 3 + C) for single batch angle: np.array of np.array (B, ) or (, ) for single batch angle along z-axis, angle increases x ==> y Returns: points_rot: (B, N, 3 + C) or (N, 3 + C)
def rotate_points_along_z(points, angle):
    """Rotation clockwise
    Args:
        points: np.array of np.array (B, N, 3 + C) or
            (N, 3 + C) for single batch
        angle: np.array of np.array (B, ) or
            (, ) for single batch
            angle along z-axis, angle increases x ==> y
    Returns:
        points_rot: (B, N, 3 + C) or (N, 3 + C)
    """
    single_batch = len(points.shape) == 2
    if single_batch:
        points = np.expand_dims(points, axis=0)
        angle = np.expand_dims(angle, axis=0)

    cosa = np.expand_dims(np.cos(angle), axis=1)
    sina = np.expand_dims(np.sin(angle), axis=1)
    zeros = np.zeros_like(cosa)  # angle.new_zeros(points.shape[0])
    ones = np.ones_like(sina)    # angle.new_ones(points.shape[0])

    rot_matrix = (
        np.concatenate((cosa, -sina, zeros, sina, cosa, zeros, zeros, zeros, ones), axis=1)
        .reshape(-1, 3, 3)
    )
    # print(rot_matrix.view(3, 3))
    points_rot = np.matmul(points[:, :, :3], rot_matrix)
    points_rot = np.concatenate((points_rot, points[:, :, 3:]), axis=-1)

    if single_batch:
        points_rot = points_rot.squeeze(0)

    return points_rot
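A tiny check of the single-batch path above (numpy only, assuming the function is importable): a point on the +x axis rotated by +pi/2 ends up on -y, which is the clockwise convention the docstring describes.

import numpy as np

pts = np.array([[1.0, 0.0, 0.0]])                       # (N, 3), single batch
out = rotate_points_along_z(pts, np.array(np.pi / 2))

print(np.round(out, 6))    # [[ 0. -1.  0.]]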
Return a Matrix3 for the angle axis. Arguments: angle_axis {Point3} -- a rotation in angle axis form.
def convert_angle_axis_to_matrix3(angle_axis):
    """Return a Matrix3 for the angle axis.
    Arguments:
        angle_axis {Point3} -- a rotation in angle axis form.
    """
    matrix, jacobian = cv2.Rodrigues(angle_axis)
    return matrix
convert traj_str into translation and rotation matrices Args: traj_str: A space-delimited file where each line represents a camera position at a particular timestamp. The file has seven columns: * Column 1: timestamp * Columns 2-4: rotation (axis-angle representation in radians) * Columns 5-7: translation (usually in meters) Returns: ts: translation matrix Rt: rotation matrix
def TrajStringToMatrix(traj_str):
    """convert traj_str into translation and rotation matrices
    Args:
        traj_str: A space-delimited file where each line represents a camera position at a particular timestamp.
            The file has seven columns:
            * Column 1: timestamp
            * Columns 2-4: rotation (axis-angle representation in radians)
            * Columns 5-7: translation (usually in meters)
    Returns:
        ts: translation matrix
        Rt: rotation matrix
    """
    # line=[float(x) for x in traj_str.split()]
    # ts = line[0];
    # R = cv2.Rodrigues(np.array(line[1:4]))[0];
    # t = np.array(line[4:7]);
    # Rt = np.concatenate((np.concatenate((R, t[:,np.newaxis]), axis=1), [[0.0,0.0,0.0,1.0]]), axis=0)
    tokens = traj_str.split()
    assert len(tokens) == 7
    ts = tokens[0]

    # Rotation in angle axis
    angle_axis = [float(tokens[1]), float(tokens[2]), float(tokens[3])]
    r_w_to_p = convert_angle_axis_to_matrix3(np.asarray(angle_axis))

    # Translation
    t_w_to_p = np.asarray([float(tokens[4]), float(tokens[5]), float(tokens[6])])

    extrinsics = np.eye(4, 4)
    extrinsics[:3, :3] = r_w_to_p
    extrinsics[:3, -1] = t_w_to_p
    Rt = np.linalg.inv(extrinsics)

    return (ts, Rt)
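A minimal sketch with a synthetic trajectory line (zero rotation, translation (1, 2, 3)); it assumes TrajStringToMatrix and convert_angle_axis_to_matrix3 are importable and that OpenCV (cv2) is installed for the Rodrigues call.

import numpy as np

line = "123.456 0.0 0.0 0.0 1.0 2.0 3.0"   # timestamp, axis-angle rotation, translation
ts, Rt = TrajStringToMatrix(line)

print(ts)                  # '123.456'
print(np.round(Rt, 3))     # identity rotation, translation (-1, -2, -3) after the inversion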
Generate 3D point coordinates and related rgb feature Args: rgb_image: (h, w, 3) rgb depth_image: (h, w) depth intrinsic: (3, 3) subsample: int resize stride world_coordinate: bool pose: (4, 4) matrix transfer from camera to world coordindate Returns: points: (N, 3) point cloud coordinates in world-coordinates if world_coordinate==True else in camera coordinates rgb_feat: (N, 3) rgb feature of each point
def generate_point(
    rgb_image,
    depth_image,
    intrinsic,
    subsample=1,
    world_coordinate=True,
    pose=None,
):
    """Generate 3D point coordinates and related rgb feature
    Args:
        rgb_image: (h, w, 3) rgb
        depth_image: (h, w) depth
        intrinsic: (3, 3)
        subsample: int
            resize stride
        world_coordinate: bool
        pose: (4, 4) matrix
            transfer from camera to world coordinate
    Returns:
        points: (N, 3) point cloud coordinates
            in world-coordinates if world_coordinate==True
            else in camera coordinates
        rgb_feat: (N, 3) rgb feature of each point
    """
    intrinsic_4x4 = np.identity(4)
    intrinsic_4x4[:3, :3] = intrinsic

    u, v = np.meshgrid(
        range(0, depth_image.shape[1], subsample),
        range(0, depth_image.shape[0], subsample),
    )
    d = depth_image[v, u]
    d_filter = d != 0
    mat = np.vstack(
        (
            u[d_filter] * d[d_filter],
            v[d_filter] * d[d_filter],
            d[d_filter],
            np.ones_like(u[d_filter]),
        )
    )
    new_points_3d = np.dot(np.linalg.inv(intrinsic_4x4), mat)[:3]
    if world_coordinate:
        new_points_3d_padding = np.vstack(
            (new_points_3d, np.ones((1, new_points_3d.shape[1])))
        )
        world_coord_padding = np.dot(pose, new_points_3d_padding)
        new_points_3d = world_coord_padding[:3]

    rgb_feat = rgb_image[v, u][d_filter]

    return new_points_3d.T, rgb_feat
extract original label data Args: gt_fn: str (file name of "annotation.json") after loading, we got a dict with keys 'data', 'stats', 'comment', 'confirm', 'skipped' ['data']: a list of dict for bboxes, each dict has keys: 'uid', 'label', 'modelId', 'children', 'objectId', 'segments', 'hierarchy', 'isInGroup', 'labelType', 'attributes' 'label': str 'segments': dict for boxes 'centroid': list of float (x, y, z)? 'axesLengths': list of float (x, y, z)? 'normalizedAxes': list of float len()=9 'uid' 'comments': 'stats': ... Returns: skipped: bool skipped or not boxes_corners: (n, 8, 3) box corners **world-coordinate** centers: (n, 3) **world-coordinate** sizes: (n, 3) full-sizes (no halving!) labels: list of str uids: list of str
def extract_gt(gt_fn): """extract original label data Args: gt_fn: str (file name of "annotation.json") after loading, we got a dict with keys 'data', 'stats', 'comment', 'confirm', 'skipped' ['data']: a list of dict for bboxes, each dict has keys: 'uid', 'label', 'modelId', 'children', 'objectId', 'segments', 'hierarchy', 'isInGroup', 'labelType', 'attributes' 'label': str 'segments': dict for boxes 'centroid': list of float (x, y, z)? 'axesLengths': list of float (x, y, z)? 'normalizedAxes': list of float len()=9 'uid' 'comments': 'stats': ... Returns: skipped: bool skipped or not boxes_corners: (n, 8, 3) box corners **world-coordinate** centers: (n, 3) **world-coordinate** sizes: (n, 3) full-sizes (no halving!) labels: list of str uids: list of str """ gt = json.load(open(gt_fn, "r")) skipped = gt['skipped'] if len(gt) == 0: boxes_corners = np.zeros((0, 8, 3)) centers = np.zeros((0, 3)) sizes = np.zeros((0, 3)) labels, uids = [], [] return skipped, boxes_corners, centers, sizes, labels, uids boxes_corners = [] centers = [] sizes = [] labels = [] uids = [] for data in gt['data']: l = data["label"] for delimiter in [" ", "-", "/"]: l = l.replace(delimiter, "_") if l not in class_names: print("unknown category: %s" % l) continue rotmat = np.array(data["segments"]["obbAligned"]["normalizedAxes"]).reshape( 3, 3 ) center = np.array(data["segments"]["obbAligned"]["centroid"]).reshape(-1, 3) size = np.array(data["segments"]["obbAligned"]["axesLengths"]).reshape(-1, 3) box3d = compute_box_3d(size.reshape(3).tolist(), center, rotmat) ''' Box corner order that we return is of the format below: 6 -------- 7 /| /| 5 -------- 4 . | | | | . 2 -------- 3 |/ |/ 1 -------- 0 ''' boxes_corners.append(box3d.reshape(1, 8, 3)) size = np.array(get_size(box3d)).reshape(1, 3) center = np.mean(box3d, axis=0).reshape(1, 3) # boxes_corners.append(box3d.reshape(1, 8, 3)) centers.append(center) sizes.append(size) # labels.append(l) labels.append(data["label"]) uids.append(data["uid"]) centers = np.concatenate(centers, axis=0) sizes = np.concatenate(sizes, axis=0) boxes_corners = np.concatenate(boxes_corners, axis=0) return skipped, boxes_corners, centers, sizes, labels, uids
Visualize result with open3d Args: pc: np.array of shape (n, 3) point cloud boxes: a list of m boxes, each item as a tuple: (cls, np.array (8, 3), conf) if predicted (cls, np.array (8, 3)) or just np.array (n, 8, 3) pc_color: np.array (n, 3) or None box_color: np.array (m, 3) or None visualize: bool (directly visualize or return an image) width: int used only when visualize=False height: int used only when visualize=False Returns:
def visualize_o3d(
    pc,
    boxes,
    pc_color=None,
    width=384,
    height=288,
):
    """Visualize result with open3d
    Args:
        pc: np.array of shape (n, 3)
            point cloud
        boxes: a list of m boxes, each item as a tuple:
            (cls, np.array (8, 3), conf) if predicted
            (cls, np.array (8, 3)) or just np.array (n, 8, 3)
        pc_color: np.array (n, 3) or None
        box_color: np.array (m, 3) or None
        visualize: bool (directly visualize or return an image)
        width: int
            used only when visualize=False
        height: int
            used only when visualize=False
    Returns:
    """
    assert pc.shape[1] == 3
    ratio = max(1, pc.shape[0] // 4000)
    pc_sample = pc[::ratio, :]
    n = pc_sample.shape[0]
    m = len(boxes)

    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(pc_sample)
    if pc_color is None:
        pc_color = np.zeros((n, 3))
    pcd.colors = o3d.utility.Vector3dVector(pc_color)

    linesets = []
    for i, item in enumerate(boxes):
        if isinstance(item, tuple):
            cls_ = item[0]
            assert isinstance(cls_, int)
            corners = item[1]
        else:
            cls_ = None
            corners = item
        assert corners.shape[0] == 8
        assert corners.shape[1] == 3
        if isinstance(cls_, int) and cls_ < len(COLOR_LIST):
            tmp_color = COLOR_LIST[cls_]
        else:
            tmp_color = (0, 0, 0)
        linesets.append(get_lines(corners, color=tmp_color))

    o3d.visualization.draw_geometries([pcd] + linesets)
    return None
Args: box: np.array (8, 3) 8 corners color: line color Returns: o3d.Linset()
def get_lines(box, color=np.array([1.0, 0.0, 0.0])):
    """
    Args:
        box: np.array (8, 3)
            8 corners
        color: line color
    Returns:
        o3d.Linset()
    """
    points = box
    lines = [
        [0, 1], [0, 3], [1, 2], [2, 3],
        [4, 5], [4, 7], [5, 6], [6, 7],
        [0, 4], [1, 5], [2, 6], [3, 7],
    ]
    colors = [color for i in range(len(lines))]
    line_set = o3d.geometry.LineSet()
    line_set.points = o3d.utility.Vector3dVector(points)
    line_set.lines = o3d.utility.Vector2iVector(lines)
    line_set.colors = o3d.utility.Vector3dVector(colors)
    return line_set
Fixture that reduces thread switching interval. This makes it easier to provoke race conditions.
def fast_thread_switching():
    """Fixture that reduces thread switching interval.
    This makes it easier to provoke race conditions.
    """
    old = sys.getswitchinterval()
    sys.setswitchinterval(1e-6)
    yield
    sys.setswitchinterval(old)
Output a warning to IPython users in case any tests failed.
def pytest_terminal_summary(terminalreporter):
    """Output a warning to IPython users in case any tests failed."""
    try:
        get_ipython()
    except NameError:
        return

    if not terminalreporter.stats.get("failed"):
        # Only issue the warning when there are actually failures
        return

    terminalreporter.ensure_newline()
    terminalreporter.write_line(
        "Some tests may fail when run from the IPython prompt; "
        "especially, but not limited to tests involving logging and warning "
        "handling. Unless you are certain as to the cause of the failure, "
        "please check that the failure occurs outside IPython as well. See "
        "https://docs.astropy.org/en/stable/known_issues.html#failing-logging-"
        "tests-when-running-the-tests-in-ipython for more information.",
        yellow=True,
        bold=True,
    )
Initializes the Astropy log--in most circumstances this is called automatically when importing astropy.
def _init_log():
    """Initializes the Astropy log--in most circumstances this is called
    automatically when importing astropy.
    """
    global log

    orig_logger_cls = logging.getLoggerClass()
    logging.setLoggerClass(AstropyLogger)
    try:
        log = logging.getLogger("astropy")
        log._set_defaults()
    finally:
        logging.setLoggerClass(orig_logger_cls)

    return log
Shut down exception and warning logging (if enabled) and clear all Astropy loggers from the logging module's cache. This involves poking some logging module internals, so much of it is 'at your own risk' and is allowed to pass silently if any exceptions occur.
def _teardown_log():
    """Shut down exception and warning logging (if enabled) and clear all
    Astropy loggers from the logging module's cache.

    This involves poking some logging module internals, so much of it is 'at
    your own risk' and is allowed to pass silently if any exceptions occur.
    """
    global log

    if log.exception_logging_enabled():
        log.disable_exception_logging()

    if log.warnings_logging_enabled():
        log.disable_warnings_logging()

    del log

    # Now for the fun stuff...
    try:
        logging._acquireLock()
        try:
            loggerDict = logging.Logger.manager.loggerDict
            for key in loggerDict.keys():
                if key == "astropy" or key.startswith("astropy."):
                    del loggerDict[key]
        finally:
            logging._releaseLock()
    except Exception:
        pass
Search the online Astropy documentation for the given query. Opens the results in the default web browser. Requires an active Internet connection. Parameters ---------- query : str The search query.
def online_help(query):
    """
    Search the online Astropy documentation for the given query.
    Opens the results in the default web browser. Requires an active
    Internet connection.

    Parameters
    ----------
    query : str
        The search query.
    """
    import webbrowser
    from urllib.parse import urlencode

    url = online_docs_root + f"search.html?{urlencode({'q': query})}"
    webbrowser.open(url)
Get the filename of the config file associated with the given package or module.
def get_config_filename(packageormod=None, rootname=None):
    """
    Get the filename of the config file associated with the given
    package or module.
    """
    cfg = get_config(packageormod, rootname=rootname)
    while cfg.parent is not cfg:
        cfg = cfg.parent
    return cfg.filename
Gets the configuration object or section associated with a particular package or module. Parameters ---------- packageormod : str or None The package for which to retrieve the configuration object. If a string, it must be a valid package name, or if ``None``, the package from which this function is called will be used. reload : bool, optional Reload the file, even if we have it cached. rootname : str or None Name of the root configuration directory. If ``None`` and ``packageormod`` is ``None``, this defaults to be the name of the package from which this function is called. If ``None`` and ``packageormod`` is not ``None``, this defaults to ``astropy``. Returns ------- cfgobj : ``configobj.ConfigObj`` or ``configobj.Section`` If the requested package is a base package, this will be the ``configobj.ConfigObj`` for that package, or if it is a subpackage or module, it will return the relevant ``configobj.Section`` object. Raises ------ RuntimeError If ``packageormod`` is `None`, but the package this item is created from cannot be determined.
def get_config(packageormod=None, reload=False, rootname=None): """Gets the configuration object or section associated with a particular package or module. Parameters ---------- packageormod : str or None The package for which to retrieve the configuration object. If a string, it must be a valid package name, or if ``None``, the package from which this function is called will be used. reload : bool, optional Reload the file, even if we have it cached. rootname : str or None Name of the root configuration directory. If ``None`` and ``packageormod`` is ``None``, this defaults to be the name of the package from which this function is called. If ``None`` and ``packageormod`` is not ``None``, this defaults to ``astropy``. Returns ------- cfgobj : ``configobj.ConfigObj`` or ``configobj.Section`` If the requested package is a base package, this will be the ``configobj.ConfigObj`` for that package, or if it is a subpackage or module, it will return the relevant ``configobj.Section`` object. Raises ------ RuntimeError If ``packageormod`` is `None`, but the package this item is created from cannot be determined. """ if packageormod is None: packageormod = find_current_module(2) if packageormod is None: msg1 = "Cannot automatically determine get_config module, " msg2 = "because it is not called from inside a valid module" raise RuntimeError(msg1 + msg2) else: packageormod = packageormod.__name__ _autopkg = True else: _autopkg = False packageormodspl = packageormod.split(".") pkgname = packageormodspl[0] secname = ".".join(packageormodspl[1:]) if rootname is None: if _autopkg: rootname = pkgname else: rootname = "astropy" # so we don't break affiliated packages cobj = _cfgobjs.get(pkgname) if cobj is None or reload: cfgfn = None try: # This feature is intended only for use by the unit tests if _override_config_file is not None: cfgfn = _override_config_file else: cfgfn = path.join(get_config_dir(rootname=rootname), pkgname + ".cfg") cobj = configobj.ConfigObj(cfgfn, interpolation=False) except OSError: # This can happen when HOME is not set cobj = configobj.ConfigObj(interpolation=False) # This caches the object, so if the file becomes accessible, this # function won't see it unless the module is reloaded _cfgobjs[pkgname] = cobj if secname: # not the root package if secname not in cobj: cobj[secname] = {} return cobj[secname] else: return cobj
Generates a configuration file, from the list of `ConfigItem` objects for each subpackage. .. versionadded:: 4.1 Parameters ---------- pkgname : str or None The package for which to retrieve the configuration object. filename : str or file-like or None If None, the default configuration path is taken from `get_config`.
def generate_config(pkgname="astropy", filename=None, verbose=False): """Generates a configuration file, from the list of `ConfigItem` objects for each subpackage. .. versionadded:: 4.1 Parameters ---------- pkgname : str or None The package for which to retrieve the configuration object. filename : str or file-like or None If None, the default configuration path is taken from `get_config`. """ if verbose: verbosity = nullcontext filter_warnings = AstropyDeprecationWarning else: verbosity = silence filter_warnings = Warning package = importlib.import_module(pkgname) with verbosity(), warnings.catch_warnings(): warnings.simplefilter("ignore", category=filter_warnings) for mod in pkgutil.walk_packages( path=package.__path__, prefix=package.__name__ + "." ): if mod.module_finder.path.endswith(("test", "tests")) or mod.name.endswith( "setup_package" ): # Skip test and setup_package modules continue if mod.name.split(".")[-1].startswith("_"): # Skip private modules continue with contextlib.suppress(ImportError): importlib.import_module(mod.name) wrapper = TextWrapper(initial_indent="## ", subsequent_indent="## ", width=78) if filename is None: filename = get_config_filename(pkgname) with contextlib.ExitStack() as stack: if isinstance(filename, (str, os.PathLike)): fp = stack.enter_context(open(filename, "w")) else: # assume it's a file object, or io.StringIO fp = filename # Parse the subclasses, ordered by their module name subclasses = ConfigNamespace.__subclasses__() processed = set() for conf in sorted(subclasses, key=lambda x: x.__module__): mod = conf.__module__ # Skip modules for other packages, e.g. astropy modules that # would be imported when running the function for astroquery. if mod.split(".")[0] != pkgname: continue # Check that modules are not processed twice, which can happen # when they are imported in another module. if mod in processed: continue else: processed.add(mod) print_module = True for item in conf().values(): if print_module: # If this is the first item of the module, we print the # module name, but not if this is the root package... if item.module != pkgname: modname = item.module.replace(f"{pkgname}.", "") fp.write(f"[{modname}]\n\n") print_module = False fp.write(wrapper.fill(item.description) + "\n") if isinstance(item.defaultvalue, (tuple, list)): if len(item.defaultvalue) == 0: fp.write(f"# {item.name} = ,\n\n") elif len(item.defaultvalue) == 1: fp.write(f"# {item.name} = {item.defaultvalue[0]},\n\n") else: fp.write( f"# {item.name} =" f' {",".join(map(str, item.defaultvalue))}\n\n' ) else: fp.write(f"# {item.name} = {item.defaultvalue}\n\n")
Reloads configuration settings from a configuration file for the root package of the requested package/module. This overwrites any changes that may have been made in `ConfigItem` objects. This applies for any items that are based on this file, which is determined by the *root* package of ``packageormod`` (e.g. ``'astropy.cfg'`` for the ``'astropy.config.configuration'`` module). Parameters ---------- packageormod : str or None The package or module name - see `get_config` for details. rootname : str or None Name of the root configuration directory - see `get_config` for details.
def reload_config(packageormod=None, rootname=None): """Reloads configuration settings from a configuration file for the root package of the requested package/module. This overwrites any changes that may have been made in `ConfigItem` objects. This applies for any items that are based on this file, which is determined by the *root* package of ``packageormod`` (e.g. ``'astropy.cfg'`` for the ``'astropy.config.configuration'`` module). Parameters ---------- packageormod : str or None The package or module name - see `get_config` for details. rootname : str or None Name of the root configuration directory - see `get_config` for details. """ sec = get_config(packageormod, True, rootname=rootname) # look for the section that is its own parent - that's the base object while sec.parent is not sec: sec = sec.parent sec.reload()
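Sketch of the intended round trip, assuming astropy is installed: an in-memory change to a ConfigItem is dropped once the root configuration file is re-read.

from astropy.config import reload_config
from astropy.utils.data import conf

conf.remote_timeout = 42.0            # transient, in-memory change
reload_config("astropy.utils.data")   # re-reads astropy.cfg for the root package
print(conf.remote_timeout)            # back to the on-disk / default value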
Determines if a config file can be safely replaced because it doesn't actually contain any meaningful content, i.e. if it contains only comments or is completely empty.
def is_unedited_config_file(content, template_content=None): """ Determines if a config file can be safely replaced because it doesn't actually contain any meaningful content, i.e. if it contains only comments or is completely empty. """ buffer = io.StringIO(content) raw_cfg = configobj.ConfigObj(buffer, interpolation=True) # If any of the items is set, return False return not any(len(v) > 0 for v in raw_cfg.values())
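Two illustrative calls: comment-only content is considered safe to replace, while content that assigns any option is not.

from astropy.config.configuration import is_unedited_config_file

only_comments = "## Sample configuration\n# remote_timeout = 10.0\n\n"
customized = "[utils.data]\nremote_timeout = 3.0\n"

print(is_unedited_config_file(only_comments))  # True  - nothing is actually set
print(is_unedited_config_file(customized))     # False - a value was assigned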
Create the default configuration file for the specified package. If the file already exists, it is updated only if it has not been modified. Otherwise the ``overwrite`` flag is needed to overwrite it. Parameters ---------- pkg : str The package to be updated. rootname : str Name of the root configuration directory. overwrite : bool Force updating the file if it already exists. Returns ------- updated : bool If the profile was updated, `True`, otherwise `False`.
def create_config_file(pkg, rootname="astropy", overwrite=False): """ Create the default configuration file for the specified package. If the file already exists, it is updated only if it has not been modified. Otherwise the ``overwrite`` flag is needed to overwrite it. Parameters ---------- pkg : str The package to be updated. rootname : str Name of the root configuration directory. overwrite : bool Force updating the file if it already exists. Returns ------- updated : bool If the profile was updated, `True`, otherwise `False`. """ # local import to prevent using the logger before it is configured from astropy.logger import log cfgfn = get_config_filename(pkg, rootname=rootname) # generate the default config template template_content = io.StringIO() generate_config(pkg, template_content) template_content.seek(0) template_content = template_content.read() doupdate = True # if the file already exists, check that it has not been modified if cfgfn is not None and path.exists(cfgfn): with open(cfgfn, encoding="latin-1") as fd: content = fd.read() doupdate = is_unedited_config_file(content, template_content) if doupdate or overwrite: with open(cfgfn, "w", encoding="latin-1") as fw: fw.write(template_content) log.info(f"The configuration file has been successfully written to {cfgfn}") return True elif not doupdate: log.warning( "The configuration file already exists and seems to " "have been customized, so it has not been updated. " "Use overwrite=True if you really want to update it." ) return False
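A sketch of a safe invocation that redirects the config directory to a temporary location (using set_temp_config from astropy.config.paths), so nothing under the real home directory is touched.

import tempfile

from astropy.config import paths
from astropy.config.configuration import create_config_file

with tempfile.TemporaryDirectory() as tmp, paths.set_temp_config(tmp):
    print(create_config_file("astropy"))   # True: a fresh default file was written
    print(create_config_file("astropy"))   # True again: the file is still unedited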
Locates and returns the home directory (or best approximation) on this system. Raises ------ OSError If the home directory cannot be located - usually means you are running Astropy on some obscure platform that doesn't have standard home directories.
def _find_home(): """Locates and return the home directory (or best approximation) on this system. Raises ------ OSError If the home directory cannot be located - usually means you are running Astropy on some obscure platform that doesn't have standard home directories. """ try: homedir = os.path.expanduser("~") except Exception: # Linux, Unix, AIX, OS X if os.name == "posix": if "HOME" in os.environ: homedir = os.environ["HOME"] else: raise OSError( "Could not find unix home directory to search for " "astropy config dir" ) elif os.name == "nt": # This is for all modern Windows (NT or after) if "MSYSTEM" in os.environ and os.environ.get("HOME"): # Likely using an msys shell; use whatever it is using for its # $HOME directory homedir = os.environ["HOME"] # See if there's a local home elif "HOMEDRIVE" in os.environ and "HOMEPATH" in os.environ: homedir = os.path.join(os.environ["HOMEDRIVE"], os.environ["HOMEPATH"]) # Maybe a user profile? elif "USERPROFILE" in os.environ: homedir = os.path.join(os.environ["USERPROFILE"]) else: try: import winreg as wreg shell_folders = r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" key = wreg.OpenKey(wreg.HKEY_CURRENT_USER, shell_folders) homedir = wreg.QueryValueEx(key, "Personal")[0] key.Close() except Exception: # As a final possible resort, see if HOME is present if "HOME" in os.environ: homedir = os.environ["HOME"] else: raise OSError( "Could not find windows home directory to " "search for astropy config dir" ) else: # for other platforms, try HOME, although it probably isn't there if "HOME" in os.environ: homedir = os.environ["HOME"] else: raise OSError( "Could not find a home directory to search for " "astropy config dir - are you on an unsupported " "platform?" ) return homedir
Determines the package configuration directory name and creates the directory if it doesn't exist. This directory is typically ``$HOME/.astropy/config``, but if the XDG_CONFIG_HOME environment variable is set and the ``$XDG_CONFIG_HOME/astropy`` directory exists, it will be that directory. If neither exists, the former will be created and symlinked to the latter. Parameters ---------- rootname : str Name of the root configuration directory. For example, if ``rootname = 'pkgname'``, the configuration directory would be ``<home>/.pkgname/`` rather than ``<home>/.astropy`` (depending on platform). Returns ------- configdir : str The absolute path to the configuration directory.
def get_config_dir(rootname="astropy"): """ Determines the package configuration directory name and creates the directory if it doesn't exist. This directory is typically ``$HOME/.astropy/config``, but if the XDG_CONFIG_HOME environment variable is set and the ``$XDG_CONFIG_HOME/astropy`` directory exists, it will be that directory. If neither exists, the former will be created and symlinked to the latter. Parameters ---------- rootname : str Name of the root configuration directory. For example, if ``rootname = 'pkgname'``, the configuration directory would be ``<home>/.pkgname/`` rather than ``<home>/.astropy`` (depending on platform). Returns ------- configdir : str The absolute path to the configuration directory. """ # symlink will be set to this if the directory is created linkto = None # If using set_temp_config, that overrides all if set_temp_config._temp_path is not None: xch = set_temp_config._temp_path config_path = os.path.join(xch, rootname) if not os.path.exists(config_path): os.mkdir(config_path) return os.path.abspath(config_path) # first look for XDG_CONFIG_HOME xch = os.environ.get("XDG_CONFIG_HOME") if xch is not None and os.path.exists(xch): xchpth = os.path.join(xch, rootname) if not os.path.islink(xchpth): if os.path.exists(xchpth): return os.path.abspath(xchpth) else: linkto = xchpth return os.path.abspath(_find_or_create_root_dir("config", linkto, rootname))
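The set_temp_config override mentioned in the code takes precedence over both the XDG and home-directory lookups; a quick sketch (assuming astropy's paths module):

import tempfile

from astropy.config import paths

print(paths.get_config_dir())        # usual ~/.astropy/config (or XDG) location
with tempfile.TemporaryDirectory() as tmp, paths.set_temp_config(tmp):
    print(paths.get_config_dir())    # <tmp>/astropy for the duration of the block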
Determines the Astropy cache directory name and creates the directory if it doesn't exist. This directory is typically ``$HOME/.astropy/cache``, but if the XDG_CACHE_HOME environment variable is set and the ``$XDG_CACHE_HOME/astropy`` directory exists, it will be that directory. If neither exists, the former will be created and symlinked to the latter. Parameters ---------- rootname : str Name of the root cache directory. For example, if ``rootname = 'pkgname'``, the cache directory will be ``<cache>/.pkgname/``. Returns ------- cachedir : str The absolute path to the cache directory.
def get_cache_dir(rootname="astropy"): """ Determines the Astropy cache directory name and creates the directory if it doesn't exist. This directory is typically ``$HOME/.astropy/cache``, but if the XDG_CACHE_HOME environment variable is set and the ``$XDG_CACHE_HOME/astropy`` directory exists, it will be that directory. If neither exists, the former will be created and symlinked to the latter. Parameters ---------- rootname : str Name of the root cache directory. For example, if ``rootname = 'pkgname'``, the cache directory will be ``<cache>/.pkgname/``. Returns ------- cachedir : str The absolute path to the cache directory. """ # symlink will be set to this if the directory is created linkto = None # If using set_temp_cache, that overrides all if set_temp_cache._temp_path is not None: xch = set_temp_cache._temp_path cache_path = os.path.join(xch, rootname) if not os.path.exists(cache_path): os.mkdir(cache_path) return os.path.abspath(cache_path) # first look for XDG_CACHE_HOME xch = os.environ.get("XDG_CACHE_HOME") if xch is not None and os.path.exists(xch): xchpth = os.path.join(xch, rootname) if not os.path.islink(xchpth): if os.path.exists(xchpth): return os.path.abspath(xchpth) else: linkto = xchpth return os.path.abspath(_find_or_create_root_dir("cache", linkto, rootname))
Test for regression of bug #9704
def test_set_temp_cache_resets_on_exception(tmp_path): """Test for regression of bug #9704""" t = paths.get_cache_dir() (a := tmp_path / "a").write_text("not a good cache\n") with pytest.raises(OSError), paths.set_temp_cache(a): pass assert t == paths.get_cache_dir()
Test that generate_config works with the default filename.
def test_generate_config2(tmp_path): """Test that generate_config works with the default filename.""" with set_temp_config(tmp_path): from astropy.config.configuration import generate_config generate_config("astropy") assert os.path.exists(tmp_path / "astropy" / "astropy.cfg") with open(tmp_path / "astropy" / "astropy.cfg") as fp: conf = fp.read() check_config(conf)
Tests to make sure configuration items fall back to their defaults when there's a problem accessing the astropy directory
def test_config_noastropy_fallback(monkeypatch): """ Tests to make sure configuration items fall back to their defaults when there's a problem accessing the astropy directory """ # make sure the config directory is not searched monkeypatch.setenv("XDG_CONFIG_HOME", "foo") monkeypatch.delenv("XDG_CONFIG_HOME") monkeypatch.setattr(paths.set_temp_config, "_temp_path", None) # make sure the _find_or_create_root_dir function fails as though the # astropy dir could not be accessed def osraiser(dirnm, linkto, pkgname=None): raise OSError monkeypatch.setattr(paths, "_find_or_create_root_dir", osraiser) # also have to make sure the stored configuration objects are cleared monkeypatch.setattr(configuration, "_cfgobjs", {}) with pytest.raises(OSError): # make sure the config dir search fails paths.get_config_dir(rootname="astropy") # now run the basic tests, and make sure the warning about no astropy # is present test_configitem()
Generator to return a Constant object. Parameters ---------- codata, iaudata : obj Modules containing CODATA and IAU constants of interest. module : obj Namespace module of interest. not_in_module_only : bool If ``True``, ignore constants that are already in the namespace of ``module``. Returns ------- _c : Constant Constant object to process.
def _get_c(codata, iaudata, module, not_in_module_only=True): """ Generator to return a Constant object. Parameters ---------- codata, iaudata : obj Modules containing CODATA and IAU constants of interest. module : obj Namespace module of interest. not_in_module_only : bool If ``True``, ignore constants that are already in the namespace of ``module``. Returns ------- _c : Constant Constant object to process. """ from .constant import Constant for _nm, _c in (*sorted(vars(codata).items()), *sorted(vars(iaudata).items())): if not isinstance(_c, Constant): continue elif (not not_in_module_only) or (_c.abbrev not in module.__dict__): yield _c
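Illustrative use of the generator, with _get_c from above in scope and assuming the codata2018 and iau2015 submodules that ship with current astropy; with not_in_module_only=False every Constant found in the two modules is yielded.

import astropy.constants as const
from astropy.constants import codata2018, iau2015

for c in _get_c(codata2018, iau2015, const, not_in_module_only=False):
    print(f"{c.abbrev:>8} = {c.value} {c.unit}")
    break   # just show the first constant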
Set constants in a given module namespace. Parameters ---------- codata, iaudata : obj Modules containing CODATA and IAU constants of interest. module : obj Namespace module to modify with the given ``codata`` and ``iaudata``. not_in_module_only : bool If ``True``, constants that are already in the namespace of ``module`` will not be modified. doclines : list or None If a list is given, this list will be modified in-place to include documentation of modified constants. This can be used to update docstring of ``module``. set_class : bool Namespace of ``module`` is populated with ``_c.__class__`` instead of just ``_c`` from :func:`_get_c`.
def _set_c( codata, iaudata, module, not_in_module_only=True, doclines=None, set_class=False ): """ Set constants in a given module namespace. Parameters ---------- codata, iaudata : obj Modules containing CODATA and IAU constants of interest. module : obj Namespace module to modify with the given ``codata`` and ``iaudata``. not_in_module_only : bool If ``True``, constants that are already in the namespace of ``module`` will not be modified. doclines : list or None If a list is given, this list will be modified in-place to include documentation of modified constants. This can be used to update docstring of ``module``. set_class : bool Namespace of ``module`` is populated with ``_c.__class__`` instead of just ``_c`` from :func:`_get_c`. """ for _c in _get_c(codata, iaudata, module, not_in_module_only=not_in_module_only): if set_class: value = _c.__class__( _c.abbrev, _c.name, _c.value, _c._unit_string, _c.uncertainty, _c.reference, ) else: value = _c setattr(module, _c.abbrev, value) if doclines is not None: doclines.append( f"{_c.abbrev:^10} {_c.value:^14.9g} {_c._unit_string:^16} {_c.name}" )
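A sketch of populating a scratch namespace module and collecting the documentation rows, with _set_c from above in scope and again assuming the codata2018/iau2015 submodules:

import types

from astropy.constants import codata2018, iau2015

scratch = types.ModuleType("scratch_constants")
doclines = []
_set_c(codata2018, iau2015, scratch, doclines=doclines)

print(scratch.c)     # the speed-of-light Constant now lives in the scratch namespace
print(doclines[0])   # one "abbrev  value  unit  name" row per constant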
Tests for #572 demonstrating how EM constants should behave.
def test_e(): """Tests for #572 demonstrating how EM constants should behave.""" from astropy.constants import e # A test quantity E = Q(100, "V/m") # Without specifying a system e should not combine with other quantities pytest.raises(TypeError, lambda: e * E) # Try it again (as regression test on a minor issue mentioned in #745 where # repeated attempts to use e in an expression resulted in UnboundLocalError # instead of TypeError) pytest.raises(TypeError, lambda: e * E) # e.cgs is too ambiguous and should not work at all pytest.raises(TypeError, lambda: e.cgs * E) assert isinstance(e.si, Q) assert isinstance(e.gauss, Q) assert isinstance(e.esu, Q) assert e.si * E == Q(100, "eV/m") assert e.gauss * E == Q(e.gauss.value * E.value, "Fr V/m") assert e.esu * E == Q(e.esu.value * E.value, "Fr V/m")
Tests for #1263 demonstrating how g0 constant should behave.
def test_g0():
    """Tests for #1263 demonstrating how g0 constant should behave."""
    from astropy.constants import g0

    # g0 is an exactly defined constant, so it shouldn't be changing
    assert g0.value == 9.80665  # default is S.I.
    assert g0.si.value == 9.80665
    assert g0.cgs.value == 9.80665e2

    # make sure it has the necessary attributes and they're not blank
    assert g0.uncertainty == 0  # g0 is a *defined* quantity
    assert g0.name
    assert g0.reference
    assert g0.unit

    # Check that its unit has the correct physical type
    assert g0.unit.physical_type == "acceleration"
b_wien should give the correct peak wavelength for given blackbody temperature. The Sun is used in this test.
def test_b_wien(): """b_wien should give the correct peak wavelength for given blackbody temperature. The Sun is used in this test. """ from astropy import units as u from astropy.constants import b_wien t = 5778 * u.K w = (b_wien / t).to(u.nm) assert round(w.value) == 502
Check that Constant and Quantity views can be taken (#3537, #3538).
def test_view(): """Check that Constant and Quantity views can be taken (#3537, #3538).""" from astropy.constants import c c2 = c.view(Constant) assert c2 == c assert c2.value == c.value # make sure it has the necessary attributes and they're not blank assert c2.uncertainty == 0 # c is a *defined* quantity assert c2.name == c.name assert c2.reference == c.reference assert c2.unit == c.unit q1 = c.view(Q) assert q1 == c assert q1.value == c.value assert type(q1) is Q assert not hasattr(q1, "reference") q2 = Q(c) assert q2 == c assert q2.value == c.value assert type(q2) is Q assert not hasattr(q2, "reference") c3 = Q(c, subok=True) assert c3 == c assert c3.value == c.value # make sure it has the necessary attributes and they're not blank assert c3.uncertainty == 0 # c is a *defined* quantity assert c3.name == c.name assert c3.reference == c.reference assert c3.unit == c.unit c4 = Q(c, subok=True, copy=False) assert c4 is c
Tests for #1263 demonstrating how g0 constant should behave.
def test_g0():
    """Tests for #1263 demonstrating how g0 constant should behave."""
    from astropy.constants.astropyconst13 import g0

    # g0 is an exactly defined constant, so it shouldn't be changing
    assert g0.value == 9.80665  # default is S.I.
    assert g0.si.value == 9.80665
    assert g0.cgs.value == 9.80665e2

    # make sure it has the necessary attributes and they're not blank
    assert g0.uncertainty == 0  # g0 is a *defined* quantity
    assert g0.name
    assert g0.reference
    assert g0.unit

    # Check that its unit has the correct physical type
    assert g0.unit.physical_type == "acceleration"
b_wien should give the correct peak wavelength for given blackbody temperature. The Sun is used in this test.
def test_b_wien(): """b_wien should give the correct peak wavelength for given blackbody temperature. The Sun is used in this test. """ from astropy import units as u from astropy.constants.astropyconst13 import b_wien t = 5778 * u.K w = (b_wien / t).to(u.nm) assert round(w.value) == 502
Parsec is defined to use small-angle limit per IAU 2015 Resolution B 2. iau2012 version still uses tan(parallax).
def test_pc(): """Parsec is defined to use small-angle limit per IAU 2015 Resolution B 2. iau2012 version still uses tan(parallax). """ from astropy import units as u from astropy.constants import iau2012 plx = np.radians(1 / 3600) assert np.allclose( u.pc.to("m") / iau2012.pc.si.value, np.tan(plx) / plx, rtol=1.0e-14, atol=0 )
Ensure mass values are set up correctly. https://github.com/astropy/astropy/issues/8920
def test_masses(): """Ensure mass values are set up correctly. https://github.com/astropy/astropy/issues/8920 """ from astropy.constants import astropyconst13, astropyconst20, astropyconst40 ref_text = "Allen's Astrophysical Quantities 4th Ed." assert ( astropyconst13.M_sun.reference == ref_text and astropyconst13.M_jup.reference == ref_text and astropyconst13.M_earth.reference == ref_text ) ref_text = "IAU 2015 Resolution B 3 + CODATA 2014" assert ( astropyconst20.M_sun.reference == ref_text and astropyconst20.M_jup.reference == ref_text and astropyconst20.M_earth.reference == ref_text ) ref_text = "IAU 2015 Resolution B 3 + CODATA 2018" assert ( astropyconst40.M_sun.reference == ref_text and astropyconst40.M_jup.reference == ref_text and astropyconst40.M_earth.reference == ref_text )
Find optimal or good sizes to pad an array of ``shape`` to for better performance with `numpy.fft.*fft` and `scipy.fft.*fft`. Calculated directly with `scipy.fft.next_fast_len`, if available; otherwise looked up from list and scaled by powers of 10, if necessary.
def _next_fast_lengths(shape): """ Find optimal or good sizes to pad an array of ``shape`` to for better performance with `numpy.fft.*fft` and `scipy.fft.*fft`. Calculated directly with `scipy.fft.next_fast_len`, if available; otherwise looked up from list and scaled by powers of 10, if necessary. """ try: import scipy.fft return np.array([scipy.fft.next_fast_len(j) for j in shape]) except ImportError: pass newshape = np.empty(len(np.atleast_1d(shape)), dtype=int) for i, j in enumerate(shape): scale = 10 ** max(int(np.ceil(np.log10(j))) - _good_range, 0) for n in _good_sizes: if n * scale >= j: newshape[i] = n * scale break else: raise ValueError( f"No next fast length for {j} found in list of _good_sizes " f"<= {_good_sizes[-1] * scale}." ) return newshape
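For example, with the helper above in scope (values shown assume scipy is installed, so scipy.fft.next_fast_len is used):

print(_next_fast_lengths((17, 50)))   # e.g. [18 50] with scipy's next_fast_len;
                                      # otherwise values come from the _good_sizes table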
Convolve an array with a kernel. This routine differs from `scipy.ndimage.convolve` because it includes a special treatment for ``NaN`` values. Rather than including ``NaN`` values in the array in the convolution calculation, which causes large ``NaN`` holes in the convolved array, ``NaN`` values are replaced with interpolated values using the kernel as an interpolation function. Parameters ---------- array : `~astropy.nddata.NDData` or array-like The array to convolve. This should be a 1, 2, or 3-dimensional array or a list or a set of nested lists representing a 1, 2, or 3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of the `~astropy.nddata.NDData` will be used as the ``mask`` argument. kernel : `numpy.ndarray` or `~astropy.convolution.Kernel` The convolution kernel. The number of dimensions should match those for the array, and the dimensions should be odd in all directions. If a masked array, the masked values will be replaced by ``fill_value``. boundary : str, optional A flag indicating how to handle boundaries: * `None` Set the ``result`` values to zero where the kernel extends beyond the edge of the array. * 'fill' Set values outside the array boundary to ``fill_value`` (default). * 'wrap' Periodic boundary that wrap to the other side of ``array``. * 'extend' Set values outside the array to the nearest ``array`` value. fill_value : float, optional The value to use outside the array when using ``boundary='fill'``. normalize_kernel : bool, optional Whether to normalize the kernel to have a sum of one. nan_treatment : {'interpolate', 'fill'}, optional The method used to handle NaNs in the input ``array``: * ``'interpolate'``: ``NaN`` values are replaced with interpolated values using the kernel as an interpolation function. Note that if the kernel has a sum equal to zero, NaN interpolation is not possible and will raise an exception. * ``'fill'``: ``NaN`` values are replaced by ``fill_value`` prior to convolution. preserve_nan : bool, optional After performing convolution, should pixels that were originally NaN again become NaN? mask : None or ndarray, optional A "mask" array. Shape must match ``array``, and anything that is masked (i.e., not 0/`False`) will be set to NaN for the convolution. If `None`, no masking will be performed unless ``array`` is a masked array. If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is masked if it is masked in either ``mask`` *or* ``array.mask``. normalization_zero_tol : float, optional The absolute tolerance on whether the kernel is different than zero. If the kernel sums to zero to within this precision, it cannot be normalized. Default is "1e-8". Returns ------- result : `numpy.ndarray` An array with the same dimensions and as the input array, convolved with kernel. The data type depends on the input array type. If array is a floating point type, then the return array keeps the same data type, otherwise the type is ``numpy.float``. Notes ----- For masked arrays, masked values are treated as NaNs. The convolution is always done at ``numpy.float`` precision.
def convolve( array, kernel, boundary="fill", fill_value=0.0, nan_treatment="interpolate", normalize_kernel=True, mask=None, preserve_nan=False, normalization_zero_tol=1e-8, ): """ Convolve an array with a kernel. This routine differs from `scipy.ndimage.convolve` because it includes a special treatment for ``NaN`` values. Rather than including ``NaN`` values in the array in the convolution calculation, which causes large ``NaN`` holes in the convolved array, ``NaN`` values are replaced with interpolated values using the kernel as an interpolation function. Parameters ---------- array : `~astropy.nddata.NDData` or array-like The array to convolve. This should be a 1, 2, or 3-dimensional array or a list or a set of nested lists representing a 1, 2, or 3-dimensional array. If an `~astropy.nddata.NDData`, the ``mask`` of the `~astropy.nddata.NDData` will be used as the ``mask`` argument. kernel : `numpy.ndarray` or `~astropy.convolution.Kernel` The convolution kernel. The number of dimensions should match those for the array, and the dimensions should be odd in all directions. If a masked array, the masked values will be replaced by ``fill_value``. boundary : str, optional A flag indicating how to handle boundaries: * `None` Set the ``result`` values to zero where the kernel extends beyond the edge of the array. * 'fill' Set values outside the array boundary to ``fill_value`` (default). * 'wrap' Periodic boundary that wrap to the other side of ``array``. * 'extend' Set values outside the array to the nearest ``array`` value. fill_value : float, optional The value to use outside the array when using ``boundary='fill'``. normalize_kernel : bool, optional Whether to normalize the kernel to have a sum of one. nan_treatment : {'interpolate', 'fill'}, optional The method used to handle NaNs in the input ``array``: * ``'interpolate'``: ``NaN`` values are replaced with interpolated values using the kernel as an interpolation function. Note that if the kernel has a sum equal to zero, NaN interpolation is not possible and will raise an exception. * ``'fill'``: ``NaN`` values are replaced by ``fill_value`` prior to convolution. preserve_nan : bool, optional After performing convolution, should pixels that were originally NaN again become NaN? mask : None or ndarray, optional A "mask" array. Shape must match ``array``, and anything that is masked (i.e., not 0/`False`) will be set to NaN for the convolution. If `None`, no masking will be performed unless ``array`` is a masked array. If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is masked if it is masked in either ``mask`` *or* ``array.mask``. normalization_zero_tol : float, optional The absolute tolerance on whether the kernel is different than zero. If the kernel sums to zero to within this precision, it cannot be normalized. Default is "1e-8". Returns ------- result : `numpy.ndarray` An array with the same dimensions and as the input array, convolved with kernel. The data type depends on the input array type. If array is a floating point type, then the return array keeps the same data type, otherwise the type is ``numpy.float``. Notes ----- For masked arrays, masked values are treated as NaNs. The convolution is always done at ``numpy.float`` precision. 
""" if boundary not in BOUNDARY_OPTIONS: raise ValueError(f"Invalid boundary option: must be one of {BOUNDARY_OPTIONS}") if nan_treatment not in ("interpolate", "fill"): raise ValueError("nan_treatment must be one of 'interpolate','fill'") # OpenMP support is disabled at the C src code level, changing this will have # no effect. n_threads = 1 # Keep refs to originals passed_kernel = kernel passed_array = array # The C routines all need float type inputs (so, a particular # bit size, endianness, etc.). So we have to convert, which also # has the effect of making copies so we don't modify the inputs. # After this, the variables we work with will be array_internal, and # kernel_internal. However -- we do want to keep track of what type # the input array was so we can cast the result to that at the end # if it's a floating point type. Don't bother with this for lists -- # just always push those as float. # It is always necessary to make a copy of kernel (since it is modified), # but, if we just so happen to be lucky enough to have the input array # have exactly the desired type, we just alias to array_internal # Convert kernel to ndarray if not already # Copy or alias array to array_internal array_internal = _copy_input_if_needed( passed_array, dtype=float, order="C", nan_treatment=nan_treatment, mask=mask, fill_value=np.nan, ) array_dtype = getattr(passed_array, "dtype", array_internal.dtype) # Copy or alias kernel to kernel_internal kernel_internal = _copy_input_if_needed( passed_kernel, dtype=float, order="C", nan_treatment=None, mask=None, fill_value=fill_value, ) # Make sure kernel has all odd axes if has_even_axis(kernel_internal): raise KernelSizeError("Kernel size must be odd in all axes.") # If both image array and kernel are Kernel instances # constrain convolution method # This must occur before the main alias/copy of ``passed_kernel`` to # ``kernel_internal`` as it is used for filling masked kernels. if isinstance(passed_array, Kernel) and isinstance(passed_kernel, Kernel): warnings.warn( "Both array and kernel are Kernel instances, hardwiring " "the following parameters: boundary='fill', fill_value=0," " normalize_Kernel=True, nan_treatment='interpolate'", AstropyUserWarning, ) boundary = "fill" fill_value = 0 normalize_kernel = True nan_treatment = "interpolate" # ----------------------------------------------------------------------- # From this point onwards refer only to ``array_internal`` and # ``kernel_internal``. # Assume both are base np.ndarrays and NOT subclasses e.g. NOT # ``Kernel`` nor ``np.ma.maskedarray`` classes. # ----------------------------------------------------------------------- # Check dimensionality if array_internal.ndim == 0: raise ValueError("cannot convolve 0-dimensional arrays") elif array_internal.ndim > 3: raise NotImplementedError( "convolve only supports 1, 2, and 3-dimensional arrays at this time" ) elif array_internal.ndim != kernel_internal.ndim: raise ValueError("array and kernel have differing number of dimensions.") elif array_internal.size == 0: raise ValueError("cannot convolve empty array") array_shape = np.array(array_internal.shape) kernel_shape = np.array(kernel_internal.shape) pad_width = kernel_shape // 2 # For boundary=None only the center space is convolved. All array indices within a # distance kernel.shape//2 from the edge are completely ignored (zeroed). # E.g. (1D list) only the indices len(kernel)//2 : len(array)-len(kernel)//2 # are convolved. 
It is therefore not possible to use this method to convolve an # array by a kernel that is larger (see note below) than the array - as ALL pixels # would be ignored leaving an array of only zeros. # Note: For even kernels the correctness condition is array_shape > kernel_shape. # For odd kernels it is: # array_shape >= kernel_shape OR # array_shape > kernel_shape-1 OR # array_shape > 2*(kernel_shape//2). # Since the latter is equal to the former two for even lengths, the latter condition is # complete. if boundary is None and not np.all(array_shape > 2 * pad_width): raise KernelSizeError( "for boundary=None all kernel axes must be smaller than array's - " "use boundary in ['fill', 'extend', 'wrap'] instead." ) # NaN interpolation significantly slows down the C convolution # computation. Since nan_treatment = 'interpolate', is the default # check whether it is even needed, if not, don't interpolate. # NB: np.isnan(array_internal.sum()) is faster than np.isnan(array_internal).any() nan_interpolate = (nan_treatment == "interpolate") and np.isnan( array_internal.sum() ) # Check if kernel is normalizable if normalize_kernel or nan_interpolate: kernel_sum = kernel_internal.sum() kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol) if kernel_sum < 1.0 / MAX_NORMALIZATION or kernel_sums_to_zero: if nan_interpolate: raise ValueError( "Setting nan_treatment='interpolate' " "requires the kernel to be normalized, " "but the input kernel has a sum close " "to zero. For a zero-sum kernel and " "data with NaNs, set nan_treatment='fill'." ) else: raise ValueError( "The kernel can't be normalized, because " "its sum is close to zero. The sum of the " f"given kernel is < {1.0 / MAX_NORMALIZATION:.2f}. " "For a zero-sum kernel, set normalize_kernel=False " "or pass a custom normalization function to " "normalize_kernel." ) # Mark the NaN values so we can replace them later if interpolate_nan is # not set if preserve_nan or nan_treatment == "fill": initially_nan = np.isnan(array_internal) if nan_treatment == "fill": array_internal[initially_nan] = fill_value # Avoid any memory allocation within the C code. Allocate output array # here and pass through instead. result = np.zeros(array_internal.shape, dtype=float, order="C") embed_result_within_padded_region = True array_to_convolve = array_internal if boundary in ("fill", "extend", "wrap"): embed_result_within_padded_region = False if boundary == "fill": # This method is faster than using numpy.pad(..., mode='constant') array_to_convolve = np.full( array_shape + 2 * pad_width, fill_value=fill_value, dtype=float, order="C", ) # Use bounds [pad_width[0]:array_shape[0]+pad_width[0]] instead of # [pad_width[0]:-pad_width[0]] # to account for when the kernel has size of 1 making pad_width = 0. 
if array_internal.ndim == 1: array_to_convolve[pad_width[0] : array_shape[0] + pad_width[0]] = ( array_internal ) elif array_internal.ndim == 2: array_to_convolve[ pad_width[0] : array_shape[0] + pad_width[0], pad_width[1] : array_shape[1] + pad_width[1], ] = array_internal else: array_to_convolve[ pad_width[0] : array_shape[0] + pad_width[0], pad_width[1] : array_shape[1] + pad_width[1], pad_width[2] : array_shape[2] + pad_width[2], ] = array_internal else: np_pad_mode_dict = {"fill": "constant", "extend": "edge", "wrap": "wrap"} np_pad_mode = np_pad_mode_dict[boundary] pad_width = kernel_shape // 2 if array_internal.ndim == 1: np_pad_width = (pad_width[0],) elif array_internal.ndim == 2: np_pad_width = ((pad_width[0],), (pad_width[1],)) else: np_pad_width = ((pad_width[0],), (pad_width[1],), (pad_width[2],)) array_to_convolve = np.pad( array_internal, pad_width=np_pad_width, mode=np_pad_mode ) _convolveNd_c( result, array_to_convolve, kernel_internal, nan_interpolate, embed_result_within_padded_region, n_threads, ) # So far, normalization has only occurred for nan_treatment == 'interpolate' # because this had to happen within the C extension so as to ignore # any NaNs if normalize_kernel: if not nan_interpolate: result /= kernel_sum elif nan_interpolate: result *= kernel_sum if nan_interpolate and not preserve_nan and np.isnan(result.sum()): warnings.warn( "nan_treatment='interpolate', however, NaN values detected " "post convolution. A contiguous region of NaN values, larger " "than the kernel size, are present in the input array. " "Increase the kernel size to avoid this.", AstropyUserWarning, ) if preserve_nan: result[initially_nan] = np.nan # Convert result to original data type array_unit = getattr(passed_array, "unit", None) if array_unit is not None: result <<= array_unit if isinstance(passed_array, Kernel): if isinstance(passed_array, Kernel1D): new_result = Kernel1D(array=result) elif isinstance(passed_array, Kernel2D): new_result = Kernel2D(array=result) else: raise TypeError("Only 1D and 2D Kernels are supported.") new_result._is_bool = False new_result._separable = passed_array._separable if isinstance(passed_kernel, Kernel): new_result._separable = new_result._separable and passed_kernel._separable return new_result if array_dtype.kind == "f": # Try to preserve the input type if it's a floating point type return result.astype(array_dtype, copy=False) else: return result
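A short usage sketch of the behaviour described above (assumes numpy and that this function is the one exported as astropy.convolution.convolve):

import numpy as np

from astropy.convolution import convolve

img = np.array([1.0, np.nan, 3.0])
print(convolve(img, [1, 1, 1]))                      # the NaN is interpolated over
print(convolve(img, [1, 1, 1], preserve_nan=True))   # the NaN is put back afterwards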
Convolve an ndarray with an nd-kernel. Returns a convolved image with ``shape = array.shape``. Assumes kernel is centered. `convolve_fft` is very similar to `convolve` in that it replaces ``NaN`` values in the original image with interpolated values using the kernel as an interpolation function. However, it also includes many additional options specific to the implementation. `convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways: * It can treat ``NaN`` values as zeros or interpolate over them. * ``inf`` values are treated as ``NaN`` * It optionally pads to the nearest faster sizes to improve FFT speed. These sizes are optimized for the numpy and scipy implementations, and ``fftconvolve`` uses them by default as well; when using other external functions (see below), results may vary. * Its only valid ``mode`` is 'same' (i.e., the same shape array is returned) * It lets you use your own fft, e.g., `pyFFTW <https://pypi.org/project/pyFFTW/>`_ or `pyFFTW3 <https://pypi.org/project/PyFFTW3/0.2.1/>`_ , which can lead to performance improvements, depending on your system configuration. pyFFTW3 is threaded, and therefore may yield significant performance benefits on multi-core machines at the cost of greater memory requirements. Specify the ``fftn`` and ``ifftn`` keywords to override the default, which is `numpy.fft.fftn` and `numpy.fft.ifftn`. The `scipy.fft` functions also offer somewhat better performance and a multi-threaded option. Parameters ---------- array : `numpy.ndarray` Array to be convolved with ``kernel``. It can be of any dimensionality, though only 1, 2, and 3d arrays have been tested. kernel : `numpy.ndarray` or `astropy.convolution.Kernel` The convolution kernel. The number of dimensions should match those for the array. The dimensions *do not* have to be odd in all directions, unlike in the non-fft `convolve` function. The kernel will be normalized if ``normalize_kernel`` is set. It is assumed to be centered (i.e., shifts may result if your kernel is asymmetric) boundary : {'fill', 'wrap'}, optional A flag indicating how to handle boundaries: * 'fill': set values outside the array boundary to fill_value (default) * 'wrap': periodic boundary The `None` and 'extend' parameters are not supported for FFT-based convolution. fill_value : float, optional The value to use outside the array when using boundary='fill'. nan_treatment : {'interpolate', 'fill'}, optional The method used to handle NaNs in the input ``array``: * ``'interpolate'``: ``NaN`` values are replaced with interpolated values using the kernel as an interpolation function. Note that if the kernel has a sum equal to zero, NaN interpolation is not possible and will raise an exception. * ``'fill'``: ``NaN`` values are replaced by ``fill_value`` prior to convolution. normalize_kernel : callable or boolean, optional If specified, this is the function to divide kernel by to normalize it. e.g., ``normalize_kernel=np.sum`` means that kernel will be modified to be: ``kernel = kernel / np.sum(kernel)``. If True, defaults to ``normalize_kernel = np.sum``. normalization_zero_tol : float, optional The absolute tolerance on whether the kernel is different than zero. If the kernel sums to zero to within this precision, it cannot be normalized. Default is "1e-8". preserve_nan : bool, optional After performing convolution, should pixels that were originally NaN again become NaN? mask : None or ndarray, optional A "mask" array. 
Shape must match ``array``, and anything that is masked (i.e., not 0/`False`) will be set to NaN for the convolution. If `None`, no masking will be performed unless ``array`` is a masked array. If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is masked if it is masked in either ``mask`` *or* ``array.mask``. crop : bool, optional Default on. Return an image of the size of the larger of the input image and the kernel. If the image and kernel are asymmetric in opposite directions, will return the largest image in both directions. For example, if an input image has shape [100,3] but a kernel with shape [6,6] is used, the output will be [100,6]. return_fft : bool, optional Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is ``ifft(fft(image)*fft(kernel))``). Useful for making PSDs. fft_pad : bool, optional Default on. Zero-pad image to the nearest size supporting more efficient execution of the FFT, generally values factorizable into the first 3-5 prime numbers. With ``boundary='wrap'``, this will be disabled. psf_pad : bool, optional Zero-pad image to be at least the sum of the image sizes to avoid edge-wrapping when smoothing. This is enabled by default with ``boundary='fill'``, but it can be overridden with a boolean option. ``boundary='wrap'`` and ``psf_pad=True`` are not compatible. min_wt : float, optional If ignoring ``NaN`` / zeros, force all grid points with a weight less than this value to ``NaN`` (the weight of a grid point with *no* ignored neighbors is 1.0). If ``min_wt`` is zero, then all zero-weight points will be set to zero instead of ``NaN`` (which they would be otherwise, because 1/0 = nan). See the examples below. allow_huge : bool, optional Allow huge arrays in the FFT? If False, will raise an exception if the array or kernel size is >1 GB. fftn : callable, optional The fft function. Can be overridden to use your own ffts, e.g. an fftw3 wrapper or scipy's fftn, ``fft=scipy.fftpack.fftn``. ifftn : callable, optional The inverse fft function. Can be overridden the same way ``fttn``. complex_dtype : complex type, optional Which complex dtype to use. `numpy` has a range of options, from 64 to 256. dealias: bool, optional Default off. Zero-pad image to enable explicit dealiasing of convolution. With ``boundary='wrap'``, this will be disabled. Note that for an input of nd dimensions this will increase the size of the temporary arrays by at least ``1.5**nd``. This may result in significantly more memory usage. Returns ------- default : ndarray ``array`` convolved with ``kernel``. If ``return_fft`` is set, returns ``fft(array) * fft(kernel)``. If crop is not set, returns the image, but with the fft-padded size instead of the input size. Raises ------ `ValueError` If the array is bigger than 1 GB after padding, will raise this exception unless ``allow_huge`` is True. See Also -------- convolve: Convolve is a non-fft version of this code. It is more memory efficient and for small kernels can be faster. Notes ----- With ``psf_pad=True`` and a large PSF, the resulting data can become large and consume a lot of memory. See Issue https://github.com/astropy/astropy/pull/4366 and the update in https://github.com/astropy/astropy/pull/11533 for further details. Dealiasing of pseudospectral convolutions is necessary for numerical stability of the underlying algorithms. A common method for handling this is to zero pad the image by at least 1/2 to eliminate the wavenumbers which have been aliased by convolution. 
This is so that the aliased 1/3 of the results of the convolution computation can be thrown out. See https://doi.org/10.1175/1520-0469(1971)028%3C1074:OTEOAI%3E2.0.CO;2 https://iopscience.iop.org/article/10.1088/1742-6596/318/7/072037 Note that if dealiasing is necessary to your application, but your process is memory constrained, you may want to consider using FFTW++: https://github.com/dealias/fftwpp. It includes python wrappers for a pseudospectral convolution which will implicitly dealias your convolution without the need for additional padding. Note that one cannot use FFTW++'s convlution directly in this method as in handles the entire convolution process internally. Additionally, FFTW++ includes other useful pseudospectral methods to consider. Examples -------- >>> convolve_fft([1, 0, 3], [1, 1, 1]) array([0.33333333, 1.33333333, 1. ]) >>> convolve_fft([1, np.nan, 3], [1, 1, 1]) array([0.5, 2. , 1.5]) >>> convolve_fft([1, 0, 3], [0, 1, 0]) # doctest: +FLOAT_CMP array([ 1.00000000e+00, -3.70074342e-17, 3.00000000e+00]) >>> convolve_fft([1, 2, 3], [1]) array([1., 2., 3.]) >>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate') array([1., 0., 3.]) >>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate', ... min_wt=1e-8) array([ 1., nan, 3.]) >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate') array([0.5, 2. , 1.5]) >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', ... normalize_kernel=True) array([0.5, 2. , 1.5]) >>> import scipy.fft # optional - requires scipy >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', ... normalize_kernel=True, ... fftn=scipy.fft.fftn, ifftn=scipy.fft.ifftn) array([0.5, 2. , 1.5]) >>> fft_mp = lambda a: scipy.fft.fftn(a, workers=-1) # use all available cores >>> ifft_mp = lambda a: scipy.fft.ifftn(a, workers=-1) >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', ... normalize_kernel=True, fftn=fft_mp, ifftn=ifft_mp) array([0.5, 2. , 1.5])
def convolve_fft( array, kernel, boundary="fill", fill_value=0.0, nan_treatment="interpolate", normalize_kernel=True, normalization_zero_tol=1e-8, preserve_nan=False, mask=None, crop=True, return_fft=False, fft_pad=None, psf_pad=None, min_wt=0.0, allow_huge=False, fftn=np.fft.fftn, ifftn=np.fft.ifftn, complex_dtype=complex, dealias=False, ): """ Convolve an ndarray with an nd-kernel. Returns a convolved image with ``shape = array.shape``. Assumes kernel is centered. `convolve_fft` is very similar to `convolve` in that it replaces ``NaN`` values in the original image with interpolated values using the kernel as an interpolation function. However, it also includes many additional options specific to the implementation. `convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways: * It can treat ``NaN`` values as zeros or interpolate over them. * ``inf`` values are treated as ``NaN`` * It optionally pads to the nearest faster sizes to improve FFT speed. These sizes are optimized for the numpy and scipy implementations, and ``fftconvolve`` uses them by default as well; when using other external functions (see below), results may vary. * Its only valid ``mode`` is 'same' (i.e., the same shape array is returned) * It lets you use your own fft, e.g., `pyFFTW <https://pypi.org/project/pyFFTW/>`_ or `pyFFTW3 <https://pypi.org/project/PyFFTW3/0.2.1/>`_ , which can lead to performance improvements, depending on your system configuration. pyFFTW3 is threaded, and therefore may yield significant performance benefits on multi-core machines at the cost of greater memory requirements. Specify the ``fftn`` and ``ifftn`` keywords to override the default, which is `numpy.fft.fftn` and `numpy.fft.ifftn`. The `scipy.fft` functions also offer somewhat better performance and a multi-threaded option. Parameters ---------- array : `numpy.ndarray` Array to be convolved with ``kernel``. It can be of any dimensionality, though only 1, 2, and 3d arrays have been tested. kernel : `numpy.ndarray` or `astropy.convolution.Kernel` The convolution kernel. The number of dimensions should match those for the array. The dimensions *do not* have to be odd in all directions, unlike in the non-fft `convolve` function. The kernel will be normalized if ``normalize_kernel`` is set. It is assumed to be centered (i.e., shifts may result if your kernel is asymmetric) boundary : {'fill', 'wrap'}, optional A flag indicating how to handle boundaries: * 'fill': set values outside the array boundary to fill_value (default) * 'wrap': periodic boundary The `None` and 'extend' parameters are not supported for FFT-based convolution. fill_value : float, optional The value to use outside the array when using boundary='fill'. nan_treatment : {'interpolate', 'fill'}, optional The method used to handle NaNs in the input ``array``: * ``'interpolate'``: ``NaN`` values are replaced with interpolated values using the kernel as an interpolation function. Note that if the kernel has a sum equal to zero, NaN interpolation is not possible and will raise an exception. * ``'fill'``: ``NaN`` values are replaced by ``fill_value`` prior to convolution. normalize_kernel : callable or boolean, optional If specified, this is the function to divide kernel by to normalize it. e.g., ``normalize_kernel=np.sum`` means that kernel will be modified to be: ``kernel = kernel / np.sum(kernel)``. If True, defaults to ``normalize_kernel = np.sum``. normalization_zero_tol : float, optional The absolute tolerance on whether the kernel is different than zero. 
If the kernel sums to zero to within this precision, it cannot be normalized. Default is "1e-8". preserve_nan : bool, optional After performing convolution, should pixels that were originally NaN again become NaN? mask : None or ndarray, optional A "mask" array. Shape must match ``array``, and anything that is masked (i.e., not 0/`False`) will be set to NaN for the convolution. If `None`, no masking will be performed unless ``array`` is a masked array. If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is masked if it is masked in either ``mask`` *or* ``array.mask``. crop : bool, optional Default on. Return an image of the size of the larger of the input image and the kernel. If the image and kernel are asymmetric in opposite directions, will return the largest image in both directions. For example, if an input image has shape [100,3] but a kernel with shape [6,6] is used, the output will be [100,6]. return_fft : bool, optional Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is ``ifft(fft(image)*fft(kernel))``). Useful for making PSDs. fft_pad : bool, optional Default on. Zero-pad image to the nearest size supporting more efficient execution of the FFT, generally values factorizable into the first 3-5 prime numbers. With ``boundary='wrap'``, this will be disabled. psf_pad : bool, optional Zero-pad image to be at least the sum of the image sizes to avoid edge-wrapping when smoothing. This is enabled by default with ``boundary='fill'``, but it can be overridden with a boolean option. ``boundary='wrap'`` and ``psf_pad=True`` are not compatible. min_wt : float, optional If ignoring ``NaN`` / zeros, force all grid points with a weight less than this value to ``NaN`` (the weight of a grid point with *no* ignored neighbors is 1.0). If ``min_wt`` is zero, then all zero-weight points will be set to zero instead of ``NaN`` (which they would be otherwise, because 1/0 = nan). See the examples below. allow_huge : bool, optional Allow huge arrays in the FFT? If False, will raise an exception if the array or kernel size is >1 GB. fftn : callable, optional The fft function. Can be overridden to use your own ffts, e.g. an fftw3 wrapper or scipy's fftn, ``fft=scipy.fftpack.fftn``. ifftn : callable, optional The inverse fft function. Can be overridden the same way ``fttn``. complex_dtype : complex type, optional Which complex dtype to use. `numpy` has a range of options, from 64 to 256. dealias: bool, optional Default off. Zero-pad image to enable explicit dealiasing of convolution. With ``boundary='wrap'``, this will be disabled. Note that for an input of nd dimensions this will increase the size of the temporary arrays by at least ``1.5**nd``. This may result in significantly more memory usage. Returns ------- default : ndarray ``array`` convolved with ``kernel``. If ``return_fft`` is set, returns ``fft(array) * fft(kernel)``. If crop is not set, returns the image, but with the fft-padded size instead of the input size. Raises ------ `ValueError` If the array is bigger than 1 GB after padding, will raise this exception unless ``allow_huge`` is True. See Also -------- convolve: Convolve is a non-fft version of this code. It is more memory efficient and for small kernels can be faster. Notes ----- With ``psf_pad=True`` and a large PSF, the resulting data can become large and consume a lot of memory. See Issue https://github.com/astropy/astropy/pull/4366 and the update in https://github.com/astropy/astropy/pull/11533 for further details. 
Dealiasing of pseudospectral convolutions is necessary for numerical stability of the underlying algorithms. A common method for handling this is to zero pad the image by at least 1/2 to eliminate the wavenumbers which have been aliased by convolution. This is so that the aliased 1/3 of the results of the convolution computation can be thrown out. See https://doi.org/10.1175/1520-0469(1971)028%3C1074:OTEOAI%3E2.0.CO;2 https://iopscience.iop.org/article/10.1088/1742-6596/318/7/072037 Note that if dealiasing is necessary to your application, but your process is memory constrained, you may want to consider using FFTW++: https://github.com/dealias/fftwpp. It includes python wrappers for a pseudospectral convolution which will implicitly dealias your convolution without the need for additional padding. Note that one cannot use FFTW++'s convlution directly in this method as in handles the entire convolution process internally. Additionally, FFTW++ includes other useful pseudospectral methods to consider. Examples -------- >>> convolve_fft([1, 0, 3], [1, 1, 1]) array([0.33333333, 1.33333333, 1. ]) >>> convolve_fft([1, np.nan, 3], [1, 1, 1]) array([0.5, 2. , 1.5]) >>> convolve_fft([1, 0, 3], [0, 1, 0]) # doctest: +FLOAT_CMP array([ 1.00000000e+00, -3.70074342e-17, 3.00000000e+00]) >>> convolve_fft([1, 2, 3], [1]) array([1., 2., 3.]) >>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate') array([1., 0., 3.]) >>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate', ... min_wt=1e-8) array([ 1., nan, 3.]) >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate') array([0.5, 2. , 1.5]) >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', ... normalize_kernel=True) array([0.5, 2. , 1.5]) >>> import scipy.fft # optional - requires scipy >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', ... normalize_kernel=True, ... fftn=scipy.fft.fftn, ifftn=scipy.fft.ifftn) array([0.5, 2. , 1.5]) >>> fft_mp = lambda a: scipy.fft.fftn(a, workers=-1) # use all available cores >>> ifft_mp = lambda a: scipy.fft.ifftn(a, workers=-1) >>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate', ... normalize_kernel=True, fftn=fft_mp, ifftn=ifft_mp) array([0.5, 2. , 1.5]) """ # Checking copied from convolve.py - however, since FFTs have real & # complex components, we change the types. Only the real part will be # returned! Note that this always makes a copy. # Check kernel is kernel instance if isinstance(kernel, Kernel): kernel = kernel.array if isinstance(array, Kernel): raise TypeError( "Can't convolve two kernels with convolve_fft. Use convolve instead." 
) if nan_treatment not in ("interpolate", "fill"): raise ValueError("nan_treatment must be one of 'interpolate','fill'") # Get array quantity if it exists array_unit = getattr(array, "unit", None) # Convert array dtype to complex # and ensure that list inputs become arrays array = _copy_input_if_needed( array, dtype=complex, order="C", nan_treatment=nan_treatment, mask=mask, fill_value=np.nan, ) kernel = _copy_input_if_needed( kernel, dtype=complex, order="C", nan_treatment=None, mask=None, fill_value=0 ) # Check that the number of dimensions is compatible if array.ndim != kernel.ndim: raise ValueError("Image and kernel must have same number of dimensions") arrayshape = array.shape kernshape = kernel.shape array_size_B = ( np.prod(arrayshape, dtype=np.int64) * np.dtype(complex_dtype).itemsize ) * u.byte if array_size_B > 1 * u.GB and not allow_huge: raise ValueError( f"Size Error: Arrays will be {human_file_size(array_size_B)}. " "Use allow_huge=True to override this exception." ) # NaN and inf catching nanmaskarray = np.isnan(array) | np.isinf(array) if nan_treatment == "fill": array[nanmaskarray] = fill_value else: array[nanmaskarray] = 0 nanmaskkernel = np.isnan(kernel) | np.isinf(kernel) kernel[nanmaskkernel] = 0 if normalize_kernel is True: if kernel.sum() < 1.0 / MAX_NORMALIZATION: raise Exception( "The kernel can't be normalized, because its sum is close " "to zero. The sum of the given kernel is < " f"{1.0 / MAX_NORMALIZATION:.2f}. For a zero-sum kernel, set " "normalize_kernel=False or pass a custom normalization " "function to normalize_kernel." ) kernel_scale = kernel.sum() normalized_kernel = kernel / kernel_scale kernel_scale = 1 # if we want to normalize it, leave it normed! elif normalize_kernel: # try this. If a function is not passed, the code will just crash... I # think type checking would be better but PEPs say otherwise... kernel_scale = normalize_kernel(kernel) normalized_kernel = kernel / kernel_scale else: kernel_scale = kernel.sum() if np.abs(kernel_scale) < normalization_zero_tol: if nan_treatment == "interpolate": raise ValueError( "Cannot interpolate NaNs with an unnormalizable kernel" ) else: # the kernel's sum is near-zero, so it can't be scaled kernel_scale = 1 normalized_kernel = kernel else: # the kernel is normalizable; we'll temporarily normalize it # now and undo the normalization later. normalized_kernel = kernel / kernel_scale if boundary is None: warnings.warn( "The convolve_fft version of boundary=None is " "equivalent to the convolve boundary='fill'. There is " "no FFT equivalent to convolve's " "zero-if-kernel-leaves-boundary", AstropyUserWarning, ) if psf_pad is None: psf_pad = True if fft_pad is None: fft_pad = True elif boundary == "fill": # create a boundary region at least as large as the kernel if psf_pad is False: warnings.warn( f"psf_pad was set to {psf_pad}, which overrides the " "boundary='fill' setting.", AstropyUserWarning, ) else: psf_pad = True if fft_pad is None: # default is 'True' according to the docstring fft_pad = True elif boundary == "wrap": if psf_pad: raise ValueError("With boundary='wrap', psf_pad cannot be enabled.") psf_pad = False if fft_pad: raise ValueError("With boundary='wrap', fft_pad cannot be enabled.") fft_pad = False if dealias: raise ValueError("With boundary='wrap', dealias cannot be enabled.") fill_value = 0 # force zero; it should not be used elif boundary == "extend": raise NotImplementedError( "The 'extend' option is not implemented for fft-based convolution" ) # Add shapes elementwise for psf_pad. 
if psf_pad: # default=False # add the sizes along each dimension (bigger) newshape = np.array(arrayshape) + np.array(kernshape) else: # take the larger shape in each dimension (smaller) newshape = np.maximum(arrayshape, kernshape) if dealias: # Extend shape by 1/2 for dealiasing newshape += np.ceil(newshape / 2).astype(int) # Find ideal size for fft (was power of 2, now any powers of prime factors 2, 3, 5). if fft_pad: # default=True # Get optimized sizes from scipy. newshape = _next_fast_lengths(newshape) # perform a second check after padding array_size_C = ( np.prod(newshape, dtype=np.int64) * np.dtype(complex_dtype).itemsize ) * u.byte if array_size_C > 1 * u.GB and not allow_huge: raise ValueError( f"Size Error: Arrays will be {human_file_size(array_size_C)}. " "Use allow_huge=True to override this exception." ) # For future reference, this can be used to predict "almost exactly" # how much *additional* memory will be used. # size * (array + kernel + kernelfft + arrayfft + # (kernel*array)fft + # optional(weight image + weight_fft + weight_ifft) + # optional(returned_fft)) # total_memory_used_GB = (np.prod(newshape)*np.dtype(complex_dtype).itemsize # * (5 + 3*((interpolate_nan or ) and kernel_is_normalized)) # + (1 + (not return_fft)) * # np.prod(arrayshape)*np.dtype(complex_dtype).itemsize # + np.prod(arrayshape)*np.dtype(bool).itemsize # + np.prod(kernshape)*np.dtype(bool).itemsize) # ) / 1024.**3 # separate each dimension by the padding size... this is to determine the # appropriate slice size to get back to the input dimensions arrayslices = [] kernslices = [] for newdimsize, arraydimsize, kerndimsize in zip(newshape, arrayshape, kernshape): center = newdimsize - (newdimsize + 1) // 2 arrayslices += [ slice(center - arraydimsize // 2, center + (arraydimsize + 1) // 2) ] kernslices += [ slice(center - kerndimsize // 2, center + (kerndimsize + 1) // 2) ] arrayslices = tuple(arrayslices) kernslices = tuple(kernslices) if not np.all(newshape == arrayshape): if np.isfinite(fill_value): bigarray = np.ones(newshape, dtype=complex_dtype) * fill_value else: bigarray = np.zeros(newshape, dtype=complex_dtype) bigarray[arrayslices] = array else: bigarray = array if not np.all(newshape == kernshape): bigkernel = np.zeros(newshape, dtype=complex_dtype) bigkernel[kernslices] = normalized_kernel else: bigkernel = normalized_kernel arrayfft = fftn(bigarray) # need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity kernfft = fftn(np.fft.ifftshift(bigkernel)) fftmult = arrayfft * kernfft interpolate_nan = nan_treatment == "interpolate" if interpolate_nan: if not np.isfinite(fill_value): bigimwt = np.zeros(newshape, dtype=complex_dtype) else: bigimwt = np.ones(newshape, dtype=complex_dtype) bigimwt[arrayslices] = 1.0 - nanmaskarray * interpolate_nan wtfft = fftn(bigimwt) # You can only get to this point if kernel_is_normalized wtfftmult = wtfft * kernfft wtsm = ifftn(wtfftmult) # need to re-zero weights outside of the image (if it is padded, we # still don't weight those regions) bigimwt[arrayslices] = wtsm.real[arrayslices] else: bigimwt = 1 if np.isnan(fftmult).any(): # this check should be unnecessary; call it an insanity check raise ValueError("Encountered NaNs in convolve. 
This is disallowed.") fftmult *= kernel_scale if array_unit is not None: fftmult <<= array_unit if return_fft: return fftmult if interpolate_nan: with np.errstate(divide="ignore", invalid="ignore"): # divide by zeros are expected here; if the weight is zero, we want # the output to be nan or inf rifft = (ifftn(fftmult)) / bigimwt if not np.isscalar(bigimwt): if min_wt > 0.0: rifft[bigimwt < min_wt] = np.nan else: # Set anything with no weight to zero (taking into account # slight offsets due to floating-point errors). rifft[bigimwt < 10 * np.finfo(bigimwt.dtype).eps] = 0.0 else: rifft = ifftn(fftmult) if preserve_nan: rifft[arrayslices][nanmaskarray] = np.nan return rifft[arrayslices].real if crop else rifft.real
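A minimal usage sketch of the boundary handling above (my own example, not from the source): with ``boundary='wrap'`` the padding flags stay off and the convolution becomes circular. The expected value quoted in the comments is an assumption based on the normalized box kernel.

import numpy as np

# Sketch: assumes convolve_fft as defined above is in scope
# (e.g. from astropy.convolution import convolve_fft).
x = [1.0, 0.0, 3.0]
k = [1.0, 1.0, 1.0]

# Circular convolution: psf_pad/fft_pad are forced off, no zero padding is added.
wrapped = convolve_fft(x, k, boundary='wrap')
# With the default normalize_kernel=True every output pixel sees the whole
# (wrapped) signal, so each value should be (1 + 0 + 3) / 3 ≈ 1.333.

# Passing psf_pad=True together with boundary='wrap' raises a ValueError,
# as enforced in the boundary handling above.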
Given a data set containing NaNs, replace the NaNs by interpolating from neighboring data points with a given kernel. Parameters ---------- array : `numpy.ndarray` Array to be convolved with ``kernel``. It can be of any dimensionality, though only 1, 2, and 3d arrays have been tested. kernel : `numpy.ndarray` or `astropy.convolution.Kernel` The convolution kernel. The number of dimensions should match those for the array. The dimensions *do not* have to be odd in all directions, unlike in the non-fft `convolve` function. The kernel will be normalized if ``normalize_kernel`` is set. It is assumed to be centered (i.e., shifts may result if your kernel is asymmetric). The kernel *must be normalizable* (i.e., its sum cannot be zero). convolve : `convolve` or `convolve_fft` One of the two convolution functions defined in this package. Returns ------- newarray : `numpy.ndarray` A copy of the original array with NaN pixels replaced with their interpolated counterparts
def interpolate_replace_nans(array, kernel, convolve=convolve, **kwargs): """ Given a data set containing NaNs, replace the NaNs by interpolating from neighboring data points with a given kernel. Parameters ---------- array : `numpy.ndarray` Array to be convolved with ``kernel``. It can be of any dimensionality, though only 1, 2, and 3d arrays have been tested. kernel : `numpy.ndarray` or `astropy.convolution.Kernel` The convolution kernel. The number of dimensions should match those for the array. The dimensions *do not* have to be odd in all directions, unlike in the non-fft `convolve` function. The kernel will be normalized if ``normalize_kernel`` is set. It is assumed to be centered (i.e., shifts may result if your kernel is asymmetric). The kernel *must be normalizable* (i.e., its sum cannot be zero). convolve : `convolve` or `convolve_fft` One of the two convolution functions defined in this package. Returns ------- newarray : `numpy.ndarray` A copy of the original array with NaN pixels replaced with their interpolated counterparts """ if not np.any(np.isnan(array)): return array.copy() newarray = array.copy() convolved = convolve( array, kernel, nan_treatment="interpolate", normalize_kernel=True, preserve_nan=False, **kwargs, ) isnan = np.isnan(array) newarray[isnan] = convolved[isnan] return newarray
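For illustration, a small sketch of the NaN-filling behaviour (my own example): only the NaN pixels are replaced; every finite pixel is returned unchanged.

import numpy as np

# Sketch: fill a single NaN with the kernel-weighted average of its neighbours.
data = np.array([1.0, np.nan, 3.0])
kernel = np.array([1.0, 1.0, 1.0])  # simple box kernel, odd-sized as required by convolve

filled = interpolate_replace_nans(data, kernel)
# The NaN is replaced by (1 + 3) / 2 = 2.0; the finite pixels keep their
# original values, so filled should be [1., 2., 3.].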
Convolve two models using `~astropy.convolution.convolve_fft` or
`~astropy.convolution.convolve`.

Parameters
----------
model : `~astropy.modeling.core.Model`
    Functional model
kernel : `~astropy.modeling.core.Model`
    Convolution kernel
mode : str
    Keyword representing which function to use for convolution.
        * 'convolve_fft' : use `~astropy.convolution.convolve_fft` function.
        * 'convolve' : use `~astropy.convolution.convolve`.
**kwargs : dict
    Keyword arguments to be passed either to `~astropy.convolution.convolve`
    or `~astropy.convolution.convolve_fft` depending on ``mode``.

Returns
-------
default : `~astropy.modeling.core.CompoundModel`
    Convolved model
def convolve_models(model, kernel, mode="convolve_fft", **kwargs):
    """
    Convolve two models using `~astropy.convolution.convolve_fft` or
    `~astropy.convolution.convolve`.

    Parameters
    ----------
    model : `~astropy.modeling.core.Model`
        Functional model
    kernel : `~astropy.modeling.core.Model`
        Convolution kernel
    mode : str
        Keyword representing which function to use for convolution.
            * 'convolve_fft' : use `~astropy.convolution.convolve_fft` function.
            * 'convolve' : use `~astropy.convolution.convolve`.
    **kwargs : dict
        Keyword arguments to be passed either to `~astropy.convolution.convolve`
        or `~astropy.convolution.convolve_fft` depending on ``mode``.

    Returns
    -------
    default : `~astropy.modeling.core.CompoundModel`
        Convolved model
    """
    if mode == "convolve_fft":
        operator = SPECIAL_OPERATORS.add(
            "convolve_fft", partial(convolve_fft, **kwargs)
        )
    elif mode == "convolve":
        operator = SPECIAL_OPERATORS.add("convolve", partial(convolve, **kwargs))
    else:
        raise ValueError(f"Mode {mode} is not supported.")

    return CompoundModel(operator, model, kernel)
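A short sketch of how the returned compound model can be used (assumed usage, following the usual astropy pattern of sampling both models on the same grid): evaluating the compound model applies the chosen convolution to the two models' sampled values.

import numpy as np
from astropy.modeling.models import Gaussian1D

# Sketch: assumes convolve_models as defined above is in scope.
g_model = Gaussian1D(amplitude=1.0, mean=0.0, stddev=1.0)
g_kernel = Gaussian1D(amplitude=1.0, mean=0.0, stddev=1.0)

compound = convolve_models(g_model, g_kernel, mode='convolve_fft')

# Evaluating the compound model samples both inputs on x and convolves them,
# so the grid should be fine and wide enough to cover both models' support.
x = np.linspace(-5.0, 5.0, 201)
y = compound(x)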
Convolve two models using `~astropy.convolution.convolve_fft`.

Parameters
----------
model : `~astropy.modeling.core.Model`
    Functional model
kernel : `~astropy.modeling.core.Model`
    Convolution kernel
bounding_box : tuple
    The bounding box which encompasses enough of the support of both
    the ``model`` and ``kernel`` so that an accurate convolution can be
    computed.
resolution : float
    The grid resolution at which to approximate the convolution integral.
cache : bool, optional
    Default value True. Allow for the storage of the convolution
    computation for later reuse.
**kwargs : dict
    Keyword arguments to be passed to `~astropy.convolution.convolve_fft`.

Returns
-------
default : `~astropy.modeling.core.CompoundModel`
    Convolved model
def convolve_models_fft(model, kernel, bounding_box, resolution, cache=True, **kwargs):
    """
    Convolve two models using `~astropy.convolution.convolve_fft`.

    Parameters
    ----------
    model : `~astropy.modeling.core.Model`
        Functional model
    kernel : `~astropy.modeling.core.Model`
        Convolution kernel
    bounding_box : tuple
        The bounding box which encompasses enough of the support of both
        the ``model`` and ``kernel`` so that an accurate convolution can be
        computed.
    resolution : float
        The grid resolution at which to approximate the convolution integral.
    cache : bool, optional
        Default value True. Allow for the storage of the convolution
        computation for later reuse.
    **kwargs : dict
        Keyword arguments to be passed to `~astropy.convolution.convolve_fft`.

    Returns
    -------
    default : `~astropy.modeling.core.CompoundModel`
        Convolved model
    """
    operator = SPECIAL_OPERATORS.add("convolve_fft", partial(convolve_fft, **kwargs))

    return Convolution(operator, model, kernel, bounding_box, resolution, cache)
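A hedged sketch of the FFT-based model convolution (my own values for the bounding box and resolution): the returned model approximates the convolution integral on a grid defined by ``bounding_box`` and ``resolution`` and can then be called like any other model.

from astropy.modeling.models import Gaussian1D

# Sketch: assumes convolve_models_fft as defined above is in scope.
model = Gaussian1D(amplitude=1.0, mean=0.0, stddev=1.0)
kernel = Gaussian1D(amplitude=1.0, mean=0.0, stddev=1.0)

# The bounding box should cover the support of both model and kernel;
# resolution sets the grid spacing used to approximate the integral.
convolved = convolve_models_fft(model, kernel, bounding_box=(-5.0, 5.0), resolution=0.01)

value_at_zero = convolved(0.0)  # evaluate the convolved model at x = 0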
Add, subtract or multiply two kernels. Parameters ---------- kernel : `astropy.convolution.Kernel` Kernel instance. value : `astropy.convolution.Kernel`, float, or int Value to operate with. operation : {'add', 'sub', 'mul'} One of the following operations: * 'add' Add two kernels * 'sub' Subtract two kernels * 'mul' Multiply kernel with number or convolve two kernels.
def kernel_arithmetics(kernel, value, operation): """ Add, subtract or multiply two kernels. Parameters ---------- kernel : `astropy.convolution.Kernel` Kernel instance. value : `astropy.convolution.Kernel`, float, or int Value to operate with. operation : {'add', 'sub', 'mul'} One of the following operations: * 'add' Add two kernels * 'sub' Subtract two kernels * 'mul' Multiply kernel with number or convolve two kernels. """ # 1D kernels if isinstance(kernel, Kernel1D) and isinstance(value, Kernel1D): if operation == "add": new_array = add_kernel_arrays_1D(kernel.array, value.array) elif operation == "sub": new_array = add_kernel_arrays_1D(kernel.array, -value.array) elif operation == "mul": raise KernelArithmeticError( "Kernel operation not supported. Maybe you want " "to use convolve(kernel1, kernel2) instead." ) new_kernel = Kernel1D(array=new_array) new_kernel._separable = kernel._separable and value._separable new_kernel._is_bool = kernel._is_bool or value._is_bool # 2D kernels elif isinstance(kernel, Kernel2D) and isinstance(value, Kernel2D): if operation == "add": new_array = add_kernel_arrays_2D(kernel.array, value.array) elif operation == "sub": new_array = add_kernel_arrays_2D(kernel.array, -value.array) elif operation == "mul": raise KernelArithmeticError( "Kernel operation not supported. Maybe you want " "to use convolve(kernel1, kernel2) instead." ) new_kernel = Kernel2D(array=new_array) new_kernel._separable = kernel._separable and value._separable new_kernel._is_bool = kernel._is_bool or value._is_bool # kernel and number elif isinstance(kernel, (Kernel1D, Kernel2D)) and np.isscalar(value): if operation != "mul": raise KernelArithmeticError("Kernel operation not supported.") new_kernel = copy.copy(kernel) new_kernel._array *= value else: raise KernelArithmeticError("Kernel operation not supported.") return new_kernel
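A small sketch of the branches above (illustrative only; in practice this function is reached through the kernel operators ``+``, ``-`` and ``*``): two kernels of different sizes can be added, and a kernel can be scaled by a number, but multiplying two kernels is rejected in favour of ``convolve``.

from astropy.convolution import Gaussian1DKernel

wide = Gaussian1DKernel(3)    # larger array
narrow = Gaussian1DKernel(1)  # smaller array

summed = kernel_arithmetics(wide, narrow, 'add')  # centres aligned, arrays added
scaled = kernel_arithmetics(wide, 2.0, 'mul')     # kernel scaled by a number

# kernel_arithmetics(wide, narrow, 'mul') raises KernelArithmeticError:
# multiplying two kernels should be done with convolve(kernel1, kernel2).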
Add two 1D kernel arrays of different size. The arrays are added with the centers lying upon each other.
def add_kernel_arrays_1D(array_1, array_2): """ Add two 1D kernel arrays of different size. The arrays are added with the centers lying upon each other. """ if array_1.size > array_2.size: new_array = array_1.copy() center = array_1.size // 2 slice_ = slice(center - array_2.size // 2, center + array_2.size // 2 + 1) new_array[slice_] += array_2 return new_array if array_2.size > array_1.size: new_array = array_2.copy() center = array_2.size // 2 slice_ = slice(center - array_1.size // 2, center + array_1.size // 2 + 1) new_array[slice_] += array_1 return new_array return array_2 + array_1
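A tiny worked example (mine, not from the source); the centre-aligned slicing assumes the usual odd-sized kernel arrays.

import numpy as np

a = np.array([0.25, 0.5, 0.25])  # size 3
b = np.array([1.0])              # size 1

add_kernel_arrays_1D(a, b)
# the centre of a is index 1, so only that element receives b:
# -> array([0.25, 1.5, 0.25])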
Add two 2D kernel arrays of different size. The arrays are added with the centers lying upon each other.
def add_kernel_arrays_2D(array_1, array_2): """ Add two 2D kernel arrays of different size. The arrays are added with the centers lying upon each other. """ if array_1.size > array_2.size: new_array = array_1.copy() center = [axes_size // 2 for axes_size in array_1.shape] slice_x = slice( center[1] - array_2.shape[1] // 2, center[1] + array_2.shape[1] // 2 + 1 ) slice_y = slice( center[0] - array_2.shape[0] // 2, center[0] + array_2.shape[0] // 2 + 1 ) new_array[slice_y, slice_x] += array_2 return new_array if array_2.size > array_1.size: new_array = array_2.copy() center = [axes_size // 2 for axes_size in array_2.shape] slice_x = slice( center[1] - array_1.shape[1] // 2, center[1] + array_1.shape[1] // 2 + 1 ) slice_y = slice( center[0] - array_1.shape[0] // 2, center[0] + array_1.shape[0] // 2 + 1 ) new_array[slice_y, slice_x] += array_1 return new_array return array_2 + array_1
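The 2D version behaves the same way, as in this illustrative example (odd-sized arrays assumed):

import numpy as np

big = np.zeros((5, 5))
big[2, 2] = 1.0
small = np.full((3, 3), 0.1)

add_kernel_arrays_2D(big, small)
# small is added onto the central 3x3 block of big:
# the centre becomes 1.1 and its 8 neighbours become 0.1.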
Evaluate an analytical model function on a pixel grid. Parameters ---------- model : `~astropy.modeling.Model` or callable. Analytical model function to be discretized. A callable that is not a `~astropy.modeling.Model` instance is converted to a model using `~astropy.modeling.custom_model`. x_range : 2-tuple Lower and upper bounds of x pixel values at which the model is evaluated. The upper bound is non-inclusive. A ``x_range`` of ``(0, 3)`` means the model will be evaluated at x pixels 0, 1, and 2. The difference between the upper and lower bound must be a whole number so that the output array size is well defined. y_range : 2-tuple or `None`, optional Lower and upper bounds of y pixel values at which the model is evaluated. The upper bound is non-inclusive. A ``y_range`` of ``(0, 3)`` means the model will be evaluated at y pixels of 0, 1, and 2. The difference between the upper and lower bound must be a whole number so that the output array size is well defined. ``y_range`` is necessary only for 2D models. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following modes: * ``'center'`` (default) Discretize model by taking the value at the center of the pixel bins. * ``'linear_interp'`` Discretize model by linearly interpolating between the values at the edges (1D) or corners (2D) of the pixel bins. For 2D models, the interpolation is bilinear. * ``'oversample'`` Discretize model by taking the average of model values in the pixel bins on an oversampled grid. Use the ``factor`` keyword to set the integer oversampling factor. * ``'integrate'`` Discretize model by integrating the model over the pixel bins using `scipy.integrate.quad`. This mode conserves the model integral on a subpixel scale, but is very slow. factor : int, optional The integer oversampling factor used when ``mode='oversample'``. Ignored otherwise. Returns ------- array : `numpy.ndarray` The discretized model array. Examples -------- In this example, we define a `~astropy.modeling.functional_models.Gaussian1D` model that has been normalized so that it sums to 1.0. We then discretize this model using the ``'center'``, ``'linear_interp'``, and ``'oversample'`` (with ``factor=10``) modes. .. plot:: :show-source-link: import matplotlib.pyplot as plt import numpy as np from astropy.convolution.utils import discretize_model from astropy.modeling.models import Gaussian1D gauss_1D = Gaussian1D(1 / (0.5 * np.sqrt(2 * np.pi)), 0, 0.5) x_range = (-2, 3) x = np.arange(*x_range) y_center = discretize_model(gauss_1D, x_range, mode='center') y_edge = discretize_model(gauss_1D, x_range, mode='linear_interp') y_oversample = discretize_model(gauss_1D, x_range, mode='oversample') fig, ax = plt.subplots(figsize=(8, 6)) label = f'center (sum={y_center.sum():.3f})' ax.plot(x, y_center, '.-', label=label) label = f'linear_interp (sum={y_edge.sum():.3f})' ax.plot(x, y_edge, '.-', label=label) label = f'oversample (sum={y_oversample.sum():.3f})' ax.plot(x, y_oversample, '.-', label=label) ax.set_xlabel('x') ax.set_ylabel('Value') plt.legend()
def discretize_model(model, x_range, y_range=None, mode="center", factor=10): """ Evaluate an analytical model function on a pixel grid. Parameters ---------- model : `~astropy.modeling.Model` or callable. Analytical model function to be discretized. A callable that is not a `~astropy.modeling.Model` instance is converted to a model using `~astropy.modeling.custom_model`. x_range : 2-tuple Lower and upper bounds of x pixel values at which the model is evaluated. The upper bound is non-inclusive. A ``x_range`` of ``(0, 3)`` means the model will be evaluated at x pixels 0, 1, and 2. The difference between the upper and lower bound must be a whole number so that the output array size is well defined. y_range : 2-tuple or `None`, optional Lower and upper bounds of y pixel values at which the model is evaluated. The upper bound is non-inclusive. A ``y_range`` of ``(0, 3)`` means the model will be evaluated at y pixels of 0, 1, and 2. The difference between the upper and lower bound must be a whole number so that the output array size is well defined. ``y_range`` is necessary only for 2D models. mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional One of the following modes: * ``'center'`` (default) Discretize model by taking the value at the center of the pixel bins. * ``'linear_interp'`` Discretize model by linearly interpolating between the values at the edges (1D) or corners (2D) of the pixel bins. For 2D models, the interpolation is bilinear. * ``'oversample'`` Discretize model by taking the average of model values in the pixel bins on an oversampled grid. Use the ``factor`` keyword to set the integer oversampling factor. * ``'integrate'`` Discretize model by integrating the model over the pixel bins using `scipy.integrate.quad`. This mode conserves the model integral on a subpixel scale, but is very slow. factor : int, optional The integer oversampling factor used when ``mode='oversample'``. Ignored otherwise. Returns ------- array : `numpy.ndarray` The discretized model array. Examples -------- In this example, we define a `~astropy.modeling.functional_models.Gaussian1D` model that has been normalized so that it sums to 1.0. We then discretize this model using the ``'center'``, ``'linear_interp'``, and ``'oversample'`` (with ``factor=10``) modes. .. 
plot:: :show-source-link: import matplotlib.pyplot as plt import numpy as np from astropy.convolution.utils import discretize_model from astropy.modeling.models import Gaussian1D gauss_1D = Gaussian1D(1 / (0.5 * np.sqrt(2 * np.pi)), 0, 0.5) x_range = (-2, 3) x = np.arange(*x_range) y_center = discretize_model(gauss_1D, x_range, mode='center') y_edge = discretize_model(gauss_1D, x_range, mode='linear_interp') y_oversample = discretize_model(gauss_1D, x_range, mode='oversample') fig, ax = plt.subplots(figsize=(8, 6)) label = f'center (sum={y_center.sum():.3f})' ax.plot(x, y_center, '.-', label=label) label = f'linear_interp (sum={y_edge.sum():.3f})' ax.plot(x, y_edge, '.-', label=label) label = f'oversample (sum={y_oversample.sum():.3f})' ax.plot(x, y_oversample, '.-', label=label) ax.set_xlabel('x') ax.set_ylabel('Value') plt.legend() """ if not callable(model): raise TypeError("Model must be callable.") if not isinstance(model, Model): model = custom_model(model)() ndim = model.n_inputs if ndim > 2: raise ValueError("discretize_model supports only 1D and 2D models.") dxrange = np.diff(x_range)[0] if dxrange != int(dxrange): raise ValueError( "The difference between the upper and lower limit of" " 'x_range' must be a whole number." ) if y_range: dyrange = np.diff(y_range)[0] if dyrange != int(dyrange): raise ValueError( "The difference between the upper and lower limit of" " 'y_range' must be a whole number." ) if factor != int(factor): raise ValueError("factor must have an integer value") factor = int(factor) if ndim == 2 and y_range is None: raise ValueError("y_range must be specified for a 2D model") if ndim == 1 and y_range is not None: raise ValueError("y_range should not be input for a 1D model") if mode == "center": if ndim == 1: return discretize_center_1D(model, x_range) if ndim == 2: return discretize_center_2D(model, x_range, y_range) elif mode == "linear_interp": if ndim == 1: return discretize_linear_1D(model, x_range) if ndim == 2: return discretize_bilinear_2D(model, x_range, y_range) elif mode == "oversample": if ndim == 1: return discretize_oversample_1D(model, x_range, factor) if ndim == 2: return discretize_oversample_2D(model, x_range, y_range, factor) elif mode == "integrate": if ndim == 1: return discretize_integrate_1D(model, x_range) if ndim == 2: return discretize_integrate_2D(model, x_range, y_range) else: raise ValueError("Invalid mode for discretize_model.")
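As a complement to the 1D plot example in the docstring, a hedged 2D sketch (my own values): discretizing a normalized 2D Gaussian with ``mode='oversample'`` should approximately conserve its integral.

import numpy as np
from astropy.modeling.models import Gaussian2D

# Normalized 2D Gaussian (integral ~ 1) with sigma = 0.5 in both directions.
sigma = 0.5
g2 = Gaussian2D(amplitude=1.0 / (2 * np.pi * sigma**2),
                x_mean=0.0, y_mean=0.0, x_stddev=sigma, y_stddev=sigma)

image = discretize_model(g2, x_range=(-3, 4), y_range=(-3, 4),
                         mode='oversample', factor=10)
image.sum()  # ≈ 1.0 for a model that is well sampled within the range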
Discretize model by taking the value at the center of the bin.
def discretize_center_1D(model, x_range): """ Discretize model by taking the value at the center of the bin. """ x = np.arange(*x_range) return model(x)
Discretize model by taking the value at the center of the pixel.
def discretize_center_2D(model, x_range, y_range): """ Discretize model by taking the value at the center of the pixel. """ x = np.arange(*x_range) y = np.arange(*y_range) x, y = np.meshgrid(x, y) return model(x, y)
Discretize model by performing a linear interpolation.
def discretize_linear_1D(model, x_range): """ Discretize model by performing a linear interpolation. """ # Evaluate model 0.5 pixel outside the boundaries x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) values_intermediate_grid = model(x) return 0.5 * (values_intermediate_grid[1:] + values_intermediate_grid[:-1])
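For instance (illustrative sketch), for a model that is itself linear the edge-averaging is exact:

import numpy as np

ramp = lambda x: 2.0 * x + 1.0  # any callable that broadcasts over arrays works here

discretize_linear_1D(ramp, (0, 4))
# evaluates at -0.5, 0.5, ..., 3.5 and averages adjacent edge values:
# -> array([1., 3., 5., 7.]), identical to the pixel-centre values for a linear model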
Discretize model by performing a bilinear interpolation.
def discretize_bilinear_2D(model, x_range, y_range): """ Discretize model by performing a bilinear interpolation. """ # Evaluate model 0.5 pixel outside the boundaries x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5) x, y = np.meshgrid(x, y) values_intermediate_grid = model(x, y) # Mean in y direction values = 0.5 * (values_intermediate_grid[1:, :] + values_intermediate_grid[:-1, :]) # Mean in x direction return 0.5 * (values[:, 1:] + values[:, :-1])
Discretize model by taking the average on an oversampled grid.
def discretize_oversample_1D(model, x_range, factor=10): """ Discretize model by taking the average on an oversampled grid. """ # Evaluate model on oversampled grid x = np.linspace( x_range[0] - 0.5 * (1 - 1 / factor), x_range[1] - 0.5 * (1 + 1 / factor), num=int((x_range[1] - x_range[0]) * factor), ) values = model(x) # Reshape and compute mean values = np.reshape(values, (x.size // factor, factor)) return values.mean(axis=1)
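A quick sketch of the oversampling average (my own example): each output value approximates the mean of the model over its pixel.

import numpy as np

quad = lambda x: x**2

discretize_oversample_1D(quad, (0, 3), factor=10)
# the first pixel spans [-0.5, 0.5]; its 10-sample average is ~0.0825,
# close to the exact bin average of 1/12 ≈ 0.0833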
Discretize model by taking the average on an oversampled grid.
def discretize_oversample_2D(model, x_range, y_range, factor=10): """ Discretize model by taking the average on an oversampled grid. """ # Evaluate model on oversampled grid x = np.linspace( x_range[0] - 0.5 * (1 - 1 / factor), x_range[1] - 0.5 * (1 + 1 / factor), num=int((x_range[1] - x_range[0]) * factor), ) y = np.linspace( y_range[0] - 0.5 * (1 - 1 / factor), y_range[1] - 0.5 * (1 + 1 / factor), num=int((y_range[1] - y_range[0]) * factor), ) x_grid, y_grid = np.meshgrid(x, y) values = model(x_grid, y_grid) # Reshape and compute mean shape = (y.size // factor, factor, x.size // factor, factor) values = np.reshape(values, shape) return values.mean(axis=3).mean(axis=1)
Discretize model by integrating numerically the model over the bin.
def discretize_integrate_1D(model, x_range): """ Discretize model by integrating numerically the model over the bin. """ from scipy.integrate import quad # Set up grid x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) values = np.array([]) # Integrate over all bins for i in range(x.size - 1): values = np.append(values, quad(model, x[i], x[i + 1])[0]) return values
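As an illustration (requires scipy; my own example), integrating a normalized PDF over all bins conserves its total, which is the point of the 'integrate' mode:

import numpy as np
from scipy.stats import norm

pdf = norm(loc=0.0, scale=0.5).pdf  # normalized 1D Gaussian

values = discretize_integrate_1D(pdf, (-5, 6))
values.sum()  # ≈ 1.0: the bin-by-bin integrals add up to the model integral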
Discretize model by integrating the model over the pixel.
def discretize_integrate_2D(model, x_range, y_range): """ Discretize model by integrating the model over the pixel. """ from scipy.integrate import dblquad # Set up grid x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5) y = np.arange(y_range[0] - 0.5, y_range[1] + 0.5) values = np.empty((y.size - 1, x.size - 1)) # Integrate over all pixels for i in range(x.size - 1): for j in range(y.size - 1): values[j, i] = dblquad( func=lambda y, x: model(x, y), a=x[i], b=x[i + 1], gfun=lambda x: y[j], hfun=lambda x: y[j + 1], )[0] return values