#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import numpy.testing as npt
from aepsych.config import Config
from aepsych.generators import ManualGenerator
class TestManualGenerator(unittest.TestCase):
def test_batchmanual(self):
points = np.random.rand(10, 3)
mod = ManualGenerator(
lb=[0, 0, 0], ub=[1, 1, 1], dim=3, points=points, shuffle=False
)
npt.assert_allclose(points, mod.points) # make sure they weren't shuffled
acq1 = mod.gen(num_points=2)
self.assertEqual(acq1.shape, (2, 3))
acq2 = mod.gen(num_points=3)
self.assertEqual(acq2.shape, (3, 3))
acq3 = mod.gen()
self.assertEqual(acq3.shape, (1, 3))
with self.assertWarns(RuntimeWarning):
acq4 = mod.gen(num_points=10)
self.assertEqual(acq4.shape, (4, 3))
def test_manual_generator(self):
points = [[0, 0], [0, 1], [1, 0], [1, 1]]
config_str = f"""
[common]
lb = [0, 0]
ub = [1, 1]
parnames = [par1, par2]
[ManualGenerator]
points = {points}
"""
config = Config()
config.update(config_str=config_str)
gen = ManualGenerator.from_config(config)
npt.assert_equal(gen.lb.numpy(), np.array([0, 0]))
npt.assert_equal(gen.ub.numpy(), np.array([1, 1]))
self.assertFalse(gen.finished)
p1 = list(gen.gen()[0])
p2 = list(gen.gen()[0])
p3 = list(gen.gen()[0])
p4 = list(gen.gen()[0])
self.assertEqual(sorted([p1, p2, p3, p4]), points)
self.assertTrue(gen.finished)
if __name__ == "__main__":
unittest.main()
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# This source code is licensed under the MIT license found in the
# LICENSE file in the scripts directory.
# -- Path setup --------------------------------------------------------------
import os
import sys
# from pkg_resources import get_distribution
# sys.path.insert(0, os.path.abspath("../../"))
current_dir = os.path.dirname(__file__)
target_dir = os.path.abspath(os.path.join(current_dir, "../../"))
sys.path.insert(0, target_dir)
# base_path = os.path.abspath(os.path.join(__file__, "..", "..", "..", "aepsych"))
# print(sys.path, base_path, "======")
# sys.path.append(base_path)
# -- Project information -----------------------------------------------------
project = "AEPsych"
# copyright = "Meta, Inc."
author = "Meta, Inc."
# get version string
# version = get_distribution("aepsych").version
version = ""
release = ""
# -- General configuration ---------------------------------------------------
# Sphinx extension modules
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.doctest",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames
# source_suffix = [".rst", ".md"]
source_suffix = ".rst"
# The main toctree document.
index_doc = "index"
# The language for content autogenerated by Sphinx.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# Default options applied to all autodoc directives.
autodoc_default_options = {
"undoc-members": True,
"show-inheritance": True,
"member-order": "bysource",
}
# show type hints in the method description
autodoc_typehints = "description"
# Include init docstrings in the body of autoclass directives
autoclass_content = "both"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
html_static_path = [] # for now we have no static files to track
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "aepsychdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(index_doc, "aepsych.tex", "AEPsych Documentation", "Meta, Inc.", "manual")
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(index_doc, "aepsych", "aepsych Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
index_doc,
"aepsych",
"AEPsych Documentation",
author,
"AEPsych",
"AEPsych",
"Miscellaneous",
)
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
autodoc_mock_imports = ["botorch"]
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime
import numpy as np
constants = {
"savefolder": "./databases/",
"timestamp": datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
"config_path": "./aepsych_config.ini",
"seed": 1,
}
# base parameters in case we don't want AEPsych to manage all 8.
base_params = {
"spatial_frequency": 2,
"orientation": 0,
"pedestal": 0.5,
"contrast": 0.75,
"temporal_frequency": 0,
"size": 10,
"angle_dist": 0,
"eccentricity": 0,
}
psychopy_vars = {
"setSizePix": [1680, 1050],
"setWidth": 47.475,
"setDistance": 57,
"pre_duration_s": 0.0,
"stim_duration_s": 5.0,
"post_duration_s": 1,
"response_wait": 2,
"iti": 0,
}
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import experiment_config
import numpy as np
import torch
from aepsych_client import AEPsychClient
from helpers import HalfGrating
from psychopy import core, data, event, gui, monitors, visual
from psychopy.tools.filetools import toFile
def run_experiment():
seed = experiment_config.constants["seed"]
config_path = experiment_config.constants["config_path"]
torch.manual_seed(seed)
np.random.seed(seed)
expInfo = {"observer": "default_observer"}
expInfo["dateStr"] = data.getDateStr() # add the current time
# present a dialogue to change params
dlg = gui.DlgFromDict(expInfo, title="multi-D JND Exp", fixed=["dateStr"])
if dlg.OK:
toFile("lastParams.pickle", expInfo) # save params to file for next time
else:
core.quit() # the user hit cancel so exit
screen = monitors.Monitor("testMonitor", gamma=1)
screen.setSizePix(experiment_config.psychopy_vars["setSizePix"])
screen.setWidth(experiment_config.psychopy_vars["setWidth"])
screen.setDistance(experiment_config.psychopy_vars["setDistance"])
win = visual.Window(
allowGUI=True,
units="deg",
monitor=screen,
bpc=(8, 8, 8),
size=experiment_config.psychopy_vars["setSizePix"],
fullscr=False,
)
screen_text_g = visual.TextStim(win, text=None, alignHoriz="center", color="green")
screen_text_r = visual.TextStim(win, text=None, alignHoriz="center", color="red")
screen_text = visual.TextStim(win, text=None, alignHoriz="center", color="gray")
# display instructions and wait
message2 = visual.TextStim(
win,
pos=[0, +3],
text="Hit the space bar key when ready and "
"to advance to the next trial after you see a red cross.",
)
message1 = visual.TextStim(
win,
pos=[0, -3],
text="You'll see a stimulus. One side will have a grating and the other will be noise."
" "
"Press left or right corresponding to the side with noise. If you don't know, please guess.",
)
message1.draw()
message2.draw()
win.flip() # to show our newly drawn 'stimuli'
# pause until there's a keypress
event.waitKeys()
# start the trial: draw grating
clock = core.Clock()
screen_text_r.setText("+")
screen_text_r.draw(win=win)
win.flip()
aepsych_client = AEPsychClient()
aepsych_client.configure(config_path=config_path)
# create stimulus
stim = HalfGrating(**experiment_config.base_params, win=win)
i = 0
is_finished = False
while not is_finished:
ask_response = aepsych_client.ask()
trial_params = ask_response["config"]
is_finished = ask_response["is_finished"]
stim.update(trial_params)
print(trial_params)
bg_color = np.array([stim.pedestal_psychopy_scale] * 3)
win.setColor(bg_color)
win.color = bg_color
win.flip()
screen_text_r.setText("+")
screen_text_r.draw(win=win)
win.flip()
core.wait(experiment_config.psychopy_vars["iti"])
fixation_keys = []
while not fixation_keys:
fixation_keys = event.getKeys(keyList=["space"])
fixation_keys = ["space"] ## for debugging
if "space" in fixation_keys:
screen_text.setText("+")
screen_text.draw(win=win)
win.flip()
noisy_half = "left" if np.random.randint(2) == 0 else "right"
clock.reset()
keys = stim.draw(
noisy_half=noisy_half,
win=win,
pre_duration_s=experiment_config.psychopy_vars["pre_duration_s"],
stim_duration_s=experiment_config.psychopy_vars["stim_duration_s"],
)
# keys = event.waitKeys(keyList=["left", "right"]) # phil took out max wait
rt = clock.getTime()
response = noisy_half in keys
print(f"keys:{keys}, ca:{noisy_half}, acc:{response}, rt:{rt}")
win.flip()
if response:
screen_text_g.setText("Correct")
screen_text_g.draw()
win.flip()
else:
screen_text_r.setText("Incorrect")
screen_text_r.draw()
win.flip()
# inform bayesopt of the response, needed to calculate next contrast
aepsych_client.tell(config=trial_params, outcome=response, rt=rt)
# core.wait(experiment_config.psychopy_vars["post_duration_s"])
event.clearEvents()
print(f"trial {i}")
i = i + 1
win.close()
aepsych_client.finalize()
core.quit()
if __name__ == "__main__":
run_experiment()
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pyglet
from psychopy import core, event
from psychopy.visual import Window
from psychopy.visual.image import ImageStim
pyglet.options["debug_gl"] = False
GL = pyglet.gl
def polar_to_cartesian(r, theta):
z = r * np.exp(1j * np.radians(theta))
return z.real, z.imag
def cartesian_to_polar(x, y):
z = x + 1j * y
return (np.abs(z), np.angle(z, deg=True))
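# A minimal sanity check for the two helpers above (an added note, not part of the
# original module): they are inverses of one another up to floating-point error.
#
#     x, y = polar_to_cartesian(1.0, 90.0)   # ≈ (0.0, 1.0)
#     r, theta = cartesian_to_polar(x, y)    # ≈ (1.0, 90.0)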
class AnimatedGrating:
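# Values supplied by AEPsych trial configs for these two parameters are assumed to
# arrive in log10 units; update() maps them back to linear scale via 10 ** x.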
param_transforms = {"contrast": lambda x: 10 ** x, "pedestal": lambda x: 10 ** x}
def __init__(
self,
spatial_frequency: float,
orientation: float,
pedestal: float,
contrast: float,
temporal_frequency: float,
eccentricity: float,
size: float,
angle_dist: float,
win: Window,
cpd=60, # display cycles per degree
Lmin=0, # min luminance in nits
Lmax=255, # max luminance in nits
res=256, # texture resolution
noisy=False,
*args,
**kw,
):
"""Generate animated Gabor grating
Args:
spatial_frequency (float): Spatial frequency.
orientation (float): Orientation (degrees)
pedestal (float): Background luminance.
contrast (float): Stimulus contrast.
temporal_frequency (float): Temporal frequency (Hz).
eccentricity (float): Stimulus eccentricity relative to center (degrees).
size (float): Stimulus size.
angle_dist (float): Stimulus angle relative to center.
win (Window): Window to render to.
cpd (int, optional): Display cycles per degree. Defaults to 60.
"""
self.spatial_frequency = spatial_frequency
self.temporal_frequency = temporal_frequency
self.orientation = orientation
self.pedestal = pedestal
self.contrast = contrast
self.settable_params = (
"spatial_frequency",
"temporal_frequency",
"orientation",
"pedestal",
"contrast",
"size",
"eccentricity",
"angle_dist",
)
self.cpd = cpd
self.Lmin = Lmin
self.Lmax = Lmax
self.res = res
self.noisy = noisy
self.initial_phase = np.random.uniform(low=0, high=0.2, size=(1))
img = np.zeros((self.res, self.res))
self.win = win
self._stim = ImageStim(image=img, mask="gauss", win=win, *args, **kw)
# these get set on _stim
self.size = size
self.eccentricity = eccentricity
self.angle_dist = angle_dist
def update(self, trial_config):
for k, v in trial_config.items():
if k in self.settable_params:
if k in self.param_transforms:
setattr(self, k, self.param_transforms[k](v[0]))
else:
setattr(self, k, v[0])
@property
def size(self):
return self._stim.size
@size.setter
def size(self, x):
self._stim.size = x
@property
def eccentricity(self):
return cartesian_to_polar(*self._stim.pos)[0]
@eccentricity.setter
def eccentricity(self, x):
current_coords = cartesian_to_polar(*self._stim.pos)
self._stim.pos = polar_to_cartesian(x, current_coords[1])
@property
def angle_dist(self):
return cartesian_to_polar(*self._stim.pos)[1]
@angle_dist.setter
def angle_dist(self, deg):
current_coords = cartesian_to_polar(*self._stim.pos)
self._stim.pos = polar_to_cartesian(current_coords[0], deg + 90)
@property
def pedestal_psychopy_scale(self):
return self.pedestal * 2 - 1
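# For example, a pedestal of 0.5 (mid-gray on the [0, 1] luminance scale used
# here) maps to 0.0 on PsychoPy's [-1, 1] color scale.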
def draw(
self,
noisy=False,
win=None,
pre_duration_s=0.1,
stim_duration_s=5.0,
*args,
**kwargs,
):
win = win or self.win
clock = core.Clock()
clock.reset()
self._stim.image = self.get_texture(self.initial_phase, noisy=noisy)
while clock.getTime() < pre_duration_s:
win.flip()
start_time = clock.getTime()
while clock.getTime() < pre_duration_s + stim_duration_s:
if self.temporal_frequency > 0:
newphase = (clock.getTime() - start_time) * self.temporal_frequency
self._stim.image = self.get_texture(
newphase + self.initial_phase, noisy=noisy
)
self._stim.draw()
def get_texture(self, phase=0, noisy=False):
pedestal_lum = self.pedestal * (self.Lmax - self.Lmin) + self.Lmin
grating_max = (self.contrast * (2 * pedestal_lum + self.Lmin) + self.Lmin) / 2
x = np.arange(0, self.res) / self.cpd + phase
y = np.arange(0, self.res) / self.cpd + phase
x_grid, y_grid = np.meshgrid(x, y)
wave = x_grid * np.cos(np.radians(self.orientation)) + y_grid * np.sin(
np.radians(self.orientation)
)
scaled_imag_wave = 1j * 2 * np.pi * self.spatial_frequency * wave
img = grating_max * np.real(np.exp(scaled_imag_wave)) + pedestal_lum
# convert from luminance to values in [-1, 1] as psychopy wants
img = img / ((self.Lmax - self.Lmin) / 2) - 1
if noisy:
flatimg = img.flatten()
np.random.shuffle(flatimg)
img = flatimg.reshape(self.res, self.res)
return img
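# A rough visualization sketch (an assumption, not part of the original file):
# given an AnimatedGrating instance, the res x res array returned by get_texture()
# can be inspected directly with matplotlib. `some_psychopy_window` is a placeholder
# for a psychopy.visual.Window.
#
#     import matplotlib.pyplot as plt
#     grating = AnimatedGrating(spatial_frequency=2, orientation=0, pedestal=0.5,
#                               contrast=0.75, temporal_frequency=0, eccentricity=0,
#                               size=10, angle_dist=0, win=some_psychopy_window)
#     plt.imshow(grating.get_texture(), cmap="gray")
#     plt.show()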
class HalfGrating(AnimatedGrating):
"""Gabor animated grating, half of which is scrambled into white noise."""
def noisify_half_texture(self, img, noisy_half):
img = img.T # transpose so our indexing tricks work
flatimg = img.flatten()
if noisy_half == "left":
noisy = flatimg[: (self.res ** 2) // 2]
np.random.shuffle(noisy)
img = np.r_[noisy, flatimg[(self.res ** 2) // 2 :]].reshape(
self.res, self.res
)
else:
noisy = flatimg[(self.res ** 2) // 2 :]
np.random.shuffle(noisy)
img = np.r_[flatimg[: (self.res ** 2) // 2], noisy].reshape(
self.res, self.res
)
return img.T # untranspose
def get_texture(self, phase, noisy_half):
img = super().get_texture(phase, noisy=False)
img = self.noisify_half_texture(img, noisy_half)
return img
def draw(
self,
noisy_half="left",
win=None,
pre_duration_s=0.1,
stim_duration_s=5.0,
*args,
**kwargs,
):
win = win or self.win
clock = core.Clock()
clock.reset()
event.clearEvents()
self._stim.image = self.get_texture(self.initial_phase, noisy_half=noisy_half)
while clock.getTime() < pre_duration_s:
win.flip()
start_time = clock.getTime()
while True:
if self.temporal_frequency > 0:
newphase = (clock.getTime() - start_time) * self.temporal_frequency
self._stim.image = self.get_texture(
newphase + self.initial_phase, noisy_half=noisy_half
)
self._stim.draw()
keys = event.getKeys(keyList=["left", "right"])
win.flip()
if len(keys) > 0:
return keys
return keys
class ExperimentAborted(Exception):
pass
class QuitHelper:
"""Helper to quit the experiment by pressing a key twice within 500ms.
It quits by simply raising 'ExperimentAborted'. This is necessary because
from the separate thread that psychopy checks its global key events in, you
cannot raise an Exception in the main thread.
"""
def __init__(self):
self.quit_requested = False
self.debounce_timestamp = None
def request_quit(self):
"""Must be called twice in 500ms to set a flag that causes ExperimentAborted
to be raised when quit_if_requested is called. This indirection is needed if request_quit
is called from a separate thread (as with psychopy global event keys)
"""
tprev = self.debounce_timestamp
tnow = core.getTime()
if tprev is not None and tnow - tprev < 0.5:
self.quit_requested = True
self.debounce_timestamp = tnow
def quit_if_requested(self):
"""Raises ExperimentAborted if request_quit has been called twice in 500ms"""
if self.quit_requested:
raise ExperimentAborted
return True
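# A minimal usage sketch (an assumption, not part of the original file), wiring
# QuitHelper into PsychoPy's global event keys: request_quit is registered on a
# key (here "q") and quit_if_requested is polled from the main trial loop.
#
#     from psychopy import event
#
#     quit_helper = QuitHelper()
#     event.globalKeys.add(key="q", func=quit_helper.request_quit)
#     while True:  # main trial loop
#         quit_helper.quit_if_requested()  # raises ExperimentAborted after a double press
#         ...  # run one trial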
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# we have pretty verbose messaging by default, suppress that here
import logging
import warnings
warnings.filterwarnings("ignore")
logging.disable(logging.WARNING)  # disable anything at or below WARNING
import os
import time
from copy import copy
from itertools import product
from aepsych.benchmark import (
Problem,
LSEProblem,
BenchmarkLogger,
PathosBenchmark,
combine_benchmarks,
)
from aepsych.benchmark.test_functions import (
make_songetal_testfun,
novel_detection_testfun,
novel_discrimination_testfun,
)
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["MKL_THREADING_LAYER"] = "GNU"
nproc = 94
n_reps = 100
sobol_trials = 5
total_trials = 150
global_seed = 3
log_every = 5
# test functions and boundaries
novel_names = ["novel_detection", "novel_discrimination"]
novel_testfuns = [novel_detection_testfun, novel_discrimination_testfun]
novel_bounds = [{"lb": [-1, -1], "ub": [1, 1]}, {"lb": [-1, -1], "ub": [1, 1]}]
song_phenotypes = ["Metabolic", "Sensory", "Metabolic+Sensory", "Older-normal"]
song_betavals = [0.2, 0.5, 1, 2, 5, 10]
song_testfuns = [
make_songetal_testfun(p, b) for p, b in product(song_phenotypes, song_betavals)
]
song_bounds = [{"lb": [-3, -20], "ub": [4, 120]}] * len(song_testfuns)
song_names = [f"song_p{p}_b{b}" for p, b in product(song_phenotypes, song_betavals)]
all_testfuns = song_testfuns + novel_testfuns
all_bounds = song_bounds + novel_bounds
all_names = song_names + novel_names
combo_logger = BenchmarkLogger(log_every=log_every)
# benchmark configs; we have to subdivide into 5 configs because the
# Sobol, MCLSETS, and Song-vs-ours benchmarks are all set up differently
# Song benches
bench_config_nonsobol_song = {
"common": {"outcome_type": "single_probit", "target": 0.75},
"experiment": {
"acqf": [
"MCLevelSetEstimation",
"BernoulliMCMutualInformation",
"MCPosteriorVariance",
],
"modelbridge_cls": "SingleProbitModelbridgeWithSongHeuristic",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "GPClassificationModel",
"parnames": "[context,intensity]",
},
"MCLevelSetEstimation": {
"target": 0.75,
"beta": 3.84,
"objective": "ProbitObjective",
},
"GPClassificationModel": {
"inducing_size": 100,
"dim": 2,
"mean_covar_factory": [
"song_mean_covar_factory",
],
},
"SingleProbitModelbridgeWithSongHeuristic": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [total_trials - sobol_trials],
"refit_every": 1,
},
}
bench_config_sobol_song = {
"common": {"outcome_type": "single_probit", "target": 0.75},
"experiment": {
"acqf": "MCLevelSetEstimation",
"modelbridge_cls": "SingleProbitModelbridgeWithSongHeuristic",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "GPClassificationModel",
"parnames": "[context,intensity]",
},
"MCLevelSetEstimation": {
"target": 0.75,
"beta": 3.84,
"objective": "ProbitObjective",
},
"GPClassificationModel": {
"inducing_size": 100,
"dim": 2,
"mean_covar_factory": [
"song_mean_covar_factory",
],
},
"SingleProbitModelbridgeWithSongHeuristic": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": list(range(sobol_trials, total_trials - 1, log_every)),
},
"ModelWrapperStrategy": {
"n_trials": [1],
"refit_every": 1,
},
}
# non-Song benches
bench_config_sobol_rbf = {
"common": {"outcome_type": "single_probit", "target": 0.75},
"experiment": {
"acqf": "MonotonicMCLSE",
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicGPLSETS",
"parnames": "[context,intensity]",
},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicGPLSETS": {
"inducing_size": 100,
"mean_covar_factory": [
"monotonic_mean_covar_factory",
],
"monotonic_idxs": ["[1]", "[]"],
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": list(range(sobol_trials, total_trials - 1, log_every)),
},
"ModelWrapperStrategy": {
"n_trials": [1],
"refit_every": 1,
},
}
bench_config_all_but_gplsets_rbf = {
"common": {"outcome_type": "single_probit", "target": 0.75},
"experiment": {
"acqf": [
"MonotonicMCLSE",
"MonotonicBernoulliMCMutualInformation",
"MonotonicMCPosteriorVariance",
],
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicRejectionGP",
"parnames": "[context,intensity]",
},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicBernoulliMCMutualInformation": {},
"MonotonicMCPosteriorVariance": {},
"MonotonicRejectionGP": {
"inducing_size": 100,
"mean_covar_factory": [
"monotonic_mean_covar_factory",
],
"monotonic_idxs": ["[1]", "[]"],
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [total_trials - sobol_trials],
"refit_every": 1,
},
}
bench_config_gplsets_rbf = {
"common": {"outcome_type": "single_probit", "target": 0.75},
"experiment": {
"acqf": "MonotonicMCLSE",
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicGPLSETS",
"parnames": "[context,intensity]",
},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicGPLSETS": {
"inducing_size": 100,
"mean_covar_factory": [
"monotonic_mean_covar_factory",
],
"monotonic_idxs": ["[1]", "[]"],
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [total_trials - sobol_trials],
"refit_every": 1,
},
}
all_bench_configs = [
bench_config_sobol_song,
bench_config_nonsobol_song,
bench_config_sobol_rbf,
bench_config_all_but_gplsets_rbf,
bench_config_gplsets_rbf,
]
def make_problemobj(testfun, lb, ub):
# This constructs a Problem from a
# test function and bounds
class Inner(LSEProblem, Problem):
def f(self, x):
return testfun(x)
obj = Inner(lb=lb, ub=ub)
return obj
def make_bench(testfun, logger, name, configs, lb, ub):
# make a bench object from test function config
# and bench config
benches = []
problem = make_problemobj(testfun, lb, ub)
for config in configs:
full_config = copy(config)
full_config["common"]["lb"] = str(lb)
full_config["common"]["ub"] = str(ub)
full_config["common"]["name"] = name
benches.append(
PathosBenchmark(
nproc=nproc,
problem=problem,
logger=logger,
configs=full_config,
global_seed=global_seed,
n_reps=n_reps,
)
)
return combine_benchmarks(*benches)
def aggregate_bench_results(all_benchmarks):
combo_logger = BenchmarkLogger(log_every=log_every)
for bench in all_benchmarks:
combo_logger._log.extend(bench.logger._log)
out_pd = combo_logger.pandas()
return out_pd
if __name__ == "__main__":
# one benchmark per test function
print("Creating benchmark objects...")
all_benchmarks = [
make_bench(testfun, combo_logger, name, all_bench_configs, **bounds)
for (testfun, bounds, name) in zip(all_testfuns, all_bounds, all_names)
]
# start all the benchmarks
print("Starting benchmarks...")
for bench in all_benchmarks:
bench_name = bench.combinations[0]["common"]["name"]
print(f"starting {bench_name}...")
bench.start_benchmarks()
done = False
# checkpoint every minute in case something breaks
while not done:
time.sleep(60)
print("Checkpointing benches...")
done = True
for bench in all_benchmarks:
bench_name = bench.combinations[0]["common"]["name"]
bench.collate_benchmarks(wait=False)
if bench.is_done:
print(f"bench {bench_name} is done!")
else:
done = False
temp_results = aggregate_bench_results(all_benchmarks)
temp_results.to_csv(f"bench_checkpoint_seed{global_seed}.csv")
print("Done with all benchmarks, saving!")
final_results = aggregate_bench_results(all_benchmarks)
final_results.to_csv(f"bench_final_seed{global_seed}.csv")
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from copy import copy
import matplotlib.pyplot as plt
import numpy as np
import torch
from aepsych.benchmark import (
Problem,
LSEProblem,
BenchmarkLogger,
Benchmark,
combine_benchmarks,
)
from aepsych.benchmark.test_functions import (
make_songetal_testfun,
novel_detection_testfun,
novel_discrimination_testfun,
)
from aepsych.config import Config
from aepsych.plotting import plot_strat
from aepsych.strategy import SequentialStrategy
from scipy.stats import norm
global_seed = 3
refit_every = 1
figdir = "./figs/"
def plot_audiometric_lse_grids(
sobol_trials, opt_trials, phenotype="Metabolic+Sensory", beta=2
):
"""
Generates Fig. 8
"""
logger = BenchmarkLogger(log_every=5)
bench_rbf = {
"common": {"pairwise": False, "target": 0.75},
"experiment": {
"acqf": "MonotonicMCLSE",
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicRejectionGP",
"parnames": "[context,intensity]",
},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicRejectionGP": {
"inducing_size": 100,
"mean_covar_factory": [
"monotonic_mean_covar_factory",
],
"monotonic_idxs": ["[1]", "[]"],
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [opt_trials],
"refit_every": [refit_every],
},
}
bench_song = {
"common": {"pairwise": False, "target": 0.75},
"experiment": {
"acqf": "BernoulliMCMutualInformation",
"modelbridge_cls": "SingleProbitModelbridgeWithSongHeuristic",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "GPClassificationModel",
"parnames": "[context,intensity]",
},
"GPClassificationModel": {
"inducing_size": 100,
"dim": 2,
"mean_covar_factory": [
"song_mean_covar_factory",
],
},
"SingleProbitModelbridgeWithSongHeuristic": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [opt_trials],
"refit_every": [refit_every],
},
}
all_bench_configs = [bench_rbf, bench_song]
testfun = make_songetal_testfun(phenotype=phenotype, beta=beta)
class AudiometricProblem(LSEProblem, Problem):
def f(self, x):
return testfun(x)
lb = [-3, -20]
ub = [4, 120]
benches = []
problem = AudiometricProblem(lb, ub)
for config in all_bench_configs:
full_config = copy(config)
full_config["common"]["lb"] = str(lb)
full_config["common"]["ub"] = str(ub)
benches.append(
Benchmark(
problem=problem,
logger=logger,
configs=full_config,
global_seed=global_seed,
n_reps=1,
)
)
combo_bench = combine_benchmarks(*benches)
strats = []
for config in combo_bench.combinations:
strat = combo_bench.run_experiment(config, logger, seed=global_seed, rep=0)
strats.append(strat)
titles = [
"Monotonic RBF Model, LSE (ours)",
"Nonmonotonic RBF Model, LSE (ours)",
"Linear-Additive Model, BALD",
]
fig, axes = plt.subplots(2, 2, figsize=(7.5, 6.5))
plotting_axes = [axes[1, 0], axes[0, 1], axes[0, 0]]
fig.delaxes(axes[1, 1])
_ = [
plot_strat(
strat=strat_,
title=title_,
ax=ax_,
true_testfun=testfun,
xlabel="Frequency (kHz)",
ylabel="Intensity (dB HL)",
flipx=True,
logx=True,
show=False,
include_legend=False,
include_colorbar=False
)
for ax_, strat_, title_ in zip(plotting_axes, strats, titles)
]
fig.tight_layout()
handles, labels = axes[1, 0].get_legend_handles_labels()
fig.legend(handles, labels, loc="lower right", bbox_to_anchor=(0.8, 0.2))
cbr = fig.colorbar(axes[1, 0].images[0], ax=plotting_axes)
cbr.set_label("Probability of Detection")
return fig
def plot_novel_lse_grids(sobol_trials, opt_trials, funtype="detection"):
"""
Generates Fig. TBA
"""
logger = BenchmarkLogger(log_every=opt_trials) # we only care about final perf
bench_rbf = {
"common": {"pairwise": False, "target": 0.75},
"experiment": {
"acqf": "MonotonicMCLSE",
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicRejectionGP",
"parnames": "[context,intensity]",
},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicRejectionGP": {
"inducing_size": 100,
"mean_covar_factory": [
"monotonic_mean_covar_factory",
],
"monotonic_idxs": ["[1]", "[]"],
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [opt_trials],
"refit_every": [refit_every],
},
}
bench_song = {
"common": {"pairwise": False, "target": 0.75},
"experiment": {
"acqf": "BernoulliMCMutualInformation",
"modelbridge_cls": "SingleProbitModelbridgeWithSongHeuristic",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "GPClassificationModel",
"parnames": "[context,intensity]",
},
"GPClassificationModel": {
"inducing_size": 100,
"dim": 2,
"mean_covar_factory": [
"song_mean_covar_factory",
],
},
"SingleProbitModelbridgeWithSongHeuristic": {"restarts": 10, "samps": 1000},
"SobolStrategy": {
"n_trials": [sobol_trials],
},
"ModelWrapperStrategy": {
"n_trials": [opt_trials],
"refit_every": [refit_every],
},
}
all_bench_configs = [bench_rbf, bench_song]
if funtype == "detection":
testfun = novel_detection_testfun
yes_label = "Detected trial"
no_label = "Nondetected trial"
elif funtype == "discrimination":
testfun = novel_discrimination_testfun
yes_label = "Correct trial"
no_label = "Incorrect trial"
else:
raise RuntimeError("unknown testfun")
class NovelProblem(LSEProblem, Problem):
def f(self, x):
return testfun(x)
lb = [-1, -1]
ub = [1, 1]
benches = []
problem = NovelProblem(lb, ub, gridsize=50)
for config in all_bench_configs:
full_config = copy(config)
full_config["common"]["lb"] = str(lb)
full_config["common"]["ub"] = str(ub)
benches.append(
Benchmark(
problem=problem,
logger=logger,
configs=full_config,
global_seed=global_seed,
n_reps=1,
)
)
combo_bench = combine_benchmarks(*benches)
strats = []
for config in combo_bench.combinations:
strat = combo_bench.run_experiment(config, logger, seed=global_seed, rep=0)
strats.append(strat)
titles = [
"Monotonic RBF Model, LSE (ours)",
"Nonmonotonic RBF Model, LSE (ours)",
"Linear-Additive Model, BALD",
]
fig, axes = plt.subplots(2, 2, figsize=(7.5, 6.5))
plotting_axes = [axes[1, 0], axes[0, 1], axes[0, 0]]
fig.delaxes(axes[1, 1])
_ = [
plot_strat(
strat=strat_,
title=title_,
ax=ax_,
true_testfun=testfun,
yes_label=yes_label,
no_label=no_label,
show=False,
include_legend=False,
include_colorbar=False
)
for ax_, strat_, title_ in zip(plotting_axes, strats, titles)
]
fig.tight_layout()
handles, labels = axes[1, 0].get_legend_handles_labels()
fig.legend(handles, labels, loc="lower right", bbox_to_anchor=(0.8, 0.2))
cbr = fig.colorbar(axes[1, 0].images[0], ax=plotting_axes)
cbr.set_label("Probability of Detection")
return fig
def plot_acquisition_examples(sobol_trials, opt_trials, target_level=0.75):
### Same model, different acqf figure ####
configs = {
"common": {
"pairwise": False,
"target": target_level,
"lb": "[-3]",
"ub": "[3]",
},
"experiment": {
"acqf": [
"MonotonicMCPosteriorVariance",
"MonotonicBernoulliMCMutualInformation",
"MonotonicMCLSE",
],
"modelbridge_cls": "MonotonicSingleProbitModelbridge",
"init_strat_cls": "SobolStrategy",
"opt_strat_cls": "ModelWrapperStrategy",
"model": "MonotonicRejectionGP",
"parnames": "[intensity]",
},
"MonotonicMCLSE": {
"target": target_level,
"beta": 3.84,
},
"MonotonicRejectionGP": {
"inducing_size": 100,
"mean_covar_factory": "monotonic_mean_covar_factory",
"monotonic_idxs": "[0]",
"uniform_idxs": "[]",
},
"MonotonicSingleProbitModelbridge": {"restarts": 10, "samps": 1000},
"SobolStrategy": {"n_trials": sobol_trials},
"ModelWrapperStrategy": {
"n_trials": opt_trials,
"refit_every": refit_every,
},
}
def true_testfun(x):
return norm.cdf(3 * x)
class SimpleLinearProblem(Problem):
def f(self, x):
return norm.ppf(true_testfun(x))
lb = [-3]
ub = [3]
logger = BenchmarkLogger()
problem = SimpleLinearProblem(lb, ub)
bench = Benchmark(
problem=problem,
logger=logger,
configs=configs,
global_seed=global_seed,
n_reps=1,
)
# now run each strategy for just the initial Sobol trials, taking care to reseed each time
strats = []
for c in bench.combinations:
np.random.seed(global_seed)
torch.manual_seed(global_seed)
s = SequentialStrategy.from_config(Config(config_dict=c))
for _ in range(sobol_trials):
next_x = s.gen()
s.add_data(next_x, [problem.sample_y(next_x)])
strats.append(s)
# get first gen from all 3
first_gens = [s.gen() for s in strats]
fig, ax = plt.subplots(2, 2)
plot_strat(
strat=strats[0],
title=f"First active trial\n (after {sobol_trials} Sobol trials)",
ax=ax[0, 0],
true_testfun=true_testfun,
target_level=target_level,
show=False,
include_legend=False
)
samps = [
norm.cdf(s.sample(torch.Tensor(g), num_samples=10000))
for s, g in zip(strats, first_gens)
]
predictions = [np.mean(s) for s in samps]
names = ["First BALV sample", "First BALD sample", "First LSE sample"]
markers = ["s", "*", "^"]
for i in range(3):
ax[0, 0].scatter(
first_gens[i][0][0],
predictions[i],
label=names[i],
marker=markers[i],
color="black",
)
# now run them all for the full duration
for s in strats:
for _tr in range(opt_trials):
next_x = s.gen()
s.add_data(next_x, [problem.sample_y(next_x)])
plotting_axes = [ax[0, 1], ax[1, 0], ax[1, 1]]
titles = [
f"Monotonic RBF Model,\n BALV, after {sobol_trials+opt_trials} total trials",
f"Monotonic RBF Model,\n BALD, after {sobol_trials+opt_trials} total trials",
f"Monotonic RBF Model,\n LSE (ours) after {sobol_trials+opt_trials} total trials",
]
_ = [
plot_strat(
strat=s, title=t, ax=a, true_testfun=true_testfun, target_level=target_level, show=False, include_legend=False
)
for a, s, t in zip(plotting_axes, strats, titles)
]
fig.tight_layout()
handles, labels = ax[0, 0].get_legend_handles_labels()
lgd = fig.legend(handles, labels, loc="lower right", bbox_to_anchor=(1.5, 0.25))
# return legend so savefig works correctly
return fig, lgd
if __name__ == "__main__":
audio_lse_grids_fig = plot_audiometric_lse_grids(sobol_trials=5, opt_trials=45)
audio_lse_grids_fig.savefig(fname=figdir + "audio_lse_grids_fig.pdf", dpi=200)
novel_detection_lse_grids_fig = plot_novel_lse_grids(
sobol_trials=5, opt_trials=45, funtype="detection"
)
novel_detection_lse_grids_fig.savefig(
fname=figdir + "detection_lse_grids_fig.pdf", dpi=200
)
# this is extra hard, run more trials
novel_discrimination_lse_grids_fig = plot_novel_lse_grids(
sobol_trials=5, opt_trials=95, funtype="discrimination"
)
novel_discrimination_lse_grids_fig.savefig(
fname=figdir + "discrimination_lse_grids_fig.pdf", dpi=200
)
same_model_different_acq_fig, lgd = plot_acquisition_examples(
sobol_trials=5, opt_trials=15
)
same_model_different_acq_fig.savefig(
fname=figdir + "same_model_different_acq.pdf",
bbox_extra_artists=(lgd,),
bbox_inches="tight",
dpi=200,
)
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import gpytorch
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
from botorch.utils.sampling import draw_sobol_samples
from scipy.stats import norm
sns.set_theme()
from aepsych.config import Config
from aepsych.factory import (
default_mean_covar_factory,
song_mean_covar_factory,
monotonic_mean_covar_factory,
)
from aepsych.models import GPClassificationModel, MonotonicRejectionGP
from aepsych.models.monotonic_rejection_gp import MixedDerivativeVariationalGP
from aepsych.utils import _dim_grid
global_seed = 3
def plot_prior_samps_1d():
config = Config(
config_dict={
"common": {
"outcome_type": "single_probit",
"target": 0.75,
"lb": "[-3]",
"ub": "[3]",
},
"default_mean_covar_factory": {},
"song_mean_covar_factory": {},
"monotonic_mean_covar_factory": {"monotonic_idxs": "[0]"},
}
)
lb = torch.Tensor([-3])
ub = torch.Tensor([3])
nsamps = 10
gridsize = 50
grid = _dim_grid(lower=lb, upper=ub, dim=1, gridsize=gridsize)
np.random.seed(global_seed)
torch.random.manual_seed(global_seed)
with gpytorch.settings.prior_mode(True):
rbf_mean, rbf_covar = default_mean_covar_factory(config)
rbf_model = GPClassificationModel(
inducing_min=lb,
inducing_max=ub,
inducing_size=100,
mean_module=rbf_mean,
covar_module=rbf_covar,
)
# add just two samples at high and low
rbf_model.set_train_data(
torch.Tensor([-3, 3])[:, None], torch.LongTensor([0, 1])
)
rbf_samps = rbf_model(grid).sample(torch.Size([nsamps]))
song_mean, song_covar = song_mean_covar_factory(config)
song_model = GPClassificationModel(
inducing_min=lb,
inducing_max=ub,
inducing_size=100,
mean_module=song_mean,
covar_module=song_covar,
)
song_model.set_train_data(
torch.Tensor([-3, 3])[:, None], torch.LongTensor([0, 1])
)
song_samps = song_model(grid).sample(torch.Size([nsamps]))
mono_mean, mono_covar = monotonic_mean_covar_factory(config)
mono_model = MonotonicRejectionGP(
likelihood="probit-bernoulli",
monotonic_idxs=[0],
mean_module=mono_mean,
covar_module=mono_covar,
)
bounds_ = torch.tensor([-3.0, 3.0])[:, None]
# Select inducing points
mono_model.inducing_points = draw_sobol_samples(
bounds=bounds_, n=mono_model.num_induc, q=1
).squeeze(1)
inducing_points_aug = mono_model._augment_with_deriv_index(
mono_model.inducing_points, 0
)
scales = ub - lb
dummy_train_x = mono_model._augment_with_deriv_index(
torch.Tensor([-3, 3])[:, None], 0
)
mono_model.model = MixedDerivativeVariationalGP(
train_x=dummy_train_x,
train_y=torch.LongTensor([0, 1]),
inducing_points=inducing_points_aug,
scales=scales,
fixed_prior_mean=torch.Tensor([0.75]),
covar_module=mono_covar,
mean_module=mono_mean,
)
mono_samps = mono_model.sample(grid, nsamps)
fig, ax = plt.subplots(1, 3, figsize=(7.5, 3))
fig.tight_layout(rect=[0.01, 0.03, 1, 0.9])
fig.suptitle("GP prior samples (probit-transformed)")
ax[0].plot(grid.squeeze(), norm.cdf(song_samps.T), "b")
ax[0].set_ylabel("Response Probability")
ax[0].set_title("Linear kernel")
ax[1].plot(grid.squeeze(), norm.cdf(rbf_samps.T), "b")
ax[1].set_xlabel("Intensity")
ax[1].set_title("RBF kernel (nonmonotonic)")
ax[2].plot(grid.squeeze(), norm.cdf(mono_samps.T), "b")
ax[2].set_title("RBF kernel (monotonic)")
return fig
def plot_prior_samps_2d():
config = Config(
config_dict={
"common": {
"outcome_type": "single_probit",
"target": 0.75,
"lb": "[-3, -3]",
"ub": "[3, 3]",
},
"default_mean_covar_factory": {},
"song_mean_covar_factory": {},
"monotonic_mean_covar_factory": {"monotonic_idxs": "[1]"},
}
)
lb = torch.Tensor([-3, -3])
ub = torch.Tensor([3, 3])
nsamps = 5
gridsize = 30
grid = _dim_grid(lower=lb, upper=ub, dim=2, gridsize=gridsize)
np.random.seed(global_seed)
torch.random.manual_seed(global_seed)
with gpytorch.settings.prior_mode(True):
rbf_mean, rbf_covar = default_mean_covar_factory(config)
rbf_model = GPClassificationModel(
inducing_min=lb,
inducing_max=ub,
inducing_size=100,
mean_module=rbf_mean,
covar_module=rbf_covar,
)
# add just two samples at high and low
rbf_model.set_train_data(torch.Tensor([-3, -3])[:, None], torch.LongTensor([0]))
rbf_samps = rbf_model(grid).sample(torch.Size([nsamps]))
song_mean, song_covar = song_mean_covar_factory(config)
song_model = GPClassificationModel(
inducing_min=lb,
inducing_max=ub,
inducing_size=100,
mean_module=song_mean,
covar_module=song_covar,
)
song_model.set_train_data(
torch.Tensor([-3, -3])[:, None], torch.LongTensor([0])
)
song_samps = song_model(grid).sample(torch.Size([nsamps]))
mono_mean, mono_covar = monotonic_mean_covar_factory(config)
mono_model = MonotonicRejectionGP(
likelihood="probit-bernoulli",
monotonic_idxs=[1],
mean_module=mono_mean,
covar_module=mono_covar,
num_induc=1000,
)
bounds_ = torch.tensor([-3.0, -3.0, 3.0, 3.0]).reshape(2, -1)
# Select inducing points
mono_model.inducing_points = draw_sobol_samples(
bounds=bounds_, n=mono_model.num_induc, q=1
).squeeze(1)
inducing_points_aug = mono_model._augment_with_deriv_index(
mono_model.inducing_points, 0
)
scales = ub - lb
dummy_train_x = mono_model._augment_with_deriv_index(
torch.Tensor([-3, 3])[None, :], 0
)
mono_model.model = MixedDerivativeVariationalGP(
train_x=dummy_train_x,
train_y=torch.LongTensor([0]),
inducing_points=inducing_points_aug,
scales=scales,
fixed_prior_mean=torch.Tensor([0.75]),
covar_module=mono_covar,
mean_module=mono_mean,
)
mono_samps = mono_model.sample(grid, nsamps)
intensity_grid = np.linspace(-3, 3, gridsize)
fig, ax = plt.subplots(1, 3, figsize=(7.5, 3))
fig.tight_layout(rect=[0, 0.03, 1, 0.9])
fig.suptitle("Prior samples")
square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in song_samps])
plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
ax[0].plot(intensity_grid, plotsamps, "b")
ax[0].set_title("Linear kernel model")
square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in rbf_samps])
plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
ax[1].plot(intensity_grid, plotsamps, "b")
ax[1].set_title("Nonmonotonic RBF kernel model")
square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in mono_samps])
plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
ax[2].plot(intensity_grid, plotsamps, "b")
ax[2].set_title("Monotonic RBF kernel model")
return fig
if __name__ == "__main__":
prior_samps_1d = plot_prior_samps_1d()
prior_samps_1d.savefig("./figs/prior_samps.pdf", dpi=200)
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the scripts directory.
from __future__ import annotations
import argparse
import json
import os
import nbformat
from bs4 import BeautifulSoup
from nbconvert import HTMLExporter, PythonExporter
TEMPLATE = """const CWD = process.cwd();
const React = require('react');
const Tutorial = require(`${{CWD}}/core/Tutorial.js`);
class TutorialPage extends React.Component {{
render() {{
const {{config: siteConfig}} = this.props;
const {{baseUrl}} = siteConfig;
return <Tutorial baseUrl={{baseUrl}} tutorialID="{}"/>;
}}
}}
module.exports = TutorialPage;
"""
JS_SCRIPTS = """
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js"></script>
""" # noqa: E501
def validate_tutorial_links(repo_dir: str) -> None:
"""Checks that all .ipynb files that present are linked on the website, and vice
versa, that any linked tutorial has an associated .ipynb file present.
"""
with open(os.path.join(repo_dir, "website", "tutorials.json"), "r") as infile:
tutorial_config = json.load(infile)
tutorial_ids = {x["id"] for v in tutorial_config.values() for x in v}
tutorials_nbs = {
fn.replace(".ipynb", "")
for fn in os.listdir(os.path.join(repo_dir, "tutorials"))
if fn[-6:] == ".ipynb"
}
missing_files = tutorial_ids - tutorials_nbs
missing_ids = tutorials_nbs - tutorial_ids
if missing_files:
raise RuntimeError(
"The following tutorials are linked on the website, but missing an "
f"associated .ipynb file: {missing_files}."
)
if missing_ids:
print(
    "\033[93m" + "Warning: " + "\033[0m"
    + "The following tutorial files are present, but are not linked on the "
    "website: {}.".format(", ".join([nbid + ".ipynb" for nbid in missing_ids]))
)
# raise RuntimeError(
# "The following tutorial files are present, but are not linked on the "
# "website: {}.".format(", ".join([nbid + ".ipynb" for nbid in missing_ids]))
# )
def gen_tutorials(repo_dir: str) -> None:
"""Generate HTML tutorials for AEPsych Docusaurus site from Jupyter notebooks.
Also create ipynb and py versions of tutorial in Docusaurus site for
download.
"""
with open(os.path.join(repo_dir, "website", "tutorials.json"), "r") as infile:
tutorial_config = json.load(infile)
# create output directories if necessary
html_out_dir = os.path.join(repo_dir, "website", "_tutorials")
files_out_dir = os.path.join(repo_dir, "website", "static", "files")
for d in (html_out_dir, files_out_dir):
if not os.path.exists(d):
os.makedirs(d)
tutorial_ids = {x["id"] for v in tutorial_config.values() for x in v}
for tid in tutorial_ids:
print(f"Generating {tid} tutorial")
# convert notebook to HTML
ipynb_in_path = os.path.join(repo_dir, "tutorials", f"{tid}.ipynb")
with open(ipynb_in_path, "r") as infile:
nb_str = infile.read()
nb = nbformat.reads(nb_str, nbformat.NO_CONVERT)
# displayname is absent from notebook metadata
nb["metadata"]["kernelspec"]["display_name"] = "python3"
exporter = HTMLExporter(template_name="classic")
html, meta = exporter.from_notebook_node(nb)
# pull out html div for notebook
soup = BeautifulSoup(html, "html.parser")
nb_meat = soup.find("div", {"id": "notebook-container"})
del nb_meat.attrs["id"]
nb_meat.attrs["class"] = ["notebook"]
html_out = JS_SCRIPTS + str(nb_meat)
# generate html file
html_out_path = os.path.join(
html_out_dir,
f"{tid}.html",
)
with open(html_out_path, "w") as html_outfile:
html_outfile.write(html_out)
# generate JS file
script = TEMPLATE.format(tid)
js_out_path = os.path.join(
repo_dir, "website", "pages", "tutorials", f"{tid}.js"
)
with open(js_out_path, "w") as js_outfile:
js_outfile.write(script)
# output tutorial in both ipynb & py form
ipynb_out_path = os.path.join(files_out_dir, f"{tid}.ipynb")
with open(ipynb_out_path, "w") as ipynb_outfile:
ipynb_outfile.write(nb_str)
exporter = PythonExporter()
script, meta = exporter.from_notebook_node(nb)
# make sure to use python3 shebang
script = script.replace("#!/usr/bin/env python", "#!/usr/bin/env python3")
py_out_path = os.path.join(repo_dir, "website", "static", "files", f"{tid}.py")
with open(py_out_path, "w") as py_outfile:
py_outfile.write(script)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate JS, HTML, ipynb, and py files for tutorials."
)
parser.add_argument(
"-w",
"--repo_dir",
metavar="path",
required=True,
help="aepsych repo directory.",
)
args = parser.parse_args()
validate_tutorial_links(args.repo_dir)
gen_tutorials(args.repo_dir)
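# Example invocation (the script filename and path are assumptions; adjust to your checkout):
#     python scripts/parse_tutorials.py -w /path/to/aepsych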
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the scripts directory.
from __future__ import annotations
import argparse
import os
from bs4 import BeautifulSoup
# The base_url must match the baseUrl in /website/siteConfig.js.
# Note: if it is not updated, the API doc search bar will not be displayed.
# 1) update base_url below
base_url = "/"
js_scripts = """
<script type="text/javascript" id="documentation_options" data-url_root="./" src="{0}js/documentation_options.js"></script>
<script type="text/javascript" src="{0}js/jquery.js"></script>
<script type="text/javascript" src="{0}js/underscore.js"></script>
<script type="text/javascript" src="{0}js/doctools.js"></script>
<script type="text/javascript" src="{0}js/language_data.js"></script>
<script type="text/javascript" src="{0}js/searchtools.js"></script>
""".format(base_url) # noqa: E501
# 2) update the path passed to Search.loadIndex below to match baseUrl, e.g.
#    Search.loadIndex("/<<update to match baseUrl>>/js/searchindex.js")
search_js_scripts = """
<script type="text/javascript">
jQuery(function() { Search.loadIndex("/js/searchindex.js"); });
</script>
<script type="text/javascript" id="searchindexloader"></script>
"""
def parse_sphinx(input_dir, output_dir):
for cur, _, files in os.walk(input_dir):
for fname in files:
if fname.endswith(".html"):
with open(os.path.join(cur, fname), "r") as f:
soup = BeautifulSoup(f.read(), "html.parser")
doc = soup.find("div", {"class": "document"})
wrapped_doc = doc.wrap(soup.new_tag("div", **{"class": "sphinx"}))
# add js
if fname == "search.html":
out = js_scripts + search_js_scripts + str(wrapped_doc)
else:
out = js_scripts + str(wrapped_doc)
output_path = os.path.join(output_dir, os.path.relpath(cur, input_dir))
os.makedirs(output_path, exist_ok=True)
with open(os.path.join(output_path, fname), "w") as fout:
fout.write(out)
# update reference in JS file
with open(os.path.join(input_dir, "_static/searchtools.js"), "r") as js_file:
js = js_file.read()
js = js.replace(
"DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/'", "'_sphinx-sources/'"
)
with open(os.path.join(input_dir, "_static/searchtools.js"), "w") as js_file:
js_file.write(js)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Strip HTML body from Sphinx docs.")
parser.add_argument(
"-i",
"--input_dir",
metavar="path",
required=True,
help="Input directory for Sphinx HTML.",
)
parser.add_argument(
"-o",
"--output_dir",
metavar="path",
required=True,
help="Output directory in Docusaurus.",
)
args = parser.parse_args()
parse_sphinx(args.input_dir, args.output_dir)
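# Example invocation (script name and paths are assumptions; flags are -i/--input_dir
# and -o/--output_dir as defined above):
#     python scripts/parse_sphinx.py -i sphinx/build/html -o website/pages/api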
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved.
from __future__ import annotations
import argparse
import json
import os
import shutil
import nbformat
from bs4 import BeautifulSoup
from nbconvert import HTMLExporter
TEMPLATE = """const CWD = process.cwd();
const React = require('react');
const Demo = require(`${{CWD}}/core/Demo.js`);
class DemoPage extends React.Component {{
render() {{
const {{config: siteConfig}} = this.props;
const {{baseUrl}} = siteConfig;
return <Demo baseUrl={{baseUrl}} demoID="{}" hasWinDemo="{}"
hasMacDemo="{}"/>;
}}
}}
module.exports = DemoPage;
"""
JS_SCRIPTS = """
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js"></script>
"""
def validate_demo_links(repo_dir: str) -> None:
"""Checks that all .zip files that present are linked on the website, and vice
versa, that any linked demos has an associated .zip file present.
"""
with open(os.path.join(repo_dir, "website", "demos.json")) as f:
demo_config = json.load(f)
demo_ids = {x["id"] for v in demo_config.values() for x in v}
demo_names = {
fn.replace(".zip", "")
for fn in os.listdir(os.path.join(repo_dir, "demos"))
if fn[-4:] == ".zip"
}
# Check if the ID is present in the set and if both "_Mac" and "_Win" endings exist
for id in demo_ids:
if f"{id}_Mac" in demo_names and f"{id}_Win" in demo_names:
print(f"Both '{id}_Mac' and {id}_Win' demos .zip files are present.")
elif f"{id}_Mac" in demo_names:
print(f"Only '{id}_Mac'.zip demo is present.")
elif f"{id}_Win" in demo_names:
    print(f"Only '{id}_Win'.zip demo is present.")
else:
    print(f"No .zip file is present for the '{id}' demo.")
def gen_demos(repo_dir: str) -> None:
"""Generate HTML demos for AEPsych Docusaurus site for download."""
with open(os.path.join(repo_dir, "website", "demos.json"), "r") as f:
demo_config = json.load(f)
# create output directories if necessary
html_out_dir = os.path.join(repo_dir, "website", "_demos")
files_out_dir = os.path.join(repo_dir, "website", "static", "files", "demos")
for d in (html_out_dir, files_out_dir):
if not os.path.exists(d):
os.makedirs(d)
demo_ids = {x["id"] for v in demo_config.values() for x in v}
for d_id in demo_ids:
print(f"Generating {d_id} demo")
# convert markdown to HTML
md_in_path = os.path.join(repo_dir, "demos", "markdown", f"{d_id}.md")
with open(md_in_path, "r") as infile:
markdown_content = infile.read()
notebook_node = nbformat.v4.new_notebook()
markdown_cell = nbformat.v4.new_markdown_cell(markdown_content)
notebook_node["cells"] = [markdown_cell]
exporter = HTMLExporter(template_name="classic")
html, meta = exporter.from_notebook_node(notebook_node)
# pull out html div for notebook
soup = BeautifulSoup(html, "html.parser")
nb_meat = soup.find("div", {"id": "notebook-container"})
del nb_meat.attrs["id"]
nb_meat.attrs["class"] = ["notebook"]
html_out = JS_SCRIPTS + str(nb_meat)
# generate html file
html_out_path = os.path.join(
html_out_dir,
f"{d_id}.html",
)
with open(html_out_path, "w") as html_outfile:
html_outfile.write(html_out)
# generate JS file
has_mac_demo = os.path.exists(os.path.join(repo_dir, "demos", f"{d_id}_Mac.zip"))
has_win_demo = os.path.exists(os.path.join(repo_dir, "demos", f"{d_id}_Win.zip"))
script = TEMPLATE.format(d_id,has_win_demo,has_mac_demo)
js_out_path = os.path.join(repo_dir, "website", "pages", "demos", f"{d_id}.js")
with open(js_out_path, "w") as js_outfile:
js_outfile.write(script)
# output demo in zip format
if has_mac_demo:
mac_source_path = os.path.join(repo_dir, "demos", f"{d_id}_Mac.zip")
mac_zip_out_path = os.path.join(files_out_dir, f"{d_id}_Mac.zip")
shutil.copy(mac_source_path, mac_zip_out_path)
if has_win_demo:
win_source_path = os.path.join(repo_dir, "demos", f"{d_id}_Win.zip")
win_zip_out_path = os.path.join(files_out_dir, f"{d_id}_Win.zip")
shutil.copy(win_source_path, win_zip_out_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate JS, HTML, and zip files for demos."
)
parser.add_argument(
"-w",
"--repo_dir",
metavar="path",
required=True,
help="aepsych repo directory.",
)
args = parser.parse_args()
validate_demo_links(args.repo_dir)
gen_demos(args.repo_dir)
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Evaluation script for object localization
import json
import argparse
import torch
import itertools
import numpy as np
from collections import defaultdict
from utils import bbox_overlaps_batch, get_frm_mask
from stanfordcorenlp import StanfordCoreNLP
from tqdm import tqdm
class ANetGrdEval(object):
def __init__(self, reference_file=None, submission_file=None,
split_file=None, val_split=None, iou_thresh=0.5, verbose=False):
if not reference_file:
raise IOError('Please input a valid reference file!')
if not submission_file:
raise IOError('Please input a valid submission file!')
self.iou_thresh = iou_thresh
self.verbose = verbose
self.val_split = val_split
self.import_ref(reference_file, split_file)
self.import_sub(submission_file)
def import_ref(self, reference_file=None, split_file=None):
with open(split_file) as f:
split_dict = json.load(f)
split = {}
for s in self.val_split:
split.update({i:i for i in split_dict[s]})
with open(reference_file) as f:
ref = json.load(f)['annotations']
ref = {k:v for k,v in ref.items() if k in split}
self.ref = ref
def import_sub(self, submission_file=None):
with open(submission_file) as f:
pred = json.load(f)['results']
self.pred = pred
def gt_grd_eval(self):
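        """Per-class localization accuracy on ground-truth sentences: an annotated
        object word counts as correctly grounded when the submitted box for that word
        overlaps a reference box with IoU above self.iou_thresh, and the returned
        accuracy averages the per-class accuracies over all object classes."""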
ref = self.ref
pred = self.pred
print('Number of videos in the reference: {}, number of videos in the submission: {}'.format(len(ref), len(pred)))
results = defaultdict(list)
for vid, anns in ref.items():
for seg, ann in anns['segments'].items():
if len(ann['frame_ind']) == 0:
continue # annotation not available
ref_bbox_all = torch.cat((torch.Tensor(ann['process_bnd_box']), \
torch.Tensor(ann['frame_ind']).unsqueeze(-1)), dim=1) # 5-D coordinates
sent_idx = set(itertools.chain.from_iterable(ann['process_idx'])) # index of word in sentence to evaluate
for idx in sent_idx:
sel_idx = [ind for ind, i in enumerate(ann['process_idx']) if idx in i]
ref_bbox = ref_bbox_all[sel_idx] # select matched boxes
                    # Note that, although discouraged, a single word can be annotated across multiple boxes/frames
assert(ref_bbox.size(0) > 0)
class_name = ann['process_clss'][sel_idx[0]][ann['process_idx'][sel_idx[0]].index(idx)]
if vid not in pred:
results[class_name].append(0) # video not grounded
elif seg not in pred[vid]:
results[class_name].append(0) # segment not grounded
elif idx not in pred[vid][seg]['idx_in_sent']:
results[class_name].append(0) # object not grounded
else:
pred_ind = pred[vid][seg]['idx_in_sent'].index(idx)
pred_bbox = torch.cat((torch.Tensor(pred[vid][seg]['bbox_for_all_frames'][pred_ind])[:,:4], \
torch.Tensor(range(10)).unsqueeze(-1)), dim=1)
frm_mask = torch.from_numpy(get_frm_mask(pred_bbox[:, 4].numpy(), \
ref_bbox[:, 4].numpy()).astype('uint8'))
overlap = bbox_overlaps_batch(pred_bbox[:, :5].unsqueeze(0), \
ref_bbox[:, :5].unsqueeze(0), frm_mask.unsqueeze(0))
results[class_name].append(1 if torch.max(overlap) > self.iou_thresh else 0)
print('Number of groundable objects in this split: {}'.format(len(results)))
grd_accu = np.mean([sum(hm)*1./len(hm) for i,hm in results.items()])
print('-' * 80)
print('The overall localization accuracy is {:.4f}'.format(grd_accu))
print('-' * 80)
if self.verbose:
print('Object frequency and grounding accuracy per class (descending by object frequency):')
accu_per_clss = {(i, sum(hm)*1./len(hm)):len(hm) for i,hm in results.items()}
accu_per_clss = sorted(accu_per_clss.items(), key=lambda x:x[1], reverse=True)
for accu in accu_per_clss:
print('{} ({}): {:.4f}'.format(accu[0][0], accu[1], accu[0][1]))
return grd_accu
def precision_recall_util(self, mode='all'):
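        """Collect per-class and per-sentence precision/recall hit lists for grd_eval.
        The precision pass iterates over predicted object words (in mode='all' a
        hallucinated object counts as a miss, while words whose lemma appears
        un-annotated in the sentence are skipped); the recall pass iterates over
        annotated object words. Returns (prec, recall, prec_per_sent,
        recall_per_sent, vocab_in_split)."""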
ref = self.ref
pred = self.pred
print('Number of videos in the reference: {}, number of videos in the submission: {}'.format(len(ref), len(pred)))
nlp = StanfordCoreNLP('tools/stanford-corenlp-full-2018-02-27')
props={'annotators': 'lemma','pipelineLanguage':'en', 'outputFormat':'json'}
vocab_in_split = set()
prec = defaultdict(list)
prec_per_sent = defaultdict(list)
for vid, anns in tqdm(ref.items()):
for seg, ann in anns['segments'].items():
if len(ann['frame_ind']) == 0 or vid not in pred or seg not in pred[vid]:
continue # do not penalize if sentence not annotated
prec_per_sent_tmp = [] # for each sentence
ref_bbox_all = torch.cat((torch.Tensor(ann['process_bnd_box']),
torch.Tensor(ann['frame_ind']).unsqueeze(-1)), dim=1) # 5-D coordinates
idx_in_sent = {}
for box_idx, cls_lst in enumerate(ann['process_clss']):
vocab_in_split.update(set(cls_lst))
for cls_idx, cls in enumerate(cls_lst):
idx_in_sent[cls] = idx_in_sent.get(cls, []) + [ann['process_idx'][box_idx][cls_idx]]
sent_idx = set(itertools.chain.from_iterable(ann['process_idx'])) # index of gt object words
                exclude_obj = {json.loads(nlp.annotate(token, properties=props))['sentences'][0]['tokens'][0]['lemma']: 1
                               for token_idx, token in enumerate(ann['tokens'])
                               if (token_idx not in sent_idx and token != '')}
for pred_idx, class_name in enumerate(pred[vid][seg]['clss']):
if class_name in idx_in_sent:
gt_idx = min(idx_in_sent[class_name]) # always consider the first match...
sel_idx = [idx for idx, i in enumerate(ann['process_idx']) if gt_idx in i]
ref_bbox = ref_bbox_all[sel_idx] # select matched boxes
assert (ref_bbox.size(0) > 0)
pred_bbox = torch.cat((torch.Tensor(pred[vid][seg]['bbox_for_all_frames'][pred_idx])[:, :4],
torch.Tensor(range(10)).unsqueeze(-1)), dim=1)
frm_mask = torch.from_numpy(get_frm_mask(pred_bbox[:, 4].numpy(),
ref_bbox[:, 4].numpy()).astype('uint8'))
overlap = bbox_overlaps_batch(pred_bbox[:, :5].unsqueeze(0),
ref_bbox[:, :5].unsqueeze(0), frm_mask.unsqueeze(0))
prec[class_name].append(1 if torch.max(overlap) > self.iou_thresh else 0)
prec_per_sent_tmp.append(1 if torch.max(overlap) > self.iou_thresh else 0)
elif json.loads(nlp.annotate(class_name, properties=props))['sentences'][0]['tokens'][0]['lemma'] in exclude_obj:
pass # do not penalize if gt object word not annotated (missed)
else:
if mode == 'all':
prec[class_name].append(0) # hallucinated object
prec_per_sent_tmp.append(0)
prec_per_sent[vid + seg] = prec_per_sent_tmp
nlp.close()
# recall
recall = defaultdict(list)
recall_per_sent = defaultdict(list)
for vid, anns in ref.items():
for seg, ann in anns['segments'].items():
if len(ann['frame_ind']) == 0:
# print('no annotation available')
continue
recall_per_sent_tmp = [] # for each sentence
ref_bbox_all = torch.cat((torch.Tensor(ann['process_bnd_box']), \
torch.Tensor(ann['frame_ind']).unsqueeze(-1)), dim=1) # 5-D coordinates
sent_idx = set(itertools.chain.from_iterable(ann['process_idx'])) # index of gt object words
for gt_idx in sent_idx:
sel_idx = [idx for idx, i in enumerate(ann['process_idx']) if gt_idx in i]
ref_bbox = ref_bbox_all[sel_idx] # select matched boxes
                    # Note that, although discouraged, a single word can be annotated across multiple boxes/frames
assert(ref_bbox.size(0) > 0)
class_name = ann['process_clss'][sel_idx[0]][ann['process_idx'][sel_idx[0]].index(gt_idx)]
if vid not in pred:
recall[class_name].append(0) # video not grounded
recall_per_sent_tmp.append(0)
elif seg not in pred[vid]:
recall[class_name].append(0) # segment not grounded
recall_per_sent_tmp.append(0)
elif class_name in pred[vid][seg]['clss']:
pred_idx = pred[vid][seg]['clss'].index(class_name) # always consider the first match...
pred_bbox = torch.cat((torch.Tensor(pred[vid][seg]['bbox_for_all_frames'][pred_idx])[:,:4], \
torch.Tensor(range(10)).unsqueeze(-1)), dim=1)
frm_mask = torch.from_numpy(get_frm_mask(pred_bbox[:, 4].numpy(), \
ref_bbox[:, 4].numpy()).astype('uint8'))
overlap = bbox_overlaps_batch(pred_bbox[:, :5].unsqueeze(0), \
ref_bbox[:, :5].unsqueeze(0), frm_mask.unsqueeze(0))
recall[class_name].append(1 if torch.max(overlap) > self.iou_thresh else 0)
recall_per_sent_tmp.append(1 if torch.max(overlap) > self.iou_thresh else 0)
else:
if mode == 'all':
recall[class_name].append(0) # object not grounded
recall_per_sent_tmp.append(0)
recall_per_sent[vid + seg] = recall_per_sent_tmp
return prec, recall, prec_per_sent, recall_per_sent, vocab_in_split
def grd_eval(self, mode='all'):
if mode == 'all':
print('Evaluating on all object words.')
elif mode == 'loc':
print('Evaluating only on correctly-predicted object words.')
else:
raise Exception('Invalid loc mode!')
prec, recall, prec_per_sent, rec_per_sent, vocab_in_split = self.precision_recall_util(mode=mode)
# compute the per-class precision, recall, and F1 scores
num_vocab = len(vocab_in_split)
print('Number of groundable objects in this split: {}'.format(num_vocab))
print('Number of objects in prec and recall: {}, {}'.format(len(prec), len(recall)))
prec_cls = np.sum([sum(hm)*1./len(hm) for i,hm in prec.items()])*1./num_vocab
recall_cls = np.sum([sum(hm)*1./len(hm) for i,hm in recall.items()])*1./num_vocab
f1_cls = 2. * prec_cls * recall_cls / (prec_cls + recall_cls)
print('-' * 80)
print('The overall precision_{0} / recall_{0} / F1_{0} are {1:.4f} / {2:.4f} / {3:.4f}'.format(mode, prec_cls, recall_cls, f1_cls))
print('-' * 80)
if self.verbose:
print('Object frequency and grounding accuracy per class (descending by object frequency):')
accu_per_clss = {}
for i in vocab_in_split:
prec_clss = sum(prec[i])*1./len(prec[i]) if i in prec else 0
recall_clss = sum(recall[i])*1./len(recall[i]) if i in recall else 0
accu_per_clss[(i, prec_clss, recall_clss)] = (len(prec[i]), len(recall[i]))
accu_per_clss = sorted(accu_per_clss.items(), key=lambda x:x[1][1], reverse=True)
for accu in accu_per_clss:
print('{} ({} / {}): {:.4f} / {:.4f}'.format(accu[0][0], accu[1][0], accu[1][1], accu[0][1], accu[0][2]))
# compute the per-sent precision, recall, and F1 scores
num_segment_without_labels = 0
prec, rec, f1 = [], [], []
for seg_id, prec_list in prec_per_sent.items():
if rec_per_sent[seg_id] == []:
# skip the segment if no target objects
num_segment_without_labels += 1
else:
current_prec = 0 if prec_list == [] else np.mean(prec_list) # avoid empty prec_list
current_rec = np.mean(rec_per_sent[seg_id])
# if precision and recall are both 0, set the f1 to be 0
if current_prec == 0.0 and current_rec == 0.0:
current_f1_score = 0.0
else:
current_f1_score = 2. * current_prec * current_rec / (current_prec + current_rec) # per-sent F1
prec.append(current_prec)
rec.append(current_rec)
f1.append(current_f1_score)
num_predictions = 0
for _, pred_seg in self.pred.items():
num_predictions += len(pred_seg)
# divide the scores with the total number of predictions
avg_prec = np.sum(prec) / (num_predictions - num_segment_without_labels)
avg_rec = np.sum(rec) / (num_predictions - num_segment_without_labels)
avg_f1 = np.sum(f1) / (num_predictions - num_segment_without_labels)
print('-' * 80)
print('The overall precision_{0}_per_sent / recall_{0}_per_sent / F1_{0}_per_sent are {1:.4f} / {2:.4f} / {3:.4f}'.format(mode, avg_prec, avg_rec, avg_f1))
print('-' * 80)
return prec_cls, recall_cls, f1_cls, avg_prec, avg_rec, avg_f1
def main(args):
grd_evaluator = ANetGrdEval(reference_file=args.reference, submission_file=args.submission,
split_file=args.split_file, val_split=args.split,
iou_thresh=args.iou_thresh, verbose=args.verbose)
if args.eval_mode == 'GT':
print('Assuming the input boxes are based upon GT sentences.')
grd_evaluator.gt_grd_eval()
elif args.eval_mode == 'gen':
print('Assuming the input boxes are based upon generated sentences.')
grd_evaluator.grd_eval(mode=args.loc_mode)
else:
raise Exception('Invalid eval mode!')
if __name__=='__main__':
parser = argparse.ArgumentParser(description='ActivityNet-Entities object grounding evaluation script.')
parser.add_argument('-s', '--submission', type=str, default='', help='submission grounding result file')
parser.add_argument('-r', '--reference', type=str, default='data/anet_entities_cleaned_class_thresh50_trainval.json', help='reference file')
parser.add_argument('--split_file', type=str, default='data/split_ids_anet_entities.json', help='path to the split file')
parser.add_argument('--split', type=str, nargs='+', default=['validation'], help='which split(s) to evaluate')
parser.add_argument('--eval_mode', type=str, default='GT',
help='GT | gen, indicating whether the input is on GT sentences or generated sentences')
parser.add_argument('--loc_mode', type=str, default='all',
                        help='all | loc; when evaluating on generated sentences, whether to penalize language errors (hallucinated objects) or not')
parser.add_argument('--iou_thresh', type=float, default=0.5, help='the iou threshold for grounding correctness')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
main(args)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Script to print stats on the NP annotation file
import numpy as np
import json
import csv
import sys
src_file = sys.argv[1] # 'anet_entities.json'
dataset_file = sys.argv[2] # 'anet_captions_all_splits.json'
split_file = sys.argv[3] # 'split_ids_anet_entities.json'
if __name__ == '__main__':
with open(src_file) as f:
data = json.load(f)['database']
with open(dataset_file) as f:
raw_data = json.load(f)
split_dict = {}
with open(split_file) as f:
split = json.load(f)
for s,ids in split.items():
split_dict.update({i:s for i in ids})
num_seg = np.sum([len(dat['segments']) for vid, dat in data.items()])
total_box = {}
total_dur = []
seg_splits = {}
for vid, dat in data.items():
for seg, ann in dat['segments'].items():
total_box[split_dict[vid]] = total_box.get(split_dict[vid], 0)+len(ann['objects'])
total_dur.append(float(raw_data[vid]['timestamps'][int(seg)][1]-raw_data[vid]['timestamps'][int(seg)][0]))
seg_splits[split_dict[vid]] = seg_splits.get(split_dict[vid], 0)+1
    print('number of annotated videos: {}'.format(len(data)))
print('number of annotated video segments: {}'.format(num_seg))
print('number of segments in each split: {}'.format(seg_splits))
print('total duration in hr: {}'.format(np.sum(total_dur)/3600))
print('total number of noun phrase boxes: {}'.format(total_box))
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Based on
# https://github.com/jiasenlu/NeuralBabyTalk/blob/master/misc/bbox_transform.py
# Licensed under The MIT License
# Copyright (c) 2017 Jiasen Lu
# --------------------------------------------------------
# Reorganized and modified by Jianwei Yang and Jiasen Lu
# --------------------------------------------------------
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import torch
import numpy as np
def bbox_overlaps_batch(anchors, gt_boxes, frm_mask=None):
"""
    anchors: (N, 4) ndarray of float, or (b, N, 5) when the boxes carry a frame index
    gt_boxes: (b, K, 5) ndarray of float
    frm_mask: (b, N, K) uint8 tensor, 1 where the proposal and gt box lie on different frames
    overlaps: (b, N, K) ndarray of overlap between boxes and query_boxes
"""
batch_size = gt_boxes.size(0)
if anchors.dim() == 2:
        assert frm_mask is None, 'mask not implemented yet' # hasn't updated the mask yet
N = anchors.size(0)
K = gt_boxes.size(1)
anchors = anchors.view(1, N, 4).expand(batch_size, N, 4).contiguous()
gt_boxes = gt_boxes[:,:,:4].contiguous()
gt_boxes_x = (gt_boxes[:,:,2] - gt_boxes[:,:,0] + 1)
gt_boxes_y = (gt_boxes[:,:,3] - gt_boxes[:,:,1] + 1)
gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K)
anchors_boxes_x = (anchors[:,:,2] - anchors[:,:,0] + 1)
anchors_boxes_y = (anchors[:,:,3] - anchors[:,:,1] + 1)
anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N, 1)
gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1)
anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1)
boxes = anchors.view(batch_size, N, 1, 4).expand(batch_size, N, K, 4)
query_boxes = gt_boxes.view(batch_size, 1, K, 4).expand(batch_size, N, K, 4)
iw = (torch.min(boxes[:,:,:,2], query_boxes[:,:,:,2]) -
torch.max(boxes[:,:,:,0], query_boxes[:,:,:,0]) + 1)
iw[iw < 0] = 0
ih = (torch.min(boxes[:,:,:,3], query_boxes[:,:,:,3]) -
torch.max(boxes[:,:,:,1], query_boxes[:,:,:,1]) + 1)
ih[ih < 0] = 0
ua = anchors_area + gt_boxes_area - (iw * ih)
overlaps = iw * ih / ua
# mask the overlap here.
overlaps.masked_fill_(gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0)
overlaps.masked_fill_(anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K), -1)
elif anchors.dim() == 3:
N = anchors.size(1)
K = gt_boxes.size(1)
if anchors.size(2) == 5:
anchors = anchors[:,:,:5].contiguous()
else:
anchors = anchors[:,:,1:6].contiguous()
gt_boxes = gt_boxes[:,:,:5].contiguous()
gt_boxes_x = (gt_boxes[:,:,2] - gt_boxes[:,:,0] + 1)
gt_boxes_y = (gt_boxes[:,:,3] - gt_boxes[:,:,1] + 1)
gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K)
anchors_boxes_x = (anchors[:,:,2] - anchors[:,:,0] + 1)
anchors_boxes_y = (anchors[:,:,3] - anchors[:,:,1] + 1)
anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N, 1)
gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1)
anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1)
boxes = anchors.view(batch_size, N, 1, 5).expand(batch_size, N, K, 5)
query_boxes = gt_boxes.view(batch_size, 1, K, 5).expand(batch_size, N, K, 5)
iw = (torch.min(boxes[:,:,:,2], query_boxes[:,:,:,2]) -
torch.max(boxes[:,:,:,0], query_boxes[:,:,:,0]) + 1)
iw[iw < 0] = 0
ih = (torch.min(boxes[:,:,:,3], query_boxes[:,:,:,3]) -
torch.max(boxes[:,:,:,1], query_boxes[:,:,:,1]) + 1)
ih[ih < 0] = 0
ua = anchors_area + gt_boxes_area - (iw * ih)
if frm_mask is not None:
# proposal and gt should be on the same frame to overlap
# frm_mask = ~frm_mask # bitwise not (~) does not work with uint8 in pytorch 1.3
frm_mask = 1 - frm_mask
# print('Percentage of proposals that are in the annotated frame: {}'.format(torch.mean(frm_mask.float())))
overlaps = iw * ih / ua
overlaps *= frm_mask.type(overlaps.type())
# mask the overlap here.
overlaps.masked_fill_(gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0)
overlaps.masked_fill_(anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K), -1)
else:
raise ValueError('anchors input dimension is not correct.')
return overlaps
def get_frm_mask(proposals, gt_bboxs):
# proposals: num_pps
# gt_bboxs: num_box
num_pps = proposals.shape[0]
num_box = gt_bboxs.shape[0]
return (np.tile(proposals.reshape(-1,1), (1,num_box)) != np.tile(gt_bboxs, (num_pps,1)))
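# Minimal usage sketch (not part of the evaluation pipeline): boxes are 5-D
# (x1, y1, x2, y2, frame_index), and the frame mask zeroes out the IoU between a
# proposal and a ground-truth box that lie on different frames.
if __name__ == '__main__':
    pred_bbox = torch.Tensor([[0., 0., 10., 10., 0.], [5., 5., 15., 15., 1.]])  # N x 5
    ref_bbox = torch.Tensor([[0., 0., 10., 10., 0.]])                            # K x 5
    frm_mask = torch.from_numpy(get_frm_mask(pred_bbox[:, 4].numpy(),
                                             ref_bbox[:, 4].numpy()).astype('uint8'))
    overlap = bbox_overlaps_batch(pred_bbox.unsqueeze(0), ref_bbox.unsqueeze(0),
                                  frm_mask.unsqueeze(0))
    print(overlap)  # shape (1, 2, 1); only the first proposal overlaps on frame 0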
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Script to preprocess the raw annotation output to NP/object annotation files
import os
import sys
import json
import argparse
import numpy as np
from collections import Counter, defaultdict
from stanfordcorenlp import StanfordCoreNLP
def define_split(database):
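    """Build train/val/test video id lists: train ids come from the caption train
    file, while the caption val file is randomly split in half into val and test
    (reproducible through the numpy seed set in __main__)."""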
with open(args.train_cap_file) as f:
        train_ids = list(json.load(f).keys())
    with open(args.val_cap_file) as f:
        valtest_ids = list(json.load(f).keys())
    val_split = np.random.rand(len(valtest_ids)) >= 0.5  # split half of val+test off as the test split
    val_ids = [valtest_ids[i] for i, j in enumerate(val_split) if j]
    test_ids = [valtest_ids[i] for i, j in enumerate(val_split) if not j]
vid_ids = set(database.keys())
train_ann_ids = vid_ids.intersection(set(train_ids))
val_ann_ids = vid_ids.intersection(set(val_ids))
test_ann_ids = vid_ids.intersection(set(test_ids))
print('All data - total: {}, train split: {}, val split: {}, test split: {}'.format(len(train_ids+val_ids+test_ids), len(train_ids), len(val_ids), len(test_ids)))
print('Annotated data - total: {}, train split: {}, val split: {}, and test split: {}'.format(
len(vid_ids), len(train_ann_ids), len(val_ann_ids), len(test_ann_ids)))
return [train_ids, val_ids, test_ids]
def extract_attr(database, splits):
split_dict = {}
for split in splits:
split_dict.update({s:s for s in split})
print('Object classes defined on {} videos, freq threshold is {}'.format(len(split_dict), args.freq_thresh))
attr_all = [] # all the attributes
for vid_id, vid in database.items():
if split_dict.get(vid_id, -1) != -1:
for seg_id, seg in vid['segments'].items():
for obj in seg['objects']:
assert(len(obj['frame_ind']) == 1)
for box_id, box in obj['frame_ind'].items():
tmp = []
attr_lst = []
sorted_attr = sorted(box['attributes'], key=lambda x:x[0]) # the attributes are unordered
for ind, attr in enumerate(sorted_attr):
assert(attr[0] >= 0)
if len(tmp) == 0:
tmp.append(attr[1].lower()) # convert to lowercase
else:
if attr[0] == (sorted_attr[ind-1][0]+1):
tmp.append(attr[1].lower())
else:
attr_lst.append(tmp)
tmp = [attr[1].lower()]
if len(tmp) > 0: # the last one
attr_lst.append(tmp)
# exclude empty box (no attribute)
# crowd boxes are ok for now
if len(attr_lst) == 0: # or box['crowds'] == 1
pass
# print('empty attribute at video {}, segment {}, box {}'.format(vid_id, seg_id, box_id))
else:
attr_all.extend([' '.join(i) for i in attr_lst])
return attr_all
def prep_all(database, database_cap, obj_cls_lst, w2l, nlp):
w2d = {}
for ind, obj in enumerate(obj_cls_lst):
w2d[obj] = ind
avg_box = [] # number of boxes per segment
avg_attr = [] # number of attributes per box
attr_all = [] # all the attributes
crowd_all = [] # all the crowd labels
attr_dict = defaultdict(list)
with open(args.attr_to_video_file) as f:
for line in f.readlines():
line_split = line.split(',')
attr_id = line_split[0]
vid_name = line_split[-1]
attr = ','.join(line_split[1:-1])
vid_id, seg_id = vid_name.strip().split('_segment_')
attr_dict[(vid_id, str(int(seg_id)))].append([int(attr_id), attr])
print('Number of segments with attributes: {}'.format(len(attr_dict)))
vid_seg_dict = {}
for vid_id, vid in database.items():
for seg_id, _ in vid['segments'].items():
vid_seg_dict[(vid_id, seg_id)] = vid_seg_dict.get((vid_id, seg_id), 0) + 1
new_database = {}
new_database_np = {}
seg_counter = 0
for vid_id, cap in database_cap.items():
new_database_np[vid_id] = {'segments':{}}
new_seg = {}
for cap_id in range(len(cap['sentences'])):
new_obj_lst = defaultdict(list)
seg_id = str(cap_id)
new_database_np[vid_id]['segments'][seg_id] = {'objects':[]}
if vid_seg_dict.get((vid_id, seg_id), 0) == 0:
new_obj_lst['tokens'] = nlp.word_tokenize(cap['sentences'][cap_id].encode('utf-8')) # sentences not in ANet-BB
else:
vid = database[vid_id]
seg = vid['segments'][seg_id]
# preprocess attributes
attr_sent = sorted(attr_dict[(vid_id, seg_id)], key=lambda x:x[0])
start_ind = attr_sent[0][0]
# legacy token issues from our annotation tool
for ind, tup in enumerate(attr_sent):
if attr_sent[ind][1] == '\\,':
attr_sent[ind][1] = ','
new_obj_lst['tokens'] = [i[1] for i in attr_sent] # all the word tokens
for obj in seg['objects']:
assert(len(obj['frame_ind']) == 1)
np_ann = {}
                    box_id, box = list(obj['frame_ind'].items())[0]  # exactly one frame per object (asserted above)
np_ann['frame_ind'] = int(box_id)
np_ann.update(box)
if len(box['attributes']) > 0: # just in case the attribute is empty, though it should not be
tmp = []
tmp_ind = []
tmp_obj = []
attr_lst = []
attr_ind_lst = []
tmp_np_ind = []
np_lst = []
sorted_attr = sorted(box['attributes'], key=lambda x:x[0]) # the attributes are unordered
sorted_attr = [(x[0]-start_ind, x[1]) for x in sorted_attr] # index relative to the sent
for ind, attr in enumerate(sorted_attr):
assert(attr[0] >= 0)
attr_w = attr[1].lower()
if len(tmp) == 0:
tmp.append(attr_w) # convert to lowercase
tmp_np_ind.append(attr[0])
if w2l.get(attr_w, -1) != -1:
attr_l = w2l[attr_w]
if w2d.get(attr_l, -1) != -1:
tmp_obj.append(attr_l)
tmp_ind.append(attr[0])
else:
if attr[0] == (sorted_attr[ind-1][0]+1):
tmp.append(attr_w)
tmp_np_ind.append(attr[0])
if w2l.get(attr_w, -1) != -1:
attr_l = w2l[attr_w]
if w2d.get(attr_l, -1) != -1:
tmp_obj.append(attr_l)
tmp_ind.append(attr[0])
else:
np_lst.append([' '.join(tmp), tmp_np_ind])
if len(tmp_obj) >= 1:
attr_lst.append(tmp_obj[-1]) # the last noun is usually the head noun
attr_ind_lst.append(tmp_ind[-1])
tmp = [attr_w]
tmp_np_ind = [attr[0]]
if w2l.get(attr_w, -1) != -1:
attr_l = w2l[attr_w]
if w2d.get(attr_l, -1) != -1:
tmp_obj = [attr_l]
tmp_ind = [attr[0]]
else:
tmp_obj = []
tmp_ind = []
else:
tmp_obj = []
tmp_ind = []
if len(tmp) > 0: # the last one
np_lst.append([' '.join(tmp), tmp_np_ind])
if len(tmp_obj) >= 1:
attr_lst.append(tmp_obj[-1]) # the last noun is usually the head noun
attr_ind_lst.append(tmp_ind[-1])
assert(len(np_lst) > 0)
np_ann['noun_phrases'] = np_lst
np_ann.pop('attributes', None)
new_database_np[vid_id]['segments'][seg_id]['objects'].append(np_ann)
# exclude empty box (no attribute)
# crowd boxes are ok for now
if len(attr_lst) == 0: # or box['crowds'] == 1
pass
# print('empty attribute at video {}, segment {}, box {}'.format(vid_id, seg_id, box_id))
else:
new_obj_lst['process_bnd_box'].append([box['xtl'], box['ytl'], box['xbr'], box['ybr']])
new_obj_lst['frame_ind'].append(int(box_id))
new_obj_lst['crowds'].append(box['crowds'])
new_obj_lst['process_clss'].append(attr_lst)
new_obj_lst['process_idx'].append(attr_ind_lst)
avg_attr.append(len(attr_lst))
attr_all.extend([' '.join(i) for i in attr_lst])
crowd_all.append(box['crowds'])
            avg_box.append(len(new_obj_lst['frame_ind'])) # could be 0
if len(new_obj_lst['frame_ind']) == 0:
new_obj_lst['process_bnd_box'] = []
new_obj_lst['frame_ind'] = [] # all empty
new_obj_lst['crowds'] = []
new_obj_lst['process_clss'] = []
new_obj_lst['process_idx'] = []
seg_counter += 1
new_seg[seg_id] = new_obj_lst
new_database_np[vid_id]['segments'][seg_id]['tokens'] = new_obj_lst['tokens']
new_database[vid_id] = {'segments':new_seg}
# quick stats
print('Number of videos: {} (including empty ones)'.format(len(new_database)))
print('Number of segments: {}'.format(seg_counter))
print('Average number of valid segments per video: {}'.format(np.mean([len(vid['segments']) for vid_id, vid in new_database.items()])))
print('Average number of box per segment: {} and frequency: {}'.format(np.mean(avg_box), Counter(avg_box)))
print('Average number of attributes per box: {} and frequency: {} (for valid box only)'.format(np.mean(avg_attr), Counter(avg_attr)))
crowd_freq = Counter(crowd_all)
print('Percentage of crowds: {} (for valid box only)'.format(crowd_freq[1]*1./(crowd_freq[1]+crowd_freq[0])))
return new_database, new_database_np
def freq_obj_list(attr_all, nlp, props):
# generate a list of object classes
num_nn_per_attr = []
anet_obj_cls = []
nn_wo_noun = [] # noun phrases that contain no nouns
w2lemma = defaultdict(list)
for i, v in enumerate(attr_all):
if i%10000 == 0:
print(i)
out = json.loads(nlp.annotate(v.encode('utf-8'), properties=props))
        assert(len(out['sentences']) > 0)
counter = 0
for token in out['sentences'][0]['tokens']:
if ('NN' in token['pos']) or ('PRP' in token['pos']):
lemma_w = token['lemma']
anet_obj_cls.append(lemma_w)
w2lemma[token['word']].append(lemma_w)
counter += 1
num_nn_per_attr.append(counter)
if counter == 0:
nn_wo_noun.append(v)
top_nn_wo_noun = Counter(nn_wo_noun)
print('Frequency of NPs w/o nouns:')
print(top_nn_wo_noun.most_common(10))
print('Frequency of number of nouns per attribute:')
print(Counter(num_nn_per_attr))
top_obj_cls = Counter(anet_obj_cls)
    print('Top 20 objects:', top_obj_cls.most_common(20))
obj_cls_lst = []
for w,freq in top_obj_cls.items():
if freq >= args.freq_thresh:
            obj_cls_lst.append(w)  # keep as str so the vocab stays JSON-serializable
w2l = {}
for w, l in w2lemma.items():
# manually correct some machine lemmatization mistakes
spec_w2l = {'outfits':'outfit', 'mariachi':'mariachi', 'barrios':'barrio', 'mans':'man', 'bags':'bag', 'aerobics':'aerobic', 'motobikes':'motobike', 'graffiti':'graffiti', 'semi':'semi', 'los':'los', 'tutus':'tutu'}
if spec_w2l.get(w, -1) != -1: # one special case...
w2l[w] = spec_w2l[w]
print('Ambiguous lemma for: {}'.format(w))
else:
assert(len(set(l)) == 1)
w2l[w] = list(set(l))[0]
    print('Number of surface words mapped to a lemma: {}'.format(len(w2l)))
return obj_cls_lst, w2l
def main(args):
nlp = StanfordCoreNLP(args.corenlp_path)
props={'annotators': 'ssplit, tokenize, lemma','pipelineLanguage':'en', 'outputFormat':'json'}
# load anet captions
with open(args.train_cap_file) as f:
database_cap = json.load(f)
with open(args.val_cap_file) as f:
database_cap.update(json.load(f))
print('Number of videos in ActivityNet Captions (train+val): {}'.format(len(database_cap)))
# load raw annotation output anet bb
with open(args.src_file) as f:
database = json.load(f)['database']
print('Number of videos in ActivityNet-BB (train+val): {}'.format(len(database)))
if os.path.isfile(args.split_file):
with open(args.split_file) as f:
all_splits = json.load(f)
splits = [all_splits['training'], all_splits['validation'], all_splits['testing']]
else:
        raise FileNotFoundError('Cannot find the split file! Comment out this raise if you want to create a new split with the code below.')
splits = define_split(database)
all_splits = {'training':splits[0], 'validation':splits[1], 'testing':splits[2]}
with open(args.split_file, 'w') as f:
json.dump(all_splits, f)
attr_all = extract_attr(database, splits[:2]) # define object classes on train/val data
obj_cls_lst, w2l = freq_obj_list(attr_all, nlp, props)
new_database, new_database_np = prep_all(database, database_cap, obj_cls_lst, w2l, nlp)
# write raw annotation file
new_database_np = {'database':new_database_np}
with open(args.target_np_file, 'w') as f:
json.dump(new_database_np, f)
# write pre-processed annotation file
new_database = {'vocab':obj_cls_lst, 'annotations':new_database}
with open(args.target_file, 'w') as f:
json.dump(new_database, f)
nlp.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='ActivityNet-Entities dataset preprocessing script.')
parser.add_argument('--dataset_root', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttnVid/data/anet/', help='dataset root directory')
parser.add_argument('--corenlp_path', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttn/tools/stanford-corenlp-full-2018-02-27', help='path to stanford core nlp toolkit')
parser.add_argument('--freq_thresh', type=int, default=50, help='frequency threshold for determining object classes')
parser.add_argument('--train_cap_file', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttnVid/data/anet/raw_annotation_file/train.json')
parser.add_argument('--val_cap_file', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttnVid/data/anet/raw_annotation_file/val_1.json')
args = parser.parse_args()
args.src_file = args.dataset_root+'anet_bb.json' # the raw annotation file
args.target_np_file = args.dataset_root+'anet_entities.json' # output np file
args.target_file = args.dataset_root+'anet_entities_cleaned_class_thresh'+str(args.freq_thresh)+'.json' # output object file
args.attr_to_video_file = args.dataset_root+'attr_to_video.txt' # from annotation tool
args.split_file = args.dataset_root+'split_ids_anet_entities.json' # split file
np.random.seed(123) # make reproducible
main(args)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Script to print stats on the object annotation file
import numpy as np
import json
import csv
# import visdom
import sys
from collections import Counter
src_file = sys.argv[1] # 'anet_entities_cleaned_class_thresh50_trainval.json'
dataset_file = sys.argv[2] # 'anet_captions_all_splits.json'
split_file = sys.argv[3] # 'split_ids_anet_entities.json'
if __name__=='__main__':
with open(src_file) as f:
data = json.load(f)['annotations']
with open(dataset_file) as f:
raw_data = json.load(f)
split_dict = {}
with open(split_file) as f:
split = json.load(f)
for s,ids in split.items():
split_dict.update({i:s for i in ids})
num_seg = np.sum([len(dat['segments']) for vid, dat in data.items()])
total_box = {}
total_dur = []
seg_splits = {}
box_per_seg = []
obj_per_box = []
count_obj = []
for vid, dat in data.items():
for seg, ann in dat['segments'].items():
total_box[split_dict[vid]] = total_box.get(split_dict[vid], 0)+len(ann['process_bnd_box'])
total_dur.append(float(raw_data[vid]['timestamps'][int(seg)][1]-raw_data[vid]['timestamps'][int(seg)][0]))
seg_splits[split_dict[vid]] = seg_splits.get(split_dict[vid], 0)+1
box_per_seg.append(len(ann['process_bnd_box']))
for c in ann['process_clss']:
obj_per_box.append(len(c))
count_obj.extend(c)
    print('number of annotated videos: {}'.format(len(data)))
print('number of annotated video segments: {}'.format(num_seg))
print('number of segments in each split: {}'.format(seg_splits))
print('total duration in hr: {}'.format(np.sum(total_dur)/3600))
print('total number of phrase (not object) boxes: {}'.format(total_box))
print('box per segment, mean {}, std {}, count {}'.format(np.mean(box_per_seg), np.std(box_per_seg), Counter(box_per_seg)))
print('object per box, mean {}, std {}, count {}'.format(np.mean(obj_per_box), np.std(obj_per_box), Counter(obj_per_box)))
print('Top 10 object labels: {}'.format(Counter(count_obj).most_common(10)))
"""
# visualization
vis = visdom.Visdom()
vis.histogram(X=[i for i in box_per_seg if i < 20],
opts={'numbins': 20, 'xtickmax':20, 'xtickmin':0, 'xmax':20, 'xmin':0, 'title':'Distribution of number of boxes per segment', 'xtickfont':{'size':14}, \
'ytickfont':{'size':14}, 'xlabel':'Number of boxes', 'ylabel': 'Counts'})
vis.histogram(X=[i for i in obj_per_box if i < 100],
opts={'numbins': 100, 'xtickmax':100, 'xtickmin':0, 'xmax':100, 'xmin':0, 'title':'Distribution of number of object labels per box', 'xtickfont':{'size':14}, \
'ytickfont':{'size':14}, 'xlabel':'Number of object labels', 'ylabel': 'Counts'})
"""
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import random
import shutil
import pandas as pd
import tqdm
if __name__ == "__main__":
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="/disc/data/dir")
parser.add_argument("--output_dir", type=str, default="/disc/data/dir")
return parser
params = get_parser().parse_args()
copy_mode = 'symlink'
print("Args:{}".format(json.dumps(vars(params))))
dest_ref10k_dir = os.path.join(params.output_dir, "references_10k")
dest_ref990k_dir = os.path.join(params.output_dir, "references_990k")
dest_query_40k_dir = os.path.join(params.output_dir, "queries_40k")
os.makedirs(dest_ref10k_dir, exist_ok=True)
os.makedirs(dest_ref990k_dir, exist_ok=True)
os.makedirs(dest_query_40k_dir, exist_ok=True)
print(f"Creating output directories: {dest_ref10k_dir}, {dest_ref990k_dir}, {dest_query_40k_dir}")
print(f"Copying the reference images")
reference_dir = os.path.join(params.data_dir, "references")
filenames = [f'R{ii:06d}.jpg' for ii in range(1000000)]
csv_path = os.path.join(params.data_dir, "groundtruth_matches.csv")
df = pd.read_csv(csv_path, header=None, names=['Q', 'R'])
rs = df['R'].values.tolist()
rs.sort()
is_img_in_query = {}
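    # Forward sweep (assumes each reference id appears at most once in `rs`): both
    # `filenames` (R000000.jpg ... R999999.jpg) and `rs` are sorted, so each
    # ground-truth reference id is consumed exactly when its filename comes up;
    # references never matched by a query end up in the 990k distractor pool.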
for filename in filenames:
is_img_in_query[filename] = False
if len(rs) == 0:
continue
if rs[0] in filename:
is_img_in_query[filename] = True
rs.pop(0)
print(f"Number of reference images that are used in query: {sum(is_img_in_query.values())}")
for filename in tqdm.tqdm(filenames):
img_path = os.path.join(reference_dir, filename)
dest_dir = dest_ref10k_dir if is_img_in_query[filename] else dest_ref990k_dir
if copy_mode == 'symlink':
os.symlink(img_path, os.path.join(dest_dir, filename))
else:
shutil.copy(img_path, os.path.join(dest_dir, filename))
print(f"Copying the query images")
train_dir = os.path.join(params.data_dir, "train")
filenames = [f'T{ii:06d}.jpg' for ii in range(1000000)]
random.seed(0)
filenames = random.sample(filenames, 40000)
for filename in tqdm.tqdm(filenames):
img_path = os.path.join(train_dir, filename)
if copy_mode == 'symlink':
os.symlink(img_path, os.path.join(dest_query_40k_dir, filename))
else:
shutil.copy(img_path, os.path.join(dest_query_40k_dir, filename))
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import tqdm
import json
import torch
from torchvision import transforms
from activeindex import utils
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if __name__ == '__main__':
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--output_dir", type=str, default='output')
parser.add_argument("--data_dir", type=str, default="/img/data/dir")
parser.add_argument("--model_name", type=str, default="torchscript")
parser.add_argument("--model_path", type=str, default="/path/to/model.torchscript.pt")
parser.add_argument("--resize_size", type=int, default=288, help="Resize images to this size. (Default: 288)")
parser.add_argument("--batch_size", type=int, default=256, help="Batch size.")
return parser
params = get_parser().parse_args()
print("__log__:{}".format(json.dumps(vars(params))))
print('>>> Creating output directory...')
os.makedirs(params.output_dir, exist_ok=True)
print('>>> Building backbone...')
model = utils.build_backbone(path=params.model_path, name=params.model_name)
model.eval()
model.to(device)
print('>>> Creating dataloader...')
NORMALIZE_IMAGENET = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
default_transform = transforms.Compose([
transforms.ToTensor(),
NORMALIZE_IMAGENET,
transforms.Resize((params.resize_size, params.resize_size)),
])
img_loader = utils.get_dataloader(params.data_dir, default_transform, batch_size=params.batch_size, collate_fn=None)
print('>>> Extracting features...')
features = []
with open(os.path.join(params.output_dir, "filenames.txt"), 'w') as f:
with torch.no_grad():
for ii, imgs in enumerate(tqdm.tqdm(img_loader)):
imgs = imgs.to(device)
fts = model(imgs)
features.append(fts.cpu())
for jj in range(fts.shape[0]):
sample_fname = img_loader.dataset.samples[ii*params.batch_size + jj]
f.write(sample_fname + "\n")
print('>>> Saving features...')
features = torch.concat(features, dim=0)
torch.save(features, os.path.join(params.output_dir, 'fts.pth'))
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from augly.image import functional as aug_functional
import torch
from torchvision import transforms
from torchvision.transforms import functional
from . import augment_queries
NORMALIZE_IMAGENET = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
UNNORMALIZE_IMAGENET = transforms.Normalize(mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225], std=[1/0.229, 1/0.224, 1/0.225])
image_std = torch.Tensor([0.229, 0.224, 0.225]).view(-1, 1, 1)
def center_crop(x, scale):
""" Perform center crop such that the target area of the crop is at a given scale
Args:
x: PIL image
scale: target area scale
"""
scale = np.sqrt(scale)
new_edges_size = [int(s*scale) for s in x.size][::-1]
return functional.center_crop(x, new_edges_size)
def resize(x, scale):
""" Perform center crop such that the target area of the crop is at a given scale
Args:
x: PIL image
scale: target area scale
"""
scale = np.sqrt(scale)
new_edges_size = [int(s*scale) for s in x.size][::-1]
return functional.resize(x, new_edges_size)
def psnr(x, y):
"""
Return PSNR
Args:
x, y: Images tensor with imagenet normalization
"""
delta = 255 * (x - y) * image_std.to(x.device)
psnr = 20*np.log10(255) - 10*torch.log10(torch.mean(delta**2))
return psnr
def linf(x, y):
"""
Return Linf distance
Args:
x, y: Images tensor with imagenet normalization
"""
return torch.max(torch.abs(255 * (x - y) * image_std.to(x.device)))
attacks_dict = {
"none": lambda x : x,
"rotation": lambda x, angle: functional.rotate(x, angle, functional.InterpolationMode('bilinear'), expand=True),
"grayscale": functional.rgb_to_grayscale,
"contrast": functional.adjust_contrast,
"brightness": functional.adjust_brightness,
"hue": functional.adjust_hue,
"hflip": functional.hflip,
"vflip": functional.vflip,
"blur": functional.gaussian_blur, # sigma = ksize*0.15 + 0.35 - ksize = (sigma-0.35)/0.15
"jpeg": aug_functional.encoding_quality,
"resize": resize,
"center_crop": center_crop,
"meme_format": aug_functional.meme_format,
"overlay_emoji": aug_functional.overlay_emoji,
"overlay_onto_screenshot": aug_functional.overlay_onto_screenshot,
"auto": augment_queries.augment_img,
}
attacks = [{'attack': 'none'}] \
+ [{'attack': 'auto'}] \
+ [{'attack': 'meme_format'}] \
+ [{'attack': 'overlay_onto_screenshot'}] \
+ [{'attack': 'rotation', 'angle': angle} for angle in [25,90]] \
+ [{'attack': 'center_crop', 'scale': 0.5}] \
+ [{'attack': 'resize', 'scale': 0.5}] \
+ [{'attack': 'blur', 'kernel_size': 11}] \
+ [{'attack': 'jpeg', 'quality': 50}] \
+ [{'attack': 'hue', 'hue_factor': 0.2}] \
+ [{'attack': 'contrast', 'contrast_factor': cf} for cf in [0.5, 2.0]] \
    + [{'attack': 'brightness', 'brightness_factor': bf} for bf in [0.5, 2.0]]
# more attacks for the full evaluation
attacks_2 = [{'attack': 'rotation', 'angle': jj} for jj in range(-90, 100,10)] \
+ [{'attack': 'center_crop', 'scale': 0.1*jj} for jj in range(1,11)] \
+ [{'attack': 'resize', 'scale': 0.1*jj} for jj in range(1,11)] \
+ [{'attack': 'blur', 'kernel_size': 1+2*jj} for jj in range(1,15)] \
+ [{'attack': 'jpeg', 'quality': 10*jj} for jj in range(1,11)] \
+ [{'attack': 'contrast', 'contrast_factor': 0.5 + 0.1*jj} for jj in range(15)] \
+ [{'attack': 'brightness', 'brightness_factor': 0.5 + 0.1*jj} for jj in range(15)] \
    + [{'attack': 'hue', 'hue_factor': -0.5 + 0.1*jj} for jj in range(0, 11)]
def generate_attacks(img, attacks=attacks):
""" Generate a list of attacked images from a PIL image. """
attacked_imgs = []
for attack in attacks:
attack = attack.copy()
attack_name = attack.pop('attack')
attacked_imgs.append(attacks_dict[attack_name](img, **attack))
return attacked_imgs
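# Example (sketch, assuming `img` is a PIL.Image):
#   attacked = generate_attacks(img, attacks=[{'attack': 'none'},
#                                             {'attack': 'rotation', 'angle': 25},
#                                             {'attack': 'center_crop', 'scale': 0.5}])
# Each dict's 'attack' key selects a function from `attacks_dict`; the remaining keys
# are forwarded to that function as keyword arguments.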
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class JND(nn.Module):
""" https://ieeexplore.ieee.org/document/7885108 """
def __init__(self, preprocess = lambda x: x):
super(JND, self).__init__()
kernel_x = [[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]]
kernel_y = [[1., 2., 1.], [0., 0., 0.], [-1., -2., -1.]]
kernel_lum = [[1, 1, 1, 1, 1], [1, 2, 2, 2, 1], [1, 2, 0, 2, 1], [1, 2, 2, 2, 1], [1, 1, 1, 1, 1]]
kernel_x = torch.FloatTensor(kernel_x).unsqueeze(0).unsqueeze(0)
kernel_y = torch.FloatTensor(kernel_y).unsqueeze(0).unsqueeze(0)
kernel_lum = torch.FloatTensor(kernel_lum).unsqueeze(0).unsqueeze(0)
self.weight_x = nn.Parameter(data=kernel_x, requires_grad=False)
self.weight_y = nn.Parameter(data=kernel_y, requires_grad=False)
self.weight_lum = nn.Parameter(data=kernel_lum, requires_grad=False)
self.preprocess = preprocess
def jnd_la(self, x, alpha=1.0, eps=1e-3):
""" Luminance masking: x must be in [0,255] """
la = F.conv2d(x, self.weight_lum, padding=2) / 32
mask_lum = la <= 127
la[mask_lum] = 17 * (1 - torch.sqrt(la[mask_lum]/127 + eps)) + 3
la[~mask_lum] = 3/128 * (la[~mask_lum] - 127) + 3
return alpha * la
def jnd_cm(self, x, beta=0.117):
""" Contrast masking: x must be in [0,255] """
grad_x = F.conv2d(x, self.weight_x, padding=1)
grad_y = F.conv2d(x, self.weight_y, padding=1)
cm = torch.sqrt(grad_x**2 + grad_y**2)
cm = 16 * cm**2.4 / (cm**2 + 26**2)
return beta * cm
def heatmaps(self, x, clc=0.3):
""" x must be in [0,1] """
x = 255 * self.preprocess(x)
x = 0.299 * x[...,0:1,:,:] + 0.587 * x[...,1:2,:,:] + 0.114 * x[...,2:3,:,:]
la = self.jnd_la(x)
cm = self.jnd_cm(x)
return torch.clamp_min(la + cm - clc * torch.minimum(la, cm), 5)/255 # b 1 h w
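# Minimal usage sketch (assumes a batch of RGB images in [0, 1]): the heatmap acts as a
# per-pixel perturbation budget that downstream code multiplies into the distortion.
if __name__ == '__main__':
    jnd = JND()
    dummy = torch.rand(2, 3, 64, 64)  # two random images in [0, 1]
    hm = jnd.heatmaps(dummy)          # shape (2, 1, 64, 64)
    print(hm.shape, float(hm.min()), float(hm.max()))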
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import random
from typing import Any, Dict, List, NamedTuple, Tuple
import augly.image as imaugs
import augly.utils as utils
import numpy as np
from PIL import Image
RNG = np.random.RandomState
rng = np.random.RandomState(0)
ParametersDistributions = NamedTuple
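# `ParametersDistributions` is an alias of typing.NamedTuple: each *PD class below
# declares, as typed fields with defaults, the ParameterDistribution used to sample
# every keyword argument of the corresponding augly filter.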
class ParameterDistribution:
"""Define how to sample a parameter"""
def __init__(self, low: Any, high: Any):
self.low = low
self.high = high
def sample(self, rng: RNG) -> Any:
raise NotImplementedError()
class FixedVariable(ParameterDistribution):
def __init__(self, value: Any):
super().__init__(0, 0)
self.value = value
def sample(self, rng: RNG) -> Any:
return self.value
class UniformFloat(ParameterDistribution):
def sample(self, rng: RNG) -> float:
return float(rng.uniform(self.low, self.high))
class UniformInt(ParameterDistribution):
def sample(self, rng: RNG) -> int:
return int(rng.randint(self.low, self.high + 1))
class UniformColor(ParameterDistribution):
def sample(self, rng: RNG) -> Tuple[int, int, int]:
return tuple(int(rng.randint(self.low, self.high)) for _ in range(3))
class UniformChoice(ParameterDistribution):
def __init__(self, choices: List[Any]):
super().__init__(0, 0)
self.choices = choices
def sample(self, rng: RNG) -> Any:
if not self.choices:
return None
index = rng.randint(0, len(self.choices))
return self.choices[index]
class UniformBool(ParameterDistribution):
def __init__(self):
super().__init__(0, 0)
def sample(self, rng: RNG) -> bool:
return bool(UniformInt(0, 1).sample(rng))
class TextChoice(ParameterDistribution):
def sample(self, rng: RNG) -> List[int]:
length = UniformInt(self.low, self.high).sample(rng)
return [UniformInt(0, 10000).sample(rng) for _ in range(length)]
class ListPD(ParameterDistribution):
def __init__(self, pds: List[ParameterDistribution]):
super().__init__(0, 0)
self.pds = pds
def sample(self, rng: RNG) -> List[Any]:
return [pd.sample(rng) for pd in self.pds]
class TuplePD(ParameterDistribution):
def __init__(self, pds: List[ParameterDistribution]):
super().__init__(0, 0)
self.pds = pds
def sample(self, rng: RNG) -> Tuple:
return tuple(pd.sample(rng) for pd in self.pds)
class ExponentialInt(ParameterDistribution):
def __init__(self, scale: float, low: int, high: int):
super().__init__(low, high)
self.scale = scale
def sample(self, rng) -> int:
# if we sample a value larger than `high`, we need to resample a new one
# if we just take the min(x, high), it will change the distribution
while True:
r = rng.exponential(scale=self.scale)
if int(r + self.low) <= self.high:
return int(r + self.low)
class SymmetricFactor(ParameterDistribution):
def sample(self, rng: RNG) -> float:
factor = float(rng.uniform(self.low, self.high))
invert = rng.randint(0, 2)
return 1 / factor if invert else factor
class UniformLeftRightFactor(ParameterDistribution):
def sample(self, rng: np.random.RandomState) -> Tuple[float, float]:
width = float(rng.uniform(self.low, self.high))
left = rng.uniform(0, 1 - width)
right = left + width
return left, right
class MediaFilterParameters(NamedTuple):
"""Contains the parameters to apply a video filter.
This defines a unique and reproducible transformation"""
name: str
kwargs: Dict[str, Any]
def __repr__(self) -> str:
return json.dumps({**{"name": self.name}, **self.kwargs})
class MediaFilterWithPD(NamedTuple):
"""Define a filter and how to sample all its parameters"""
    # filter name; must match an augmentation function exposed by augly.image (looked up via getattr)
name: str
# must contains only ParameterDistribution attributes
pd: ParametersDistributions
class AspectRatioPD(ParametersDistributions):
ratio: UniformFloat = UniformFloat(0.5, 2.0)
class BlurPD(ParametersDistributions):
radius: UniformFloat = UniformFloat(5.0, 10.0)
class BlurryMaskPD(ParametersDistributions):
background_image: UniformChoice
overlay_size: UniformFloat = UniformFloat(0.3, 0.8)
x_pos: UniformFloat = UniformFloat(0, 1.0)
y_pos: UniformFloat = UniformFloat(0, 1.0)
class BrightnessPD(ParametersDistributions):
factor: UniformFloat = UniformFloat(0.1, 1.9)
class ClipImageSizePD(ParametersDistributions):
min_resolution: UniformChoice = UniformChoice([500])
max_resolution: UniformChoice = UniformChoice([3000000])
class ConvertColorPD(ParametersDistributions):
mode: UniformChoice = UniformChoice(["P"])
colors: UniformInt = UniformInt(2, 16)
class CropPD(ParametersDistributions):
xs: UniformLeftRightFactor = UniformLeftRightFactor(0.3, 0.6)
ys: UniformLeftRightFactor = UniformLeftRightFactor(0.3, 0.6)
class EncodingQualityPD(ParametersDistributions):
quality: UniformInt = UniformInt(5, 25)
class EnhanceEdgesPD(ParametersDistributions):
pass
class GrayscalePD(ParametersDistributions):
pass
class HFlipPD(ParametersDistributions):
pass
class IdentityPD(ParametersDistributions):
pass
class OverlayEmojiPD(ParametersDistributions):
emoji_path: UniformChoice
x_pos: UniformFloat = UniformFloat(0.0, 0.8)
y_pos: UniformFloat = UniformFloat(0.0, 0.8)
opacity: UniformFloat = UniformFloat(0.5, 1.0)
emoji_size: UniformFloat = UniformFloat(0.4, 0.8)
class OverlayOntoImagePD(ParametersDistributions):
background_image: UniformChoice
overlay_size: UniformFloat = UniformFloat(0.3, 0.6)
x_pos: UniformFloat = UniformFloat(0, 0.4)
y_pos: UniformFloat = UniformFloat(0, 0.4)
class OverlayOntoScreenshotPD(ParametersDistributions):
template_filepath: UniformChoice
crop_src_to_fit: UniformChoice = UniformChoice([True])
class OverlayTextPD(ParametersDistributions):
font_file: UniformChoice
text: TextChoice = TextChoice(5, 15)
font_size: UniformFloat = UniformFloat(0.1, 0.3)
color: UniformColor = UniformColor(0, 255)
x_pos: UniformFloat = UniformFloat(0.0, 0.6)
y_pos: UniformFloat = UniformFloat(0.0, 0.6)
class PadSquarePD(ParametersDistributions):
color: UniformColor = UniformColor(0, 255)
class PerspectiveTransformPD(ParametersDistributions):
sigma: UniformFloat = UniformFloat(30.0, 60.0)
crop_out_black_border: UniformChoice = UniformChoice([True])
class PixelizationPD(ParametersDistributions):
ratio: UniformFloat = UniformFloat(0.2, 0.5)
class RotatePD(ParametersDistributions):
degrees: UniformFloat = UniformFloat(-90.0, 90.0)
class SaturationPD(ParametersDistributions):
factor: UniformFloat = UniformFloat(2.0, 5.0)
class ShufflePixelsPD(ParametersDistributions):
factor: UniformFloat = UniformFloat(0.1, 0.3)
def sample(rng: RNG, filter_with_pd: MediaFilterWithPD) -> MediaFilterParameters:
"""Sample for each ParameterDistribution attribute and
return a dict with sampled parameters
"""
kwargs = {key: pdi.sample(rng) for key, pdi in filter_with_pd.pd._asdict().items()}
return MediaFilterParameters(name=filter_with_pd.name, kwargs=kwargs)
def sample_img_filters_parameters(rng: RNG, available_filters: List[MediaFilterWithPD]) -> List[MediaFilterParameters]:
"""Sample parameters for each available filters"""
return [sample(rng, vf) for vf in available_filters]
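# Example (sketch): drawing concrete parameters for every "color" primitive defined below
#   rng = np.random.RandomState(0)
#   params_list = sample_img_filters_parameters(rng, primitives["color"])
#   # -> e.g. [MediaFilterParameters(name='brightness', kwargs={'factor': ...}), ...]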
def get_assets(emoji_dir: str, font_dir: str, screenshot_dir: str) -> Tuple[List[str], List[str], List[str]]:
emojis = []
for fn in utils.pathmgr.ls(emoji_dir):
fp = os.path.join(emoji_dir, fn)
if utils.pathmgr.isdir(fp):
emojis.extend([os.path.join(fp, f) for f in utils.pathmgr.ls(fp)])
fonts = [
os.path.join(font_dir, fn)
for fn in utils.pathmgr.ls(font_dir)
if fn.endswith(".ttf")
]
template_filenames = [
os.path.join(screenshot_dir, fn)
for fn in utils.pathmgr.ls(screenshot_dir)
if fn.split(".")[-1] != "json"
]
return emojis, fonts, template_filenames
emojis, fonts, template_filenames = get_assets(
utils.EMOJI_DIR, utils.FONTS_DIR, utils.SCREENSHOT_TEMPLATES_DIR
)
primitives = {
"color": [
MediaFilterWithPD(name="brightness", pd=BrightnessPD()),
MediaFilterWithPD(name="grayscale", pd=GrayscalePD()),
MediaFilterWithPD(name="saturation", pd=SaturationPD()),
],
"overlay": [
MediaFilterWithPD(
name="overlay_emoji",
pd=OverlayEmojiPD(emoji_path=UniformChoice(emojis)),
),
MediaFilterWithPD(
name="overlay_text", pd=OverlayTextPD(font_file=UniformChoice(fonts))
),
],
"pixel-level": [
MediaFilterWithPD(name="blur", pd=BlurPD()),
MediaFilterWithPD(name="convert_color", pd=ConvertColorPD()),
MediaFilterWithPD(name="encoding_quality", pd=EncodingQualityPD()),
MediaFilterWithPD(name="apply_pil_filter", pd=EnhanceEdgesPD()),
MediaFilterWithPD(name="pixelization", pd=PixelizationPD()),
MediaFilterWithPD(name="shuffle_pixels", pd=ShufflePixelsPD()),
],
"spatial": [
MediaFilterWithPD(name="crop", pd=CropPD()),
MediaFilterWithPD(name="hflip", pd=HFlipPD()),
MediaFilterWithPD(name="change_aspect_ratio", pd=AspectRatioPD()),
MediaFilterWithPD(
name="overlay_onto_screenshot",
pd=OverlayOntoScreenshotPD(
template_filepath=UniformChoice(template_filenames)
),
),
MediaFilterWithPD(name="pad_square", pd=PadSquarePD()),
MediaFilterWithPD(
name="perspective_transform", pd=PerspectiveTransformPD()
),
MediaFilterWithPD(name="rotate", pd=RotatePD()),
],
}
post_filters = []
def augment_img_wrapper(img, rng: RNG = rng, return_params=False):
    """ Wrapper for augment_img that falls back to the unmodified image on error """
    try:
        return augment_img(img, rng, return_params)
    except Exception as e:
        print(f"Error augmenting image: {e}")
        return (img, ["none"]) if return_params else img
def augment_img(img, rng: RNG = rng, return_params=False):
"""
Sample augmentation parameters for img.
Args:
img: query image.
"""
# select filters to apply
num_filters = rng.choice(np.arange(1, 5), p=[0.1, 0.2, 0.3, 0.4])
filter_types_to_apply = rng.choice(
np.asarray(list(primitives.keys())), size=num_filters, replace=False
)
filters_to_apply = [
primitives[ftype][rng.randint(0, len(primitives[ftype]))]
for ftype in filter_types_to_apply
]
filters_to_apply += post_filters
# Ensure that crop is in first position if selected and that convert_color is in last position if selected
for j, vf in enumerate(filters_to_apply):
if vf.name == "crop":
filters_to_apply[j], filters_to_apply[0] = (
filters_to_apply[0],
filters_to_apply[j],
)
if vf.name == "convert_color":
filters_to_apply[j], filters_to_apply[-1] = (
filters_to_apply[-1],
filters_to_apply[j],
)
# sample parameters for each filter
all_filters_parameters = sample_img_filters_parameters(
rng, filters_to_apply
)
# apply filters
for j, ftr in enumerate(all_filters_parameters):
aug_func = getattr(imaugs, ftr.name, None)
kwargs = ftr.kwargs
if ftr.name == "crop":
x1, x2 = kwargs.pop("xs")
y1, y2 = kwargs.pop("ys")
kwargs["x1"], kwargs["x2"] = x1, x2
kwargs["y1"], kwargs["y2"] = y1, y2
img = aug_func(image=img, **kwargs)
img = img.convert('RGB')
if return_params:
return img, all_filters_parameters
else:
return img
if __name__ == '__main__':
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--output_dir", type=str, default='output')
parser.add_argument("--data_dir", type=str, default="/img/data/dir/")
parser.add_argument("--seed", type=int, default=42)
return parser
params = get_parser().parse_args()
print("__log__:{}".format(json.dumps(vars(params))))
# set seed
np.random.seed(params.seed)
random.seed(params.seed)
rng = np.random.RandomState(params.seed)
# Load data
print("Loading filenames from {}".format(params.data_dir))
filenames = os.listdir(params.data_dir)
# Generate augmented images
print("Generating augmented images into {}".format(params.output_dir))
augmentations = []
os.makedirs(params.output_dir, exist_ok=True)
for filename in filenames:
img_path = os.path.join(params.data_dir, filename)
img = Image.open(img_path)
img, filters = augment_img(img, rng, return_params=True)
img.convert('RGB').save(os.path.join(params.output_dir, filename), quality=95)
augmentations.append(filters)
print(filename, "[" + ", ".join([str(ftr) for ftr in filters]) + "]")
# break
# Save augmentations
print("Saving augmentations")
with open(os.path.join(params.output_dir, "augmentations.txt"), "a") as f:
for augmentation in augmentations:
line = "[" + ", ".join([str(ftr) for ftr in augmentation]) + "]\n"
f.write(line)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import time
from typing import Callable
import faiss
import numpy as np
import torch
from torch import nn
from torchvision.transforms import functional
from . import utils, utils_img
from .attenuations import JND
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def get_targets(
target: str,
index: faiss.Index,
fts: torch.Tensor,
ivf_centroids: np.ndarray = None
) -> torch.Tensor:
"""
Get the target representations for the features.
Args:
target (str): Target representation to use.
index (faiss.Index): Index to use for retrieval.
fts (torch.Tensor): Features to get the targets for. batch_size x feature_dim
ivf_centroids (np.ndarray): Centroids of the IVF index.
Returns:
targets (torch.Tensor): Target representations for the features. batch_size x feature_dim
"""
if target == 'pq_recons':
targets = index.reconstruct_n(index.ntotal-fts.shape[0], fts.shape[0]) # reconstruct the PQ codes that have just been added
targets = torch.tensor(targets)
elif target == 'ori_ft':
        targets = fts.clone()
elif target == 'ivf_cluster':
ivf_D, ivf_I = index.quantizer.search(fts.detach().cpu().numpy(), k=1) # find the closest cluster center for each feature
targets = ivf_centroids.take(ivf_I.flatten(), axis=0) # get the cluster representation for each feature
targets = torch.tensor(targets)
elif target == 'ivf_cluster_half':
ivf_D, ivf_I = index.quantizer.search(fts.detach().cpu().numpy(), k=1)
centroids = ivf_centroids.take(ivf_I.flatten(), axis=0)
        targets = (torch.tensor(centroids) + fts.clone()) / 2  # midpoint between the cluster centroid and the original feature
else:
raise NotImplementedError(f'Invalid target: {target}')
return targets
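# Illustrative sketch (not part of the original pipeline): how get_targets can be
# exercised with a small, self-contained IVF-PQ index. The dimension, index string
# and numbers of vectors below are arbitrary assumptions.
def _example_get_targets_usage():
    d = 32
    rng = np.random.RandomState(0)
    train_fts = rng.rand(2048, d).astype('float32')
    index = faiss.index_factory(d, "IVF16,PQ4x8", faiss.METRIC_L2)
    index.train(train_fts)
    index.add(train_fts)
    ivf = faiss.extract_index_ivf(index)
    ivf_centroids = ivf.quantizer.reconstruct_n(0, ivf.nlist)
    fts = torch.tensor(rng.rand(8, d).astype('float32'))
    index.add(fts.numpy())  # 'pq_recons' reconstructs the codes that were just added
    ivf.make_direct_map()  # required before reconstruct_n on an IVF index
    return get_targets('pq_recons', index, fts, ivf_centroids)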
def activate_images(
imgs: list[torch.Tensor],
ori_fts: torch.Tensor,
model: nn.Module,
index: faiss.Index,
ivf_centroids: np.ndarray,
attenuation: JND,
loss_f: Callable,
loss_i: Callable,
params: argparse.Namespace
) -> list[torch.Tensor]:
"""
Activate images.
Args:
        imgs (list of torch.Tensor): Images to activate. batch_size * [3 x height x width]
        ori_fts (torch.Tensor): Original features of the images. batch_size x feature_dim
model (torch.nn.Module): Model for feature extraction.
index (faiss.Index): Index to use for retrieval.
ivf_centroids (np.ndarray): Centroids of the IVF index.
attenuation (JND): To create Just Noticeable Difference heatmaps.
loss_f (Callable): Loss function to use for the indexation loss.
loss_i (Callable): Loss function to use for the image loss.
params (argparse.Namespace): Parameters.
Returns:
activated images (list of torch.Tensor): Activated images. batch_size * [3 x height x width]
"""
targets = get_targets(params.target, index, ori_fts, ivf_centroids)
targets = targets.to(device)
# Just noticeable difference heatmaps
alpha = torch.tensor([0.072*(1/0.299), 0.072*(1/0.587), 0.072*(1/0.114)])
alpha = alpha[:,None,None].to(device) # 3 x 1 x 1
heatmaps = [params.scaling * attenuation.heatmaps(img) for img in imgs]
# init distortion + optimizer + scheduler
deltas = [1e-6 * torch.randn_like(img).to(device) for img in imgs] # b (1 c h w)
for distortion in deltas:
distortion.requires_grad = True
optim_params = utils.parse_params(params.optimizer)
optimizer = utils.build_optimizer(model_params=deltas, **optim_params)
if params.scheduler is not None:
scheduler = utils.build_scheduler(optimizer=optimizer, **utils.parse_params(params.scheduler))
# begin optim
iter_time = time.time()
log_stats = []
for gd_it in range(params.iterations):
gd_it_time = time.time()
if params.scheduler is not None:
scheduler.step(gd_it)
# perceptual constraints
percep_deltas = [torch.tanh(delta) for delta in deltas] if params.use_tanh else deltas
percep_deltas = [delta * alpha for delta in percep_deltas] if params.scale_channels else percep_deltas
imgs_t = [img + hm * delta for img, hm, delta in zip(imgs, heatmaps, percep_deltas)]
# get features
batch_imgs = [functional.resize(img_t, (params.resize_size, params.resize_size)) for img_t in imgs_t]
batch_imgs = torch.stack(batch_imgs)
fts = model(batch_imgs) # b d
# compute losses
lf = loss_f(fts, targets)
li = loss_i(imgs_t, imgs)
loss = params.lambda_f * lf + params.lambda_i * li
# step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# log stats
psnrs = torch.tensor([utils_img.psnr(img_t, img) for img_t, img in zip(imgs_t, imgs)])
linfs = torch.tensor([utils_img.linf(img_t, img) for img_t, img in zip(imgs_t, imgs)])
log_stats.append({
'gd_it': gd_it,
'loss': loss.item(),
'loss_f': lf.item(),
'loss_i': li.item(),
'psnr': torch.nanmean(psnrs).item(),
'linf': torch.nanmean(linfs).item(),
'lr': optimizer.param_groups[0]['lr'],
'gd_it_time': time.time() - gd_it_time,
'iter_time': time.time() - iter_time,
'max_mem': torch.cuda.max_memory_allocated() / (1024*1024),
'kw': 'optim',
})
if (gd_it+1) % params.log_freq == 0:
print(json.dumps(log_stats[-1]))
# tqdm.tqdm.write(json.dumps(log_stats[-1]))
# perceptual constraints
percep_deltas = [torch.tanh(delta) for delta in deltas] if params.use_tanh else deltas
percep_deltas = [delta * alpha for delta in percep_deltas] if params.scale_channels else percep_deltas
imgs_t = [img + hm * delta for img, hm, delta in zip(imgs, heatmaps, percep_deltas)]
return imgs_t
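# Minimal sketch of the perceptual constraint applied inside the loop above:
# the raw distortion is squashed with tanh, optionally rescaled per RGB channel,
# then modulated by the JND heatmap before being added to the image.
# The image size and the uniform stand-in heatmap are assumptions.
def _example_perceptual_constraint():
    img = torch.rand(3, 64, 64, device=device)
    delta = 1e-6 * torch.randn_like(img)
    alpha = torch.tensor([0.072 / 0.299, 0.072 / 0.587, 0.072 / 0.114], device=device)[:, None, None]
    heatmap = torch.ones_like(img)  # stand-in for params.scaling * attenuation.heatmaps(img)
    return img + heatmap * (torch.tanh(delta) * alpha)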
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import functools
import logging
import os
import faiss
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import timm
from timm import optim as timm_optim
from timm import scheduler as timm_scheduler
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import datasets, models
from torchvision.datasets.folder import default_loader, is_image_file
# Index
def build_index_factory(idx_str, quant, fts_path, idx_path=None) -> faiss.Index:
"""
Builds index from string and fts_path. see https://github.com/facebookresearch/faiss/wiki/The-index-factory
Args:
idx_str: string describing the index
quant: quantization type, either "L2" or "IP" (Inner Product)
fts_path: path to the train features as a torch tensor .pt file
idx_path: path to save the index
"""
fts = torch.load(fts_path)
fts = fts.numpy() # b d
D = fts.shape[-1]
metric = faiss.METRIC_L2 if quant == 'L2' else faiss.METRIC_INNER_PRODUCT
index = faiss.index_factory(D, idx_str, metric)
index.train(fts)
if idx_path is not None:
print(f'Saving Index to {idx_path}...')
faiss.write_index(index, idx_path)
return index
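# Hypothetical usage sketch for build_index_factory: the features are first saved
# to a temporary .pt file, since the function loads them from disk. The feature
# dimension and index string are arbitrary assumptions.
def _example_build_index():
    import tempfile
    with tempfile.NamedTemporaryFile(suffix=".pt", delete=False) as f:
        fts_path = f.name
    torch.save(torch.rand(4096, 64), fts_path)
    return build_index_factory("IVF64,PQ8x8", "L2", fts_path)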
# Arguments helpers
def bool_inst(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise ValueError('Boolean value expected in args')
def parse_params(s):
"""
Parse parameters into a dictionary, used for optimizer and scheduler parsing.
Example:
"SGD,lr=0.01" -> {"name": "SGD", "lr": 0.01}
"""
s = s.replace(' ', '').split(',')
params = {}
params['name'] = s[0]
for x in s[1:]:
x = x.split('=')
params[x[0]]=float(x[1])
return params
# Optimizer and Scheduler
def build_optimizer(name, model_params, **optim_params):
""" Build optimizer from a dictionary of parameters """
tim_optimizers = sorted(name for name in timm_optim.__dict__
if name[0].isupper() and not name.startswith("__")
and callable(timm_optim.__dict__[name]))
torch_optimizers = sorted(name for name in torch.optim.__dict__
if name[0].isupper() and not name.startswith("__")
and callable(torch.optim.__dict__[name]))
if name in tim_optimizers:
return getattr(timm_optim, name)(model_params, **optim_params)
elif name in torch_optimizers:
return getattr(torch.optim, name)(model_params, **optim_params)
raise ValueError(f'Unknown optimizer "{name}", choose among {str(tim_optimizers+torch_optimizers)}')
def build_scheduler(name, optimizer, **lr_scheduler_params):
"""
Build scheduler from a dictionary of parameters
Args:
name: name of the scheduler
optimizer: optimizer to be used with the scheduler
params: dictionary of scheduler parameters
Ex:
CosineLRScheduler, optimizer {t_initial=50, cycle_mul=2, cycle_limit=3, cycle_decay=0.5, warmup_lr_init=1e-6, warmup_t=5}
"""
tim_schedulers = sorted(name for name in timm_scheduler.__dict__
if name[0].isupper() and not name.startswith("__")
and callable(timm_scheduler.__dict__[name]))
torch_schedulers = sorted(name for name in torch.optim.lr_scheduler.__dict__
if name[0].isupper() and not name.startswith("__")
and callable(torch.optim.lr_scheduler.__dict__[name]))
if name in tim_schedulers:
return getattr(timm_scheduler, name)(optimizer, **lr_scheduler_params)
    elif name in torch_schedulers:
return getattr(torch.optim.lr_scheduler, name)(optimizer, **lr_scheduler_params)
raise ValueError(f'Unknown scheduler "{name}", choose among {str(tim_schedulers+torch_schedulers)}')
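# Illustrative sketch: building an optimizer and a scheduler from the comma-separated
# string format handled by parse_params above. The hyper-parameter values and the
# dummy parameter list are assumptions.
def _example_build_optim_sched():
    dummy_params = [nn.Parameter(torch.zeros(2))]
    optimizer = build_optimizer(model_params=dummy_params, **parse_params("SGD,lr=0.01,momentum=0.9"))
    scheduler = build_scheduler(optimizer=optimizer, **parse_params("CosineLRScheduler,t_initial=10,warmup_t=2"))
    return optimizer, scheduler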
# Model
def build_backbone(path, name):
""" Build a pretrained torchvision backbone from its name.
Args:
path: path to the checkpoint, can be an URL
name: "torchscript" or name of the architecture from torchvision (see https://pytorch.org/vision/stable/models.html)
or timm (see https://rwightman.github.io/pytorch-image-models/models/).
Returns:
model: nn.Module
"""
if name == 'torchscript':
model = torch.jit.load(path)
return model
else:
if hasattr(models, name):
model = getattr(models, name)(pretrained=True)
elif name in timm.list_models():
model = timm.models.create_model(name, num_classes=0)
else:
            raise NotImplementedError('Model %s does not exist in torchvision or timm' % name)
model.head = nn.Identity()
model.fc = nn.Identity()
if path is not None:
if path.startswith("http"):
checkpoint = torch.hub.load_state_dict_from_url(path, progress=False)
else:
checkpoint = torch.load(path)
state_dict = checkpoint
for ckpt_key in ['state_dict', 'model_state_dict', 'teacher']:
if ckpt_key in checkpoint:
state_dict = checkpoint[ckpt_key]
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
msg = model.load_state_dict(state_dict, strict=False)
print(msg)
return model
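# Hypothetical usage sketch for build_backbone: load a torchvision ResNet-50 and
# use it as a feature extractor (its classification head is replaced by Identity).
# The architecture name and input size are assumptions.
def _example_build_backbone():
    model = build_backbone(path=None, name="resnet50")
    model.eval()
    with torch.no_grad():
        fts = model(torch.rand(1, 3, 224, 224))
    return fts.shape  # (1, 2048) once model.fc is replaced by nn.Identity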
# Data loading
@functools.lru_cache()
def get_image_paths(path):
logging.info(f"Resolving files in: {path}")
paths = []
    for root, _, files in os.walk(path):
        for filename in files:
            paths.append(os.path.join(root, filename))
return sorted([fn for fn in paths if is_image_file(fn)])
class ImageFolder:
"""An image folder dataset without classes"""
def __init__(self, path, transform=None, loader=default_loader):
self.samples = get_image_paths(path)
self.loader = loader
self.transform = transform
def __getitem__(self, idx: int):
assert 0 <= idx < len(self)
img = self.loader(self.samples[idx])
if self.transform:
return self.transform(img)
return img
def __len__(self):
return len(self.samples)
def collate_fn(batch):
""" Collate function for data loader. Allows to have img of different size"""
return batch
def get_dataloader(data_dir, transform, batch_size=128, num_workers=8, collate_fn=collate_fn):
""" Get dataloader for the images in the data_dir. """
dataset = ImageFolder(data_dir, transform=transform)
dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn, shuffle=False, pin_memory=True, drop_last=False)
return dataloader
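# Illustrative sketch: iterating over the folder dataloader defined above. The data
# directory is an assumption; because collate_fn returns the raw list, images in a
# batch can keep different sizes.
def _example_iterate_folder(data_dir="/path/to/images"):
    from torchvision import transforms
    transform = transforms.ToTensor()
    dataloader = get_dataloader(data_dir, transform, batch_size=4, num_workers=0)
    for batch in dataloader:
        return [img.shape for img in batch]  # list of (3, H, W) shapes, possibly all different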
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import random
import faiss
import tqdm
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torchvision import transforms
from torchvision.transforms import functional
from torchvision.utils import save_image
from . import attenuations, augment_queries, utils, utils_img
from .engine import activate_images
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
def get_parser():
parser = argparse.ArgumentParser()
def aa(*args, **kwargs):
group.add_argument(*args, **kwargs)
group = parser.add_argument_group('Experiments parameters')
aa("--output_dir", type=str, default="output/", help="Output directory for logs and images (Default: /output)")
aa("--verbose", type=int, default=1)
aa("--seed", type=int, default=0)
group = parser.add_argument_group('Data parameters')
aa("--fts_training_path", type=str, default="path/to/train/fts.pth")
aa("--fts_reference_path", type=str, default="path/to/train/ref_990k.pth")
aa("--data_dir", type=str, default="/path/to/disc/ref_10k.pth")
aa("--query_nonmatch_dir", type=str, default="/path/to/disc/queries_40k")
aa("--batch_size", type=int, default=16)
aa("--batch_size_eval", type=int, default=128)
aa("--resize_size", type=int, default=288, help="Resize images to this size. (Default: 288)")
group = parser.add_argument_group('Model parameters')
aa("--model_name", type=str, default="torchscript")
aa("--model_path", type=str, default="/path/to/model.torchscript.pt")
group = parser.add_argument_group('Index parameters')
aa("--idx_dir", type=str, default="indexes", help="Directory where to save the index. (Default: index_disc_sscd288)")
aa("--idx_factory", type=str, default="IVF4096,PQ8x8", help="String to create index from index factory. (Default: IVF4096,PQ8x8)")
aa("--quant", type=str, default="L2", help="Quantizer type if IVF (L2, IP, etc.)")
aa("--nprobe", type=int, default=1, help="Number of probes per query if IVF.")
aa("--kneighbors", type=int, default=100, help="Number of nearest neighbors to return")
group = parser.add_argument_group('Optimization parameters')
aa("--iterations", type=int, default=10, help="Number of iterations for image optimization. (Default: 10)")
aa("--optimizer", type=str, default="Adam,lr=1e-0", help="Optimizer to use. (Default: Adam)")
aa("--scheduler", type=str, default=None, help="Scheduler to use. (Default: None)")
aa("--target", type=str, default="pq_recons", help="Target to use. (Default: pq_recons)")
aa("--loss_f", type=str, default="cossim", help="Loss w to use. Choose among mse, cossim (Default: cossim)")
aa("--lambda_f", type=float, default=1.0, help="Weight of the feature loss. (Default: 1.0)")
aa("--lambda_i", type=float, default=1e-2, help="Weight of the image loss. (Default: 1.0)")
group = parser.add_argument_group('Distortion & Attenuation parameters')
aa("--use_attenuation", type=utils.bool_inst, default=True, help="Use heatmap attenuation")
aa("--scaling", type=float, default=3.0, help="Scaling factor for the heatmap attenuation")
aa("--scale_channels", type=utils.bool_inst, default=True, help="Scale the RGB channels of the heatmap attenuation")
aa("--use_tanh", type=utils.bool_inst, default=True, help="Use tanh for the heatmap attenuation")
group = parser.add_argument_group('Evaluation parameters')
aa("--use_attacks_2", type=utils.bool_inst, default=False, help="Use attacks_2 for augmentation evaluation. (Default: False)")
aa("--eval_retrieval", type=utils.bool_inst, default=True, help="Evaluate retrieval. (Default: True)")
aa("--eval_icd", type=utils.bool_inst, default=True, help="Evaluate icd. (Default: True)")
group = parser.add_argument_group('Misc parameters')
aa("--active", type=utils.bool_inst, default=True, help="Activate images")
aa("--save_imgs", type=utils.bool_inst, default=True, help="Save images")
aa("--log_freq", type=int, default=11, help="Log every n iterations. (Default: 1)")
aa("--debug", type=utils.bool_inst, default=False, help="Debug mode. (Default: False)")
return parser
@torch.no_grad()
def eval_retrieval(img_loader, image_indices, transform, model, index, kneighbors, use_attacks_2=False):
"""
Evaluate retrieval on the activated images.
Args:
img_loader (torch.utils.data.DataLoader): Data loader for the images.
image_indices (list): List of ground-truth image indices.
transform (torchvision.transforms): Transform to apply to the images.
model (torch.nn.Module): Model to use for feature extraction.
index (faiss.Index): Index to use for retrieval.
kneighbors (int): Number of nearest neighbors to return.
use_attacks_2 (bool): Use attacks_2 for augmentation evaluation. (Default: False)
Returns:
df (pandas.DataFrame): Dataframe with the results.
"""
logs = []
attacks = utils_img.attacks_2 if use_attacks_2 else utils_img.attacks
base_count = 0
for ii, imgs in enumerate(tqdm.tqdm(img_loader)):
# create attacks for each image of the batch
attacked_imgs = [utils_img.generate_attacks(pil_img, attacks) for pil_img in imgs] # batchsize nattacks
# create batches for each attack
batch_attacked_imgs = [[] for _ in range(len(attacks))] # nattacks 0
for jj, attacked_img_jj in enumerate(attacked_imgs):
for kk in range(len(attacks)): # nattacks 0 -> nattacks batchsize
img_jj_attack_kk = transform(attacked_img_jj[kk]).unsqueeze(0).to(device)
batch_attacked_imgs[kk].append(img_jj_attack_kk)
batch_attacked_imgs = [torch.cat(batch_attacked_img, dim=0) for batch_attacked_img in batch_attacked_imgs] # nattacks batchsize
# iterate over attacks
for kk in range(len(attacks)):
# create attack param
attack = attacks[kk].copy()
attack_name = attack.pop('attack')
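            # note: every parameter name is 'attack_param', so dict(zip(...)) keeps only
            # the last value when an attack defines more than one parameter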
param_names = ['attack_param' for _ in range(len(attack.keys()))]
attack_params = dict(zip(param_names,list(attack.values())))
# extract features
fts = model(batch_attacked_imgs[kk])
fts = fts.detach().cpu().numpy()
# retrieve nearest neighbors
retrieved_Ds, retrieved_Is = index.search(fts, k=kneighbors)
# iterate over images of the batch
for jj in range(len(batch_attacked_imgs[kk])):
image_index = image_indices[base_count+jj]
retrieved_D, retrieved_I = retrieved_Ds[jj], retrieved_Is[jj]
                rank = [rr for rr in range(len(retrieved_I)) if retrieved_I[rr] == image_index]
rank = rank[0] if rank else len(retrieved_I)
logs.append({
'batch': ii,
'image_index': image_index,
"attack": attack_name,
**attack_params,
'retrieved_distances': retrieved_D,
'retrieved_indices': retrieved_I,
'rank': rank,
'r@1': 1 if rank<1 else 0,
'r@10': 1 if rank<10 else 0,
'r@100': 1 if rank<100 else 0,
'ap': 1/(rank+1),
"kw": "evaluation",
})
# update count of images
base_count += len(imgs)
df = pd.DataFrame(logs).drop(columns='kw')
return df
@torch.no_grad()
def eval_icd(img_loader, img_nonmatch_loader, image_indices, transform, model, index, kneighbors, seed=0):
"""
Evaluate icd on the activated images.
Args:
img_loader (torch.utils.data.DataLoader): Data loader for the images.
img_nonmatch_loader (torch.utils.data.DataLoader): Data loader for the non-matching images.
image_indices (list): List of ground-truth image indices.
transform (torchvision.transforms): Transform to apply to the images.
model (torch.nn.Module): Model to use for feature extraction.
index (faiss.Index): Index to use for retrieval.
kneighbors (int): Number of nearest neighbors to return.
seed (int): Seed for the random number generator. (Default: 0)
Returns:
df (pandas.DataFrame): Dataframe with the results.
"""
# stats on matching images
rng = np.random.RandomState(seed)
logs = []
ct_match = 0 # count of matching images
for ii, imgs in enumerate(tqdm.tqdm(img_loader)):
# create attack for each image of the batch
attacked_imgs = []
attack_names = []
for jj, pil_img in enumerate(imgs):
attacked_img, aug_params = augment_queries.augment_img_wrapper(pil_img, rng, return_params=True)
attack_name = "[" + ", ".join([str(ftr) for ftr in aug_params])
attacked_img = transform(attacked_img).unsqueeze(0).to(device)
attack_names.append(attack_name)
attacked_imgs.append(attacked_img)
attacked_imgs = torch.cat(attacked_imgs, dim=0)
# extract features
fts = model(attacked_imgs)
fts = fts.detach().cpu().numpy()
# nearest neighbors search
retrieved_Ds, retrieved_Is = index.search(fts, k=kneighbors)
# iterate over images of the batch
for jj in range(len(imgs)):
retrieved_D, retrieved_I = retrieved_Ds[jj], retrieved_Is[jj]
image_index = image_indices[ct_match + jj]
logs.append({
'batch': ii,
'image_index': image_index,
'attack': attack_names[jj],
'retrieved_distances': retrieved_D,
'retrieved_ids': retrieved_I,
"kw": "icd_evaluation",
})
# update count of matching images
ct_match += len(imgs)
    # stats on non-matching images
for ii, imgs in enumerate(tqdm.tqdm(img_nonmatch_loader)):
# create attack for each image of the batch
attacked_imgs = []
attack_names = []
for jj, pil_img in enumerate(imgs):
attacked_img, aug_params = augment_queries.augment_img_wrapper(pil_img, rng, return_params=True)
attack_name = "[" + ", ".join([str(ftr) for ftr in aug_params])
attacked_img = transform(attacked_img).unsqueeze(0).to(device)
attack_names.append(attack_name)
attacked_imgs.append(attacked_img)
attacked_imgs = torch.cat(attacked_imgs, dim=0)
# extract features
fts = model(attacked_imgs)
fts = fts.detach().cpu().numpy()
# nearest neighbors search
retrieved_Ds, retrieved_Is = index.search(fts, k=kneighbors)
# iterate over images of the batch
for jj in range(len(imgs)):
retrieved_D, retrieved_I = retrieved_Ds[jj], retrieved_Is[jj]
logs.append({
'batch': ii,
'image_index': -1,
'attack': attack_names[jj],
'retrieved_distances': retrieved_D,
'retrieved_ids': retrieved_I,
"kw": "icd_evaluation",
})
icd_df = pd.DataFrame(logs).drop(columns='kw')
return icd_df
def main(params):
    # Set seeds for reproducibility
torch.manual_seed(params.seed)
torch.cuda.manual_seed_all(params.seed)
np.random.seed(params.seed)
random.seed(params.seed)
# Create the directories
os.makedirs(params.idx_dir, exist_ok=True)
os.makedirs(params.output_dir, exist_ok=True)
imgs_dir = os.path.join(params.output_dir, 'imgs')
os.makedirs(imgs_dir, exist_ok=True)
print(f'>>> Starting. \n \t Index will be saved in {params.idx_dir} - images will be saved in {imgs_dir} - evaluation logs in {params.output_dir}')
# Build Index - see https://github.com/facebookresearch/faiss/wiki/Faiss-indexes
print(f'>>> Building Index')
idx_path = os.path.join(params.idx_dir, f'idx={params.idx_factory}_quant={params.quant}.index')
if os.path.exists(idx_path):
print(f'>>> Loading Index from {idx_path}')
index = faiss.read_index(idx_path)
else:
print(f'>>> Index not found. Building Index with fts from {params.fts_training_path}...')
index = utils.build_index_factory(params.idx_factory, params.quant, params.fts_training_path, idx_path)
index.nprobe = params.nprobe
if 'IVF' in params.idx_factory: # optionally get the centroids
ivf = faiss.extract_index_ivf(index)
ivf_centroids = ivf.quantizer.reconstruct_n(0, ivf.nlist)
else:
ivf_centroids = None
# Adding reference images to the index
print(f'>>> Adding reference images to the index from {params.fts_reference_path}...')
fts = torch.load(params.fts_reference_path)
index.add(fts.detach().cpu().numpy())
n_index_ref = index.ntotal
if 'IVF' in params.idx_factory:
ivf.make_direct_map()
# Build the feature extractor model
print(f'>>> Building backbone from {params.model_path}...')
model = utils.build_backbone(path=params.model_path, name=params.model_name)
model.eval()
model.to(device)
for param in model.parameters():
param.requires_grad = False
# loss for feature
cossim = nn.CosineSimilarity(dim=-1)
pdist = nn.PairwiseDistance(p=2)
def loss_f(ft, target):
if params.loss_f == 'cossim':
dists = -cossim(ft, target)
else:
dists = pdist(ft, target)**2
return torch.mean(dists)
# loss for image
mse = nn.MSELoss()
def loss_i(imgs, imgs_ori):
li = 0
bb = len(imgs)
for ii in range(bb): # imgs do not have same size so we cannot use batch mse
li += mse(imgs[ii], imgs_ori[ii])
return li/bb
# build perceptual attenuation
attenuation = attenuations.JND(preprocess = utils_img.UNNORMALIZE_IMAGENET).to(device)
attenuation.requires_grad = False
# Load images to activate
print(f'>>> Loading images from {params.data_dir}...')
transform = transforms.Compose([
transforms.ToTensor(),
utils_img.NORMALIZE_IMAGENET,
])
transform_with_resize = transforms.Compose([
transforms.ToTensor(),
utils_img.NORMALIZE_IMAGENET,
transforms.Resize((params.resize_size, params.resize_size)),
])
data_loader = utils.get_dataloader(params.data_dir, transform, params.batch_size)
print(f'>>> Activating images...')
all_imgs = []
for it, imgs in enumerate(tqdm.tqdm(data_loader)):
if params.debug and it > 5:
break
imgs = [img.to(device) for img in imgs]
# Add to index
resized_imgs = [functional.resize(img, (params.resize_size, params.resize_size)) for img in imgs]
batch_imgs = torch.stack([img for img in resized_imgs])
fts = model(batch_imgs)
index.add(fts.detach().cpu().numpy())
if 'IVF' in params.idx_factory:
ivf.make_direct_map() # update the direct map if needed
# Activate
if params.active:
imgs = activate_images(imgs, fts, model, index, ivf_centroids, attenuation, loss_f, loss_i, params)
# Save images
for ii, img in enumerate(imgs):
img = torch.clamp(utils_img.UNNORMALIZE_IMAGENET(img), 0, 1)
img = torch.round(255 * img)/255
img = img.detach().cpu()
if params.save_imgs:
save_image(img, os.path.join(imgs_dir, f'{it*params.batch_size + ii:05d}.png'))
else:
all_imgs.append(transforms.ToPILImage()(img))
if params.save_imgs:
# create loader from saved images
img_loader = utils.get_dataloader(imgs_dir, transform=None, batch_size=params.batch_size_eval)
else:
# list of images to list of batches
img_loader = [all_imgs[ii:ii + params.batch_size_eval] for ii in range(0, len(all_imgs), params.batch_size_eval)]
if params.eval_retrieval:
print(f'>>> Evaluating nearest neighbors search...')
image_indices = range(n_index_ref, index.ntotal)
df = eval_retrieval(img_loader, image_indices, transform_with_resize, model, index, params.kneighbors, params.use_attacks_2)
df.to_csv(os.path.join(params.output_dir, 'retr_df.csv'), index=False)
df.fillna(0, inplace=True)
df_mean = df.groupby(['attack', 'attack_param'], as_index=False).mean()
print(f'\n{df_mean}')
if params.eval_icd:
print(f'>>> Evaluating copy detection on query set...')
image_indices = range(n_index_ref, index.ntotal)
        img_nonmatch_loader = utils.get_dataloader(params.query_nonmatch_dir, transform=None, batch_size=params.batch_size_eval)
        icd_df = eval_icd(img_loader, img_nonmatch_loader, image_indices, transform_with_resize, model, index, params.kneighbors)
icd_df_path = os.path.join(params.output_dir,'icd_df.csv')
icd_df.to_csv(icd_df_path, index=False)
print(f'\n{icd_df}')
if __name__ == '__main__':
# generate parser / parse parameters
parser = get_parser()
params = parser.parse_args()
# run experiment
main(params)
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import setuptools
setuptools.setup(
name="pterotactyl",
version="0.1.0",
author="Facebook AI Research",
description="",
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence :: Active Sensing",
],
python_requires=">=3.6",
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import torch
from pterotactyl.utility import utils
BASE_MESH_SIZE = 1824
BASE_CHART_SIZE = 25
# replay buffer used for learning RL models over the environment
class ReplayMemory:
def __init__(self, args):
self.args = args
# basic info which might be used by a learning method
        # _n denotes observations occurring after the action is performed
self.mask = torch.zeros((self.args.mem_capacity, self.args.num_actions))
self.mask_n = torch.zeros((self.args.mem_capacity, self.args.num_actions))
self.actions = torch.zeros((self.args.mem_capacity))
self.rewards = torch.zeros(self.args.mem_capacity)
self.score = torch.zeros(self.args.mem_capacity)
self.score_n = torch.zeros(self.args.mem_capacity)
self.first_score = torch.zeros(self.args.mem_capacity)
if self.args.use_recon:
num_fingers = 1 if self.args.finger else 4
mesh_shape = BASE_MESH_SIZE + (
BASE_CHART_SIZE * self.args.num_grasps * num_fingers
)
self.mesh = torch.zeros((self.args.mem_capacity, mesh_shape, 4))
self.mesh_n = torch.zeros((self.args.mem_capacity, mesh_shape, 4))
if self.args.use_latent:
latent_size = utils.load_model_config(self.args.auto_location)[
0
].encoding_size
self.latent = torch.zeros((self.args.mem_capacity, latent_size))
self.latent_n = torch.zeros((self.args.mem_capacity, latent_size))
self.first_latent = torch.zeros((self.args.mem_capacity, latent_size))
self.position = 0
self.count_seen = 0
# add a set of transitions to the replay buffer
def push(self, action, observation, next_observation, reward):
for i in range(len(action)):
self.actions[self.position] = action[i]
self.rewards[self.position] = reward[i]
self.score[self.position] = observation["score"][i]
self.score_n[self.position] = next_observation["score"][i]
self.first_score[self.position] = observation["first_score"][i]
self.mask[self.position] = observation["mask"][i]
self.mask_n[self.position] = next_observation["mask"][i]
if self.args.use_recon:
self.mesh[self.position] = observation["mesh"][i]
self.mesh_n[self.position] = next_observation["mesh"][i]
if self.args.use_latent:
self.latent[self.position] = observation["latent"][i]
self.latent_n[self.position] = next_observation["latent"][i]
self.first_latent[self.position] = observation["first_latent"][i]
self.count_seen += 1
self.position = (self.position + 1) % self.args.mem_capacity
# sample a set of transitions from the replay buffer
def sample(self):
if (
self.count_seen < self.args.burn_in
or self.count_seen < self.args.train_batch_size
):
return None
indices = np.random.choice(
min(self.count_seen, self.args.mem_capacity), self.args.train_batch_size
)
data = {
"mask": self.mask[indices],
"mask_n": self.mask_n[indices],
"actions": self.actions[indices],
"rewards": self.rewards[indices],
"score": self.score[indices],
"score_n": self.score_n[indices],
"first_score": self.first_score[indices],
}
if self.args.use_recon:
data["mesh"] = self.mesh[indices]
data["mesh_n"] = self.mesh_n[indices]
if self.args.use_latent:
data["latent"] = self.latent[indices]
data["latent_n"] = self.latent_n[indices]
data["first_latent"] = self.first_latent[indices]
return data
# save the replay buffer to disk
def save(self, directory):
data = {
"mask": self.mask,
"mask_n": self.mask_n,
"actions": self.actions,
"rewards": self.rewards,
"score": self.score,
"first_score": self.first_score,
"position": self.position,
"count_seen": self.count_seen,
}
if self.args.use_recon:
data["mesh"] = self.mesh
data["mesh_n"] = self.mesh_n
if self.args.use_latent:
data["latent"] = self.latent
data["latent_n"] = self.latent_n
data["first_latent"] = self.first_latent
temp_path = directory + "_replay_buffer_temp.pt"
full_path = directory + "_replay_buffer.pt"
torch.save(data, temp_path)
os.rename(temp_path, full_path)
# load the replay buffer from the disk
def load(self, directory):
data = torch.load(directory + "_replay_buffer.pt")
self.mask = data["mask"]
self.mask_n = data["mask_n"]
self.actions = data["actions"]
self.actions = data["actions"]
self.rewards = data["rewards"]
self.score = data["score"]
self.first_score = data["first_score"]
self.position = data["position"]
self.count_seen = data["count_seen"]
if self.args.use_recon:
self.mesh = data["mesh"]
self.mesh_n = data["mesh_n"]
if self.args.use_latent:
self.latent = data["latent"]
self.latent_n = data["latent_n"]
self.first_latent = data["first_latent"]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import numpy as np
import torch
import torch.utils.data
from pterotactyl.utility import utils
from pterotactyl.utility import data_loaders
from pterotactyl.reconstruction.touch import model as touch_model
from pterotactyl.reconstruction.vision import model as vision_model
from pterotactyl.reconstruction.autoencoder import model as auto_model
import pterotactyl.objects as objects
from pterotactyl.simulator.scene import sampler
from pterotactyl.simulator.physics import grasping
from pterotactyl import pretrained
class ActiveTouch:
def __init__(self, args):
self.args = args
self.seed(self.args.seed)
self.current_information = {}
self.steps = 0
self.touch_chart_location = os.path.join(
os.path.dirname(objects.__file__), "touch_chart.obj"
)
self.vision_chart_location = os.path.join(
os.path.dirname(objects.__file__), "vision_charts.obj"
)
self.pretrained_recon_models()
self.setup_recon()
self.get_loaders()
self.sampler = sampler.Sampler(
grasping.Agnostic_Grasp, bs=self.args.env_batch_size, vision=False
)
# Fix seeds
def seed(self, seed):
        # store the seed under a different name so the method itself is not overwritten
        self.seed_value = seed
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
# get dataloaders
def get_loaders(self):
if not self.args.eval:
self.train_data = data_loaders.mesh_loader_active(
self.args, set_type="RL_train"
)
set_type = "valid"
else:
set_type = "test"
self.valid_data = data_loaders.mesh_loader_active(self.args, set_type=set_type)
def pretrained_recon_models(self):
if self.args.pretrained_recon:
self.args.touch_location = (
os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/"
)
if self.args.use_img:
if self.args.finger:
self.args.vision_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/v_t_p/"
)
self.args.auto_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/v_t_p/"
)
else:
self.args.vision_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/v_t_g/"
)
self.args.auto_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/v_t_g/"
)
else:
if self.args.finger:
self.args.vision_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/t_p/"
)
self.args.auto_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/t_p/"
)
else:
self.args.vision_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/t_g/"
)
self.args.auto_location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/t_g/"
)
# initialize and load the correct reconstruction models
def setup_recon(self):
self.touch_verts, _ = utils.load_mesh_touch(self.touch_chart_location)
        # load pretrained touch prediction model
touch_args, _ = utils.load_model_config(self.args.touch_location)
weights = self.args.touch_location + '/model'
self.touch_prediction = touch_model.Encoder().cuda()
self.touch_prediction.load_state_dict(torch.load(weights))
self.touch_prediction.eval()
        # load pretrained vision prediction model
vision_args, _ = utils.load_model_config(self.args.vision_location)
weights = self.args.vision_location + '/model'
self.mesh_info, self.initial_mesh = utils.load_mesh_vision(
vision_args, self.vision_chart_location
)
self.initial_mesh = self.initial_mesh.cuda()
self.n_vision_charts = self.initial_mesh.shape[0]
self.deform = vision_model.Deformation(
self.mesh_info, self.initial_mesh, vision_args
).cuda()
self.deform.load_state_dict(torch.load(weights))
self.deform.eval()
        # load pretrained autoencoder model
if self.args.use_latent:
auto_args, _ = utils.load_model_config(self.args.auto_location)
weights = self.args.auto_location + '/model'
self.auto_encoder = auto_model.AutoEncoder(
self.mesh_info, self.initial_mesh, auto_args, only_encode=True
).cuda()
self.auto_encoder.load_state_dict(torch.load(weights), strict=False)
self.auto_encoder.eval()
# reset the environment with new objects
def reset(self, batch):
self.current_data = {}
self.steps = 0
self.current_data["first_score"] = None
self.current_data["batch"] = batch
self.current_data["mask"] = torch.zeros(
[self.args.env_batch_size, self.args.num_actions]
)
self.sampler.load_objects(batch["names"], from_dataset=True)
obs = self.compute_obs()
self.current_data["score"] = obs["score"]
return obs
    # take a step in the environment with the supplied actions
def step(self, actions):
self.update_masks(actions)
obs = self.compute_obs(actions=actions)
reward = self.current_data["score"] - obs["score"]
self.current_data["score"] = obs["score"]
self.steps += 1
done = self.steps == self.args.budget
return obs, reward, done
    # compute the best myopic greedy actions and perform them
def best_step(self, greedy_checks=None):
best_actions = [None for _ in range(self.args.env_batch_size)]
best_score = [1000 for _ in range(self.args.env_batch_size)]
        if greedy_checks is None or greedy_checks >= self.args.num_actions:
for i in range(self.args.num_actions):
actions = [i for _ in range(self.args.env_batch_size)]
obs = self.compute_obs(actions)
for e, s in enumerate(obs["score"]):
if s < best_score[e] and self.current_data["mask"][e][i] == 0:
best_actions[e] = actions[e]
best_score[e] = s
else:
possible_actions = [
list(range(self.args.num_actions))
for _ in range(self.args.env_batch_size)
]
for i in range(self.args.env_batch_size):
seen = torch.where(self.current_data["mask"][i] != 0)[0]
actions = list(seen.data.cpu().numpy())
actions.sort()
actions.reverse()
for action in actions:
del possible_actions[i][action]
checks = min(greedy_checks, len(possible_actions[0]))
selected_actions = [
random.sample(possible_actions[i], checks)
for i in range(self.args.env_batch_size)
]
for i in range(checks):
actions = [
selected_actions[j][i] for j in range(self.args.env_batch_size)
]
obs = self.compute_obs(actions)
for e, s in enumerate(obs["score"]):
if s < best_score[e]:
best_actions[e] = actions[e]
best_score[e] = s
actions = np.array(best_actions)
obs, reward, done = self.step(actions)
return actions, obs, reward, done
    # check the result of performing a specific action
def check_step(self, actions):
obs = self.compute_obs(actions=actions)
return obs
    # perform a given action and compute the new state observations
def compute_obs(self, actions=None):
with torch.no_grad():
charts = self.get_inputs(actions)
img = self.current_data["batch"]["img"].cuda()
verts, mask = self.deform(img, charts)
if self.args.use_latent:
latent = self.auto_encoder(verts.detach(), mask)
score = self.get_score(
verts, self.current_data["batch"]["gt_points"].cuda()
)
if self.current_data["first_score"] is None:
self.current_data["first_score"] = score
if self.args.use_latent:
self.current_data["first_latent"] = latent.data.cpu()
mesh = torch.cat((verts, mask), dim=-1).data.cpu()
obs = {
"score": score.data.cpu().clone(),
"first_score": self.current_data["first_score"].clone(),
"mask": self.current_data["mask"].data.cpu().clone(),
"names": self.current_data["batch"]["names"],
"mesh": mesh.data.cpu().clone(),
}
if self.args.use_latent:
obs["first_latent"] = self.current_data["first_latent"]
obs["latent"] = latent.data.cpu()
return obs
# compute the Chamfer distance of object predictions
def get_score(self, verts, gt_points):
loss = utils.chamfer_distance(
verts, self.mesh_info["faces"], gt_points, num=self.args.number_points
)
loss = self.args.loss_coeff * loss
return loss.cpu()
    # perform a given action and convert the resulting signals into the expected input for the reconstructor
def get_inputs(self, actions=None):
num_fingers = 1 if self.args.finger else 4
        # this occurs if a reset is being performed
        # here the input is defined with no touch information
if actions is None:
self.touch_charts = torch.zeros(
(self.args.env_batch_size, num_fingers, self.args.num_grasps, 25, 3)
).cuda()
self.touch_masks = torch.zeros(
(self.args.env_batch_size, num_fingers, self.args.num_grasps, 25, 1)
).cuda()
self.vision_charts = self.initial_mesh.unsqueeze(0).repeat(
self.args.env_batch_size, 1, 1
)
self.vision_masks = 3 * torch.ones(
self.vision_charts.shape[:-1]
).cuda().unsqueeze(-1)
else:
            # perform the action
signals = self.sampler.sample(actions, touch_point_cloud=True)
if self.args.finger:
touch = (
torch.FloatTensor(
signals["touch_signal"].data.numpy().astype(np.uint8)
)[:, 1]
.permute(0, 3, 1, 2)
.cuda()
/ 255.0
)
pos = signals["finger_transfrom_pos"][:, 1].cuda()
rot = signals["finger_transform_rot_M"][:, 1].cuda()
ref_frame = {"pos": pos, "rot": rot}
# convert the touch signals to charts
touch_verts = (
self.touch_verts.unsqueeze(0)
.repeat(self.args.env_batch_size, 1, 1)
.cuda()
)
pred_touch_charts = self.touch_prediction(
touch, ref_frame, touch_verts
).contiguous()
# define the touch charts in the input mesh to the reconstructor
for i in range(self.args.env_batch_size):
if signals["touch_status"][i][1] == "touch":
self.touch_charts[i, 0, self.steps] = pred_touch_charts[i]
self.touch_masks[i, 0, self.steps] = 2
elif signals["touch_status"][i][1] == "no_touch":
self.touch_charts[i, 0, self.steps] = (
pos[i].view(1, 1, 3).repeat(1, 25, 1)
)
self.touch_masks[i, 0, self.steps] = 1
else:
self.touch_charts[i, 0, self.steps] = 0
self.touch_masks[i, 0, self.steps] = 0
else:
touch = (
signals["touch_signal"]
.view(-1, 121, 121, 3)
.permute(0, 3, 1, 2)
.cuda()
/ 255.0
)
pos = signals["finger_transfrom_pos"].view(-1, 3).cuda()
rot = signals["finger_transform_rot_M"].view(-1, 3, 3).cuda()
ref_frame = {"pos": pos, "rot": rot}
# convert the touch signals to charts
touch_verts = (
self.touch_verts.unsqueeze(0)
.repeat(self.args.env_batch_size * 4, 1, 1)
.cuda()
)
pred_touch_charts = self.touch_prediction(
touch, ref_frame, touch_verts
).contiguous()
# define the touch charts in the input mesh to the reconstructor
for i in range(self.args.env_batch_size):
for j in range(4):
if signals["touch_status"][i][j] == "touch":
self.touch_charts[i, j, self.steps] = pred_touch_charts[
i * 4 + j
]
self.touch_masks[i, j, self.steps] = 2
elif signals["touch_status"][i][j] == "no_touch":
self.touch_charts[i, j, self.steps] = (
pos[i * 4 + j].view(1, 1, 3).repeat(1, 25, 1)
)
self.touch_masks[i, j, self.steps] = 1
else:
self.touch_charts[i, j, self.steps] = 0
self.touch_masks[i, j, self.steps] = 0
charts = {
"touch_charts": self.touch_charts.view(
self.args.env_batch_size, num_fingers * 5 * 25, 3
).clone(),
"vision_charts": self.vision_charts.clone(),
"touch_masks": self.touch_masks.view(
self.args.env_batch_size, num_fingers * 5 * 25, 1
).clone(),
"vision_masks": self.vision_masks.clone(),
}
return charts
    # this is performed due to a memory leak in pybullet where loaded meshes are not properly deleted
def reset_pybullet(self):
self.sampler.disconnect()
del self.sampler
self.sampler = sampler.Sampler(
grasping.Agnostic_Grasp, bs=self.args.env_batch_size, vision=True
)
    # update the set of actions which have been performed
def update_masks(self, actions):
for i in range(actions.shape[0]):
self.current_data["mask"][i, actions[i]] = 1
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch
from pterotactyl.utility import utils
class Latent_Model(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
latent_size = utils.load_model_config(self.args.auto_location)[0].encoding_size
# for embedding previously performed actions
layers = []
layers.append(nn.Sequential(nn.Linear(self.args.num_actions, 200), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(200, 100), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(100, latent_size)))
self.action_model = nn.Sequential(*layers)
# MLP taking as input embedding of actions, a latent embedding of first prediction, and current prediction
# and predicts a value for every action
hidden_sizes = (
[latent_size * 3]
+ [args.hidden_dim for _ in range(args.layers - 1)]
+ [self.args.num_actions]
)
layers = []
for i in range(args.layers):
if i < args.layers - 1:
layers.append(
nn.Sequential(
nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]), nn.ReLU()
)
)
else:
layers.append(
nn.Sequential(nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]))
)
self.model = nn.Sequential(*layers)
self.args = args
def forward(self, obs):
action_input = self.action_model(obs["mask"].float().cuda())
shape_input_1 = obs["latent"].float().cuda()
shape_input_2 = obs["first_latent"].float().cuda()
full_input = torch.cat((action_input, shape_input_1, shape_input_2), dim=-1)
if self.args.normalize:
value = torch.sigmoid(self.model(full_input)) * 2 - 1
elif self.args.use_img:
value = torch.sigmoid(self.model(full_input)) * 6 - 3
else:
value = torch.sigmoid(self.model(full_input)) * 200 - 100
return value
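# Illustrative sketch of the observation dictionary Latent_Model.forward expects.
# The latent size here is an assumption standing in for the autoencoder encoding size
# read from the model config; real observations come from the environment.
def _example_latent_obs(batch_size=2, num_actions=50, latent_size=128):
    return {
        "mask": torch.zeros(batch_size, num_actions),
        "latent": torch.zeros(batch_size, latent_size),
        "first_latent": torch.zeros(batch_size, latent_size),
    }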
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
import argparse
from collections import namedtuple
from tqdm import tqdm
from torch.utils.data import DataLoader
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from pterotactyl.policies import environment
from pterotactyl.policies.baselines import baselines
from pterotactyl.policies.supervised import model as learning_model
from pterotactyl.utility import utils
from pterotactyl import pretrained
class Engine:
def __init__(self, args):
self.args = args
self.results_dir = os.path.join("results", args.exp_type, args.exp_id)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", args.exp_type, args.exp_id
)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
if not self.args.eval:
utils.save_config(self.checkpoint_dir, args)
def __call__(self):
# setup the environment, policy and data
self.env = environment.ActiveTouch(self.args)
self.policy = baselines.even_sampler(self.args)
train_loaders, valid_loaders = self.get_loaders()
self.step = 0
self.models = [
learning_model.Latent_Model(self.args).cuda()
for i in range(self.args.budget)
]
# logging information
writer = SummaryWriter(
os.path.join("experiments/tensorboard/", self.args.exp_type)
)
# evaluate the policy
if self.args.eval:
with torch.no_grad():
self.load(train=False)
self.step = self.args.budget - 1
self.validate(valid_loaders, writer)
return
else:
for i in range(self.args.budget):
params = list(self.models[i].parameters())
self.optimizer = optim.Adam(params, lr=self.args.lr, weight_decay=0)
self.load(train=True)
for model in self.models:
model.eval()
self.epoch = 0
self.best_loss = 10000
self.last_improvement = 0
for j in range(self.args.epoch):
self.train(train_loaders, writer)
with torch.no_grad():
self.validate(valid_loaders, writer)
if self.check_values():
break
self.epoch += 1
self.step += 1
# load data using pytorch dataloader
def get_loaders(self):
if not self.args.eval:
train_loader = DataLoader(
self.env.train_data,
batch_size=self.args.env_batch_size,
shuffle=True,
num_workers=4,
collate_fn=self.env.train_data.collate,
)
else:
train_loader = []
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.valid_data.collate,
)
return train_loader, valid_loader
def train(self, dataloader, writer):
total_loss = 0
iterations = 0.0
self.models[self.step].train()
for v, batch in enumerate(
tqdm(dataloader, total=min(self.args.train_steps, len(dataloader)))
):
if v >= self.args.train_steps:
break
try:
obs = self.env.reset(batch)
            except Exception:
continue
# move to the correct step
with torch.no_grad():
cur_actions = []
for i in range(self.step):
values = self.models[i](obs)
for acts in cur_actions:
for e, act in enumerate(acts):
values[e][act] = 1e10
actions = torch.argmin(values, dim=1)
next_obs, reward, all_done = self.env.step(actions)
obs = next_obs
cur_actions.append(actions)
# predict action values
all_pred_values = self.models[self.step](obs)
pred_values = []
# sample some random actions and compute their value
            random_actions = np.random.randint(
                self.args.num_actions,
                size=self.args.env_batch_size * self.args.training_actions,
            ).reshape(self.args.training_actions, self.args.env_batch_size)
target = []
for actions in random_actions:
temp_obs = self.env.check_step(actions)
if self.args.normalize:
score = (temp_obs["first_score"] - temp_obs["score"]) / temp_obs[
"first_score"
]
else:
score = temp_obs["first_score"] - temp_obs["score"]
cur_pred_values = []
for j, a in enumerate(actions):
cur_pred_values.append(all_pred_values[j, a])
pred_values.append(torch.stack(cur_pred_values))
target.append(score)
target = torch.stack(target).cuda()
pred_values = torch.stack(pred_values)
loss = ((target - pred_values) ** 2).mean()
            # backprop
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
# log
message = f"Train || step {self.step + 1 } || Epoch: {self.epoch}, loss: {loss.item():.3f}, b_ptp: {self.best_loss:.3f}"
tqdm.write(message)
total_loss += loss.item()
iterations += 1.0
self.train_loss = total_loss / iterations
writer.add_scalars(
f"train_loss_{self.step}",
{self.args.exp_id: total_loss / iterations},
self.epoch,
)
    # perform the validation
def validate(self, dataloader, writer):
observations = []
scores = []
actions = []
names = []
self.models[self.step].eval()
valid_length = int(len(dataloader) * 0.2)
for v, batch in enumerate(tqdm(dataloader)):
names += batch["names"]
try:
obs = self.env.reset(batch)
            except Exception:
continue
self.policy.reset()
cur_scores = [obs["score"]]
cur_actions = []
for i in range(self.step + 1):
action_values = self.models[i](obs)
for acts in cur_actions:
for e, act in enumerate(acts):
action_values[e][act] = 1e10
action = torch.argmin(action_values, dim=1)
next_obs, _, _ = self.env.step(action)
# record observation
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(action.data.cpu())
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
message = f"Valid || score: {print_score:.4f}, "
message += f"reward = {print_reward:.4f}"
tqdm.write(message)
if self.args.visualize and v == 5 and self.args.eval:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
scores = torch.cat(scores)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
current_loss = (scores[:, -1] / scores[:, 0]).mean()
self.current_loss = current_loss
print("*" * 30)
message = f"Total Valid || step {self.step + 1 } || score: {current_loss:.4f}, "
message += f"reward = {rewards.mean():.4f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
if self.args.visualize and self.args.eval:
actions = torch.stack(actions).view(-1, self.args.budget)
utils.visualize_actions(self.results_dir, actions, self.args)
if not self.args.eval:
writer.add_scalars(
f"valid_loss_{self.step}", {self.args.exp_id: current_loss}, self.epoch
)
def check_values(self):
if self.best_loss >= self.current_loss:
improvement = self.best_loss - self.current_loss
print(f"Saving with {improvement:.3f} improvement on Validation Set ")
self.best_loss = self.current_loss
self.last_improvement = 0
self.save()
return False
else:
self.last_improvement += 1
if self.last_improvement >= self.args.patience:
print(f"Over {self.args.patience} steps since last imporvement")
print("Moving to next step or exiting")
return True
def load(self, train=False):
if self.args.pretrained:
if self.args.use_img:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/supervised/v_t_p"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/supervised/v_t_g"
)
else:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/supervised/t_p"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/supervised/t_g"
)
config_location = f"{location}/config.json"
with open(config_location) as json_file:
data = json.load(json_file)
data["auto_location"] = self.args.auto_location
data["eval"] = True
data["visualize"] = self.args.visualize
self.args = namedtuple("ObjectName", data.keys())(*data.values())
self.models = [
learning_model.Latent_Model(self.args).cuda()
for i in range(self.args.budget)
]
for i in range(self.args.budget):
self.models[i].load_state_dict(torch.load(location + f"/model_{i}"))
else:
if train:
for i in range(self.step):
self.models[i].load_state_dict(
torch.load(self.checkpoint_dir + f"/model_{i}")
)
else:
for i in range(self.args.budget):
self.models[i].load_state_dict(
torch.load(self.checkpoint_dir + f"/model_{i}")
)
def save(self):
torch.save(
self.models[self.step].state_dict(),
self.checkpoint_dir + f"/model_{self.step}",
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--auto_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/auto/t_p/",
help="the location of the autoencoder part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--epoch", type=int, default=3000, help="number of epochs per step"
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--env_batch_size", type=int, default=3, help="Size of the batch."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument(
"--num_actions", type=int, default=50, help="number of action options"
)
parser.add_argument(
"--eval", action="store_true", default=False, help="for evaluating on test set"
)
parser.add_argument(
"--budget", type=int, default=5, help="number of graspsp to perform"
)
parser.add_argument(
"--exp_id", type=str, default="test", help="The experiment name."
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment type."
)
parser.add_argument(
"--layers", type=int, default=4, help="Number of layers in the q network"
)
parser.add_argument(
"--patience",
type=int,
default=25,
help="number of epochs without progress before stopping",
)
parser.add_argument(
"--training_actions",
type=int,
default=5,
help="number of action values learned for each object in each iteration",
)
parser.add_argument(
"--hidden_dim",
type=int,
default=200,
help="hidden dimension size in layers in the q network",
)
parser.add_argument(
"--train_steps",
type=int,
default=200,
help="number of training steps per epoch",
)
parser.add_argument(
"--normalize", type=int, default=0, help="number of training steps per epoch"
)
parser.add_argument(
"--lr", type=float, default=0.001, help="Initial learning rate."
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="use the pretrained policy",
)
args = parser.parse_args()
args.use_recon = False
args.use_latent = True
trainer = Engine(args)
trainer()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import torch
import torch.nn as nn
import torch.optim as optim
from pterotactyl.policies.DDQN import model
from pterotactyl.policies.baselines import baselines
# DDQN training module
class DDQN(nn.Module):
def __init__(self, args, adj_info, replay):
super().__init__()
self.args = args
self.model = self.get_model(adj_info)
self.replay = replay
self.optimizer = optim.Adam(self.model.parameters(), lr=args.lr)
self.args = args
self.random_sampler = baselines.random_sampler(self.args)
    # set the value of performed actions so they are never selected
def penalise_actions(self, values, obs):
values[obs["mask"] > 0] = -1e10
return values
# select the model type required
def get_model(self, adj):
if self.args.pretrained:
            # pretrained policies use hidden_dim 300 with the latent model and 100 otherwise;
            # the network is shallower (2 layers) only when neither the image nor the
            # single-finger setting is used, and 5 layers in every other case
            self.args.hidden_dim = 300 if self.args.use_latent else 100
            self.args.layers = (
                2 if (not self.args.use_img and not self.args.finger) else 5
            )
if self.args.use_latent:
return model.Latent_Model(self.args).cuda()
elif self.args.use_recon:
return model.Graph_Model(self.args, adj).cuda()
else:
print("No Model type selected")
exit()
# decrease the epsilon value
def update_epsilon(self, epsilon, args):
return max(args.epsilon_end, epsilon * args.epsilon_decay)
# add the observed transition to the replay buffer
def add_experience(self, action, observation, next_observation, reward):
self.replay.push(action, observation, next_observation, reward)
# update the parameters of the model using DDQN update rule
def update_parameters(self, target_net):
self.model.train()
batch = self.replay.sample()
if batch is None:
return None
# get observations
not_done_mask = batch["mask"].cuda().sum(dim=1) < self.args.budget - 1
actions = batch["actions"].cuda()
rewards = batch["rewards"].cuda()
cur_score = batch["score"].cuda()
first_score = batch["first_score"].cuda()
# normalize if needed
if self.args.normalization == "first":
rewards = rewards / first_score
elif self.args.normalization == "current":
rewards = rewards / cur_score
# Standard DDQN update rule
all_q_values_cur = self.forward(batch, penalize=False)
q_values = all_q_values_cur.gather(1, actions.unsqueeze(1).long()).squeeze()
with torch.no_grad():
best_next_action = self.forward(batch, next=True).detach().max(1)[1]
target_values = target_net.forward(
batch, next=True, penalize=False
).detach()
all_q_values_next = torch.zeros((q_values.shape[0])).cuda()
for i in range(q_values.shape[0]):
if not_done_mask[i]:
all_q_values_next[i] = target_values[i][best_next_action[i]]
target_values = (self.args.gamma * all_q_values_next) + rewards
loss = ((q_values - target_values) ** 2).mean()
# backprop
self.optimizer.zero_grad()
loss.backward()
for param in self.parameters():
if param.grad is not None:
param.grad.data.clamp_(-1, 1)
self.optimizer.step()
return loss.item()
def forward(self, obs, next=False, penalize=True):
value = self.model(obs, next=next)
if penalize:
value = self.penalise_actions(value, obs)
return value
def get_action(self, obs, eps_threshold, give_random=False):
sample = random.random()
if sample < eps_threshold or give_random:
return self.random_sampler.get_action(obs["mask"])
else:
with torch.no_grad():
self.model.eval()
q_values = self(obs)
actions = torch.argmax(q_values, dim=1).data.cpu().numpy()
return actions
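# Hedged sketch (not part of the original pipeline): a toy, CPU-only illustration of the
# double-DQN target computed in DDQN.update_parameters above. The batch size, action
# count, gamma value and random tensors below are assumptions for demonstration only.
if __name__ == "__main__":
    batch_size, num_actions, gamma = 4, 6, 0.9
    q_online_cur = torch.rand(batch_size, num_actions)  # online net, current observation
    q_online_next = torch.rand(batch_size, num_actions)  # online net, next observation
    q_target_next = torch.rand(batch_size, num_actions)  # target net, next observation
    chosen_actions = torch.randint(num_actions, (batch_size,))
    rewards = torch.rand(batch_size)
    not_done = torch.tensor([1.0, 1.0, 0.0, 1.0])  # zero where the episode has ended
    # Q(s, a) for the actions actually taken
    q_values = q_online_cur.gather(1, chosen_actions.unsqueeze(1)).squeeze(1)
    # double DQN: pick the argmax with the online net, evaluate it with the target net
    best_next = q_online_next.argmax(dim=1)
    q_next = q_target_next.gather(1, best_next.unsqueeze(1)).squeeze(1) * not_done
    target = rewards + gamma * q_next
    loss = ((q_values - target) ** 2).mean()
    print(f"toy double-DQN loss: {loss.item():.4f}")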
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn as nn
import torch
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import numpy as np
from pterotactyl.utility import utils
# DDQN Q network which makes use of a pretrained latent space of predicted objects
class Latent_Model(nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
latent_size = utils.load_model_config(self.args.auto_location)[0].encoding_size
# for embedding previously performed actions
layers = []
layers.append(nn.Sequential(nn.Linear(self.args.num_actions, 200), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(200, 100), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(100, latent_size)))
self.action_model = nn.Sequential(*layers)
# MLP taking as input embedding of actions, a latent embedding of first prediction, and current prediction
# and predicts a value for every action
hidden_sizes = (
[latent_size * 3]
+ [args.hidden_dim for _ in range(args.layers - 1)]
+ [self.args.num_actions]
)
layers = []
for i in range(args.layers):
if i < args.layers - 1:
layers.append(
nn.Sequential(
nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]), nn.ReLU()
)
)
else:
layers.append(
nn.Sequential(nn.Linear(hidden_sizes[i], hidden_sizes[i + 1]))
)
self.model = nn.Sequential(*layers)
self.args = args
def forward(self, obs, next=False):
if next:
action_input = self.action_model(obs["mask_n"].float().cuda())
shape_input_1 = obs["latent_n"].float().cuda()
else:
action_input = self.action_model(obs["mask"].float().cuda())
shape_input_1 = obs["latent"].float().cuda()
shape_input_2 = obs["first_latent"].float().cuda()
full_input = torch.cat((action_input, shape_input_1, shape_input_2), dim=-1)
value = self.model(full_input)
return value
# DDQN Q network which makes use of full mesh prediction
class Graph_Model(nn.Module):
def __init__(self, args, adj):
super().__init__()
self.adj = adj["adj"].data.cpu().cuda()
self.args = args
self.num_layers = args.layers
input_size = 100
# for embedding previously performed actions
layers = []
layers.append(nn.Sequential(nn.Linear(50, 200), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(200, 100), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(100, input_size)))
self.action_model = nn.Sequential(*layers)
# embedding of vertex positions and masks
self.positional_embedding = Positional_Encoder(input_size)
self.mask_embedding = Mask_Encoder(input_size)
# GCN for predicting actions values from input mesh
hidden_sizes = (
[input_size * 3]
+ [args.hidden_dim for _ in range(args.layers - 1)]
+ [self.args.num_actions]
)
layers = []
for i in range(args.layers):
layers.append(
GCN_layer(
hidden_sizes[i],
hidden_sizes[i + 1],
cut=self.args.cut,
do_cut=(i != self.num_layers - 1),
)
)
self.layers = nn.ModuleList(layers)
def forward(self, obs, next=False):
if next:
action_embedding = self.action_model(obs["mask_n"].float().cuda())
mesh = obs["mesh_n"][:, :, :3].float().cuda()
mask = obs["mesh_n"][:, :, 3:].float().cuda()
else:
action_embedding = self.action_model(obs["mask"].float().cuda())
mesh = obs["mesh"][:, :, :3].float().cuda()
mask = obs["mesh"][:, :, 3:].float().cuda()
positional_embedding = self.positional_embedding(mesh)
mask_embedding = self.mask_embedding(mask)
action_embedding = action_embedding.unsqueeze(1).repeat(1, mesh.shape[1], 1)
vertex_features = torch.cat(
(action_embedding, positional_embedding, mask_embedding), dim=-1
)
# iterate through GCN layers
x = self.layers[0](vertex_features, self.adj, F.relu)
for i in range(1, self.num_layers):
x = self.layers[i](
x, self.adj, F.relu if (i != self.num_layers - 1) else lambda x: x
)
value = torch.max(x, dim=1)[0]
return value
# Graph convolutional network layer
class GCN_layer(nn.Module):
def __init__(self, in_features, out_features, cut=0.33, do_cut=True):
super(GCN_layer, self).__init__()
self.weight = Parameter(torch.Tensor(1, in_features, out_features))
self.bias = Parameter(torch.Tensor(out_features))
self.reset_parameters()
self.cut_size = cut
self.do_cut = do_cut
def reset_parameters(self):
stdv = 6.0 / math.sqrt((self.weight.size(1) + self.weight.size(0)))
stdv *= 0.3
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-0.1, 0.1)
def forward(self, features, adj, activation):
features = torch.matmul(features, self.weight)
        # if we only want to share a subset of features with neighbors
if self.do_cut:
length = round(features.shape[-1] * self.cut_size)
output = torch.matmul(adj, features[:, :, :length])
output = torch.cat((output, features[:, :, length:]), dim=-1)
output[:, :, :length] += self.bias[:length]
else:
output = torch.matmul(adj, features)
output = output + self.bias
return activation(output)
# encode the positional information of vertices using NeRF embeddings
class Positional_Encoder(nn.Module):
def __init__(self, input_size):
super(Positional_Encoder, self).__init__()
layers = []
layers.append(
nn.Linear(63, input_size // 4)
        )  # 10 NeRF frequencies (sin and cos) + the original xyz positions = 63 inputs
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(input_size // 4, input_size // 2))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(input_size // 2, input_size))
self.model = nn.Sequential(*layers)
    # apply the NeRF embedding to the positional information
def nerf_embedding(self, points):
embeddings = []
for i in range(10):
if i == 0:
embeddings.append(torch.sin(np.pi * points))
embeddings.append(torch.cos(np.pi * points))
else:
embeddings.append(torch.sin(np.pi * 2 * i * points))
embeddings.append(torch.cos(np.pi * 2 * i * points))
embeddings = torch.cat(embeddings, dim=-1)
return embeddings
def forward(self, positions):
shape = positions.shape
positions = positions.contiguous().view(shape[0] * shape[1], -1)
        # combine the NeRF embedding with the original positions
positions = torch.cat((self.nerf_embedding((positions)), positions), dim=-1)
        embedding = self.model(positions).view(shape[0], shape[1], -1)
        return embedding
# embedding network for vertex masks
class Mask_Encoder(nn.Module):
def __init__(self, input_size):
super(Mask_Encoder, self).__init__()
layers_mask = []
layers_mask.append(nn.Embedding(4, input_size))
self.model = nn.Sequential(*layers_mask)
def forward(self, mask):
shape = mask.shape
mask = mask.contiguous().view(-1, 1)
        embedding_mask = self.model(mask.long()).view(shape[0], shape[1], -1)
        return embedding_mask
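# Hedged sketch (not part of the original pipeline): toy, CPU-only checks of two of the
# modules defined above. The tensor sizes, the identity adjacency and the layer widths
# used here are assumptions for demonstration only.
if __name__ == "__main__":
    # NeRF-style embedding: 10 frequencies of sin/cos over xyz give 3 * 2 * 10 = 60
    # features, plus the raw positions -> the 63 inputs expected by Positional_Encoder
    encoder = Positional_Encoder(input_size=100)
    points = torch.rand(2, 5, 3)  # (batch, vertices, xyz)
    print(encoder.nerf_embedding(points).shape)  # torch.Size([2, 5, 60])
    print(encoder(points).shape)  # torch.Size([2, 5, 100])
    # GCN_layer with feature cutting: only the first `cut` fraction of the output
    # features is mixed with neighbours through the adjacency matrix
    layer = GCN_layer(in_features=6, out_features=9, cut=0.33, do_cut=True)
    adj = torch.eye(5)  # toy adjacency over 5 vertices
    features = torch.rand(2, 5, 6)
    print(layer(features, adj, F.relu).shape)  # torch.Size([2, 5, 9])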
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
import torch
import argparse
from submitit.helpers import Checkpointable
from tqdm import tqdm
from pterotactyl.policies.DDQN import ddqn
from pterotactyl.policies import environment
from pterotactyl.policies import replay
from pterotactyl.utility import utils
from pterotactyl import pretrained
# module for training the DDQN models
class Engine(Checkpointable):
def __init__(self, args):
self.args = args
self.steps = 0
self.episode = 0
self.epoch = 0
self.cur_loss = 10000
self.best_loss = 10000
self.epsilon = self.args.epsilon_start
self.results_dir = os.path.join("results", args.exp_type, args.exp_id)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", self.args.exp_type, self.args.exp_id
)
if not os.path.exists((self.checkpoint_dir)):
os.makedirs(self.checkpoint_dir)
utils.save_config(self.checkpoint_dir, args)
def __call__(self):
# initialize the learning environment
self.env = environment.ActiveTouch(self.args)
self.replay_memory = replay.ReplayMemory(self.args)
self.policy = ddqn.DDQN(self.args, self.env.mesh_info, self.replay_memory)
self.target_net = ddqn.DDQN(self.args, self.env.mesh_info, None)
self.target_net.load_state_dict(self.policy.state_dict())
self.target_net.eval()
self.writer = SummaryWriter(
os.path.join("experiments/tensorboard/", self.args.exp_type)
)
self.window_size = 1000
self.ave_reward = torch.zeros((self.window_size)).cuda()
self.ave_recon = torch.zeros((self.window_size)).cuda()
train_loader, valid_loaders = self.get_loaders()
if self.args.eval:
self.load(best=True)
self.validate(valid_loaders)
return
self.resume()
# training loop
for epoch in range(self.epoch, self.args.epochs):
self.train(train_loader)
self.env.reset_pybullet()
if self.steps >= self.args.burn_in:
with torch.no_grad():
self.validate(valid_loaders)
self.env.reset_pybullet()
self.check_values_and_save()
self.epoch += 1
# load the environment data into pytorch dataloaders
def get_loaders(self):
if self.args.eval:
train_loader = ""
else:
train_loader = DataLoader(
self.env.train_data,
batch_size=self.args.env_batch_size,
shuffle=True,
num_workers=4,
collate_fn=self.env.train_data.collate,
)
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.valid_data.collate,
)
return train_loader, valid_loader
# training iteration
def train(self, dataloader):
for v, batch in enumerate(tqdm(dataloader, total=self.args.train_steps)):
if v > self.args.train_steps - 1:
break
obs = self.env.reset(batch)
all_done = False
total_reward = 0
while not all_done:
# update epsilon
if self.steps >= self.args.burn_in:
self.epsilon = self.policy.update_epsilon(self.epsilon, self.args)
# get action
get_random_action = self.steps < self.args.burn_in
action = self.policy.get_action(
obs, eps_threshold=self.epsilon, give_random=get_random_action
)
# perform action
with torch.no_grad():
next_obs, reward, all_done = self.env.step(action)
                # save experience
self.policy.add_experience(action, obs, next_obs, reward)
# update policy
if self.steps >= self.args.burn_in:
self.policy.update_parameters(self.target_net)
# update target network
if (
self.steps % self.args.target_update == 0
and self.steps >= self.args.burn_in
):
print("+" * 5 + " updating target " "+" * 5)
self.target_net.load_state_dict(self.policy.state_dict())
torch.cuda.empty_cache()
obs = next_obs
self.steps += 1
# logs
recon = float((obs["score"] / obs["first_score"]).mean().item())
reward = float(
((obs["first_score"] - obs["score"]) / obs["first_score"]).mean().item()
)
self.ave_reward[self.episode % self.window_size] = reward
self.ave_recon[self.episode % self.window_size] = float(
(obs["score"] / obs["first_score"]).mean().item()
)
ave_reward = self.ave_reward[: self.episode + 1].mean()
ave_recon = self.ave_recon[: self.episode + 1].mean()
message = (
f"T Epoch: {self.epoch} Ep: {self.episode}, recon: {recon:.2f}, "
f"reward: {reward:.2f}, a_recon: {ave_recon:.2f}, a_reward: {ave_reward:.2f}, "
f" eps: {self.epsilon:.3f}, best: {self.best_loss:.3f}"
)
tqdm.write(message)
self.episode += 1
# logs
if self.steps >= self.args.burn_in:
self.writer.add_scalars(
"train_recon_|_", {self.args.exp_id: ave_recon}, self.steps
)
self.writer.add_scalars(
"train_reward_|_", {self.args.exp_id: ave_reward}, self.steps
)
# validation iteration
def validate(self, dataloader):
observations = []
scores = []
actions = []
names = []
print("*" * 30)
print("Doing Validation")
        total = self.args.valid_steps if not self.args.eval else None
for v, batch in enumerate(tqdm(dataloader, total=total)):
names += batch["names"]
if v > self.args.valid_steps - 1 and not self.args.eval:
break
obs = self.env.reset(batch)
all_done = False
cur_scores = [obs["score"]]
cur_actions = []
while not all_done:
# select actions
action = self.policy.get_action(
obs, eps_threshold=-1, give_random=False
)
# perform actions
with torch.no_grad():
next_obs, reward, all_done = self.env.step(action)
# record actions
torch.cuda.empty_cache()
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(torch.FloatTensor(action))
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
message = f"Valid || E: {self.epoch}, score: {print_score:.2f}, best score: {self.best_loss:.2f} "
message += f"reward = {print_reward:.2f}"
tqdm.write(message)
if self.args.visualize and v == 5 and self.args.eval:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
scores = torch.cat(scores)
actions = torch.cat(actions)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
variation = torch.std(actions, dim=0).mean()
self.current_loss = (scores[:, -1] / scores[:, 0]).mean()
print("*" * 30)
message = f"Total Valid || E: {self.epoch}, score: {self.current_loss:.4f}, best score: {self.best_loss:.4f} "
message += f"reward = {rewards.mean():.2f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
if not self.args.eval:
self.writer.add_scalars(
f"Valid_recon_|_", {self.args.exp_id: self.current_loss}, self.steps
)
self.writer.add_scalars(
f"Valid_reward_|_", {self.args.exp_id: rewards.mean()}, self.steps
)
self.writer.add_scalars(
"epsilon_|_", {self.args.exp_id: self.epsilon}, self.steps
)
self.writer.add_scalars(
f"Valid_variation_|_", {self.args.exp_id: variation}, self.steps
)
if self.args.visualize and self.args.eval:
utils.visualize_actions(self.results_dir, actions, self.args)
    # check if the new validation score is better and save a checkpoint
def check_values_and_save(self):
if self.best_loss >= self.current_loss:
improvement = self.best_loss - self.current_loss
print(
f"Saving with {improvement:.3f} improvement in Chamfer Distance on Validation Set "
)
self.best_loss = self.current_loss
self.save(best=True)
print(f"Saving DQN checkpoint")
self.save(best=False)
print("Saving replay memory.")
self.replay_memory.save(self.checkpoint_dir)
# resume training
def resume(self):
path = self.checkpoint_dir + "/recent"
if os.path.exists(path + "_model"):
print(f"Loading DQN checkpoint")
self.load(best=False)
print("Loading replay memory.")
self.replay_memory.load(path)
# save current state of training
def save(self, best=False):
if best:
path = self.checkpoint_dir + "/best"
else:
path = self.checkpoint_dir + "/recent"
self.replay_memory.save(path)
torch.save(
{
"dqn_weights": self.policy.state_dict(),
"target_weights": self.target_net.state_dict(),
"args": self.args,
"episode": self.episode,
"steps": self.steps,
"ave_reward": self.ave_reward,
"ave_recon": self.ave_recon,
"epsilon": self.epsilon,
"epoch": self.epoch,
},
path + "_model",
)
# load previous state of training
def load(self, best=True):
if self.args.pretrained:
prefix = "l" if self.args.use_latent else "g"
if self.args.use_img:
if self.args.finger:
path = (
os.path.dirname(pretrained.__file__)
+ f"/policies/DDQN/{prefix}_v_t_p"
)
else:
path = (
os.path.dirname(pretrained.__file__)
+ f"/policies/DDQN/{prefix}_v_t_g"
)
else:
if self.args.finger:
path = (
os.path.dirname(pretrained.__file__)
+ f"/policies/DDQN/{prefix}_t_p"
)
else:
path = (
os.path.dirname(pretrained.__file__)
+ f"/policies/DDQN/{prefix}_t_g"
)
checkpoint = torch.load(path)
self.policy.load_state_dict(checkpoint["dqn_weights"])
else:
if best:
path = self.checkpoint_dir + "/best_model"
else:
path = self.checkpoint_dir + "/recent_model"
checkpoint = torch.load(path)
self.policy.load_state_dict(checkpoint["dqn_weights"])
self.episode = checkpoint["episode"] + 1
if not self.args.eval:
self.target_net.load_state_dict(checkpoint["target_weights"])
self.steps = checkpoint["steps"]
self.ave_reward = checkpoint["ave_reward"]
self.ave_recon = checkpoint["ave_recon"]
self.epsilon = checkpoint["epsilon"]
self.epoch = checkpoint["epoch"] + 1
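# Hedged sketch (illustration only, not called by the training loop): the reconstruction
# and reward quantities logged in Engine.train above, computed on made-up chamfer-style
# scores. The tensor values are assumptions for demonstration.
def _toy_reward_example():
    first_score = torch.tensor([2.0, 4.0])  # chamfer error after the first observation
    score = torch.tensor([1.0, 3.0])  # chamfer error after the final grasp
    recon = (score / first_score).mean()  # fraction of the initial error remaining
    reward = ((first_score - score) / first_score).mean()  # fraction of error removed
    return recon.item(), reward.item()  # (0.625, 0.375)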
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--cut",
type=float,
default=0.33,
help="The shared size of features in the GCN.",
)
parser.add_argument(
"--layers", type=int, default=4, help="Number of layers in the q network"
)
parser.add_argument(
"--hidden_dim",
type=int,
default=200,
help="hidden dimension size in layers in the q network",
)
parser.add_argument(
"--epochs", type=int, default=1000, help="Number of epochs to use."
)
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--eval",
action="store_true",
default=False,
help="Evaluate the trained model on the test set.",
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--auto_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/auto/t_p/",
help="the location of the autoencoder part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--lr", type=float, default=0.0003, help="Initial learning rate."
)
parser.add_argument(
"--env_batch_size",
type=int,
default=3,
help="Size of the batch of objects sampled from the environment",
)
parser.add_argument(
"--train_batch_size",
type=int,
default=16,
help="Size of the batch of transitions sampled for training the q network.",
)
parser.add_argument(
"--exp_id", type=str, default="test", help="The experiment name."
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--patience",
type=int,
default=70,
help="How many epochs without imporvement before training stops.",
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument("--budget", type=int, default=5)
parser.add_argument(
"--normalization",
type=str,
choices=["first", "current", "none"],
default="first",
help="how to normalize the reward for the q network update ",
)
parser.add_argument(
"--mem_capacity", type=int, default=300, help="the size of the replay buffer"
)
parser.add_argument("--burn_in", type=int, default=20, help="ddqn burn in time")
parser.add_argument(
"--num_actions", type=int, default=50, help=" number of possible actions"
)
parser.add_argument("--gamma", type=float, default=0, help="ddqn gamma value")
parser.add_argument(
"--epsilon_start", type=float, default=1.0, help="ddqn initial epsilon value"
)
parser.add_argument(
"--epsilon_decay", type=float, default=0.9999, help="ddqn epsilon decay value"
)
parser.add_argument(
"--epsilon_end", type=float, default=0.01, help="ddqn minimum epsilon value"
)
parser.add_argument(
"--train_steps",
type=int,
default=20,
help="number of training iterations per epoch",
)
parser.add_argument(
"--valid_steps",
type=int,
default=10,
help="number of validation iterations per epoch",
)
parser.add_argument(
"--target_update",
type=int,
default=3000,
help="frequency of target network updates",
)
parser.add_argument(
"--use_latent",
action="store_true",
default=False,
help="if the latent embedding of objects is to be used",
)
parser.add_argument(
"--use_recon",
action="store_true",
default=False,
help="if the object prediction is to be directly used",
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="use the pretrained policy",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
from tqdm import tqdm
from torch.utils.data import DataLoader
import torch
import argparse
import numpy as np
from submitit.helpers import Checkpointable
from pterotactyl.policies import environment
from pterotactyl.utility import utils
from pterotactyl import pretrained
class Engine(Checkpointable):
def __init__(self, args):
self.args = args
def __call__(self):
# setup the environment, policy and data
self.env = environment.ActiveTouch(self.args)
self.spot = 0
self.actions = []
self.latents = []
train_loaders, valid_loaders = self.get_loaders()
self.results_dir = os.path.join("results", self.args.exp_type)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", self.args.exp_type
)
if not os.path.exists((self.checkpoint_dir)):
os.makedirs(self.checkpoint_dir)
        self.checkpoint = os.path.join(self.checkpoint_dir, "actions.npy")
# evaluate the policy
with torch.no_grad():
self.load()
if self.args.eval:
self.validate(valid_loaders)
else:
self.train(train_loaders)
self.save()
# load data using pytorch dataloader
def get_loaders(self):
if not self.args.eval:
train_loader = DataLoader(
self.env.train_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.train_data.collate,
)
else:
train_loader = []
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.valid_data.collate,
)
return train_loader, valid_loader
    # collect (best action, latent embedding) pairs from the training set
def train(self, dataloader):
# for all training data
training_length = len(dataloader)
random.seed(self.args.seed)
training_instances = random.sample(
range(training_length), int(training_length * 0.4)
)
for v, batch in enumerate(tqdm(dataloader)):
if v < self.spot:
continue
if v not in training_instances:
continue
self.spot = v
obs = self.env.reset(batch)
for i in range(self.args.budget):
# find best action
action, next_obs, reward, all_done = self.env.best_step(
greedy_checks=self.args.greedy_checks
)
# record action, embedding correspondence
                for j in range(self.args.env_batch_size):
                    self.actions.append(action[j])
                    self.latents.append(obs["latent"][j])
obs = next_obs
if v % 3 == 0:
self.save()
    # perform the validation
def validate(self, dataloader):
self.latents = torch.stack(self.latents).cuda()
observations = []
scores = []
actions = []
names = []
for v, batch in enumerate(tqdm(dataloader)):
names += batch["names"]
obs = self.env.reset(batch)
all_done = False
cur_scores = [obs["score"]]
cur_actions = []
while not all_done:
action = []
for i in range(self.args.env_batch_size):
latent_distance = (
(self.latents - obs["latent"][i].cuda()) ** 2
).mean(dim=1)
smallest_idxs = torch.topk(
latent_distance,
self.args.num_grasps * 5,
largest=False,
sorted=True,
)[1]
for idx in smallest_idxs:
possible_action = self.actions[idx]
if len(cur_actions) == 0:
action.append(possible_action)
break
seen_actions = list(
torch.stack(cur_actions)[:, i].data.cpu().numpy()
)
if possible_action not in seen_actions:
action.append(possible_action)
break
action = np.array(action)
next_obs, reward, all_done = self.env.step(action)
# record observation
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(torch.FloatTensor(action))
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
message = f"Valid || score: {print_score:.4f}, "
message += f"reward = {print_reward:.4f}"
tqdm.write(message)
if self.args.visualize and v == 5:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
scores = torch.cat(scores)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
current_loss = (scores[:, -1] / scores[:, 0]).mean()
if self.args.visualize:
actions = torch.stack(actions).view(-1, self.args.budget)
utils.visualize_actions(self.results_dir, actions, self.args)
print("*" * 30)
message = f"Total Valid || score: {current_loss:.4f}, "
message += f"reward = {rewards.mean():.4f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
def load(self):
if self.args.pretrained:
if self.args.use_img:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/NearestNeighbor/v_t_p.npy"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/NearestNeighbor/v_t_g.npy"
)
else:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/NearestNeighbor/t_p.npy"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/NearestNeighbor/t_g.npy"
)
data = np.load(location, allow_pickle=True).item()
self.actions = list(data["actions"])
self.latents = [torch.FloatTensor(d) for d in data["latents"]]
self.spot = data["spot"]
else:
try:
data = np.load(self.checkpoint, allow_pickle=True).item()
self.actions = list(data["actions"])
self.latents = [torch.FloatTensor(d) for d in data["latents"]]
self.spot = data["spot"]
except:
return
def save(self):
actions = np.array(self.actions)
latents = torch.stack(self.latents).data.cpu().numpy()
data = {"actions": actions, "latents": latents, "spot": self.spot}
np.save(self.checkpoint, data)
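# Hedged sketch (illustration only, not called by the policy): the latent
# nearest-neighbour lookup performed in Engine.validate above, on toy CPU tensors.
# The sizes and random values here are assumptions for demonstration.
def _toy_nearest_neighbour_example():
    stored_latents = torch.rand(20, 8)  # latents recorded during training
    stored_actions = list(range(20))  # action recorded alongside each latent
    query = torch.rand(8)  # latent embedding of the current prediction
    distance = ((stored_latents - query) ** 2).mean(dim=1)
    nearest = torch.topk(distance, k=5, largest=False, sorted=True)[1]
    # return the actions of the closest stored latents, nearest first
    return [stored_actions[int(idx)] for idx in nearest]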
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--auto_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/auto/t_p/",
help="the location of the autoencoder part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--env_batch_size", type=int, default=3, help="Size of the batch."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument(
"--num_actions", type=int, default=50, help="number of action options"
)
parser.add_argument(
"--eval", action="store_true", default=False, help="for evaluating on test set"
)
parser.add_argument(
"--budget", type=int, default=5, help="number of graspsp to perform"
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--greedy_checks",
type=int,
default=50,
help="Number of actions to check at each time step",
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="use the pretrained policy",
)
args = parser.parse_args()
args.use_recon = False
args.use_latent = True
trainer = Engine(args)
trainer()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import random
# class for getting random samples from the space of actions
class random_sampler:
def __init__(self, args):
super().__init__()
self.args = args
def get_action(self, mask):
batch_size = mask.shape[0]
actions = []
for b in range(batch_size):
propositions = list(np.arange(self.args.num_actions))
indexes = list(np.where(mask[b] > 0)[0])
if len(indexes) > 0:
for index in sorted(indexes, reverse=True):
del propositions[index]
actions.append(random.choice(propositions))
return np.array(actions)
# class for evenly spaced samples from the space of actions
class even_sampler:
def __init__(self, args):
super().__init__()
self.args = args
self.generate_points()
# precompute the actions to be used in the trajectory
def generate_points(self):
self.angles = []
for i in range(self.args.env_batch_size):
spacing = self.args.num_actions // self.args.num_grasps
            base = [spacing * i for i in range(self.args.num_grasps)]
update_num = random.choice(range(self.args.num_actions))
choice = []
for j in range(self.args.num_grasps):
                choice.append((base[j] + update_num) % self.args.num_actions)
self.angles.append(choice)
# reset the precomputed actions
def reset(self):
self.generate_points()
def get_action(self, mask):
batch_size = mask.shape[0]
actions = []
for b in range(batch_size):
actions.append(self.angles[b][0])
del self.angles[b][0]
return np.array(actions)
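# Hedged sketch (not part of the original pipeline): a toy run of the two samplers above.
# The argument values below are assumptions for demonstration only.
if __name__ == "__main__":
    from types import SimpleNamespace
    toy_args = SimpleNamespace(num_actions=8, num_grasps=4, env_batch_size=2)
    # the random sampler never proposes an action whose mask entry is set
    mask = np.zeros((2, 8))
    mask[0, 3] = 1.0
    print(random_sampler(toy_args).get_action(mask))  # action 3 never appears for item 0
    # the even sampler returns a randomly rotated but evenly spaced set of actions
    sampler = even_sampler(toy_args)
    print([sampler.get_action(mask) for _ in range(toy_args.num_grasps)])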
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from tqdm import tqdm
import os
from torch.utils.data import DataLoader
import torch
import argparse
from pterotactyl.policies import environment
from pterotactyl.policies.baselines import baselines
from pterotactyl.utility import utils
from pterotactyl import pretrained
class Engine:
def __init__(self, args):
self.args = args
def __call__(self):
# set up environment and policy and data
utils.set_seeds(self.args.seed)
self.env = environment.ActiveTouch(self.args)
self.policy = baselines.even_sampler(self.args)
valid_loaders = self.get_loaders()
self.results_dir = os.path.join("results", self.args.exp_type)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
# compute accuracy
with torch.no_grad():
self.validate(valid_loaders)
# load data with pytorch dataloader
def get_loaders(self):
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=10,
collate_fn=self.env.valid_data.collate,
)
return valid_loader
# perform the even policy
def validate(self, dataloader):
observations = []
scores = []
actions = []
names = []
for v, batch in enumerate(tqdm(dataloader)):
names += batch["names"]
obs = self.env.reset(batch)
self.policy.reset()
all_done = False
cur_observations = [obs]
cur_scores = [obs["score"]]
cur_actions = []
while not all_done:
# select actions
action = self.policy.get_action(obs["mask"])
# perform actions
with torch.no_grad():
next_obs, reward, all_done = self.env.step(action)
# record actions
torch.cuda.empty_cache()
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(torch.FloatTensor(action))
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
temp_scored = torch.cat(scores)
current_loss = (temp_scored[:, -1] / temp_scored[:, 0]).mean()
message = f"Valid || score: {print_score:.4f} "
message += f"reward = {print_reward:.4f} ave: {100*current_loss:.4f} %"
tqdm.write(message)
if self.args.visualize and v == 5:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
if self.args.visualize:
actions = torch.stack(actions).view(-1, self.args.budget)
utils.visualize_actions(self.results_dir, actions, self.args)
scores = torch.cat(scores)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
current_loss = (scores[:, -1] / scores[:, 0]).mean()
print("*" * 30)
message = f"Total Valid || score: {current_loss:.4f}, "
message += f"reward = {rewards.mean():.4f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
help="the location of the vision part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--env_batch_size", type=int, default=3, help="Size of the batch."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument("--num_actions", type=int, default=50)
parser.add_argument("--use_latent", action="store_true", default=False)
parser.add_argument("--use_recon", action="store_true", default=False)
parser.add_argument(
"--eval", type=bool, default=True, help="for evaluating on test set"
)
parser.add_argument(
"--budget", type=int, default=5, help="number of graspsp to perform"
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from tqdm import tqdm
import os
from torch.utils.data import DataLoader
import torch
import argparse
from pterotactyl.policies import environment
from pterotactyl.policies.baselines import baselines
from pterotactyl.utility import utils
from pterotactyl import pretrained
class Engine:
def __init__(self, args):
self.args = args
def __call__(self):
# setup the environment, policy and data
utils.set_seeds(self.args.seed)
self.env = environment.ActiveTouch(self.args)
self.policy = baselines.even_sampler(self.args)
self.results_dir = os.path.join("results", self.args.exp_type)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
valid_loaders = self.get_loaders()
# evaluate the policy
with torch.no_grad():
self.validate(valid_loaders)
# load data using pytorch dataloader
def get_loaders(self):
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.valid_data.collate,
)
return valid_loader
    # perform the validation
def validate(self, dataloader):
observations = []
scores = []
actions = []
names = []
for v, batch in enumerate(tqdm(dataloader)):
names += batch["names"]
obs = self.env.reset(batch)
self.policy.reset()
all_done = False
cur_scores = [obs["score"]]
cur_actions = []
while not all_done:
# perform actions
with torch.no_grad():
action, next_obs, reward, all_done = self.env.best_step(
greedy_checks=self.args.greedy_checks
)
# record observation
torch.cuda.empty_cache()
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(torch.FloatTensor(action))
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
temp_scored = torch.cat(scores)
current_loss = (temp_scored[:, -1] / temp_scored[:, 0]).mean()
message = f"Valid || score: {print_score:.4f} "
message += f"reward = {print_reward:.4f} ave: {100 * current_loss:.4f} %"
tqdm.write(message)
if self.args.visualize and v == 5:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
if self.args.visualize:
actions = torch.stack(actions).view(-1, self.args.budget)
utils.visualize_actions(self.results_dir, actions, self.args)
scores = torch.cat(scores)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
current_loss = (scores[:, -1] / scores[:, 0]).mean()
print("*" * 30)
message = f"Total Valid || score: {current_loss:.4f}, "
message += f"reward = {rewards.mean():.4f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--env_batch_size", type=int, default=3, help="Size of the batch."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument(
"--greedy_checks",
type=int,
default=50,
help="Number of actions to check at each time step",
)
parser.add_argument(
"--num_actions", type=int, default=50, help="number of action options"
)
parser.add_argument("--use_latent", action="store_true", default=False)
parser.add_argument("--use_recon", action="store_true", default=False)
parser.add_argument(
"--eval", type=bool, default=True, help="for evaluating on test set"
)
parser.add_argument(
"--budget", type=int, default=5, help="number of graspsp to perform"
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from tqdm import tqdm
from torch.utils.data import DataLoader
import torch
import argparse
from pterotactyl.policies import environment
from pterotactyl.policies.baselines import baselines
from pterotactyl.utility import utils
from pterotactyl import pretrained
class Engine:
def __init__(self, args):
self.args = args
def __call__(self):
# set up environment and policy and data
utils.set_seeds(self.args.seed)
self.env = environment.ActiveTouch(self.args)
self.policy = baselines.random_sampler(self.args)
valid_loaders = self.get_loaders()
self.results_dir = os.path.join("results", self.args.exp_type)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
# compute accuracy
with torch.no_grad():
self.validate(valid_loaders)
# load data with pytorch dataloader
def get_loaders(self):
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.valid_data.collate,
)
return valid_loader
# perform the random policy
def validate(self, dataloader):
observations = []
scores = []
actions = []
names = []
for v, batch in enumerate(tqdm(dataloader)):
names += batch["names"]
obs = self.env.reset(batch)
all_done = False
cur_scores = [obs["score"]]
cur_actions = []
while not all_done:
# select actions
action = self.policy.get_action(obs["mask"])
# perform actions
with torch.no_grad():
next_obs, reward, all_done = self.env.step(action)
# record observations
torch.cuda.empty_cache()
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(torch.FloatTensor(action))
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
temp_scored = torch.cat(scores)
current_loss = (temp_scored[:, -1] / temp_scored[:, 0]).mean()
message = f"Valid || score: {print_score:.4f} "
message += f"reward = {print_reward:.4f} ave: {100 * current_loss:.4f} %"
tqdm.write(message)
if self.args.visualize and v == 5:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
scores = torch.cat(scores)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
current_loss = (scores[:, -1] / scores[:, 0]).mean()
if self.args.visualize:
print("visualizing")
actions = torch.stack(actions).view(-1, self.args.budget)
utils.visualize_actions(self.results_dir, actions, self.args)
print("*" * 30)
message = f"Total Valid || score: {current_loss:.4f}, "
message += f"reward = {rewards.mean():.4f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--touch_location",
type=str,
        default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
help="the location of the vision part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--env_batch_size", type=int, default=3, help="Size of the batch."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument("--num_actions", type=int, default=50)
parser.add_argument("--use_latent", action="store_true", default=False)
parser.add_argument("--use_recon", action="store_true", default=False)
parser.add_argument(
"--eval", type=bool, default=True, help="for evaluating on test set"
)
parser.add_argument(
"--budget", type=int, default=5, help="number of graspsp to perform"
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
from tqdm import tqdm
import numpy as np
from torch.utils.data import DataLoader
import torch
import argparse
from submitit.helpers import Checkpointable
from pterotactyl.policies import environment
from pterotactyl.utility import utils
from pterotactyl import pretrained
class Engine(Checkpointable):
def __init__(self, args):
self.args = args
def __call__(self):
# setup the environment, and data
self.env = environment.ActiveTouch(self.args)
data_loaders, valid_loaders = self.get_loaders()
self.chosen_actions = []
self.step = 0
self.spot = 0
self.counts = np.array([0.0 for i in range(self.args.num_actions)])
# save location for the computed trajectory
self.results_dir = os.path.join("results", self.args.exp_type)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", "MFBA", self.args.exp_type
)
if not os.path.exists((self.checkpoint_dir)):
os.makedirs(self.checkpoint_dir)
        self.checkpoint = os.path.join(self.checkpoint_dir, "actions.npy")
with torch.no_grad():
self.load()
if self.args.eval:
self.validate(valid_loaders)
else:
# find the best action at every step
for i in range(self.step, self.args.num_grasps):
self.train(data_loaders)
self.save()
# load data using pytorch dataloader
def get_loaders(self):
if not self.args.eval:
train_loader = DataLoader(
self.env.train_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.train_data.collate,
)
else:
train_loader = []
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.valid_data.collate,
)
return train_loader, valid_loader
# compute the lowest error action for the current step
def train(self, dataloader):
print(f"Getting best action for step {len(self.chosen_actions)+1}")
training_length = len(dataloader)
random.seed(self.args.seed)
training_instances = random.sample(
range(training_length), int(training_length * 0.4)
)
for v, batch in enumerate(tqdm(dataloader)):
if v < self.spot:
continue
if v not in training_instances:
continue
self.spot = v
self.env.reset(batch)
# check the accuracy of every action
for action in self.chosen_actions:
actions = np.array([action for _ in range(self.args.env_batch_size)])
self.env.step(actions)
actions, _, _, _ = self.env.best_step(greedy_checks=self.args.greedy_checks)
# update the count for most successful action
for a in actions:
self.counts[a] += 1
if v % 20 == 0:
self.save()
self.chosen_actions.append(np.argmax(self.counts))
self.counts = np.array(
[
0 if i not in self.chosen_actions else -1e20
for i in range(self.args.num_actions)
]
)
self.spot = 0
self.step += 1
# evaluate the policy
def validate(self, dataloader):
observations = []
scores = []
actions = []
names = []
for v, batch in enumerate(tqdm(dataloader)):
names += batch["names"]
obs = self.env.reset(batch)
cur_scores = [obs["score"]]
cur_actions = []
for action in self.chosen_actions:
best_actions = np.array(
[action for _ in range(self.args.env_batch_size)]
)
# perform actions
with torch.no_grad():
next_obs, _, _ = self.env.step(best_actions)
# record actions
torch.cuda.empty_cache()
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(torch.FloatTensor(best_actions))
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
message = f"Valid || score: {print_score:.4f}, "
message += f"reward = {print_reward:.4f}"
tqdm.write(message)
if self.args.visualize and v == 5:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
scores = torch.cat(scores)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
current_loss = (scores[:, -1] / scores[:, 0]).mean()
if self.args.visualize:
actions = torch.stack(actions).view(-1, self.args.budget)
utils.visualize_actions(self.results_dir, actions, self.args)
print("*" * 30)
message = f"Total Valid || score: {current_loss:.4f}, "
message += f"reward = {rewards.mean():.4f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
def load(self):
if self.args.pretrained:
if self.args.use_img:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/MFBA_v_t_p.npy"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/MFBA_v_t_g.npy"
)
else:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/MFBA_t_p.npy"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/MFBA_t_g.npy"
)
data = np.load(location, allow_pickle=True).item()
self.counts = data["counts"]
self.chosen_actions = data["chosen_actions"]
self.spot = data["spot"]
self.step = data["step"]
else:
try:
data = np.load(self.checkpoint, allow_pickle=True).item()
self.counts = data["counts"]
self.chosen_actions = data["chosen_actions"]
self.spot = data["spot"]
self.step = data["step"]
except:
return
def save(self):
data = {
"counts": self.counts,
"chosen_actions": self.chosen_actions,
"step": self.step,
"spot": self.spot,
}
np.save(self.checkpoint, data)
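# Hedged sketch (illustration only, not called by the policy): the vote-counting step
# used in Engine.train above, on a made-up list of greedy action choices. The numbers
# here are assumptions for demonstration.
def _toy_action_vote_example():
    counts = np.zeros(6)
    greedy_choices = [2, 2, 5, 2, 1, 5]  # best action found for each training element
    for a in greedy_choices:
        counts[a] += 1
    return int(np.argmax(counts))  # 2 -> the most frequently best action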
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--env_batch_size", type=int, default=3, help="Size of the batch."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument("--num_actions", type=int, default=50)
parser.add_argument("--use_latent", action="store_true", default=False)
parser.add_argument("--use_recon", action="store_true", default=False)
parser.add_argument(
"--eval",
action="store_true",
default=False,
help="Evaluate the trained model on the test set.",
)
parser.add_argument(
"--budget", type=int, default=5, help="number of graspsp to perform"
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--greedy_checks",
type=int,
default=50,
help="Number of actions to check at each time step",
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="use the pretrained policy",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
from tqdm import tqdm
import numpy as np
from torch.utils.data import DataLoader
import torch
import argparse
from submitit.helpers import Checkpointable
from pterotactyl.policies import environment
from pterotactyl.utility import utils
from pterotactyl import pretrained
class Engine(Checkpointable):
def __init__(self, args):
self.args = args
    def __call__(self):
# setup the environment, and data
self.env = environment.ActiveTouch(self.args)
data_loaders, valid_loaders = self.get_loaders()
self.chosen_actions = []
self.step = 0
self.spot = 0
self.action_scores = np.array(
[
1e10 if i not in self.chosen_actions else 1e20
for i in range(self.args.num_actions)
]
)
self.checks = np.array([1.0 for i in range(self.args.num_actions)])
# save location for the computed trajectory
self.results_dir = os.path.join("results", self.args.exp_type)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", "LEBA", self.args.exp_type
)
if not os.path.exists((self.checkpoint_dir)):
os.makedirs(self.checkpoint_dir)
        self.checkpoint = os.path.join(self.checkpoint_dir, "actions.npy")
with torch.no_grad():
self.load()
if self.args.eval:
self.validate(valid_loaders)
else:
# find the best action at every step
for i in range(self.step, self.args.num_grasps):
self.train(data_loaders)
self.save()
# load data using pytorch dataloader
def get_loaders(self):
if not self.args.eval:
train_loader = DataLoader(
self.env.train_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.train_data.collate,
)
else:
train_loader = []
valid_loader = DataLoader(
self.env.valid_data,
batch_size=self.args.env_batch_size,
shuffle=False,
num_workers=4,
collate_fn=self.env.valid_data.collate,
)
return train_loader, valid_loader
# compute the lowest error action for the current step
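# greedy selection: replay the actions chosen so far, then evaluate every remaining
# candidate action (or a random subset of size greedy_checks) on a 40% sample of the
# training set, accumulate its reconstruction score normalised by the initial score,
# and append the action with the lowest average score to the trajectory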
def train(self, dataloader):
print(f"Getting best action for step {len(self.chosen_actions)+1}")
# for all training data
training_length = len(dataloader)
random.seed(self.args.seed)
training_instances = random.sample(
range(training_length), int(training_length * 0.4)
)
for v, batch in enumerate(tqdm(dataloader)):
if v < self.spot:
continue
if v not in training_instances:
continue
self.spot = v
self.env.reset(batch)
# check the accuracy of every action
for action in self.chosen_actions:
actions = np.array([action for _ in range(self.args.env_batch_size)])
self.env.step(actions)
remaining_actions = [
i for i in range(self.args.num_actions) if i not in self.chosen_actions
]
remaining_actions = [
remaining_actions for i in range(self.args.env_batch_size)
]
if self.args.greedy_checks < self.args.num_actions:
for i in range(self.args.env_batch_size):
remaining_actions[i] = random.sample(
remaining_actions[i], self.args.greedy_checks
)
for i in range(len(remaining_actions[0])):
actions = np.array(
[remaining_actions[j][i] for j in range(self.args.env_batch_size)]
)
# evaluate each candidate action once and normalise its score by the initial score
check = self.env.check_step(actions)
scores = check["score"] / check["first_score"]
for action, score in zip(actions, scores):
if self.action_scores[action] == 1e10:
self.action_scores[action] = score
else:
self.action_scores[action] += score
self.checks[action] += 1.0
if v % 20 == 0:
self.save()
# record the lowest error action
action_scores = self.action_scores / self.checks
self.chosen_actions.append(np.argmin(action_scores))
self.action_scores = np.array(
[
1e10 if i not in self.chosen_actions else 1e20
for i in range(self.args.num_actions)
]
)
self.checks = np.array([1.0 for i in range(self.args.num_actions)])
self.spot = 0
self.step += 1
# validate the chosen trajectory
def validate(self, dataloader):
observations = []
scores = []
actions = []
names = []
for v, batch in enumerate(tqdm(dataloader)):
names += batch["names"]
obs = self.env.reset(batch)
cur_scores = [obs["score"]]
cur_actions = []
for action in self.chosen_actions:
best_actions = np.array(
[action for _ in range(self.args.env_batch_size)]
)
# perform actions
with torch.no_grad():
next_obs, _, _ = self.env.step(best_actions)
# record actions
torch.cuda.empty_cache()
obs = next_obs
cur_scores.append(obs["score"])
cur_actions.append(torch.FloatTensor(best_actions))
observations.append(obs["mesh"])
scores.append(torch.stack(cur_scores).permute(1, 0))
actions.append(torch.stack(cur_actions).permute(1, 0))
print_score = (scores[-1][:, -1] / scores[-1][:, 0]).mean()
print_reward = (
(scores[-1][:, 0] - scores[-1][:, -1]) / scores[-1][:, 0]
).mean()
message = f"Valid || score: {print_score:.4f}, "
message += f"reward = {print_reward:.4f}"
tqdm.write(message)
if self.args.visualize and v == 5:
meshes = torch.cat(observations, dim=0)[:, :, :3]
utils.visualize_prediction(
self.results_dir, meshes, self.env.mesh_info["faces"], names
)
self.env.reset_pybullet()
scores = torch.cat(scores)
rewards = ((scores[:, 0] - scores[:, -1]) / scores[:, 0]).mean()
current_loss = (scores[:, -1] / scores[:, 0]).mean()
if self.args.visualize:
actions = torch.stack(actions).view(-1, self.args.budget)
utils.visualize_actions(self.results_dir, actions, self.args)
print("*" * 30)
message = f"Total Valid || score: {current_loss:.4f}, "
message += f"reward = {rewards.mean():.4f}"
tqdm.write("*" * len(message))
tqdm.write(message)
tqdm.write("*" * len(message))
def load(self):
if self.args.pretrained:
if self.args.use_img:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/LEBA_v_t_p.npy"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/LEBA_v_t_g.npy"
)
else:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/LEBA_t_p.npy"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/policies/dataset_specific/LEBA_t_g.npy"
)
data = np.load(location, allow_pickle=True).item()
self.action_scores = data["action_scores"]
self.checks = data["checks"]
self.chosen_actions = data["chosen_actions"]
self.spot = data["spot"]
self.step = data["step"]
else:
try:
data = np.load(self.checkpoint, allow_pickle=True).item()
self.action_scores = data["action_scores"]
self.checks = data["checks"]
self.chosen_actions = data["chosen_actions"]
self.spot = data["spot"]
self.step = data["step"]
except:
return
def save(self):
data = {
"action_scores": self.action_scores,
"checks": self.checks,
"chosen_actions": self.chosen_actions,
"step": self.step,
"spot": self.spot,
}
np.save(self.checkpoint, data)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--touch_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/touch/best/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
help="the location of the touch part prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--env_batch_size", type=int, default=3, help="Size of the batch."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument("--num_actions", type=int, default=50)
parser.add_argument("--use_latent", action="store_true", default=False)
parser.add_argument("--use_recon", action="store_true", default=False)
parser.add_argument(
"--eval",
action="store_true",
default=False,
help="Evaluate the trained model on the test set.",
)
parser.add_argument(
"--budget", type=int, default=5, help="number of graspsp to perform"
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--greedy_checks",
type=int,
default=50,
help="Number of actions to check at each time step",
)
parser.add_argument(
"--pretrained_recon",
action="store_true",
default=False,
help="use the pretrained reconstruction models to train",
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="use the pretrained policy",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
import numpy as np
from scipy.spatial.transform import Rotation as R
import pyrender
import trimesh
import pterotactyl.objects as objects
from pterotactyl.utility import utils
from random import randrange
HAND_COLOUR = [119, 136, 153, 255]
DIGIT_COLOUR = [119, 225, 153, 175]
class Renderer:
def __init__(self, hand, pb, cameraResolution=[256, 256]):
self.scene = self.init_scene()
self.hand = hand
self.pb = pb
self.hand_nodes = []
self.object_nodes = []
self.init_camera()
self.init_hand()
self.update_hand()
self.r = pyrender.OffscreenRenderer(cameraResolution[0], cameraResolution[1])
# the scene is initialized with fixed lights; this can easily be changed to match the desired environment
def init_scene(self):
scene = pyrender.Scene(ambient_light=[0.3, 0.3, 0.3])
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[0, -0.8, 0.3], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.0)
scene.add(light, pose=light_pose)
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[0, 0.8, 0.3], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.0)
scene.add(light, pose=light_pose)
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[-1, 0, 1], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.0)
scene.add(light, pose=light_pose)
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[1, 0, 1], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.0)
scene.add(light, pose=light_pose)
return scene
def init_camera(self):
# initializes the camera parameters
camera = pyrender.PerspectiveCamera(
yfov=60.0 / 180.0 * np.pi, znear=0.01, zfar=10.0, aspectRatio=1.0
)
camera_pose = utils.euler2matrix(
xyz="xyz", angles=[0, 0, 0], translation=[0, 0, 0], degrees=True
)
camera_node = pyrender.Node(camera=camera, matrix=camera_pose)
self.scene.add_node(camera_node)
self.scene.main_camera_node = camera_node
self.camera = camera_node
# this viewpoint is used in the paper
# if you change this, you will need to update the camera parameter matrix in the reconstruction model as well
initial_matrix = R.from_euler("xyz", [45.0, 0, 270.0], degrees=True).as_matrix()
self.update_camera_pose([-0.3, 0, 0.3], initial_matrix)
def add_object(
self,
mesh,
position=[0, 0, 0],
orientation=[0, 0, 0],
colour=[228, 217, 111, 255],
):
mesh.visual.vertex_colors = colour
mesh = pyrender.Mesh.from_trimesh(mesh)
pose = utils.euler2matrix(angles=orientation, translation=position)
obj_node = pyrender.Node(mesh=mesh, matrix=pose)
self.scene.add_node(obj_node)
self.object_nodes.append(obj_node)
# defines the hand in the scene
def init_hand(self):
hand_location = os.path.join(
os.path.dirname(objects.__file__), "hand/meshes_obj/"
)
base_obj = trimesh.load(hand_location + "0_base.obj")
base_obj = trimesh.Trimesh(vertices=base_obj.vertices, faces=base_obj.faces)
base_obj.visual.vertex_colors = HAND_COLOUR
self.add_hand_obj(base_obj)
for _ in range(3):
for i in range(1, 5):
element = trimesh.load(hand_location + f"{i}_finger.obj")
element = trimesh.Trimesh(
vertices=element.vertices, faces=element.faces
)
element.visual.vertex_colors = HAND_COLOUR
self.add_hand_obj(element)
element = trimesh.load(hand_location + "5_digit.obj")
element = trimesh.Trimesh(vertices=element.vertices, faces=element.faces)
element.visual.vertex_colors = DIGIT_COLOUR
self.add_hand_obj(element)
for i in range(6, 10):
element = trimesh.load(hand_location + f"{i}_thumb.obj")
element = trimesh.Trimesh(vertices=element.vertices, faces=element.faces)
element.visual.vertex_colors = HAND_COLOUR
self.add_hand_obj(element)
element = trimesh.load(hand_location + "5_digit.obj")
element = trimesh.Trimesh(vertices=element.vertices, faces=element.faces)
element.visual.vertex_colors = DIGIT_COLOUR
self.add_hand_obj(element)
def add_hand_obj(self, obj_location):
mesh = pyrender.Mesh.from_trimesh(obj_location)
pose = utils.euler2matrix(angles=[0, 0, 0], translation=[0, 0, 0])
obj_node = pyrender.Node(mesh=mesh, matrix=pose)
self.scene.add_node(obj_node)
self.hand_nodes.append(obj_node)
# gets each hand element's position and orientation and uses them to update the hand in the scene
def update_hand(self):
# base of the hand
position, orientation = self.pb.getBasePositionAndOrientation(self.hand)
orientation = self.pb.getEulerFromQuaternion(orientation)
pose = utils.euler2matrix(angles=orientation, translation=position)
self.scene.set_pose(self.hand_nodes[0], pose=pose)
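# pybullet link indices corresponding to the hand meshes added in init_hand
# (links 5, 6, 12, 13, 19, 20, 26 and 27 are skipped)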
indices = [0, 1, 2, 3, 4, 7, 8, 9, 10, 11, 14, 15, 16, 17, 18, 21, 22, 23, 24, 25]
# all other elements
for node, index in zip(self.hand_nodes[1:], indices):
position, orientation = self.pb.getLinkState(self.hand, index)[:2]
orientation = self.pb.getEulerFromQuaternion(orientation)
pose = utils.euler2matrix(angles=orientation, translation=position)
self.scene.set_pose(node, pose=pose)
# moves the hand out of the view of the camera
def remove_hand(self):
for node in self.hand_nodes:
pose = utils.euler2matrix(angles=[0, 0, 0], translation=[0, 0, -10.0])
self.scene.set_pose(node, pose=pose)
def remove_objects(self):
for obj in self.object_nodes:
self.scene.remove_node(obj)
self.object_nodes = []
def update_camera_pose(self, position, orientation):
pose = np.eye(4)
if np.array(orientation).shape == (3,):
orientation = R.from_euler("xyz", orientation, degrees=True).as_matrix()
pose[:3, 3] = position
pose[:3, :3] = orientation
self.camera.matrix = pose
def render(self, get_depth=False):
colour, depth = self.r.render(self.scene)
if get_depth:
return colour, depth
return colour
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
import numpy as np
import cv2
import pyrender
import trimesh
from scipy.spatial.transform import Rotation as R
from pterotactyl.utility import utils
class Renderer:
def __init__(self, cameraResolution=[120, 160]):
"""
:param width: scalar
:param height: scalar
"""
self.width = cameraResolution[0]
self.height = cameraResolution[1]
self._background_real = None
self.force_enabled = False
self._init_pyrender()
def _init_pyrender(self):
"""
Initialize pyrender
"""
# Create scene for pybullet sync
self.scene = pyrender.Scene()
self.object_nodes = []
self.current_light_nodes = []
self.cam_light_ids = None
self._init_gel()
self._init_camera()
self._init_light()
self.r = pyrender.OffscreenRenderer(self.width, self.height)
colors, depths = self.render(noise=False, calibration=False)
self._background_sim = colors
def _init_gel(self):
"""
Add gel surface in the scene
"""
# Create gel surface (flat/curve surface based on config file)
gel_trimesh = self._generate_gel_trimesh()
mesh_gel = pyrender.Mesh.from_trimesh(gel_trimesh, smooth=False)
self.gel_pose0 = np.eye(4)
self.gel_node = pyrender.Node(mesh=mesh_gel, matrix=self.gel_pose0)
self.scene.add_node(self.gel_node)
def _generate_gel_trimesh(self):
# Load config
origin = [0.022, 0, 0.015]
X0, Y0, Z0 = origin[0], origin[1], origin[2]
W, H = 0.02, 0.03
# Curved gel surface
N = 100
M = int(N * H / W)
R = 0.1
zrange = 0.005
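# height field of a spherical cap: h(y, z) = R - sqrt(R^2 - (y - Y0)^2 - (z - Z0)^2),
# normalised so that the gel surface bulges out from X0 by at most zrange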
y = np.linspace(Y0 - W / 2, Y0 + W / 2, N)
z = np.linspace(Z0 - H / 2, Z0 + H / 2, M)
yy, zz = np.meshgrid(y, z)
h = R - np.maximum(0, R ** 2 - (yy - Y0) ** 2 - (zz - Z0) ** 2) ** 0.5
xx = X0 - zrange * h / h.max()
gel_trimesh = self._generate_trimesh_from_depth(xx)
return gel_trimesh
def _generate_trimesh_from_depth(self, depth):
# Load config
origin = [0.022, 0, 0.015]
_, Y0, Z0 = origin[0], origin[1], origin[2]
W, H = 0.02, 0.03
N = depth.shape[1]
M = depth.shape[0]
# Create grid mesh
vertices = []
faces = []
y = np.linspace(Y0 - W / 2, Y0 + W / 2, N)
z = np.linspace(Z0 - H / 2, Z0 + H / 2, M)
yy, zz = np.meshgrid(y, z)
# Vertex format: [x, y, z]
vertices = np.zeros([N * M, 3])
# Add x, y, z position to vertex
vertices[:, 0] = depth.reshape([-1])
vertices[:, 1] = yy.reshape([-1])
vertices[:, 2] = zz.reshape([-1])
# Create faces
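# each grid quad is split into two triangles, giving (N - 1) * (M - 1) * 2 faces
# and 6 vertex indices per quad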
faces = np.zeros([(N - 1) * (M - 1) * 6], dtype=np.uint)
# calculate id for each vertex: (i, j) => i * m + j
xid = np.arange(N)
yid = np.arange(M)
yyid, xxid = np.meshgrid(xid, yid)
ids = yyid[:-1, :-1].reshape([-1]) + xxid[:-1, :-1].reshape([-1]) * N
# create upper triangle
faces[::6] = ids # (i, j)
faces[1::6] = ids + N # (i+1, j)
faces[2::6] = ids + 1 # (i, j+1)
# create lower triangle
faces[3::6] = ids + 1 # (i, j+1)
faces[4::6] = ids + N # (i+1, j)
faces[5::6] = ids + N + 1 # (i+1, j+1)
faces = faces.reshape([-1, 3])
# camera_pose = utils.euler2matrix(
# angles=np.deg2rad([90, 0, -90]), translation=[0, 0, 0.015],
# )
vertices = vertices - np.array([0, 0, 0.015]).reshape(1, 3)
orientation = R.from_euler("xyz", [90, 0, -90], degrees=True).as_matrix()
vertices = vertices.dot(orientation)
# position = [0, 0, 0.015]
gel_trimesh = trimesh.Trimesh(vertices=vertices, faces=faces, process=False)
return gel_trimesh
def _init_camera(self):
"""
Set up camera
"""
camera = pyrender.PerspectiveCamera(yfov=np.deg2rad(60), znear=0.001)
camera_pose = utils.euler2matrix(
angles=np.deg2rad([0, 0, 0]), translation=[0, 0, -0.0035]
)
self.camera_pose = camera_pose
# Add camera node into scene
camera_node = pyrender.Node(camera=camera, matrix=camera_pose)
self.scene.add_node(camera_node)
self.camera = camera_node
self.cam_light_ids = list([0, 1, 2])
def _init_light(self):
"""
Set up light
"""
# Load light from config file
origin = np.array([0.005, 0, 0.015])
xyz = []
# Apply polar coordinates
thetas = [30, 150, 270]
rs = [0.02, 0.02, 0.02]
xs = [0, 0, 0]
for i in range(len(thetas)):
theta = np.pi / 180 * thetas[i]
xyz.append([xs[i], rs[i] * np.cos(theta), rs[i] * np.sin(theta)])
colors = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
intensities = [1, 1, 1]
# Save light nodes
self.light_nodes = []
self.light_poses0 = []
for i in range(len(colors)):
color = colors[i]
position = xyz[i] + origin - np.array([0, 0, 0.015])
orientation = R.from_euler("xyz", [90, 0, -90], degrees=True).as_matrix()
position = position.dot(orientation)
orientation = np.deg2rad([90, 0, -90])
light_pose_0 = utils.euler2matrix(angles=orientation, translation=position)
light = pyrender.PointLight(color=color, intensity=intensities[i])
light_node = pyrender.Node(light=light, matrix=light_pose_0)
self.scene.add_node(light_node)
self.light_nodes.append(light_node)
self.light_poses0.append(light_pose_0)
self.current_light_nodes.append(light_node)
def add_object(self, objTrimesh, position=[0, 0, 0], orientation=[0, 0, 0]):
mesh = trimesh.Trimesh(
vertices=objTrimesh.vertices, faces=objTrimesh.faces, process=False
)
mesh = pyrender.Mesh.from_trimesh(mesh, smooth=False)
pose = utils.euler2matrix(angles=orientation, translation=position)
objNode = pyrender.Node(mesh=mesh, matrix=pose)
self.scene.add_node(objNode)
self.object_nodes.append(objNode)
def update_camera_pose(self, position, orientation):
pose = np.eye(4)
pose[:3, 3] = position
pose[:3, :3] = orientation
self.camera.matrix = pose.dot(self.camera_pose)
# Update gel
gel_pose = pose.dot(self.gel_pose0)
self.gel_node.matrix = gel_pose
# Update light
for i in range(len(self.light_nodes)):
light_pose = pose.dot(self.light_poses0[i])
light_node = self.light_nodes[i]
light_node.matrix = light_pose
def update_objects_pose(self, position, orientation):
pose = utils.euler2matrix(angles=orientation, translation=position)
for obj in self.object_nodes:
self.scene.set_pose(obj, pose=pose)
def remove_objects(self):
for obj in self.object_nodes:
self.scene.remove_node(obj)
self.object_nodes = []
def update_light(self, lightIDList):
"""
Update the light node based on lightIDList, remove the previous light
"""
# Remove previous light nodes
for node in self.current_light_nodes:
self.scene.remove_node(node)
# Add light nodes
self.current_light_nodes = []
for i in lightIDList:
light_node = self.light_nodes[i]
self.scene.add_node(light_node)
self.current_light_nodes.append(light_node)
def _add_noise(self, color):
"""
Add Gaussian noise to the RGB image
:param color:
:return:
"""
# Add noise to the RGB image
mean = 0
std = 7
noise = np.random.normal(mean, std, color.shape) # Gaussian noise
color = np.clip(color + noise, 0, 255).astype(np.uint8) # Add noise and clip
return color
def _calibrate(self, color):
if self._background_real is not None:
# Simulated difference image, with scaling factor 0.5
diff = (color.astype(np.float64) - self._background_sim) * 0.5
# Add low-pass filter to match real readings
diff = cv2.GaussianBlur(diff, (7, 7), 0)
# Combine the simulated difference image with real background image
color = np.clip((diff[:, :, :3] + self._background_real), 0, 255).astype(
np.uint8
)
return color
def _post_process(self, color, depth, noise=True, calibration=True):
if calibration:
color = self._calibrate(color)
if noise:
color = self._add_noise(color)
return color, depth
def render(self, noise=True, calibration=True):
self.scene.main_camera_node = self.camera
self.update_light(self.cam_light_ids)
color, depth = self.r.render(self.scene)
color, depth = self._post_process(color, depth, noise, calibration)
return color, depth
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
import numpy as np
import pyrender
from pterotactyl.utility import utils
class Renderer:
def __init__(self, cameraResolution=[120, 160]):
self.scene = pyrender.Scene(ambient_light=[0.1, 0.1, 0.1])
self.object_nodes = []
self.initialize_camera()
self.r = pyrender.OffscreenRenderer(cameraResolution[0], cameraResolution[1])
def initialize_camera(self):
camera = pyrender.PerspectiveCamera(
yfov=40.0 / 180.0 * np.pi, znear=0.0001, zfar=10.0
)
self.camera_pose = utils.euler2matrix(
xyz="xyz", angles=[0, 0, 0], translation=[0, 0, 0], degrees=True
)
# Add camera node into scene
camera_node = pyrender.Node(camera=camera, matrix=self.camera_pose)
self.scene.add_node(camera_node)
self.camera = camera_node
def add_object(self, objTrimesh, position=[0, 0, 0], orientation=[0, 0, 0]):
mesh = pyrender.Mesh.from_trimesh(objTrimesh)
pose = utils.euler2matrix(angles=orientation, translation=position)
objNode = pyrender.Node(mesh=mesh, matrix=pose)
self.scene.add_node(objNode)
self.object_nodes.append(objNode)
def update_objects_pose(self, position, orientation):
pose = utils.euler2matrix(angles=orientation, translation=position)
for obj in self.object_nodes:
self.scene.set_pose(obj, pose=pose)
def remove_objects(self):
for obj in self.object_nodes:
self.scene.remove_node(obj)
self.object_nodes = []
def update_camera_pose(self, position, orientation):
pose = np.eye(4)
pose[:3, 3] = position
pose[:3, :3] = orientation
self.camera.matrix = pose.dot(self.camera_pose)
def render(self):
self.scene.main_camera_node = self.camera
_, depth = self.r.render(self.scene)
return depth
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import numpy as np
import trimesh
from scipy.spatial.transform import Rotation as R
from scipy.spatial import ConvexHull
from pterotactyl.utility import utils
class Agnostic_Grasp:
def __init__(self, pb, hand):
self.pb = pb
self.hand = hand
self.directions = -utils.get_circle(50).points.data.numpy()
self.convex_mesh = None
self.verts = None
def set_object(self, verts, faces):
hull = ConvexHull(verts.data.numpy())
self.convex_mesh = trimesh.Trimesh(
vertices=verts, faces=hull.simplices, process=False
)
self.verts = verts.data.numpy()
def remove_object(self):
self.convex_mesh = None
self.verts = None
# converts selected action into the corresponding hand rotation
def action_to_params(
self, action
): # converts action selection into hand parameters
direction = self.directions[action]
rotation = 0
return direction, rotation
def grasp(self, action):
self.reset_hand()
direction, rotation = self.action_to_params(
action
) # convert action into grasping parameters
success = self.set_hand_hull(
direction, rotation
) # identify the point on the convex hull which intersects the chosen hand direction
# if no intersection is found
if not success:
return False
else:
# set all joint angles to maximum to perform the grasp
joint_angles = [10 for _ in range(28)]
self.pb.setJointMotorControlArray(
self.hand,
range(28),
self.pb.POSITION_CONTROL,
targetPositions=joint_angles,
)
for i in range(5):
self.pb.stepSimulation()
return True
def set_hand_hull(self, direction, rotation, hand_distance=0.013):
# define ray from the center of the object to outwards in the chosen direction
ray_origins = np.array([[0, 0, 0]])
ray_directions = np.array([direction])
# find intersection with ray and convex hull
locations, index_ray, index_tri = self.convex_mesh.ray.intersects_location(
ray_origins=ray_origins, ray_directions=ray_directions
)
# if no intersections were found
if len(locations) == 0:
return False
else:
# find the furthest intersection from the center of the object
test_locations = np.array(locations)
test_locations = (test_locations ** 2).sum(axis=-1)
max_location = np.argmax(test_locations)
point = locations[max_location]
face = self.convex_mesh.faces[index_tri[0]]
# place the hand above the convex hull at the intersection point
hand_position, surface_normal = self.get_position_on_hull(
self.verts, face, point, hand_distance
)
hand_orientation = self.pb.getQuaternionFromEuler([rotation, 0, 0])
surface_normal -= 0.001
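# rotation taking the vector [-1, 0, 0] onto the surface normal; this assumes the
# hand's -x axis is its approach direction, so the hand ends up facing the surface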
handUpdateOrientation = utils.quats_from_vectors([-1, 0, 0], surface_normal)
hand_orientation = utils.combine_quats(
handUpdateOrientation, hand_orientation
)
# place the middle finger tip on the point instead of the hand center
# displacement of the fingertip from the center of the hand
v = [0, 0, 0.133]
matrix = (R.from_quat(hand_orientation)).as_matrix()
hand_position -= matrix.dot(v)
# transform the hand
self.pb.resetBasePositionAndOrientation(
self.hand, hand_position, hand_orientation
)
return True
# find the normal of the face the ray intersects, and a point just above the surface along that normal
def get_position_on_hull(self, verts, face, point, distance):
p1, p2, p3 = verts[face[0]], verts[face[1]], verts[face[2]]
normal = utils.normal_from_triangle(p1, p2, p3)
p1 = np.array([0, 0, 0])
p2 = point
p3 = point + normal * 0.0001
# check the normal is pointing away from the mesh
if ((p1 - p2) ** 2).sum() > ((p1 - p3) ** 2).sum():
normal = normal * -1
# move position of the finger to slightly above the mesh
point = point + normal * distance
return point, normal
def reset_hand(self):
# moves hand away from the object to avoid intersections
self.pb.resetBasePositionAndOrientation(self.hand, [20, 0, 0], [1, 0, 0, 0])
# sets all joints to the initial angles
joint_angles = [0 for _ in range(28)]
# sets the thumb to oppose the fingers
joint_angles[20] = 1.2
joint_angles[22] = 0.7
for i in range(28):
self.pb.resetJointState(self.hand, i, joint_angles[i])
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import os
import pybullet as pb
import numpy as np
import trimesh
import torch
from scipy.spatial.transform import Rotation as R
from scipy import ndimage
from pterotactyl.simulator.rendering import touch_renderer
from pterotactyl.simulator.rendering import tacto_renderer
from pterotactyl.simulator.rendering import vision_renderer
from pterotactyl.utility import utils
import pterotactyl.objects as objects
class Scene:
def __init__(
self,
grasp_class,
max_depth=0.025,
conn=pb,
vision=True,
resolution=[256, 256],
object_colour=[228, 217, 111, 255],
TACTO=False,
):
hand_location = os.path.join(
os.path.dirname(objects.__file__), "hand/allegro_hand.urdf"
)
self.hand = conn.loadURDF(
hand_location,
[0, 0, 0],
conn.getQuaternionFromEuler([0, 0, 0]),
useFixedBase=1,
)
# the indices of the hand definition which correspond to the fingers' perspectives
self.touch_cameras = [6, 13, 20, 27]
# furthest distance from the fingers which is observable by the touch sensors
self.max_depth = max_depth
if TACTO:
self.max_depth = min(self.max_depth, 0.015)
self.pb = conn
self.obj = None
self.grasper = grasp_class(self.pb, self.hand)
self.depths = None
self.TACTO = TACTO
# if vision signals are desired
self.vision = vision
if self.vision:
self.object_colour = object_colour
self.camera_renderer = vision_renderer.Renderer(
self.hand, pb, cameraResolution=resolution
)
if self.TACTO:
self.touch_renderer = tacto_renderer.Renderer(cameraResolution=[121, 121])
else:
self.touch_renderer = touch_renderer.Renderer(cameraResolution=[121, 121])
def grasp(self, action):
return self.grasper.grasp(action)
def get_hand_pose(self):
poses = []
for i in range(28):
poses.append(self.get_pose(self.hand, i))
return poses
def get_pose(self, objID, linkID):
if linkID <= 0:
position, orientation = self.pb.getBasePositionAndOrientation(objID)
else:
position, orientation = self.pb.getLinkState(
objID, linkID, computeLinkVelocity=False, computeForwardKinematics=True
)[:2]
orientation = self.pb.getEulerFromQuaternion(orientation)
return position, orientation
def load_obj(self, verts, faces, urdf_location):
# adding repeated faces to ensure they are observed
faces = utils.add_faces(faces)
# loading into pybullet
self.obj = self.pb.loadURDF(
urdf_location, [0, 0, 0], [0, 0, 0, 1], useFixedBase=1
)
# loading into pyrender
mesh = trimesh.Trimesh(vertices=verts, faces=faces, process=False)
self.touch_renderer.add_object(mesh, position=[0, 0, 0], orientation=[0, 0, 0])
if self.vision:
self.camera_renderer.add_object(
mesh,
position=[0, 0, 0],
orientation=[0, 0, 0],
colour=self.object_colour,
)
# loading into grasp function
self.obj_verts = torch.FloatTensor(verts)
self.obj_faces = torch.LongTensor(faces)
self.grasper.set_object(self.obj_verts, self.obj_faces)
def remove_obj(self):
if self.obj is not None:
self.pb.removeBody(self.obj)
self.touch_renderer.remove_objects()
self.obj = None
self.hull_faces = None
if self.vision:
self.camera_renderer.remove_objects()
self.grasper.remove_object()
# render depth from the perspective of each finger
def render_depth(self):
statuses = []
depths = []
colours = []
for i in range(4):
# update position of the scene camera
position, orientation = self.get_pose(self.hand, self.touch_cameras[i])
rot_off_finger = R.from_euler("xyz", [0, -90, 0], degrees=True).as_matrix()
rot_finger = R.from_euler("xyz", orientation, degrees=False).as_matrix()
orientation_update = np.matmul(rot_finger, rot_off_finger)
self.touch_renderer.update_camera_pose(
position=position, orientation=orientation_update
)
# render depth
if self.TACTO:
colour, depth = self.touch_renderer.render()
colours.append(colour)
else:
depth = self.touch_renderer.render()
# check if object is close enough to register on touch sensor
if (depth <= self.max_depth).sum() - (depth == 0).sum() > 0:
statuses.append("touch")
else:
statuses.append("no_touch")
depths.append(depth)
self.depths = depths
self.statuses = statuses
if self.TACTO:
self.colours = colours
return statuses
# converts depth map into point cloud in the reference frame of the object
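# each pixel of the 121x121 depth map is back-projected with a pinhole model:
# the ray angle is arctan((|pixel offset| / 60) * tan(fov / 2)) about the image centre,
# the lateral coordinates are depth * tan(angle), and the resulting points are rotated
# and translated into the object frame using the finger pose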
def depth_to_points(self):
if self.TACTO:
fov = 60.0 / 180.0 * np.pi # intrinsic camera parameter
else:
fov = 40.0 / 180.0 * np.pi # intrinsic camera parameter
points = []
depths = np.array(self.depths)
out_of_range = depths > self.max_depth
# sets depth beyond touch sensor to 1
depths[out_of_range] = 1.0
# sets infinite depth to 1 instead of 0
depths[depths == 0] = 1
for i in range(4):
if self.statuses[i] == "touch":
depth = depths[i]
# creates grid of points
ys = np.arange(0, 121)
ys = np.tile(ys, (121, 1)) - 60
ys = ys.transpose()
xs = ys.transpose()
# updates grid with depth
point_cloud = np.zeros((121, 121, 3))
angle = np.arctan((np.abs(xs) / 60.0) * np.tan(fov / 2.0))
point_cloud[:, :, 0] = depth * np.tan(angle) * np.sign(xs)
angle = np.arctan((np.abs(ys) / 60.0) * np.tan(fov / 2.0))
point_cloud[:, :, 1] = depth * np.tan(angle) * -np.sign(ys)
point_cloud[:, :, 2] = -depth
# removes depth beyond sensor range
point_cloud = point_cloud[depth < 1.0]
point_cloud = point_cloud.reshape((-1, 3))
# transforms points to reference frame of the finger
position, orientation = self.get_pose(self.hand, self.touch_cameras[i])
rot_z = np.array([0, -90.0, 0])
r1 = R.from_euler("xyz", rot_z, degrees=True).as_matrix()
r2 = R.from_euler("xyz", orientation, degrees=False).as_matrix()
orientation = np.matmul(r2, r1)
if self.TACTO:
point_cloud[:, -1] = point_cloud[:, -1] - 0.0035
point_cloud = orientation.dot(point_cloud.T).T + position
points.append(point_cloud)
else:
points.append(np.array([]))
return points
# simulates touch signal from depth
def depth_to_touch(self, depth):
# set depths which weren't observed to 1 instead of zero
out_of_range = depth > self.max_depth
depth[out_of_range] = 1.0
depth[depth == 0] = 1
dim = depth.shape[-1]
zeros = depth >= self.max_depth
depth = -(depth - self.max_depth)
depth[zeros] = 0
gel_depths = depth * 6 / self.max_depth
# smooth depth values
depth = gel_depths / (30.0) + 0.4
filter_size = 7
k = np.ones((filter_size, filter_size)) / (filter_size ** 2)
depth_smoothed = ndimage.convolve(depth, k, mode="reflect")
# fix "infinite" depths to zeros
depth[zeros] = depth_smoothed[zeros]
# add rgb and ambient lights
light_positions = np.array(
[[-0.5, 0.5, 1.0], [1.3, -0.4, 1.0], [1.3, 1.4, 1.0]]
)
# set to zero; qualitatively better
ambient_intensity = np.array([0.0, 0.0, 0.0])
diffuse_constant = 2.0
touch = np.zeros((dim, dim, 3))
touch[:, :] += ambient_intensity
# calculate normal of surface
zy, zx = np.gradient(depth)
normal = np.dstack((-zx, -zy, np.ones_like(depth)))
normal = utils.normalize_vector(normal)
# calc depth positions
depth_positions = np.arange(dim).repeat(dim).reshape(dim, dim) / float(dim)
depth_positions = np.stack(
(depth_positions, depth_positions.transpose(), depth)
).transpose((1, 2, 0))
# compute intensity from light normal using phong model, assuming no specularity
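# per-light diffuse term: clip(diffuse_constant * (n . l), 0, 1), accumulated over the three lights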
for i in range(3):
light_direction = light_positions[i] - depth_positions
light_direction = utils.normalize_vector(light_direction)
touch[:, :, i] += np.clip(
diffuse_constant * np.multiply(normal, light_direction).sum(-1), 0, 1
)
touch = np.clip(touch * 255.0, 0, 255) # clip within reasonable range
return touch
def render_touch(self):
touches = []
depths = np.array(self.depths)
if self.TACTO:
return self.colours
else:
for depth in depths:
touches.append(self.depth_to_touch(depth))
return touches
def get_finger_frame(self):
positions = []
rots = []
for i in range(4):
position, orientation = self.get_pose(self.hand, self.touch_cameras[i])
rot = R.from_euler("xyz", orientation, degrees=False).as_matrix()
positions.append(position)
rots.append(rot)
frame = {"pos": torch.FloatTensor(positions), "rot_M": torch.FloatTensor(rots)}
return frame
def scene_render(self, occluded=True, parameters=None):
if occluded:
self.camera_renderer.update_hand()
else:
self.camera_renderer.remove_hand()
if parameters is not None:
self.camera_renderer.update_camera_pose(parameters[0], parameters[1])
image = self.camera_renderer.render()
return image
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import os
import numpy as np
import pybullet_utils.bullet_client as bc
import pybullet as pb
import pybullet_data
import torch
from pterotactyl.simulator.scene import instance
from pterotactyl.utility import utils
class Sampler:
def __init__(
self,
grasp_class,
bs=1,
vision=True,
max_depth=0.025,
object_colours=[228, 217, 111, 255],
resolution=[256, 256],
TACTO=False,
):
self.pybullet_connections = []
self.pybullet_scenes = []
self.bs = bs
self.vision = vision
# make a connection for every element in the batch
for i in range(bs):
self.pybullet_connections.append(bc.BulletClient(connection_mode=pb.DIRECT))
self.pybullet_connections[i].setAdditionalSearchPath(
pybullet_data.getDataPath()
)
if np.array(object_colours).shape == (4,):
colour = object_colours
else:
colour = object_colours[i]
self.pybullet_scenes.append(
instance.Scene(
grasp_class,
max_depth=max_depth,
conn=self.pybullet_connections[i],
vision=self.vision,
object_colour=colour,
resolution=resolution,
TACTO=TACTO,
)
)
# disconnects the pybullet connections
def disconnect(self):
for i in range(self.bs):
self.pybullet_connections[i].disconnect()
# loads the objects into each pybullet thread
def load_objects(self, batch, from_dataset=True, scale=3.1):
self.remove_objects()
assert len(batch) == self.bs
for i in range(self.bs):
obj_location = batch[i]
# if the object information has already been extracted
if from_dataset:
verts = np.load(obj_location + "_verts.npy")
faces = np.load(obj_location + "_faces.npy")
faces = utils.add_faces(faces)
urdf_location = obj_location + ".urdf"
# extract and record the object information
else:
obj_location = obj_location + ".obj"
urdf_location = obj_location + ".urdf"
verts, faces = utils.get_obj_data(obj_location, scale=scale)
utils.make_urdf(verts, faces, urdf_location)
self.pybullet_scenes[i].load_obj(verts, faces, urdf_location)
def remove_objects(self):
for i in range(self.bs):
self.pybullet_scenes[i].remove_obj()
def grasp(self, i, actions):
return self.pybullet_scenes[i].grasp(actions[i])
# perform the grasp and extract the requested information
def sample(
self,
actions,
touch=True,
touch_point_cloud=False,
vision=False,
vision_occluded=False,
parameters=None,
):
success = []
poses = []
dict = {}
# check if the grasps are feasible
for i in range(self.bs):
# perform the grasps
success.append(self.grasp(i, actions))
if success[-1]:
poses.append(self.pybullet_scenes[i].get_hand_pose())
else:
poses.append(None)
dict["hand_pose"] = poses
# get touch signal from grasp
if touch:
touch_status = [
["no_intersection" for _ in range(4)] for _ in range(self.bs)
]
touch_signal = torch.zeros((self.bs, 4, 121, 121, 3))
depths = torch.zeros((self.bs, 4, 121, 121))
finger_transform_pos = torch.zeros((self.bs, 4, 3))
finger_transform_rot_M = torch.zeros((self.bs, 4, 3, 3))
for i in range(self.bs):
if success[i]:
# depth from camera
touch_status[i] = self.pybullet_scenes[i].render_depth()
# simulated touch from depth
touch_imgs = self.pybullet_scenes[i].render_touch()
ref_frame = self.pybullet_scenes[i].get_finger_frame()
touch_signal[i] = torch.FloatTensor(touch_imgs)
depths[i] = torch.FloatTensor(self.pybullet_scenes[i].depths)
finger_transform_pos[i] = torch.FloatTensor(ref_frame["pos"])
finger_transform_rot_M[i] = torch.FloatTensor(ref_frame["rot_M"])
dict["touch_status"] = touch_status
dict["touch_signal"] = touch_signal
dict["depths"] = depths
dict["finger_transfrom_pos"] = finger_transform_pos
dict["finger_transform_rot_M"] = finger_transform_rot_M
# get pointcloud of touch site in the object frame of reference
if touch_point_cloud:
point_clouds = []
for i in range(self.bs):
point_clouds.append(self.pybullet_scenes[i].depth_to_points())
dict["touch_point_cloud"] = point_clouds
# get image of the grasp
if vision_occluded:
vision_occluded_imgs = []
for i in range(self.bs):
if parameters is not None:
param = parameters[i]
else:
param = None
img = self.pybullet_scenes[i].scene_render(
occluded=True, parameters=param
)
vision_occluded_imgs.append(img)
dict["vision_occluded"] = vision_occluded_imgs
# get image of the object
if vision:
vision_imgs = []
for i in range(self.bs):
if parameters is not None:
param = parameters[i]
else:
param = None
img = self.pybullet_scenes[i].scene_render(
occluded=False, parameters=param
)
vision_imgs.append(img)
dict["vision"] = vision_imgs
return dict
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn as nn
import torch
import numpy as np
from torch.nn.parameter import Parameter
import torch.nn.functional as F
# class for the autoencoder
# for extracting latent vector from predicted shape
class AutoEncoder(nn.Module):
def __init__(self, adj_info, inital_positions, args, only_encode=False):
super(AutoEncoder, self).__init__()
self.adj_info = adj_info
self.initial_positions = inital_positions
self.args = args
# feature size passed to the GCN
input_size = 50
self.only_encode = only_encode
self.positional_encoder = Positional_Encoder(input_size)
self.mask_encoder = Mask_Encoder(input_size)
self.encoder = Encoder(input_size, args)
if not self.only_encode:
self.decoder = Decoder(args).cuda()
def forward(self, verts, mask, only_encode=False):
positional_features = self.positional_encoder(verts)
mask_features = self.mask_encoder(mask)
# combine mesh features
vertex_features = positional_features + mask_features
latent = self.encoder(vertex_features, self.adj_info)
if self.only_encode or only_encode:
return latent
pred_points = self.decoder(latent)
return pred_points.permute(0, 2, 1), latent
# encoder for the auto encoder
class Encoder(nn.Module):
def __init__(self, input_features, args):
super(Encoder, self).__init__()
self.num_layers = args.num_GCN_layers
# define output sizes for each GCN layer
hidden_values = [input_features] + [
args.hidden_GCN_size for _ in range(self.num_layers)
]
# define layers
layers = []
for i in range(self.num_layers):
layers.append(
GCN_layer(
hidden_values[i],
hidden_values[i + 1],
args.cut,
do_cut=i < self.num_layers - 1,
)
)
self.layers = nn.ModuleList(layers)
# MLP layers
hidden_values = [args.hidden_GCN_size, 500, 400, 300, args.encoding_size]
num_layers = len(hidden_values) - 1
layers = []
for i in range(num_layers):
if i < num_layers - 1:
layers.append(
nn.Sequential(
nn.Linear(hidden_values[i], hidden_values[i + 1]), nn.ReLU()
)
)
else:
layers.append(
nn.Sequential(nn.Linear(hidden_values[i], hidden_values[i + 1]))
)
self.mlp = nn.Sequential(*layers)
def forward(self, features, adj_info):
adj = adj_info["adj"]
for i in range(self.num_layers):
activation = F.relu if i < self.num_layers - 1 else lambda x: x
features = self.layers[i](features, adj, activation)
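# global max-pool over the vertex dimension yields a single per-mesh feature vector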
features = features.max(dim=1)[0]
features = self.mlp(features)
return features
# Graph convolutional network layer
class GCN_layer(nn.Module):
def __init__(self, in_features, out_features, cut=0.33, do_cut=True):
super(GCN_layer, self).__init__()
self.weight = Parameter(torch.Tensor(1, in_features, out_features))
self.bias = Parameter(torch.Tensor(out_features))
self.reset_parameters()
self.cut_size = cut
self.do_cut = do_cut
def reset_parameters(self):
stdv = 6.0 / math.sqrt((self.weight.size(1) + self.weight.size(0)))
stdv *= 0.3
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-0.1, 0.1)
def forward(self, features, adj, activation):
features = torch.matmul(features, self.weight)
# if we want to only share a subset of features with neighbors
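# only the first cut_size fraction of channels is aggregated over the adjacency matrix;
# the remaining channels are kept per-vertex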
if self.do_cut:
length = round(features.shape[-1] * self.cut_size)
output = torch.matmul(adj, features[:, :, :length])
output = torch.cat((output, features[:, :, length:]), dim=-1)
output[:, :, :length] += self.bias[:length]
else:
output = torch.matmul(adj, features)
output = output + self.bias
return activation(output)
# decoder for the autoencoder
# this is just Foldingnet
class Decoder(nn.Module):
def __init__(self, args, rank=0):
super(Decoder, self).__init__()
self.model = FoldingNetDec(rank=rank)
self.initial = nn.Linear(args.encoding_size, 512)
def forward(self, features):
features = self.initial(features)
points = self.model(features)
return points
# foldingnet definition
class FoldingNetDecFold1(nn.Module):
def __init__(self):
super(FoldingNetDecFold1, self).__init__()
self.conv1 = nn.Conv1d(514, 512, 1)
self.conv2 = nn.Conv1d(512, 512, 1)
self.conv3 = nn.Conv1d(512, 3, 1)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.conv3(x)
return x
# foldingnet definition
def GridSamplingLayer(batch_size, meshgrid):
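# builds a regular grid with prod(it[2] for it in meshgrid) points in ndim dimensions
# and repeats it batch_size times; FoldingNet folds this fixed 2D lattice onto the shape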
ret = np.meshgrid(*[np.linspace(it[0], it[1], num=it[2]) for it in meshgrid])
ndim = len(meshgrid)
grid = np.zeros(
(np.prod([it[2] for it in meshgrid]), ndim), dtype=np.float32
) # MxD
for d in range(ndim):
grid[:, d] = np.reshape(ret[d], -1)
g = np.repeat(grid[np.newaxis, ...], repeats=batch_size, axis=0)
return g
# foldingnet definition
class FoldingNetDecFold2(nn.Module):
def __init__(self):
super(FoldingNetDecFold2, self).__init__()
self.conv1 = nn.Conv1d(515, 512, 1)
self.conv2 = nn.Conv1d(512, 512, 1)
self.conv3 = nn.Conv1d(512, 3, 1)
self.relu = nn.ReLU()
def forward(self, x): # input x = batch,515,80^2
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.conv3(x)
return x
# foldingnet definition
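# FoldingNet decoder: the latent code is tiled over a fixed 80x80 2D grid in [-0.5, 0.5]^2
# and "folded" twice by pointwise convolutions into an 80^2-point 3D point cloud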
class FoldingNetDec(nn.Module):
def __init__(self, rank=0):
super(FoldingNetDec, self).__init__()
self.rank = rank
self.fold1 = FoldingNetDecFold1()
self.fold2 = FoldingNetDecFold2()
def forward(self, x):
batch_size = x.size(0)
x = torch.unsqueeze(x, 1) # x = batch,1,512
x = x.repeat(1, 80 ** 2, 1) # x = batch,80^2,512
code = x.transpose(2, 1) # x = batch,512,80^2
meshgrid = [[-0.5, 0.5, 80], [-0.5, 0.5, 80]]
grid = GridSamplingLayer(batch_size, meshgrid) # grid = batch,80^2,2
grid = torch.from_numpy(grid).cuda(self.rank)
x = torch.cat((x, grid), 2) # x = batch,80^2,514
x = x.transpose(2, 1) # x = batch,514,80^2
x = self.fold1(x) # x = batch,3,80^2
x = torch.cat((code, x), 1) # x = batch,515,80^2
x = self.fold2(x) # x = batch,3,80^2
return x
# encode the positional information of vertices using Nerf Embeddings
class Positional_Encoder(nn.Module):
def __init__(self, input_size):
super(Positional_Encoder, self).__init__()
layers = []
layers.append(
nn.Linear(63, input_size // 4)
) # 10 sin/cos frequency pairs per coordinate (60 features) + the 3 original positions = 63
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(input_size // 4, input_size // 2))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(input_size // 2, input_size))
self.model = nn.Sequential(*layers)
# apply nerf embedding of the positional information
def nerf_embedding(self, points):
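# applies sin and cos at frequencies pi and 2 * i * pi for i = 1..9 to every coordinate,
# giving 20 embedding features per coordinate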
embeddings = []
for i in range(10):
if i == 0:
embeddings.append(torch.sin(np.pi * points))
embeddings.append(torch.cos(np.pi * points))
else:
embeddings.append(torch.sin(np.pi * 2 * i * points))
embeddings.append(torch.cos(np.pi * 2 * i * points))
embeddings = torch.cat(embeddings, dim=-1)
return embeddings
def forward(self, positions):
shape = positions.shape
positions = positions.contiguous().view(shape[0] * shape[1], -1)
# combine the nerf embedding with the original positions
positions = torch.cat((self.nerf_embedding(positions), positions), dim=-1)
embedding = self.model(positions).view(shape[0], shape[1], -1)
return embedding
# make embedding token of the mask information for each vertex
class Mask_Encoder(nn.Module):
def __init__(self, input_size):
super(Mask_Encoder, self).__init__()
layers_mask = []
layers_mask.append(nn.Embedding(4, input_size))
self.model = nn.Sequential(*layers_mask)
def forward(self, mask):
shape = mask.shape
mask = mask.contiguous().view(-1, 1)
embedding_mask = self.model(mask.long()).view(shape[0], shape[1], -1)
return embedding_mask
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import numpy as np
from tqdm import tqdm
import argparse
import random
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
from torch.utils.data import DataLoader
from pterotactyl.reconstruction.autoencoder import model
from pterotactyl.utility import utils
from pterotactyl.utility import data_loaders
from pterotactyl.reconstruction.vision import model as vision_model
import pterotactyl.objects as objects
from pterotactyl import pretrained
import pterotactyl.object_data as object_data
IMAGE_LOCATION = os.path.join(os.path.dirname(object_data.__file__), "images_colourful/")
class Engine:
def __init__(self, args):
# set seeds
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# set initial data values
self.epoch = 0
self.best_loss = 10000
self.args = args
self.last_improvement = 0
self.vision_chart_location = os.path.join(
os.path.dirname(objects.__file__), "vision_charts.obj"
)
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", args.exp_type, args.exp_id
)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
self.results_dir = os.path.join("results", self.args.exp_type, self.args.exp_id)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
utils.save_config(self.checkpoint_dir, args)
def __call__(self) -> float:
# define the model and optimizer
vision_args, weights = utils.load_model_config(self.args.vision_location)
self.mesh_info, self.initial_mesh = utils.load_mesh_vision(
vision_args, self.vision_chart_location
)
self.initial_mesh = self.initial_mesh.cuda()
self.n_vision_charts = self.initial_mesh.shape[0]
self.deform = vision_model.Deformation(
self.mesh_info, self.initial_mesh, vision_args
).cuda()
self.deform.load_state_dict(torch.load(weights))
self.auto_encoder = model.AutoEncoder(
self.mesh_info, self.initial_mesh, self.args
)
params = list(self.auto_encoder.parameters())
self.auto_encoder.cuda()
self.optimizer = optim.Adam(params, lr=self.args.lr, weight_decay=0)
self.load()
# logging information
writer = SummaryWriter(
os.path.join("experiments/tensorboard/", self.args.exp_type)
)
self.train_loss = 0
# get data
train_loader, valid_loaders = self.get_loaders()
# evaluate on the test set
if self.args.eval:
self.load()
with torch.no_grad():
self.validate(valid_loaders, writer)
return
# train and validate
for epoch in range(0, self.args.epochs):
self.epoch = epoch
self.train(train_loader, writer)
with torch.no_grad():
self.validate(valid_loaders, writer)
self.check_values()
# get dataloaders
def get_loaders(self):
train_loader, valid_loader = "", ""
# training loader
if not self.args.eval:
train_data = data_loaders.mesh_loader_vision(
self.args, set_type="auto_train"
)
train_loader = DataLoader(
train_data,
batch_size=self.args.batch_size,
shuffle=True,
num_workers=16,
collate_fn=train_data.collate,
)
# evaluation loaders
set_type = "test" if self.args.eval else "valid"
valid_data = data_loaders.mesh_loader_vision(self.args, set_type=set_type)
valid_loader = DataLoader(
valid_data,
batch_size=self.args.batch_size,
shuffle=False,
num_workers=16,
collate_fn=valid_data.collate,
)
return train_loader, valid_loader
def train(self, data, writer):
total_loss = 0
iterations = 0
self.auto_encoder.train()
for k, batch in enumerate(tqdm(data, smoothing=0)):
self.optimizer.zero_grad()
# initialize data
img = batch["img"].cuda()
# inference
with torch.no_grad():
charts = vision_model.prepare_mesh(batch, self.initial_mesh, self.args)
verts, mask = self.deform(img, charts)
pred_points, latent = self.auto_encoder(verts.detach(), mask)
loss = utils.chamfer_distance(
verts.detach(),
self.mesh_info["faces"],
pred_points,
num=self.args.number_points,
)
loss = self.args.loss_coeff * loss.mean()
# backprop
loss.backward()
self.optimizer.step()
# log
message = f"Train || Epoch: {self.epoch}, loss: {loss.item():.2f}, b_ptp: {self.best_loss:.2f}"
tqdm.write(message)
total_loss += loss.item()
iterations += 1.0
self.train_loss = total_loss / iterations
writer.add_scalars(
"train_loss", {self.args.exp_id: total_loss / iterations}, self.epoch
)
def validate(self, valid_loader, writer):
total_loss = 0
self.auto_encoder.eval()
num_examples = 0
latents = []
names = []
for v, batch in enumerate(tqdm(valid_loader)):
self.optimizer.zero_grad()
# initialize data
img = batch["img"].cuda()
batch_size = img.shape[0]
# inference
charts = vision_model.prepare_mesh(batch, self.initial_mesh, self.args)
verts, mask = self.deform(img, charts)
pred_points, latent = self.auto_encoder(verts.detach(), mask)
names += batch["names"]
latents.append(latent)
loss = utils.chamfer_distance(
verts.detach(),
self.mesh_info["faces"],
pred_points,
num=self.args.number_points,
)
loss = self.args.loss_coeff * loss.mean() * batch_size
# logs
num_examples += float(batch_size)
total_loss += loss
total_loss = total_loss / num_examples
message = f"Valid || Epoch: {self.epoch}, train loss: {self.train_loss:.4f}, val loss: {total_loss:.4f}, b_ptp: {self.best_loss:.4f}"
tqdm.write(message)
print("*******************************************************")
print(f"Validation Accuracy: {total_loss}")
print("*******************************************************")
if not self.args.eval:
writer.add_scalars("valid_ptp", {self.args.exp_id: total_loss}, self.epoch)
self.current_loss = total_loss
if self.args.eval:
latents = torch.cat(latents)
self.cluster(latents, names)
# save the model
def save(self):
torch.save(self.auto_encoder.state_dict(), self.checkpoint_dir + "/model")
torch.save(self.optimizer.state_dict(), self.checkpoint_dir + "/optim")
# load the model
def load(self):
if self.args.eval and self.args.pretrained:
if self.args.use_img:
if self.args.finger:
location_vision = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/v_t_p/"
)
location_auto = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/v_t_p/"
)
else:
location_vision = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/v_t_g/"
)
location_auto = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/v_t_g/"
)
else:
if self.args.finger:
location_vision = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/t_p/"
)
location_auto = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/t_p/"
)
else:
location_vision = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/t_g/"
)
location_auto = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/auto/t_g/"
)
# define the vision model
vision_args, _ = utils.load_model_config(location_vision)
weights = location_vision + "model"
self.mesh_info, self.initial_mesh = utils.load_mesh_vision(
vision_args, self.vision_chart_location
)
self.initial_mesh = self.initial_mesh.cuda()
self.n_vision_charts = self.initial_mesh.shape[0]
self.deform = vision_model.Deformation(
self.mesh_info, self.initial_mesh, vision_args
)
self.deform.cuda()
self.deform.load_state_dict(torch.load(weights))
self.deform.eval()
# define the autoencoder model
auto_args, _ = utils.load_model_config(location_auto)
weights = location_auto + "model"
self.auto_encoder = model.AutoEncoder(
self.mesh_info, self.initial_mesh, auto_args
)
self.auto_encoder.cuda()
self.auto_encoder.load_state_dict(torch.load(weights))
else:
try:
self.auto_encoder.load_state_dict(
torch.load(self.checkpoint_dir + "/model")
)
self.optimizer.load_state_dict(
torch.load(self.checkpoint_dir + "/optim")
)
except:
return
# check if current validation is better, and if so save model
def check_values(self):
if self.best_loss >= self.current_loss:
improvement = self.best_loss - self.current_loss
print(
f"Saving with {improvement:.3f} improvement in Chamfer Distance on Validation Set "
)
self.best_loss = self.current_loss
self.last_improvement = 0
self.save()
else:
self.last_improvement += 1
if self.last_improvement >= self.args.patience:
print(f"Over {self.args.patience} steps since last imporvement")
print("Exiting now")
exit()
def cluster(self, latents, names):
example_nums = 20
crop = 20
img_dim = 256
examples = random.choices(range(latents.shape[0]), k=example_nums)
collage = Image.new(
"RGB", ((img_dim - crop * 2) * 5, (img_dim - crop * 2) * example_nums)
)
for v, e in enumerate(examples):
new_im = Image.new("RGB", (img_dim * 5, img_dim))
l = latents[e]
main_obj = names[e][0].split("/")[-1]
imgs = [os.path.join(IMAGE_LOCATION, main_obj + ".npy")]
seen = [main_obj]
compare_latents = latents - l.unsqueeze(0)
compare_latents = (compare_latents ** 2).sum(-1)
closest = torch.topk(compare_latents, 25, largest=False)[1][1:]
for c in closest:
obj = names[c][0].split("/")[-1]
if obj in seen:
continue
seen.append(obj)
imgs.append(os.path.join(IMAGE_LOCATION, obj + ".npy"))
for i in range(5):
im = Image.fromarray(np.load(imgs[i]))
new_im.paste(im, (i * img_dim, 0))
new_im.save(f"{self.results_dir}/valid_{v}.png")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--cut",
type=float,
default=0.33,
help="The shared size of features in the GCN.",
)
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--vision_location",
type=str,
default=os.path.dirname(pretrained.__file__) + "/reconstruction/vision/t_p/",
help="the location of the deformation prediction.",
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--encoding_size", type=int, default=200, help="size of the latent vector"
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--lr", type=float, default=0.0003, help="Initial learning rate."
)
parser.add_argument(
"--eval",
action="store_true",
default=False,
help="Evaluate the trained model on the test set.",
)
parser.add_argument("--batch_size", type=int, default=16, help="Size of the batch.")
parser.add_argument(
"--val_grasps",
type=int,
default=-1,
help="number of grasps to use during validation.",
)
parser.add_argument(
"--exp_id", type=str, default="test", help="The experiment name."
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--use_touch",
action="store_true",
default=False,
help="To use the touch information.",
)
parser.add_argument(
"--patience",
type=int,
default=70,
help="How many epochs without imporvement before training stops.",
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_GCN_layers",
type=int,
default=20,
help="Number of GCN layers in the mesh deformation network.",
)
parser.add_argument(
"--hidden_GCN_size",
type=int,
default=300,
help="Size of the feature vector for each GCN layer in the mesh deformation network.",
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument(
"--epochs", type=int, default=1000, help="Number of epochs to use."
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="load the pretrained model",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
# CNN block
class DoubleConv(nn.Module):
def __init__(self, in_channels, out_channels, last=False):
super().__init__()
self.last = last
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=5, padding=2, stride=2),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, kernel_size=5, padding=2),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, kernel_size=5, padding=2),
)
self.activation = nn.Sequential(
nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.double_conv(x)
if not self.last:
x = self.activation(x)
return x
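# Shape note: the stride-2 first convolution halves the spatial resolution, so a
# (hypothetical) input of shape (B, 3, 121, 121) through DoubleConv(3, 16) yields
# (B, 16, 61, 61) with the 5x5 kernels and padding=2 used above.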
# Model for predicting touch chart shape
class Encoder(nn.Module):
def __init__(self):
super(Encoder, self).__init__()
# CNN
CNN_layers = []
CNN_layers.append(DoubleConv(3, 16))
CNN_layers.append(DoubleConv(16, 32))
CNN_layers.append(DoubleConv(32, 32))
CNN_layers.append(DoubleConv(32, 64))
CNN_layers.append(DoubleConv(64, 128))
CNN_layers.append(DoubleConv(128, 128, last=True))
self.CNN_layers = nn.Sequential(*CNN_layers)
# MLP
layers = []
layers.append(nn.Sequential(nn.Linear(512, 256), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(256, 128), nn.ReLU()))
layers.append(nn.Sequential(nn.Linear(128, 75)))
self.fc = nn.Sequential(*layers)
def predict_verts(self, touch):
for layer in self.CNN_layers:
touch = layer(touch)
points = touch.contiguous().view(-1, 512)
points = self.fc(points)
return points
# transform the predicted shape into the reference frame of the sensor
def transform_verts(self, verts, ref):
pos = ref["pos"].cuda().view(-1, 1, 3).repeat(1, verts.shape[1], 1)
rot = ref["rot"].cuda()
verts = torch.bmm(rot, verts.permute(0, 2, 1)).permute(0, 2, 1)
verts += pos
return verts
def forward(self, gel, ref_frame, verts):
verts = verts + self.predict_verts(gel).view(-1, verts.shape[1], 3)
verts = self.transform_verts(verts, ref_frame)
return verts
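# Illustrative usage sketch (tensor values and names here are hypothetical, not from the original code):
#   encoder = Encoder().cuda()
#   gel = torch.rand(8, 3, 121, 121).cuda()                       # simulated touch images
#   ref = {"rot": torch.eye(3).repeat(8, 1, 1), "pos": torch.zeros(8, 3)}
#   verts = torch.zeros(8, 25, 3).cuda()                          # initial touch-chart vertices
#   pred = encoder(gel, ref, verts)                               # -> (8, 25, 3) in the sensor frame
# The MLP emits 75 values per example, i.e. a 3D offset for each of the 25 touch-chart
# vertices, which transform_verts then rotates and translates into the sensor frame.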
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
from tqdm import tqdm
import argparse
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
from torch.utils.data import DataLoader
from pterotactyl.reconstruction.touch import model
from pterotactyl.utility import utils
from pterotactyl.utility import data_loaders
import pterotactyl.objects as objects
from pterotactyl import pretrained
class Engine:
def __init__(self, args):
utils.set_seeds(args.seed)
self.epoch = 0
self.best_loss = 10000
self.args = args
self.last_improvement = 0
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", args.exp_type, args.exp_id
)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
utils.save_config(self.checkpoint_dir, args)
chart_location = os.path.join(
os.path.dirname(objects.__file__), "touch_chart.obj"
)
self.verts, self.faces = utils.load_mesh_touch(chart_location)
self.verts = self.verts.view(1, self.verts.shape[0], 3).repeat(
args.batch_size, 1, 1
)
def __call__(self):
self.encoder = model.Encoder()
self.encoder.cuda()
params = list(self.encoder.parameters())
self.optimizer = optim.Adam(params, lr=self.args.lr)
writer = SummaryWriter(
os.path.join("experiments/tensorboard/", self.args.exp_type)
)
train_loader, valid_loader = self.get_loaders()
# evaluate
if self.args.eval:
self.load()
with torch.no_grad():
self.validate(valid_loader, writer)
return
# train and validate
else:
for epoch in range(self.args.epochs):
self.epoch = epoch
self.train(train_loader, writer)
with torch.no_grad():
self.validate(valid_loader, writer)
self.check_values()
# get the dataloaders
def get_loaders(self):
train_loader, valid_loader = "", ""
# dataloader for training
if not self.args.eval:
train_data = data_loaders.mesh_loader_touch(
self.args, set_type="recon_train"
)
train_loader = DataLoader(
train_data,
batch_size=self.args.batch_size,
shuffle=True,
num_workers=16,
collate_fn=train_data.collate,
)
# dataloader for evaluation
set_type = "test" if self.args.eval else "valid"
valid_data = data_loaders.mesh_loader_touch(self.args, set_type=set_type)
valid_loader = DataLoader(
valid_data,
batch_size=self.args.batch_size,
shuffle=False,
num_workers=16,
collate_fn=valid_data.collate,
)
return train_loader, valid_loader
def train(self, data, writer):
total_loss = 0
iterations = 0
self.encoder.train()
for k, batch in enumerate(tqdm(data)):
# initialize
self.optimizer.zero_grad()
sim_touch = batch["sim_touch"].cuda()
ref_frame = batch["ref"]
gt_points = batch["samples"].cuda()
batch_size = gt_points.shape[0]
# inference
pred_verts = self.encoder(
sim_touch, ref_frame, self.verts.clone()[:batch_size]
)
loss = self.args.loss_coeff * utils.chamfer_distance(
pred_verts, self.faces, gt_points, self.args.num_samples
)
loss = loss.mean()
total_loss += loss.data.cpu().numpy()
# backprop
loss.backward()
self.optimizer.step()
# log
message = f"Train || Epoch: {self.epoch}, loss: {loss.item():.5f} "
message += f"|| best_loss: {self.best_loss :.5f}"
tqdm.write(message)
iterations += 1.0
writer.add_scalars(
"train", {self.args.exp_id: total_loss / iterations}, self.epoch
)
def validate(self, valid_loader, writer):
total_loss = 0
self.encoder.eval()
num_examples = 0
for k, batch in enumerate(tqdm(valid_loader)):
# initialize data
sim_touch = batch["sim_touch"].cuda()
ref_frame = batch["ref"]
gt_points = batch["samples"].cuda()
batch_size = gt_points.shape[0]
# inference
pred_verts = self.encoder(
sim_touch, ref_frame, self.verts.clone()[:batch_size]
)
# back prop
loss = self.args.loss_coeff * utils.chamfer_distance(
pred_verts, self.faces, gt_points, self.args.num_samples
)
loss = loss.mean()
num_examples += float(batch_size)
total_loss += loss * float(batch_size)
total_loss = total_loss / float(num_examples)
# log
print("*******************************************************")
print(f"Total validation loss: {total_loss}")
print("*******************************************************")
if not self.args.eval:
writer.add_scalars("valid", {self.args.exp_id: total_loss}, self.epoch)
self.current_loss = total_loss
# save the model
def save(self):
torch.save(self.encoder.state_dict(), self.checkpoint_dir + "/model")
torch.save(self.optimizer.state_dict(), self.checkpoint_dir + "/optim")
# check if the latest validation is better, save if so
def check_values(self):
if self.best_loss >= self.current_loss:
improvement = self.best_loss - self.current_loss
self.best_loss = self.current_loss
print(f"Saving Model with a {improvement} improvement in point loss")
self.save()
self.last_improvement = 0
else:
self.last_improvement += 1
if self.last_improvement == self.args.patience:
print(f"Over {self.args.patience} steps since last imporvement")
print("Exiting now")
exit()
print("*******************************************************")
# load the model
def load(self):
if self.args.eval and self.args.pretrained:
location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/touch/best/model"
)
self.encoder.load_state_dict(torch.load(location))
else:
self.encoder.load_state_dict(torch.load(self.checkpoint_dir + "/model"))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="reduces the number of data examples",
)
parser.add_argument(
"--epochs", type=int, default=1000, help="Number of epochs to use."
)
parser.add_argument(
"--lr", type=float, default=0.0001, help="Initial learning rate."
)
parser.add_argument(
"--eval",
action="store_true",
default=False,
help="Evaluate the trained model on the test set.",
)
parser.add_argument("--batch_size", type=int, default=64, help="Size of the batch.")
parser.add_argument(
"--num_samples",
type=int,
default=4000,
help="Number of points in the predicted point cloud.",
)
parser.add_argument(
"--patience",
type=int,
default=70,
help="How many epochs without imporvement before training stops.",
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--exp_id", type=str, default="test", help="The experiment name"
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group"
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="load the pretrained model",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch.nn as nn
import torch
import numpy as np
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from PIL import Image
# basic CNN layer template
def CNN_layer(f_in, f_out, k, stride=1, simple=False, padding=1):
layers = []
if not simple:
layers.append(nn.BatchNorm2d(int(f_in)))
layers.append(nn.ReLU(inplace=True))
layers.append(
nn.Conv2d(int(f_in), int(f_out), kernel_size=k, padding=padding, stride=stride)
)
return nn.Sequential(*layers)
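# Note: unless simple=True, each CNN_layer is a pre-activation block
# (BatchNorm -> ReLU -> Conv); with the default padding=1 and the 5x5 kernel used
# below, stride-2 layers roughly halve the feature-map resolution while stride-1
# layers shrink it slightly.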
# network for making image features for vertex feature vectors
class Image_Encoder(nn.Module):
def __init__(self, args):
super(Image_Encoder, self).__init__()
# CNN sizes
cur_size = 3
next_size = 16
# layers for the CNN
layers = []
layers.append(
CNN_layer(cur_size, cur_size, args.CNN_ker_size, stride=1, simple=True)
)
for i in range(args.num_CNN_blocks):
layers.append(CNN_layer(cur_size, next_size, args.CNN_ker_size, stride=2))
cur_size = next_size
next_size = next_size * 2
for j in range(args.layers_per_block - 1):
layers.append(CNN_layer(cur_size, cur_size, args.CNN_ker_size))
self.args = args
self.layers = nn.ModuleList(layers)
# camera parameters
f = 221.7025
RT = np.array(
[
[
-7.587616579485257e-08,
-1.0000001192092896,
0.0,
-2.2762851159541242e-08,
],
[-0.7071068286895752, 7.587616579485257e-08, -0.7071068286895752, 0.0],
[0.7071068286895752, 0.0, -0.7071067690849304, 0.4242640733718872],
]
)
K = np.array([[f, 0, 128.0], [0, f, 128.0], [0, 0, 1]])
# projection matrix
self.matrix = torch.FloatTensor(K.dot(RT)).cuda()
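# Note: self.matrix = K.dot(RT) is a 3x4 projection from homogeneous world points
# [x, y, z, 1] to image coordinates; pooling() below appends the 1, applies this
# matrix, divides by the resulting depth, and normalises by the 256-pixel image size.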
# defines image features over vertices from vertex positions, and feature maps from vision
def pooling(self, blocks, verts_pos):
# convert vertex positions to x,y coordinates in the image, scaled to fractions of image dimension
ext_verts_pos = torch.cat(
(
verts_pos,
torch.FloatTensor(
np.ones([verts_pos.shape[0], verts_pos.shape[1], 1])
).cuda(),
),
dim=-1,
)
ext_verts_pos = torch.matmul(ext_verts_pos, self.matrix.permute(1, 0))
ext_verts_pos[:, :, 2][ext_verts_pos[:, :, 2] == 0] = 0.1
xs = ext_verts_pos[:, :, 1] / ext_verts_pos[:, :, 2] / 256.0
xs[torch.isinf(xs)] = 0.5
ys = ext_verts_pos[:, :, 0] / ext_verts_pos[:, :, 2] / 256.0
ys[torch.isinf(ys)] = 0.5
full_features = None
xs = xs.unsqueeze(2).unsqueeze(3)
ys = ys.unsqueeze(2).unsqueeze(3)
grid = torch.cat([ys, xs], 3)
grid = grid * 2 - 1
# extract image features based on vertex projected positions
for block in blocks:
features = torch.nn.functional.grid_sample(block, grid, align_corners=True)
if full_features is None:
full_features = features
else:
full_features = torch.cat((full_features, features), dim=1)
vert_image_features = full_features[:, :, :, 0].permute(0, 2, 1)
return vert_image_features
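# Summary of the pooling above (shapes inferred from this file):
#   blocks: CNN feature maps, each of shape (B, C_i, H_i, W_i)
#   verts_pos: vertex positions of shape (B, V, 3)
# Each vertex is projected into the image, grid_sample bilinearly reads every
# feature map at that location, and the per-map features are concatenated,
# giving a (B, V, sum_i C_i) tensor of image features per vertex.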
# Examines the projection of points into image space and displays the image
# This is only for debugging purposes
def debug_pooling(self, img, points):
# convert vertex positions to x,y coordinates in the image, scaled to fractions of image dimension
ext_verts_pos = torch.cat(
(
points,
torch.FloatTensor(
np.ones([points.shape[0], points.shape[1], 1])
).cuda(),
),
dim=-1,
)
ext_verts_pos = torch.matmul(ext_verts_pos, self.matrix.permute(1, 0))
xs = ext_verts_pos[:, :, 1] / ext_verts_pos[:, :, 2] / 256.0
ys = ext_verts_pos[:, :, 0] / ext_verts_pos[:, :, 2] / 256.0
for xses, yses, i in zip(xs, ys, img):
i = (255 * i.permute(1, 2, 0)).data.cpu().numpy().astype(np.uint8)
for x, y in zip(xses, yses):
x = int(x * 255)
if x > 255:
x = 255
if x < 0:
x = 0
y = int(y * 255)
if y > 255:
y = 255
if y < 0:
y = 0
i[x, y, 0] = 255.0
i[x, y, 1] = 0
i[x, y, 2] = 0
Image.fromarray(i).save("debug_img.png")
print("Image of point projection has been saved to debug_img.png")
print("press enter to continue")
input()
print("*" * 15)
print()
exit()
def forward(self, img):
x = img
features = []
# layers to select image features from
layer_selections = [
len(self.layers) - 1 - (i + 1) * self.args.layers_per_block
for i in range(3)
]
for e, layer in enumerate(self.layers):
# if too many layers are applied the map size will be smaller than the kernel size
if x.shape[-1] < self.args.CNN_ker_size:
break
x = layer(x)
# collect feature maps
if e in layer_selections:
features.append(x)
features.append(x)
return features
# Class for deforming the charts into the target shape
class Deformation(nn.Module):
def __init__(
self, adj_info, inital_positions, args, return_img=False, pass_img=False
):
super(Deformation, self).__init__()
self.adj_info = adj_info
self.initial_positions = inital_positions
self.args = args
self.return_img = return_img
self.pass_img = pass_img
# add image encoder and get image feature size
if args.use_img:
self.img_encoder_global = Image_Encoder(args).cuda()
self.img_encoder_local = Image_Encoder(args).cuda()
with torch.no_grad():
img_features = self.img_encoder_global(
torch.zeros(1, 3, 256, 256).cuda()
)
vert_positions = torch.zeros(1, 1, 3).cuda()
input_size = self.img_encoder_global.pooling(
img_features, vert_positions
).shape[-1]
else:
# if no image features are used, fix the feature size at 50
input_size = 50
# add positional and mask encoders and GCN deformation networks
self.positional_encoder = Positional_Encoder(input_size)
self.mask_encoder = Mask_Encoder(input_size)
self.mesh_deform_1 = GCN(
input_size, args, ignore_touch_matrix=args.use_img
).cuda()
self.mesh_deform_2 = GCN(input_size, args).cuda()
def forward(self, img, charts, img_features=None):
# number of vision charts
vc_length = charts["vision_charts"].clone().shape[1]
# get image features
if self.pass_img and img_features is not None:
global_img_features, local_img_features = img_features
elif self.args.use_img:
global_img_features = self.img_encoder_global(img)
local_img_features = self.img_encoder_local(img)
else:
global_img_features, local_img_features = [], []
##### first iteration #####
# if we are using only touch then we need to use touch information immediately
if self.args.use_touch and not self.args.use_img:
# use touch information
vertices = torch.cat(
(charts["vision_charts"].clone(), charts["touch_charts"].clone()), dim=1
)
mask = torch.cat(
(charts["vision_masks"].clone(), charts["touch_masks"].clone()), dim=1
)
positional_features = self.positional_encoder(vertices)
mask_features = self.mask_encoder(mask)
vertex_features = positional_features + mask_features
# in all other settings we only use vision
else:
vertices = charts["vision_charts"].clone()
mask = charts["vision_masks"].clone()
positional_features = self.positional_encoder(vertices)
mask_features = self.mask_encoder(mask)
vertex_features = positional_features + mask_features
# use vision information
if self.args.use_img:
img_features = self.img_encoder_global.pooling(
global_img_features, vertices
)
vertex_features += img_features
# perform the first deformation
update = self.mesh_deform_1(vertex_features, self.adj_info)
# update positions of vision charts only
vertices[:, :vc_length] = vertices[:, :vc_length] + update[:, :vc_length]
##### second loop #####
# add touch information if not already present
if self.args.use_touch and self.args.use_img:
vertices = torch.cat((vertices, charts["touch_charts"].clone()), dim=1)
mask = torch.cat(
(charts["vision_masks"].clone(), charts["touch_masks"].clone()), dim=1
)
mask_features = self.mask_encoder(mask)
positional_features = self.positional_encoder(vertices)
vertex_features = positional_features + mask_features
# add image information
if self.args.use_img:
img_features = self.img_encoder_global.pooling(local_img_features, vertices)
vertex_features += img_features
# perform the second deformation
update = self.mesh_deform_2(vertex_features, self.adj_info)
# update positions of vision charts only
vertices[:, :vc_length] = vertices[:, :vc_length] + update[:, :vc_length]
##### third loop #####
positional_features = self.positional_encoder(vertices)
mask_features = self.mask_encoder(mask)
vertex_features = positional_features + mask_features
if self.args.use_img:
img_features = self.img_encoder_global.pooling(local_img_features, vertices)
vertex_features += img_features
# perform the third deformation
update = self.mesh_deform_2(vertex_features, self.adj_info)
# update positions of vision charts only
vertices[:, :vc_length] = vertices[:, :vc_length] + update[:, :vc_length]
if self.return_img:
return vertices, mask, [global_img_features, local_img_features]
return vertices, mask
# Graph convolutional network class for predicting mesh deformation
class GCN(nn.Module):
def __init__(self, input_features, args, ignore_touch_matrix=False):
super(GCN, self).__init__()
self.ignore_touch_matrix = ignore_touch_matrix
self.num_layers = args.num_GCN_layers
# define output sizes for each GCN layer
hidden_values = (
[input_features]
+ [args.hidden_GCN_size for _ in range(self.num_layers - 1)]
+ [3]
)
# define layers
layers = []
for i in range(self.num_layers):
layers.append(
GCN_layer(
hidden_values[i],
hidden_values[i + 1],
args.cut,
do_cut=i < self.num_layers - 1,
)
)
self.layers = nn.ModuleList(layers)
def forward(self, features, adj_info):
if self.ignore_touch_matrix:
adj = adj_info["origional"]
else:
adj = adj_info["adj"]
# iterate through GCN layers
for i in range(self.num_layers):
activation = F.relu if i < self.num_layers - 1 else lambda x: x
features = self.layers[i](features, adj, activation)
if torch.isnan(features).any():
print(features)
print("here", i, self.num_layers)
input()
return features
# Graph convolutional network layer
class GCN_layer(nn.Module):
def __init__(self, in_features, out_features, cut=0.33, do_cut=True):
super(GCN_layer, self).__init__()
self.weight = Parameter(torch.Tensor(1, in_features, out_features))
self.bias = Parameter(torch.Tensor(out_features))
self.reset_parameters()
self.cut_size = cut
self.do_cut = do_cut
def reset_parameters(self):
stdv = 6.0 / math.sqrt((self.weight.size(1) + self.weight.size(0)))
stdv *= 0.3
self.weight.data.uniform_(-stdv, stdv)
self.bias.data.uniform_(-0.1, 0.1)
def forward(self, features, adj, activation):
features = torch.matmul(features, self.weight)
# if we only want to share a subset of features with neighbors
if self.do_cut:
length = round(features.shape[-1] * self.cut_size)
output = torch.matmul(adj, features[:, :, :length])
output = torch.cat((output, features[:, :, length:]), dim=-1)
output[:, :, :length] += self.bias[:length]
else:
output = torch.matmul(adj, features)
output = output + self.bias
return activation(output)
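# Worked example (using the default hidden_GCN_size=300 and cut=0.33 from the
# training script): round(300 * 0.33) = 99 of the 300 output features are mixed
# over neighbours via the row-normalised adjacency matrix, while the remaining
# 201 stay local to each vertex; the final layer (do_cut=False) propagates all
# of its 3 output coordinates through the graph.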
# encode the positional information of vertices using NeRF embeddings
class Positional_Encoder(nn.Module):
def __init__(self, input_size):
super(Positional_Encoder, self).__init__()
layers = []
layers.append(
nn.Linear(63, input_size // 4)
) # 10 NeRF frequency bands (sin/cos) + original positions
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(input_size // 4, input_size // 2))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Linear(input_size // 2, input_size))
self.model = nn.Sequential(*layers)
# apply nerf embedding of the positional information
def nerf_embedding(self, points):
embeddings = []
for i in range(10):
if i == 0:
embeddings.append(torch.sin(np.pi * points))
embeddings.append(torch.cos(np.pi * points))
else:
embeddings.append(torch.sin(np.pi * 2 * i * points))
embeddings.append(torch.cos(np.pi * 2 * i * points))
embeddings = torch.cat(embeddings, dim=-1)
return embeddings
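# Note: each 3D position is expanded into 10 sin/cos frequency pairs, i.e.
# 3 * 2 * 10 = 60 embedding features; forward() appends the original 3
# coordinates, matching the 63-dimensional input of the first Linear layer above.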
def forward(self, positions):
shape = positions.shape
positions = positions.contiguous().view(shape[0] * shape[1], -1)
# combine NeRF embedding with original positions
positions = torch.cat((self.nerf_embedding((positions)), positions), dim=-1)
embeding = self.model(positions).view(shape[0], shape[1], -1)
return embeding
# make embedding token of the mask information for each vertex
class Mask_Encoder(nn.Module):
def __init__(self, input_size):
super(Mask_Encoder, self).__init__()
layers_mask = []
layers_mask.append(nn.Embedding(4, input_size))
self.model = nn.Sequential(*layers_mask)
def forward(self, mask):
shape = mask.shape
mask = mask.contiguous().view(-1, 1)
embeding_mask = self.model(mask.long()).view(shape[0], shape[1], -1)
return embeding_mask
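# Note: the 4-entry embedding table treats each per-vertex mask value as a token id;
# prepare_mesh() below assigns the value 3 to vision-chart vertices, while
# touch-chart vertices carry the mask value stored with the loaded touch data.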
# takes as input the touch information, and makes it a part of the input mesh
def prepare_mesh(batch, vision_mesh, args):
s1 = batch["img"].shape[0]
if args.use_touch:
touch_info = batch["touch_charts"].cuda().view(s1, -1, 4)
touch_charts = touch_info[:, :, :3]
touch_masks = touch_info[:, :, 3:]
# combine vision charts into a single mesh
vision_charts = vision_mesh.unsqueeze(0).repeat(s1, 1, 1)
vision_masks = 3 * torch.ones(vision_charts.shape[:-1]).cuda().unsqueeze(-1)
charts = {
"touch_charts": touch_charts,
"vision_charts": vision_charts,
"touch_masks": touch_masks,
"vision_masks": vision_masks,
}
else:
# combine vision charts into a single mesh
vision_charts = vision_mesh.unsqueeze(0).repeat(s1, 1, 1)
vision_masks = 3 * torch.ones(vision_charts.shape[:-1]).cuda().unsqueeze(-1)
charts = {"vision_charts": vision_charts, "vision_masks": vision_masks}
return charts
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import numpy as np
from tqdm import tqdm
import argparse
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
from torch.utils.data import DataLoader
from submitit.helpers import Checkpointable
from pterotactyl.reconstruction.vision import model
from pterotactyl.utility import utils
from pterotactyl.utility import data_loaders
import pterotactyl.objects as objects
from pterotactyl import pretrained
class Engine(Checkpointable):
def __init__(self, args):
# set seeds
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# set initial data values
self.epoch = 0
self.best_loss = 10000
self.args = args
self.last_improvement = 0
self.vision_chart_location = os.path.join(
os.path.dirname(objects.__file__), "vision_charts.obj"
)
self.results_dir = os.path.join("results", args.exp_type, args.exp_id)
if not os.path.exists(self.results_dir):
os.makedirs(self.results_dir)
self.checkpoint_dir = os.path.join(
"experiments/checkpoint/", args.exp_type, args.exp_id
)
if not os.path.exists(self.checkpoint_dir):
os.makedirs(self.checkpoint_dir)
if not self.args.eval:
utils.save_config(self.checkpoint_dir, args)
def __call__(self) -> float:
# compute mesh statistics
self.mesh_info, self.initial_mesh = utils.load_mesh_vision(
self.args, self.vision_chart_location
)
self.initial_mesh = self.initial_mesh.cuda()
self.n_vision_charts = self.initial_mesh.shape[0]
# define the model and optimizer
self.encoder = model.Deformation(self.mesh_info, self.initial_mesh, self.args)
self.encoder.cuda()
if not self.args.eval:
params = list(self.encoder.parameters())
self.optimizer = optim.Adam(params, lr=self.args.lr, weight_decay=0)
# logging information
writer = SummaryWriter(
os.path.join("experiments/tensorboard/", self.args.exp_type)
)
# get data
train_loader, valid_loaders = self.get_loaders()
# evaluate on the test set
if self.args.eval:
self.load()
with torch.no_grad():
self.validate(valid_loaders, writer)
return
# train and validate
else:
self.load()
for epoch in range(self.epoch, self.args.epochs):
self.epoch = epoch
self.train(train_loader, writer)
with torch.no_grad():
self.validate(valid_loaders, writer)
self.check_values()
# get dataloaders
def get_loaders(self):
train_loader, valid_loader = "", ""
if not self.args.eval:
# training dataloader
train_data = data_loaders.mesh_loader_vision(
self.args, set_type="recon_train"
)
train_loader = DataLoader(
train_data,
batch_size=self.args.batch_size,
shuffle=True,
num_workers=16,
collate_fn=train_data.collate,
)
# evaluation dataloader
set_type = "test" if self.args.eval else "valid"
valid_data = data_loaders.mesh_loader_vision(self.args, set_type=set_type)
valid_loader = DataLoader(
valid_data,
batch_size=self.args.batch_size,
shuffle=False,
num_workers=16,
collate_fn=valid_data.collate,
)
return train_loader, valid_loader
def train(self, data, writer):
total_loss = 0
iterations = 0
self.encoder.train()
for k, batch in enumerate(tqdm(data, smoothing=0)):
self.optimizer.zero_grad()
# initialize data
img = batch["img"].cuda()
gt_points = batch["gt_points"].cuda()
# for debugging: if you want to change the camera view, reach out to [email protected]
# self.encoder.img_encoder_global.debug_pooling(img, gt_points)
# self.encoder.img_encoder_global.debug_pooling(img, self.initial_mesh.unsqueeze(0).repeat(img.shape[0], 1, 1))
# inference
with torch.no_grad():
charts = model.prepare_mesh(batch, self.initial_mesh, self.args)
verts = self.encoder(img, charts)[0]
loss = utils.chamfer_distance(
verts, self.mesh_info["faces"], gt_points, num=self.args.number_points
)
loss = self.args.loss_coeff * loss.mean()
# backprop
loss.backward()
self.optimizer.step()
# log
message = f"Train || Epoch: {self.epoch}, loss: {loss.item():.2f}, b_ptp: {self.best_loss:.2f}"
tqdm.write(message)
total_loss += loss.item()
iterations += 1.0
writer.add_scalars(
"train_loss", {self.args.exp_id: total_loss / iterations}, self.epoch
)
def validate(self, valid_loader, writer):
total_loss = 0
self.encoder.eval()
num_examples = 0
observations = []
names = []
for v, batch in enumerate(tqdm(valid_loader)):
# initialize data
names += batch["names"]
img = batch["img"].cuda()
gt_points = batch["gt_points"].cuda()
batch_size = img.shape[0]
# inference
charts = model.prepare_mesh(batch, self.initial_mesh, self.args)
ps = list(self.encoder.parameters())
ps = torch.cat([p.flatten() for p in ps])
verts = self.encoder(img, charts)[0]
observations.append(verts)
loss = utils.chamfer_distance(
verts, self.mesh_info["faces"], gt_points, num=self.args.number_points
)
loss = self.args.loss_coeff * loss.sum()
# logs
num_examples += float(batch_size)
total_loss += loss
message = f"Valid || Epoch: {self.epoch}, ave: {total_loss / num_examples:.4f}, b_ptp: {self.best_loss:.2f}"
tqdm.write(message)
if self.args.visualize and v == 5 and self.args.eval:
meshes = torch.cat(observations, dim=0)[:, :, :3]
names = [n[0] for n in names]
utils.visualize_prediction(
self.results_dir, meshes, self.mesh_info["faces"], names
)
total_loss = total_loss / num_examples
print("*******************************************************")
print(f"Validation Accuracy: {total_loss}")
print("*******************************************************")
if not self.args.eval:
writer.add_scalars("valid_ptp", {self.args.exp_id: total_loss}, self.epoch)
self.current_loss = total_loss
# save the model
def save(self):
torch.save(self.encoder.state_dict(), self.checkpoint_dir + "/model")
torch.save(self.optimizer.state_dict(), self.checkpoint_dir + "/optim")
np.save(self.checkpoint_dir + "/epoch.npy", np.array([self.epoch + 1]))
# load the model
def load(self):
if self.args.eval and self.args.pretrained:
if self.args.use_img:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/v_t_p/"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/v_t_g/"
)
else:
if self.args.finger:
location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/t_p/"
)
else:
location = (
os.path.dirname(pretrained.__file__)
+ "/reconstruction/vision/t_g/"
)
vision_args, _ = utils.load_model_config(location)
weights = location + 'model'
self.mesh_info, self.initial_mesh = utils.load_mesh_vision(
vision_args, self.vision_chart_location
)
self.initial_mesh = self.initial_mesh.cuda()
self.n_vision_charts = self.initial_mesh.shape[0]
# define the model and optimizer
self.encoder = model.Deformation(
self.mesh_info, self.initial_mesh, vision_args
)
self.encoder.cuda()
self.encoder.load_state_dict(torch.load(weights))
else:
try:
self.encoder.load_state_dict(torch.load(self.checkpoint_dir + "/model"))
self.optimizer.load_state_dict(
torch.load(self.checkpoint_dir + "/optim")
)
self.epoch = np.load(self.checkpoint_dir + "/epoch.npy")[0]
except:
return
# check if the latest validation beats the previous, and save model if so
def check_values(self):
if self.best_loss >= self.current_loss:
improvement = self.best_loss - self.current_loss
print(
f"Saving with {improvement:.3f} improvement in Chamfer Distance on Validation Set "
)
self.best_loss = self.current_loss
self.last_improvement = 0
self.save()
else:
self.last_improvement += 1
if self.last_improvement >= self.args.patience:
print(f"Over {self.args.patience} steps since last imporvement")
print("Exiting now")
exit()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--cut",
type=float,
default=0.33,
help="The shared size of features in the GCN.",
)
parser.add_argument(
"--epochs", type=int, default=1000, help="Number of epochs to use."
)
parser.add_argument(
"--limit_data",
action="store_true",
default=False,
help="use less data, for debugging.",
)
parser.add_argument(
"--finger", action="store_true", default=False, help="use only one finger."
)
parser.add_argument(
"--number_points",
type=int,
default=30000,
help="number of points sampled for the chamfer distance.",
)
parser.add_argument(
"--seed", type=int, default=0, help="Setting for the random seed."
)
parser.add_argument(
"--lr", type=float, default=0.0003, help="Initial learning rate."
)
parser.add_argument(
"--eval",
action="store_true",
default=False,
help="Evaluate the trained model on the test set.",
)
parser.add_argument("--batch_size", type=int, default=16, help="Size of the batch.")
parser.add_argument(
"--val_grasps",
type=int,
default=-1,
help="number of grasps to use during validation.",
)
parser.add_argument(
"--exp_id", type=str, default="test", help="The experiment name."
)
parser.add_argument(
"--exp_type", type=str, default="test", help="The experiment group."
)
parser.add_argument(
"--use_img", action="store_true", default=False, help="To use the image."
)
parser.add_argument(
"--use_touch",
action="store_true",
default=False,
help="To use the touch information.",
)
parser.add_argument(
"--patience",
type=int,
default=70,
help="How many epochs without imporvement before training stops.",
)
parser.add_argument(
"--loss_coeff", type=float, default=9000.0, help="Coefficient for loss term."
)
parser.add_argument(
"--num_CNN_blocks",
type=int,
default=6,
help="Number of image blocks in the CNN.",
)
parser.add_argument(
"--layers_per_block",
type=int,
default=3,
help="Number of image layers in each block in the CNN.",
)
parser.add_argument(
"--CNN_ker_size",
type=int,
default=5,
help="Size of the image kernel in each CNN layer.",
)
parser.add_argument(
"--num_GCN_layers",
type=int,
default=20,
help="Number of GCN layers in the mesh deformation network.",
)
parser.add_argument(
"--hidden_GCN_size",
type=int,
default=300,
help="Size of the feature vector for each GCN layer in the mesh deformation network.",
)
parser.add_argument(
"--num_grasps", type=int, default=5, help="Number of grasps to train with. "
)
parser.add_argument(
"--visualize",
action="store_true",
default=False,
help="visualize predictions and actions while evaluating",
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="load the pretrained model",
)
args = parser.parse_args()
trainer = Engine(args)
trainer()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
from glob import glob
from tqdm import tqdm
import numpy as np
import torch
from torchvision import transforms
import pterotactyl.objects as objects
import pterotactyl.object_data as object_data
POINT_CLOUD_LOCATION = os.path.join(
os.path.dirname(object_data.__file__), "point_cloud_info/"
)
GRASP_LOCATION = os.path.join(os.path.dirname(object_data.__file__), "grasp_info/")
TOUCH_LOCATION = os.path.join(os.path.dirname(object_data.__file__), "touch_charts/")
IMAGE_LOCATION = os.path.join(
os.path.dirname(object_data.__file__), "images_colourful/"
)
DATA_SPLIT = np.load(
os.path.join(os.path.dirname(objects.__file__), "data_split.npy"), allow_pickle=True
).item()
OBJ_LOCATION = os.path.join(os.path.dirname(object_data.__file__), "object_info/")
preprocess = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
def get_finger_transforms(obj, grasp, finger):
ref_location = os.path.join(
GRASP_LOCATION, obj, str(grasp), f"{finger}_ref_frame.npy"
)
touch_info = np.load(ref_location, allow_pickle=True).item()
rot = touch_info["rot"]
pos = touch_info["pos"]
return torch.FloatTensor(rot), torch.FloatTensor(pos)
# class used for obtaining an instance of the dataset for training touch chart prediction
# to be passed to a pytorch dataloader
class mesh_loader_touch(object):
def __init__(self, args, set_type="train"):
# initialization of data locations
self.args = args
self.set_type = set_type
object_names = [
f.split("/")[-1].split(".")[0] for f in glob(f"{IMAGE_LOCATION}/*.npy")
]
self.object_names = []
if self.args.limit_data:
random.shuffle(object_names)
object_names = object_names[:3000]
for n in tqdm(object_names):
if os.path.exists(POINT_CLOUD_LOCATION + n + ".npy"):
if os.path.exists(GRASP_LOCATION + n):
if n in DATA_SPLIT[self.set_type]:
successful_touches = glob(
os.path.join(GRASP_LOCATION, n, "*", "*_touch.npy")
)
if self.args.limit_data:
random.shuffle(successful_touches)
successful_touches = successful_touches[:7]
for touch in successful_touches:
grasp_number = touch.split("/")[-2]
finger_number = touch.split("/")[-1].split("_")[0]
self.object_names.append([n, grasp_number, finger_number])
print(f"The number of {set_type} set objects found : {len(self.object_names)}")
def __len__(self):
return len(self.object_names)
def standerdize_point_size(self, points):
np.random.shuffle(points)
points = torch.FloatTensor(points)
while points.shape[0] < self.args.num_samples:
points = torch.cat((points, points, points, points))
perm = torch.randperm(points.shape[0])
idx = perm[: self.args.num_samples]
return points[idx]
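# Note: the loop above tiles the point set until at least num_samples points are
# available, then a random permutation selects exactly num_samples of them, so
# every returned cloud has a fixed size.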
def __getitem__(self, index):
object_name, grasp, finger = self.object_names[index]
# meta data
data = {}
data["names"] = object_name, grasp, finger
# hand information
data["rot"], data["pos"] = get_finger_transforms(object_name, grasp, finger)
# simulated touch information
touch = np.load(
os.path.join(GRASP_LOCATION, object_name, grasp, f"{finger}_touch.npy")
)
data["sim_touch"] = (
torch.FloatTensor(touch).permute(2, 0, 1).contiguous().view(3, 121, 121)
/ 255.0
)
# point cloud information
points = np.load(
os.path.join(GRASP_LOCATION, object_name, grasp, f"{finger}_points.npy")
)
data["samples"] = self.standerdize_point_size(points)
return data
def collate(self, batch):
data = {}
data["names"] = [item["names"] for item in batch]
data["samples"] = torch.cat([item["samples"].unsqueeze(0) for item in batch])
data["sim_touch"] = torch.cat(
[item["sim_touch"].unsqueeze(0) for item in batch]
)
data["ref"] = {}
data["ref"]["rot"] = torch.cat([item["rot"].unsqueeze(0) for item in batch])
data["ref"]["pos"] = torch.cat([item["pos"].unsqueeze(0) for item in batch])
return data
# class used for obtaining an instance of the dataset for training chart deformation
# to be passed to a pytorch dataloader
class mesh_loader_vision(object):
def __init__(self, args, set_type="train"):
# initialization of data locations
self.args = args
self.set_type = set_type
object_names = [
f.split("/")[-1].split(".")[0] for f in glob(f"{IMAGE_LOCATION}/*.npy")
]
if self.set_type == "recon_train" or self.set_type == "auto_train":
self.get_instance = self.get_training_instance
else:
self.get_instance = self.get_validation_instance
self.object_names = []
# for debugging use less data
if args.limit_data:
random.Random(0).shuffle(object_names)
object_names = object_names[:2000]
seed = 0
for n in tqdm(object_names):
if os.path.exists(POINT_CLOUD_LOCATION + n + ".npy"):
if os.path.exists(TOUCH_LOCATION + n):
if n in DATA_SPLIT[self.set_type]:
iters = (
1
if (
self.set_type == "recon_train"
or self.set_type == "auto_train"
)
else 5
)
for _ in range(iters):
self.object_names.append([n, seed])
seed += 1
print(f"The number of {set_type} set objects found : {len(self.object_names)}")
def __len__(self):
return len(self.object_names)
def get_training_instance(self, index):
obj, seed = random.choice(self.object_names)
num_grasps_choice = random.choice(range(0, self.args.num_grasps + 1))
grasp_choices = [i for i in range(50)]
random.shuffle(grasp_choices)
grasps = grasp_choices[:num_grasps_choice]
return obj, grasps
def get_validation_instance(self, index):
obj, seed = self.object_names[index]
grasp_choices = [i for i in range(50)]
if self.args.val_grasps >= 0 and self.args.eval:
num_grasps_choice = self.args.val_grasps
else:
num_grasps_choice = random.Random(seed).choice(
range(0, self.args.num_grasps + 1)
)
random.Random(seed).shuffle(grasp_choices)
grasps = grasp_choices[:num_grasps_choice]
return obj, grasps
# load object point cloud
def get_points(self, obj):
point_location = os.path.join(POINT_CLOUD_LOCATION, obj + ".npy")
samples = np.load(point_location)
np.random.shuffle(samples)
gt_points = torch.FloatTensor(samples[: self.args.number_points])
return gt_points
# load image of object
def get_image(self, obj):
img = torch.empty((1))
if self.args.use_img:
img_location = os.path.join(IMAGE_LOCATION, obj + ".npy")
img = torch.FloatTensor(np.load(img_location)).permute(2, 0, 1) / 255.0
return torch.FloatTensor(img)
# load touch information from the object
def get_touch_info(self, obj, grasps):
touch_charts = torch.ones((1))
if self.args.use_touch:
remaining = self.args.num_grasps - len(grasps)
all_touch_charts = torch.FloatTensor(
np.load(TOUCH_LOCATION + obj + "/touch_charts.npy")
).view(50, 4, 25, 4)
if self.args.finger:
touch_charts = all_touch_charts[grasps][:, 1]
touch_charts = torch.cat((touch_charts, torch.zeros(remaining, 25, 4)))
else:
touch_charts = all_touch_charts[grasps]
touch_charts = torch.cat(
(touch_charts, torch.zeros(remaining, 4, 25, 4))
)
return touch_charts
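# Note: the stored touch charts have shape (50 grasps, 4 fingers, 25 vertices, 4),
# where the last channel holds x, y, z and the mask value; with --finger only
# finger index 1 is kept, and unused grasp slots are zero-padded so every example
# provides exactly num_grasps charts.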
def __getitem__(self, index):
obj, grasps = self.get_instance(index)
data = {}
# meta data
data["names"] = OBJ_LOCATION + obj, grasps
# load sampled ground truth points
data["gt_points"] = self.get_points(obj)
# load images
data["img"] = self.get_image(obj)
# get touch information
data["touch_charts"] = self.get_touch_info(obj, grasps)
return data
def collate(self, batch):
data = {}
data["names"] = [item["names"] for item in batch]
data["gt_points"] = torch.cat(
[item["gt_points"].unsqueeze(0) for item in batch]
)
data["img"] = torch.cat([item["img"].unsqueeze(0) for item in batch])
data["touch_charts"] = torch.cat(
[item["touch_charts"].unsqueeze(0) for item in batch]
)
return data
# class used for obtaining an instance of the dataset for training chart deformation
# to be passed to a pytorch dataloader
class mesh_loader_active(object):
def __init__(self, args, set_type="RL_train"):
# initialization of data locations
self.args = args
self.set_type = set_type
object_names = [
f.split("/")[-1].split(".")[0] for f in glob(f"{IMAGE_LOCATION}/*.npy")
]
self.object_names = []
# for debugging use less data
if args.limit_data:
random.Random(0).shuffle(object_names)
object_names = object_names[:400]
for n in tqdm(object_names):
if os.path.exists(POINT_CLOUD_LOCATION + n + ".npy"):
if n in DATA_SPLIT[self.set_type]:
self.object_names.append(n)
print(f"The number of {set_type} set objects found : {len(self.object_names)}")
def __len__(self):
return (
len(self.object_names) // self.args.env_batch_size
) * self.args.env_batch_size
def get_instance(self, index):
obj = self.object_names[index]
num_grasps_choice = random.choice(range(0, self.args.num_grasps + 1))
grasp_choices = [i for i in range(50)]
random.shuffle(grasp_choices)
grasps = grasp_choices[:num_grasps_choice]
return obj, grasps
# load object point cloud
def get_points(self, obj):
point_location = os.path.join(POINT_CLOUD_LOCATION, obj + ".npy")
samples = np.load(point_location)
np.random.shuffle(samples)
gt_points = torch.FloatTensor(samples[: self.args.number_points])
return gt_points
# load image of object
def get_image(self, obj):
img = torch.empty((1))
if self.args.use_img:
img_location = os.path.join(IMAGE_LOCATION, obj + ".npy")
img = torch.FloatTensor(np.load(img_location)).permute(2, 0, 1) / 255.0
return torch.FloatTensor(img)
def __getitem__(self, index):
obj = self.object_names[index]
data = {}
# meta data
data["names"] = OBJ_LOCATION + obj
# load sampled ground truth points
data["gt_points"] = self.get_points(obj)
# load images
data["img"] = self.get_image(obj)
return data
def collate(self, batch):
data = {}
data["names"] = [item["names"] for item in batch]
data["gt_points"] = torch.cat(
[item["gt_points"].unsqueeze(0) for item in batch]
)
data["img"] = torch.cat([item["img"].unsqueeze(0) for item in batch])
return data
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import os
from PIL import Image
import math
import json
import numpy as np
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
import xml.etree.ElementTree as ET
from scipy import ndimage
from collections import namedtuple
from pytorch3d.loss import chamfer_distance as cuda_cd
from pytorch3d.ops.mesh_face_areas_normals import mesh_face_areas_normals
from pytorch3d.ops.sample_points_from_meshes import _rand_barycentric_coords
from pytorch3d.io.obj_io import load_obj, save_obj
from pterotactyl.utility import pretty_render
import pterotactyl.objects as objects
def load_mesh_vision(args, obj):
# load obj file
verts, faces = load_mesh_touch(obj)
# get adjacency matrix information
adj_info = adj_init(verts, faces, args)
return adj_info, verts
# set seeds for consistency
def set_seeds(seed):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
# normalizes a symmetric, binary adj matrix such that the sum of each row is 1
def normalize_adj(mx):
rowsum = mx.sum(1)
r_inv = (1. / rowsum).view(-1)
r_inv[r_inv != r_inv] = 0.
mx = torch.mm(torch.eye(r_inv.shape[0]).to(mx.device) * r_inv, mx)
return mx
# defines the adjacency matrix for an object
def adj_init(verts, faces, args):
# get generic adjacency matrix for vision charts
adj = calc_adj(faces)
adj_info = {}
adj_info['origional'] = normalize_adj(adj.clone())
# this combines the adjacency information of touch and vision charts
# the output adj matrix has the first k rows corresponding to vision charts, and the last |V| - k
# corresponding to touch charts. Similarly, the first l faces correspond to vision charts, and the
# remaining correspond to touch charts
if args.use_touch:
adj, faces = adj_fuse_touch(verts, faces, adj, args)
adj = normalize_adj(adj)
adj_info['adj'] = adj
adj_info['faces'] = faces
return adj_info
# combines graph for vision and touch charts to define a fused adjacency matrix
def adj_fuse_touch(verts, faces, adj, args):
verts = verts.data.cpu().numpy()
hash = {}
number_of_grasps = args.num_grasps
# find vertices which have the same 3D position
for e, v in enumerate(verts):
if v.tobytes() in hash:
hash[v.tobytes()].append(e)
else:
hash[v.tobytes()] = [e]
# load object information for generic touch chart
if args.use_touch:
chart_location = os.path.join(
os.path.dirname(objects.__file__), "touch_chart.obj"
)
sheet_verts, sheet_faces = load_mesh_touch(chart_location)
sheet_adj = calc_adj(sheet_faces)
# central vertex for each touch chart that will communicate with all vision charts
central_point = 4
fingers = 1 if args.finger else 4
central_points = [central_point + (i * sheet_adj.shape[0]) + adj.shape[0] for i in
range(fingers * number_of_grasps)]
# define and fill new adjacency matrix with vision and touch charts
new_dim = adj.shape[0] + (fingers * number_of_grasps * sheet_adj.shape[0])
new_adj = torch.zeros((new_dim, new_dim)).cuda()
new_adj[: adj.shape[0], :adj.shape[0]] = adj.clone()
for i in range(fingers * number_of_grasps):
start = adj.shape[0] + (sheet_adj.shape[0] * i)
end = adj.shape[0] + (sheet_adj.shape[0] * (i + 1))
new_adj[start: end, start:end] = sheet_adj.clone()
adj = new_adj
# define new faces with vision and touch charts
all_faces = [faces]
for i in range(fingers * number_of_grasps):
temp_sheet_faces = sheet_faces.clone() + verts.shape[0]
temp_sheet_faces += i * sheet_verts.shape[0]
all_faces.append(temp_sheet_faces)
faces = torch.cat(all_faces)
# update adjacency matrix to allow communication between vision and touch charts
for key in hash.keys():
cur_verts = hash[key]
if len(cur_verts) > 1:
for v1 in cur_verts:
for v2 in cur_verts: # vertices on the boundary of vision charts can communicate
adj[v1, v2] = 1
if args.use_touch:
for c in central_points: # touch and vision charts can communicate
adj[v1, c] = 1
adj[c, v1] = 1
return adj, faces
# computes adjacency matrix from face information
def calc_adj(faces):
v1 = faces[:, 0]
v2 = faces[:, 1]
v3 = faces[:, 2]
num_verts = int(faces.max())
adj = torch.eye(num_verts + 1).to(faces.device)
adj[(v1, v2)] = 1
adj[(v1, v3)] = 1
adj[(v2, v1)] = 1
adj[(v2, v3)] = 1
adj[(v3, v1)] = 1
adj[(v3, v2)] = 1
return adj
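# Tiny worked example (hypothetical input): for a single face [0, 1, 2] the result
# is the 3x3 matrix of ones, i.e. the identity plus both directions of the edges
# 0-1, 0-2 and 1-2, so every vertex is adjacent to itself and to the other two.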
# sample points from a batch of meshes
def batch_sample(verts, faces, num=10000):
# Pytorch3D based code
bs = verts.shape[0]
face_dim = faces.shape[0]
vert_dim = verts.shape[1]
# following the PyTorch3D convention, shift faces to correctly index the flattened vertices
F = faces.unsqueeze(0).repeat(bs, 1, 1)
F += vert_dim * torch.arange(0, bs).unsqueeze(-1).unsqueeze(-1).to(F.device)
# flatten vertices and faces
F = F.reshape(-1, 3)
V = verts.reshape(-1, 3)
with torch.no_grad():
areas, _ = mesh_face_areas_normals(V, F)
Ar = areas.reshape(bs, -1)
Ar[Ar != Ar] = 0
Ar = torch.abs(Ar / Ar.sum(1).unsqueeze(1))
Ar[Ar != Ar] = 1
sample_face_idxs = Ar.multinomial(num, replacement=True)
sample_face_idxs += face_dim * torch.arange(0, bs).unsqueeze(-1).to(Ar.device)
# Get the vertex coordinates of the sampled faces.
face_verts = V[F]
v0, v1, v2 = face_verts[:, 0], face_verts[:, 1], face_verts[:, 2]
# Randomly generate barycentric coords.
w0, w1, w2 = _rand_barycentric_coords(bs, num, V.dtype, V.device)
# Use the barycentric coords to get a point on each sampled face.
A = v0[sample_face_idxs] # (N, num_samples, 3)
B = v1[sample_face_idxs]
C = v2[sample_face_idxs]
samples = w0[:, :, None] * A + w1[:, :, None] * B + w2[:, :, None] * C
return samples
# implemented from:
# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py
# MIT License
# loads the initial mesh and returns vertex, and face information
def load_mesh_touch(obj):
obj_info = load_obj(obj)
verts = obj_info[0]
faces = obj_info[1].verts_idx
verts = torch.FloatTensor(verts).cuda()
faces = torch.LongTensor(faces).cuda()
return verts, faces
# returns the chamfer distance between a mesh and a point cloud
def chamfer_distance(verts, faces, gt_points, num=1000, repeat=3):
pred_points= batch_sample(verts, faces, num=num)
cd, _ = cuda_cd(pred_points, gt_points, batch_reduction=None)
if repeat > 1:
cds = [cd]
for i in range(repeat - 1):
pred_points = batch_sample(verts, faces, num=num)
cd, _ = cuda_cd(pred_points, gt_points, batch_reduction=None)
cds.append(cd)
cds = torch.stack(cds)
cd = cds.mean(dim=0)
return cd
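# Note: sampling points from the predicted mesh is stochastic, so the distance is
# recomputed `repeat` times (3 by default) and averaged to reduce the variance of
# the reported Chamfer distance.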
# saves a point cloud as a .obj file
def save_points(file, points):
location = f'{file}.obj'
try:
write_obj(location, points.data.cpu().numpy(), [])
except:
write_obj(location, points, [])
# converts a voxel object to a point cloud
def extract_surface(voxel):
conv_filter = torch.ones((1, 1, 3, 3, 3)).cuda()
local_occupancy = F.conv3d(voxel.unsqueeze(
0).unsqueeze(0), conv_filter, padding=1)
local_occupancy = local_occupancy.squeeze(0).squeeze(0)
# only elements with exposed faces
surface_positions = (local_occupancy < 27) * (local_occupancy > 0)
points = torch.where(surface_positions)
points = torch.stack(points)
points = points.permute(1, 0)
return points.type(torch.cuda.FloatTensor)
# saves a mesh as an .obj file
def write_obj(filename, verts, faces):
""" write the verts and faces on file."""
with open(filename, 'w') as f:
# write vertices
f.write('g\n# %d vertex\n' % len(verts))
for vert in verts:
f.write('v %f %f %f\n' % tuple(vert))
# write faces
f.write('# %d faces\n' % len(faces))
for face in faces:
f.write('f %d %d %d\n' % tuple(face))
# makes the sphere of actions
class get_circle(object):
def __init__(self, num_points, rank=0):
action_position = []
a = 4 * np.pi / float(num_points)
d = math.sqrt(a)
M_t = round(np.pi / d)
d_t = np.pi / M_t
d_phi = a / d_t
sphere_positions = []
for i in range(0, M_t):
theta = np.pi * (i + .5) / M_t
M_phi = round(2 * np.pi * math.sin(theta) / d_phi)
for j in range(0, M_phi):
phi = 2 * np.pi * j / M_phi
point = self.get_point(theta, phi)
sphere_positions.append([theta, phi])
action_position.append(point)
self.points = torch.stack(action_position)
self.sphere_points = sphere_positions
if num_points != self.points.shape[0]:
print(f' we have {self.points.shape} points but want {num_points}')
exit()
def get_point(self, a, b):
x = math.sin(a) * math.cos(b)
y = math.sin(a) * math.sin(b)
z = math.cos(a)
return torch.FloatTensor([x, y, z])
# get the normal of a 3D triangle
def normal_from_triangle(a, b, c):
A = b - a
B = c - a
normal = np.cross(A, B)
normal = normalize_vector(normal.reshape(1, 1, 3))
return normal.reshape(3)
# normalizes a vector
def normalize_vector(vector):
n = np.linalg.norm(vector, axis=2)
vector[:, :, 0] /= n
vector[:, :, 1] /= n
vector[:, :, 2] /= n
return vector
# computes the quaternion that rotates vec1 onto vec2
def quats_from_vectors(vec1, vec2):
vec1 = np.array(vec1)
vec2 = np.array(vec2)
a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
if s == 0:
s = 1
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
quat = R.from_matrix(rotation_matrix).as_quat()
return quat
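# Note: this is the standard Rodrigues construction R = I + K + K^2 * (1 - c) / s^2,
# where K is the skew-symmetric matrix of v = a x b, c = a . b and s = |v|; the
# division by zero in the parallel case is sidestepped by forcing s = 1 when s == 0.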
# combines two quaternions
def combine_quats(q1, q2):
r1 = R.from_quat(q1).as_matrix()
r2 = R.from_quat(q2).as_matrix()
new_q = R.from_matrix(np.matmul(r1, r2)).as_quat()
return new_q
# converts a Euler rotation and translation into a 4x4 pose matrix
def euler2matrix(angles=[0, 0, 0], translation=[0, 0, 0], xyz="xyz", degrees=False):
r = R.from_euler(xyz, angles, degrees=degrees)
pose = np.eye(4)
pose[:3, 3] = translation
pose[:3, :3] = r.as_matrix()
return pose
# adds redundant faces
def add_faces(faces):
f1 = np.array(faces[:, 0]).reshape(-1, 1)
f2 = np.array(faces[:, 1]).reshape(-1, 1)
f3 = np.array(faces[:, 2]).reshape(-1, 1)
faces_2 = np.concatenate((f1, f3, f2), axis=-1)
faces_3 = np.concatenate((f3, f2, f1), axis=-1)
faces = np.concatenate((faces, faces_2, faces_3), axis=0)
return faces
# centers a pointcloud and scales it to a defined size
def scale_points(points, scale = 1.):
for i in range(3):
points[:,i] -= points[:,i].min()
points = points / points.max()
points = points / scale
for i in range(3):
verts_range = points[:, i].max()
points[:, i] -= verts_range / 2.
return points
# makes a urdf file pointing to a mesh
def make_urdf(verts, faces, urdf_location):
obj_location = urdf_location.replace('.urdf', '.obj')
faces = add_faces(faces)
save_obj(obj_location, torch.FloatTensor(verts), torch.LongTensor(faces), 4)
blank_location = os.path.join(os.path.dirname(objects.__file__), 'blank.urdf')
tree = ET.parse(blank_location)
root = tree.getroot()
root.attrib['name'] = 'object.urdf'
root[0][2][1][0].attrib['filename'] = obj_location
root[0][3][1][0].attrib['filename'] = obj_location
tree.write(urdf_location)
# loads an obj file and scales it
def get_obj_data(obj_location, scale = 1.):
obj_info = load_obj(obj_location)
verts = obj_info[0].data.numpy()
verts = scale_points(verts, scale)
faces = obj_info[1].verts_idx.data.numpy()
return verts, faces
# converts a mesh to a voxel array by subdividing the mesh
def mesh_to_voxel(verts, faces, resolution):
# maximum squared side length allowed for the subdivided triangles
smallest_side = (1. / resolution) ** 2
# center the mesh and scales to unit
verts_max = verts.max()
verts_min = verts.min()
verts = (verts - verts_min) / (verts_max - verts_min) - 0.5
# get all of the mesh triangles
faces = faces.clone()
v1 = torch.index_select(verts, 0, faces[:, 0])
v2 = torch.index_select(verts, 0, faces[:, 1])
v3 = torch.index_select(verts, 0, faces[:, 2])
# define points as the set of all triangle vertices
points = torch.cat((v1, v2, v3))
while True:
# get the maximum squared side length of all triangles
side_1 = (torch.abs(v1 - v2) ** 2).sum(dim=1).unsqueeze(1)
side_2 = (torch.abs(v2 - v3) ** 2).sum(dim=1).unsqueeze(1)
side_3 = (torch.abs(v3 - v1) ** 2).sum(dim=1).unsqueeze(1)
sides = torch.cat((side_1, side_2, side_3), dim=1)
sides = sides.max(dim=1)[0]
# identify which triangles are still too large; stop when none remain
keep = sides > smallest_side
if keep.sum() == 0:
break
# remove triangles which are small enough
v1 = v1[keep]
v2 = v2[keep]
v3 = v3[keep]
v4 = (v1 + v3) / 2.
v5 = (v1 + v2) / 2.
v6 = (v2 + v3) / 2.
del (side_1, side_2, side_3, keep, sides)
# add new vertices to set of points
points = torch.cat((points, v4, v5, v6))
# add subdevided traingles to list of triagnles
vertex_set = [v1, v2, v3, v4, v5, v6]
new_traingles = [[0, 3, 4], [4, 1, 5], [4, 3, 5], [3, 2, 5]]
new_verts = []
for i in range(4):
for j in range(3):
if i == 0:
new_verts.append(vertex_set[new_traingles[i][j]])
else:
new_verts[j] = torch.cat(
(new_verts[j], vertex_set[new_traingles[i][j]]))
v1, v2, v3 = new_verts
del (v4, v5, v6, vertex_set, new_verts)
del (v1, v2, v3)
if points is None:
return None
# scales points
points = ((points + .5) * (resolution - 1)).long()
points = torch.split(points.permute(1, 0), 1, dim=0)
points = [m.unsqueeze(0) for m in points]
# set grid points to on if a point exists inside them
voxel = torch.zeros((resolution, resolution, resolution)).cuda()
voxel[points] = 1
return voxel
# converts a voxel grid to a pointcloud
def voxel_to_pointcloud(voxel):
voxel = voxel.float()
off_positions = voxel == 0
conv_filter = torch.ones((1, 1, 3, 3, 3))
surface_voxel = torch.zeros(voxel.shape).cuda()
conv_filter = conv_filter.cuda()
local_occupancy = F.conv3d(voxel.unsqueeze(0).unsqueeze(0), conv_filter, padding=1)
local_occupancy = local_occupancy.squeeze(0).squeeze(0)
surface_positions = (local_occupancy < 27) * (local_occupancy > 0)
surface_voxel[surface_positions] = 1
surface_voxel[off_positions] = 0
points = torch.where(surface_voxel != 0)
points = torch.stack(points).permute(1, 0).float()
return points
# implemented from:
# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py
# MIT License
def extract_ODMs(voxels):
voxels = voxels.data.cpu().numpy()
dim = voxels.shape[0]
a, b, c = np.where(voxels == 1)
large = int(dim * 1.5)
big_list = [[[[-1, large] for j in range(dim)] for i in range(dim)] for k in range(3)]
    # for each of the three scan axes, record the first and last occurrence of an
    # occupied voxel at every pixel (the highest and lowest indices along that axis)
for i, j, k in zip(a, b, c):
big_list[0][i][j][0] = (max(k, big_list[0][i][j][0]))
big_list[0][i][j][1] = (min(k, big_list[0][i][j][1]))
big_list[1][i][k][0] = (max(j, big_list[1][i][k][0]))
big_list[1][i][k][1] = (min(j, big_list[1][i][k][1]))
big_list[2][j][k][0] = (max(i, big_list[2][j][k][0]))
big_list[2][j][k][1] = (min(i, big_list[2][j][k][1]))
ODMs = np.zeros((6, dim, dim)) # will hold odms
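    # ODMs[2*d] and ODMs[2*d + 1] hold, for scan axis d, the depth from the two opposite
    # faces of the grid to the first occupied voxel at each pixel (dim if nothing is hit)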
for i in range(dim):
for j in range(dim):
ODMs[0, i, j] = dim - 1 - big_list[0][i][j][0] if big_list[0][i][j][0] > -1 else dim
ODMs[1, i, j] = big_list[0][i][j][1] if big_list[0][i][j][1] < large else dim
ODMs[2, i, j] = dim - 1 - big_list[1][i][j][0] if big_list[1][i][j][0] > -1 else dim
ODMs[3, i, j] = big_list[1][i][j][1] if big_list[1][i][j][1] < large else dim
ODMs[4, i, j] = dim - 1 - big_list[2][i][j][0] if big_list[2][i][j][0] > -1 else dim
ODMs[5, i, j] = big_list[2][i][j][1] if big_list[2][i][j][1] < large else dim
return ODMs
# implemented from:
# https://github.com/EdwardSmith1884/GEOMetrics/blob/master/utils.py
# MIT License
# use orthographic depth maps to do space carving
def apply_ODMs(ODMs, dim):
voxel = np.ones((dim, dim, dim))
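    # space carving: start from a fully occupied grid and, for every pixel of every depth
    # map, clear the voxels lying between the grid boundary and the observed surface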
a, b, c = np.where(ODMs > 0)
for x, i, j in zip(a, b, c):
pos = int(ODMs[x, i, j])
if x == 0:
voxel[i, j, -pos:] = 0
if x == 1:
voxel[i, j, :pos] = 0
if x == 2:
voxel[i, -pos:, j] = 0
if x == 3:
voxel[i, :pos, j] = 0
if x == 4:
voxel[-pos:, i, j] = 0
if x == 5:
voxel[:pos, i, j] = 0
voxel[ndimage.binary_fill_holes(voxel)] = 1
return torch.LongTensor(voxel).cuda()
# aligns a pointcloud to the size of a mesh
def realign_points(points, verts):
points = points.float()
verts = verts
for i in range(3):
points[:, i] = points[:, i] - ((points[:, i].max() + points[:, i].min()) / 2.)
v_range = verts[:, i].max() - verts[:, i].min()
p_range = points[:, i].max() + 1 - points[:, i].min()
points[:, i] = points[:, i] * v_range / p_range
return points
# saves arguments for an experiment
def save_config(location, args):
abs_path = os.path.abspath(location)
args = vars(args)
args['check_point'] = abs_path
config_location = f'{location}/config.json'
with open(config_location, 'w') as fp:
json.dump(args, fp, indent=4)
return config_location
# loads arguments from an experiment and the model weights
def load_model_config(location):
config_location = f'{location}/config.json'
with open(config_location) as json_file:
data = json.load(json_file)
weight_location = data['check_point'] + '/model'
args = namedtuple("ObjectName", data.keys())(*data.values())
return args, weight_location
# for nicely visualizing depth images
def visualize_depth(depth, max_depth=0.025):
depth[depth > max_depth] = 0
depth = 255 * (depth / max_depth)
depth = depth.astype(np.uint8)
return depth
# visualize the actions used by the policy
def visualize_actions(location, actions, args):
actions = actions.view(-1).long().data.cpu().numpy()
circle = get_circle(args.num_actions)
plt.hist(actions, bins=np.arange(0, args.num_actions+ 1 ))
plt.title("actions histogram")
plt.savefig(location + '/histogram.png')
plt.close()
array = np.zeros([args.num_actions * 2, args.num_actions * 4, 3])
for i in range(args.num_actions):
x, y, z = circle.points[i]
        x = math.atan2(-x, y)
        x = (x + np.pi / 2.0) / (np.pi * 2.0) + np.pi * (28.670 / 360.0)
        y = math.acos(z) / np.pi
        x_co = int(y * args.num_actions * 12 / (2 * np.pi))
        y_co = int(x * args.num_actions * 24 / (2 * np.pi))
        for di in range(3):
            for dj in range(3):
                array[x_co - 1 + di, y_co - 1 + dj] += 1.
for a in actions:
x, y, z = circle.points[a]
        x = math.atan2(-x, y)
        x = (x + np.pi / 2.0) / (np.pi * 2.0) + np.pi * (28.670 / 360.0)
        y = math.acos(z) / np.pi
x_co = int(y * args.num_actions * 12 / (2 * np.pi))
y_co = int(x * args.num_actions * 24 / (2 * np.pi))
for i in range(3):
for j in range(3):
array[x_co - 1 + i, y_co - 1 + j] += 1.
array = array * 255. / array.max()
if args.use_img:
visible_location = os.path.join(
os.path.dirname(objects.__file__), "visible.obj"
)
seen_points = np.array(load_obj(visible_location)[0])
seen_points = seen_points / np.sqrt(((seen_points ** 2).sum(axis=1))).reshape(-1, 1)
for point in seen_points:
x, y, z = point
            x = math.atan2(-x, y)
            x = (x + np.pi / 2.0) / (np.pi * 2.0) + np.pi * (28.670 / 360.0)
            y = math.acos(z) / np.pi
x_co = int(y * args.num_actions * 12 / (2 * np.pi))
y_co = int(x * args.num_actions * 24 / (2 * np.pi))
for i in range(5):
for j in range(5):
if array[x_co - 2 + i, y_co - 2 + j].sum() == 0:
array[x_co - 2 + i, y_co - 2 + j] = (255, 127, 80)
array[np.all(array == (0, 0, 0), axis=-1)] = (0, 204, 204)
check_array = np.zeros([args.num_actions * 2, args.num_actions * 4])
for point in seen_points:
x, y, z = point
            x = math.atan2(-x, y)
            x = (x + np.pi / 2.0) / (np.pi * 2.0) + np.pi * (28.670 / 360.0)
            y = math.acos(z) / np.pi
x_co = int(y * args.num_actions * 12 / (2 * np.pi))
y_co = int(x * args.num_actions * 24 / (2 * np.pi))
for i in range(3):
for j in range(3):
check_array[x_co - 1 + i, y_co - 1 + j] = 100
on = 0.
off = 0.
for a in actions:
x, y, z = circle.points[a]
            x = math.atan2(-x, y)
            x = (x + np.pi / 2.0) / (np.pi * 2.0) + np.pi * (28.670 / 360.0)
            y = math.acos(z) / np.pi
x_co = int(y * args.num_actions * 12 / (2 * np.pi))
y_co = int(x * args.num_actions * 24 / (2 * np.pi))
if check_array[x_co, y_co] > 0:
on += 1
else:
off += 1
print(f'percentage in vision is {on * 100 / (on+off):.2f} % for policy')
else:
array[np.all(array == (0, 0, 0), axis=-1)] = (0, 204, 204)
array = array.astype(np.uint8)
Image.fromarray(array).save(location + '/sphere_projection.png')
# renders the predicted meshes alongside the ground truth objects
def visualize_prediction(location, meshes, faces, names):
data = {}
meshes = meshes.data.cpu().numpy()
faces = faces.data.cpu().numpy()
locations = []
for n in names:
n = '/'+ n.split('/')[-1] + '/'
locations.append(location + n)
if not os.path.exists(locations[-1]):
os.makedirs(locations[-1])
data['locations'] = locations
pretty_render.render_representations(locations, names, meshes, faces)
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import os
os.environ["PYOPENGL_PLATFORM"] = "egl"
import numpy as np
import trimesh
from scipy.spatial.transform import Rotation as R
import pyrender
from PIL import Image
import torch
from tqdm.contrib import tzip
from pterotactyl.utility import utils
class CameraRenderer:
def __init__(self, cameraResolution=[512, 512]):
self.W = cameraResolution[0]
self.H = cameraResolution[1]
self._init_pyrender()
def _init_pyrender(self):
self.scene = self._init_scene()
self.objectNodes = []
self.handNodes = []
self._init_camera()
self.r = pyrender.OffscreenRenderer(self.W, self.H)
def _init_scene(self):
scene = pyrender.Scene(ambient_light=[0.3, 0.3, 0.3])
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[0, -0.8, 0.3], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.8)
scene.add(light, pose=light_pose)
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[0, 0.8, 0.3], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.8)
scene.add(light, pose=light_pose)
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[-1, 0, 1], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.8)
scene.add(light, pose=light_pose)
light_pose = utils.euler2matrix(
angles=[0, 0, 0], translation=[1, 0, 1], xyz="xyz", degrees=False
)
light = pyrender.PointLight(color=np.ones(3), intensity=1.8)
scene.add(light, pose=light_pose)
return scene
def _init_camera(self):
camera = pyrender.PerspectiveCamera(
yfov=60.0 / 180.0 * np.pi, znear=0.01, zfar=10.0, aspectRatio=1.0
)
cameraPose0 = utils.euler2matrix(
xyz="xyz", angles=[0, 0, 0], translation=[0, 0, 0], degrees=True
)
# Add camera node into scene
cameraNode = pyrender.Node(camera=camera, matrix=cameraPose0)
self.scene.add_node(cameraNode)
self.scene.main_camera_node = cameraNode
self.camera = cameraNode
initial_matrix = R.from_euler("xyz", [45.0, 0, 180.0], degrees=True).as_matrix()
self.update_camera_pose([0, 0.6, 0.6], initial_matrix)
def update_camera_pose(self, position, orientation):
"""
        Update the pose of the scene camera
"""
pose = np.eye(4)
pose[:3, 3] = position
pose[:3, :3] = orientation
self.camera.matrix = pose
def add_object(self, objTrimesh, position=[0, 0, 0], orientation=[0, 0, 0]):
mesh = pyrender.Mesh.from_trimesh(objTrimesh)
pose = utils.euler2matrix(angles=orientation, translation=position)
objNode = pyrender.Node(mesh=mesh, matrix=pose)
self.scene.add_node(objNode)
self.objectNodes.append(objNode)
def add_points(self, points, radius, colour=[0, 0, 0]):
sm = trimesh.creation.uv_sphere(radius=radius)
sm.visual.vertex_colors = colour
tfs = np.tile(np.eye(4), (points.shape[0], 1, 1))
tfs[:, :3, 3] = points
m = pyrender.Mesh.from_trimesh(sm, poses=tfs)
objNode = pyrender.Node(mesh=m)
self.scene.add_node(objNode)
self.objectNodes.append(objNode)
def remove_objects(self):
for obj in self.objectNodes:
self.scene.remove_node(obj)
self.objectNodes = []
def render(self):
colour, depth = self.r.render(self.scene)
colour = np.clip((np.array(colour)), 0, 255).astype(np.uint8)
colour = Image.fromarray(colour)
return colour
# renders the predicted mesh along with the ground truth mesh
def render_representations(locations, names, meshes, faces):
recon_face = utils.add_faces(faces)
scene = CameraRenderer()
message = "rendering the predicted objects"
print("*" * len(message))
print(message)
print("*" * len(message))
for verts, name, location in tzip(meshes, names, locations):
###### render mesh #######
mesh = trimesh.Trimesh(verts, recon_face)
mesh.visual.vertex_colors = [228, 217, 111, 255]
scene.add_object(mesh)
img = scene.render()
img.save(f"{location}/mesh.png")
scene.remove_objects()
##### render point clouds #######
verts = torch.FloatTensor(verts).cuda()
faces = torch.LongTensor(recon_face).cuda()
points = (
utils.batch_sample(verts.unsqueeze(0), faces, num=100000)[0]
.data.cpu()
.numpy()
)
scene.add_points(points, 0.01, [228, 217, 111])
img = scene.render()
img.save(f"{location}/points.png")
scene.remove_objects()
######## render real object #########
verts = np.load(name + "_verts.npy")
faces = np.load(name + "_faces.npy")
faces = utils.add_faces(faces)
mesh = trimesh.Trimesh(vertices=verts, faces=faces, process=False)
mesh.visual.vertex_colors = [228, 217, 111, 255]
scene.add_object(mesh)
img = scene.render()
img.save(f"{location}/ground_truth.png")
scene.remove_objects()
|
#Copyright (c) Facebook, Inc. and its affiliates.
#All rights reserved.
#This source code is licensed under the license found in the
#LICENSE file in the root directory of this source tree.
import os
import numpy as np
from tqdm import tqdm
from glob import glob
import random
from pathlib import Path
import torch
import pterotactyl.object_data as object_data
import pterotactyl.objects as objects
from pterotactyl.utility import utils
from pterotactyl.simulator.scene import sampler
from pterotactyl.simulator.physics import grasping
def make_data_split():
data_location = os.path.join(
os.path.dirname(object_data.__file__), "initial_objects/*"
)
split_destination = os.path.join(
os.path.dirname(objects.__file__), "data_split.npy"
)
object_files = glob(data_location)
object_files = [o.split("/")[-1].split(".")[0] for o in object_files]
object_files.sort()
random.Random(0).shuffle(object_files)
recon_train = object_files[:7700]
auto_train = object_files[7700 : 2 * 7700]
RL_train = object_files[2 * 7700 : 3 * 7700]
valid = object_files[3 * 7700 : 3 * 7700 + 2000]
test = object_files[3 * 7700 + 2000 : 3 * 7700 + 3000]
dict = {
"recon_train": recon_train,
"auto_train": auto_train,
"RL_train": RL_train,
"valid": valid,
"test": test,
}
np.save(split_destination, dict)
# produces a pointcloud from the surface of an object
def extract_points(verts, faces, dim=128, num_points=30000):
verts = torch.FloatTensor(verts).cuda()
faces = torch.LongTensor(faces).cuda()
# converts the mesh to a voxel grid
voxel = utils.mesh_to_voxel(verts, faces, dim)
if voxel is None:
return None
# extracts orthographic depth maps from the voxel grid
ODMs = utils.extract_ODMs(voxel)
# reprojects the depth maps to a voxel grid to remove internal structure
voxel = utils.apply_ODMs(ODMs, dim)
# extracts a point cloud from the voxel grid
points = utils.voxel_to_pointcloud(voxel)
    # aligns the pointcloud to the original mesh
points = utils.realign_points(points, verts.clone())
# make the point cloud of uniform size
while points.shape[0] < num_points:
points = torch.cat((points, points))
choices = np.random.choice(points.shape[0], num_points, replace=False)
points = points[choices]
return points
# extract the object information from mesh
def save_object_info():
data_location = os.path.join(
os.path.dirname(object_data.__file__), "initial_objects/*"
)
data_destination = os.path.join(
os.path.dirname(object_data.__file__), "object_info/"
)
if not os.path.exists(data_destination):
os.makedirs(data_destination)
object_files = glob(data_location)
pbar = tqdm(object_files, smoothing=0.0)
pbar.set_description(f"Saving object information for quick loading")
for file in pbar:
file_destination = data_destination + file.split("/")[-1].split(".")[0]
# scale meshes and extract vertices and faces
verts, faces = utils.get_obj_data(file, scale=3.1)
np.save(file_destination + "_verts.npy", verts)
np.save(file_destination + "_faces.npy", faces)
# save the new object as a mesh and reference it in a urdf file for pybullet
utils.make_urdf(verts, faces, file_destination + ".urdf")
# extracts a point cloud from the object and saves it
def save_point_info():
data_location = os.path.join(
os.path.dirname(object_data.__file__), "object_info/*.obj"
)
data_destination = os.path.join(os.path.dirname(object_data.__file__), "/")
if not os.path.exists(data_destination):
os.makedirs(data_destination)
object_files = glob(data_location)
pbar = tqdm(object_files, smoothing=0.0)
pbar.set_description(f"Extracting surface point cloud")
for file in pbar:
destination = data_destination + file.split("/")[-1].split(".")[0] + ".npy"
verts = np.load(file.replace(".obj", "_verts.npy"))
faces = np.load(file.replace(".obj", "_faces.npy"))
# extract the point cloud
points = extract_points(verts, faces)
if points is None:
continue
np.save(destination, points.data.cpu().numpy())
# simulates the grasps of an object for all possible actions
def save_simulation():
data_location = os.path.join(
os.path.dirname(object_data.__file__), "object_info/*.obj"
)
grasp_destination_dir = os.path.join(
os.path.dirname(object_data.__file__), "grasp_info/"
)
image_destination_dir = os.path.join(
os.path.dirname(object_data.__file__), "images_colourful/"
)
if not os.path.exists(grasp_destination_dir):
os.makedirs(grasp_destination_dir)
if not os.path.exists(image_destination_dir):
os.makedirs(image_destination_dir)
object_files = glob(data_location)
    simulation_information = {}
# defines the sampling function for simulation
s = sampler.Sampler(grasping.Agnostic_Grasp, bs=1, vision=True)
pbar = tqdm(object_files, smoothing=0.0)
pbar.set_description(f"Extracting grasp information")
    touch_counts = [0, 0, 0, 0]  # number of grasps with at least k + 1 fingers touching
file_num = 0
for file in pbar:
file_number = file.split("/")[-1].split(".")[0]
grasp_destination = grasp_destination_dir + file_number + "/"
image_destination = image_destination_dir + file_number + ".npy"
batch = [file.replace(".obj", "")]
statuses = []
try:
s.load_objects(batch, from_dataset=True)
        except Exception:
continue
# save an image of the object
signals = s.sample(
[0],
touch=False,
touch_point_cloud=False,
vision=True,
vision_occluded=False,
)
img = signals["vision"][0]
np.save(image_destination, img)
for i in range(50):
            # simulate grasp action i on the object
signals = s.sample(
[i],
touch=True,
touch_point_cloud=True,
vision=False,
vision_occluded=False,
)
status = signals["touch_status"][0]
good = 0
for k in range(4):
if status[k] == "touch":
good += 1
for k in range(good):
                touch_counts[k] += 1
statuses.append(status)
# extracts the touch information for each of the 4 fingers
for j in range(4):
instance_grasp_destination = os.path.join(
grasp_destination, str(i), str(j)
)
Path(instance_grasp_destination).mkdir(parents=True, exist_ok=True)
if status[j] == "touch":
touch_signal = (
signals["touch_signal"][0][j].data.numpy().astype(np.uint8)
)
touch_points = signals["touch_point_cloud"][0][j]
np.save(instance_grasp_destination + "_touch.npy", touch_signal)
np.save(instance_grasp_destination + "_points.npy", touch_points)
if status[j] != "no_intersection":
ref_frame_pos = signals["finger_transfrom_pos"][0][j].data.numpy()
ref_frame_rot_M = signals["finger_transform_rot_M"][0][
j
].data.numpy()
ref_frame = {"pos": ref_frame_pos, "rot": ref_frame_rot_M}
np.save(instance_grasp_destination + "_ref_frame.npy", ref_frame)
s.remove_objects()
file_num += 0.5
        simulation_information[file_number] = statuses
if __name__ == "__main__":
save_object_info()
save_point_info()
save_simulation()
make_data_split()
|
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import argparse
import os
import numpy as np
import torch
from human_body_prior.body_model.body_model import BodyModel
from human_body_prior.tools.rotation_tools import aa2matrot, local2global_pose
from tqdm import tqdm
from utils import utils_transform
def main(args, bm):
for dataroot_subset in ["BioMotionLab_NTroje", "CMU", "MPI_HDM05"]:
print(dataroot_subset)
for phase in ["train", "test"]:
print(phase)
savedir = os.path.join(args.save_dir, dataroot_subset, phase)
if not os.path.exists(savedir):
os.makedirs(savedir)
split_file = os.path.join(
"prepare_data/data_split", dataroot_subset, phase + "_split.txt"
)
with open(split_file, "r") as f:
filepaths = [line.strip() for line in f]
rotation_local_full_gt_list = []
hmd_position_global_full_gt_list = []
body_parms_list = []
head_global_trans_list = []
idx = 0
for filepath in tqdm(filepaths):
data = {}
bdata = np.load(
os.path.join(args.root_dir, filepath), allow_pickle=True
)
if "mocap_framerate" in bdata:
framerate = bdata["mocap_framerate"]
else:
continue
idx += 1
if framerate == 120:
stride = 2
elif framerate == 60:
stride = 1
else:
raise AssertionError(
"Please check your AMASS data, should only have 2 types of framerate, either 120 or 60!!!"
)
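                # downsample every sequence to 60 fps (stride 2 for 120 fps data)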
bdata_poses = bdata["poses"][::stride, ...]
bdata_trans = bdata["trans"][::stride, ...]
subject_gender = bdata["gender"]
body_parms = {
"root_orient": torch.Tensor(
bdata_poses[:, :3]
), # .to(comp_device), # controls the global root orientation
"pose_body": torch.Tensor(
bdata_poses[:, 3:66]
), # .to(comp_device), # controls the body
"trans": torch.Tensor(
bdata_trans
), # .to(comp_device), # controls the global body position
}
body_parms_list = body_parms
body_pose_world = bm(
**{
k: v
for k, v in body_parms.items()
if k in ["pose_body", "root_orient", "trans"]
}
)
output_aa = torch.Tensor(bdata_poses[:, :66]).reshape(-1, 3)
output_6d = utils_transform.aa2sixd(output_aa).reshape(
bdata_poses.shape[0], -1
)
rotation_local_full_gt_list = output_6d[1:]
rotation_local_matrot = aa2matrot(
torch.tensor(bdata_poses).reshape(-1, 3)
).reshape(bdata_poses.shape[0], -1, 9)
rotation_global_matrot = local2global_pose(
rotation_local_matrot, bm.kintree_table[0].long()
) # rotation of joints relative to the origin
head_rotation_global_matrot = rotation_global_matrot[:, [15], :, :]
rotation_global_6d = utils_transform.matrot2sixd(
rotation_global_matrot.reshape(-1, 3, 3)
).reshape(rotation_global_matrot.shape[0], -1, 6)
input_rotation_global_6d = rotation_global_6d[1:, [15, 20, 21], :]
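                # joints 15, 20 and 21 are the head and the two wrists, i.e. the three
                # joints tracked by the headset and hand controllers in this setup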
rotation_velocity_global_matrot = torch.matmul(
torch.inverse(rotation_global_matrot[:-1]),
rotation_global_matrot[1:],
)
rotation_velocity_global_6d = utils_transform.matrot2sixd(
rotation_velocity_global_matrot.reshape(-1, 3, 3)
).reshape(rotation_velocity_global_matrot.shape[0], -1, 6)
input_rotation_velocity_global_6d = rotation_velocity_global_6d[
:, [15, 20, 21], :
]
position_global_full_gt_world = body_pose_world.Jtr[
:, :22, :
] # position of joints relative to the world origin
position_head_world = position_global_full_gt_world[
:, 15, :
] # world position of head
head_global_trans = torch.eye(4).repeat(
position_head_world.shape[0], 1, 1
)
head_global_trans[:, :3, :3] = head_rotation_global_matrot.squeeze()
head_global_trans[:, :3, 3] = position_global_full_gt_world[:, 15, :]
head_global_trans_list = head_global_trans[1:]
num_frames = position_global_full_gt_world.shape[0] - 1
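                # sparse per-frame input: global 6D rotation, rotational velocity, global
                # position, and positional velocity of the head and wrist joints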
hmd_position_global_full_gt_list = torch.cat(
[
input_rotation_global_6d.reshape(num_frames, -1),
input_rotation_velocity_global_6d.reshape(num_frames, -1),
position_global_full_gt_world[1:, [15, 20, 21], :].reshape(
num_frames, -1
),
position_global_full_gt_world[1:, [15, 20, 21], :].reshape(
num_frames, -1
)
- position_global_full_gt_world[:-1, [15, 20, 21], :].reshape(
num_frames, -1
),
],
dim=-1,
)
data["rotation_local_full_gt_list"] = rotation_local_full_gt_list
data[
"hmd_position_global_full_gt_list"
] = hmd_position_global_full_gt_list
data["body_parms_list"] = body_parms_list
data["head_global_trans_list"] = head_global_trans_list
data["position_global_full_gt_world"] = (
position_global_full_gt_world[1:].cpu().float()
)
data["framerate"] = 60
data["gender"] = subject_gender
data["filepath"] = filepath
torch.save(data, os.path.join(savedir, "{}.pt".format(idx)))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--support_dir",
type=str,
default=None,
help="=dir where you put your smplh and dmpls dirs",
)
parser.add_argument(
"--save_dir",
type=str,
default=None,
help="=dir where you want to save your generated data",
)
parser.add_argument(
"--root_dir", type=str, default=None, help="=dir where you put your AMASS data"
)
args = parser.parse_args()
    # Here we follow the AvatarPoser paper and use the male body model for all sequences
bm_fname_male = os.path.join(args.support_dir, "smplh/{}/model.npz".format("male"))
dmpl_fname_male = os.path.join(
args.support_dir, "dmpls/{}/model.npz".format("male")
)
num_betas = 16 # number of body parameters
num_dmpls = 8 # number of DMPL parameters
bm_male = BodyModel(
bm_fname=bm_fname_male,
num_betas=num_betas,
num_dmpls=num_dmpls,
dmpl_fname=dmpl_fname_male,
)
main(args, bm_male)
|
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import math
import os
import random
import numpy as np
import torch
from data_loaders.dataloader import load_data, TestDataset
from human_body_prior.body_model.body_model import BodyModel as BM
from model.networks import PureMLP
from tqdm import tqdm
from utils import utils_transform, utils_visualize
from utils.metrics import get_metric_function
from utils.model_util import create_model_and_diffusion, load_model_wo_clip
from utils.parser_util import sample_args
device = torch.device("cuda")
#####################
RADIANS_TO_DEGREES = 360.0 / (2 * math.pi)
METERS_TO_CENTIMETERS = 100.0
pred_metrics = [
"mpjre",
"mpjpe",
"mpjve",
"handpe",
"upperpe",
"lowerpe",
"rootpe",
"pred_jitter",
]
gt_metrics = [
"gt_jitter",
]
all_metrics = pred_metrics + gt_metrics
RADIANS_TO_DEGREES = 360.0 / (2 * math.pi)  # approximately 57.2958 degrees per radian
metrics_coeffs = {
"mpjre": RADIANS_TO_DEGREES,
"mpjpe": METERS_TO_CENTIMETERS,
"mpjve": METERS_TO_CENTIMETERS,
"handpe": METERS_TO_CENTIMETERS,
"upperpe": METERS_TO_CENTIMETERS,
"lowerpe": METERS_TO_CENTIMETERS,
"rootpe": METERS_TO_CENTIMETERS,
"pred_jitter": 1.0,
"gt_jitter": 1.0,
"gt_mpjpe": METERS_TO_CENTIMETERS,
"gt_mpjve": METERS_TO_CENTIMETERS,
"gt_handpe": METERS_TO_CENTIMETERS,
"gt_rootpe": METERS_TO_CENTIMETERS,
"gt_upperpe": METERS_TO_CENTIMETERS,
"gt_lowerpe": METERS_TO_CENTIMETERS,
}
#####################
class BodyModel(torch.nn.Module):
def __init__(self, support_dir):
super().__init__()
device = torch.device("cuda")
subject_gender = "male"
bm_fname = os.path.join(
support_dir, "smplh/{}/model.npz".format(subject_gender)
)
dmpl_fname = os.path.join(
support_dir, "dmpls/{}/model.npz".format(subject_gender)
)
num_betas = 16 # number of body parameters
num_dmpls = 8 # number of DMPL parameters
body_model = BM(
bm_fname=bm_fname,
num_betas=num_betas,
num_dmpls=num_dmpls,
dmpl_fname=dmpl_fname,
).to(device)
self.body_model = body_model.eval()
def forward(self, body_params):
with torch.no_grad():
body_pose = self.body_model(
**{
k: v
for k, v in body_params.items()
if k in ["pose_body", "trans", "root_orient"]
}
)
return body_pose
def non_overlapping_test(
args,
data,
sample_fn,
dataset,
model,
num_per_batch=256,
model_type="mlp",
):
gt_data, sparse_original, body_param, head_motion, filename = (
data[0],
data[1],
data[2],
data[3],
data[4],
)
gt_data = gt_data.cuda().float()
sparse_original = sparse_original.cuda().float()
head_motion = head_motion.cuda().float()
num_frames = head_motion.shape[0]
output_samples = []
count = 0
sparse_splits = []
flag_index = None
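    # split the sequence into non-overlapping windows of input_motion_length frames; the
    # last window is shifted back to end on the final frame, and flag_index records how
    # many of its leading frames duplicate the previous window (they are dropped after
    # sampling). Sequences shorter than one window are front-padded with the first frame.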
if args.input_motion_length <= num_frames:
while count < num_frames:
if count + args.input_motion_length > num_frames:
tmp_k = num_frames - args.input_motion_length
sub_sparse = sparse_original[
:, tmp_k : tmp_k + args.input_motion_length
]
flag_index = count - tmp_k
else:
sub_sparse = sparse_original[
:, count : count + args.input_motion_length
]
sparse_splits.append(sub_sparse)
count += args.input_motion_length
else:
flag_index = args.input_motion_length - num_frames
tmp_init = sparse_original[:, :1].repeat(1, flag_index, 1).clone()
sub_sparse = torch.concat([tmp_init, sparse_original], dim=1)
sparse_splits = [sub_sparse]
n_steps = len(sparse_splits) // num_per_batch
if len(sparse_splits) % num_per_batch > 0:
n_steps += 1
# Split the sequence into n_steps non-overlapping batches
if args.fix_noise:
        # use a single fixed noise value, repeated over all frames and features and shared across windows
noise = torch.randn(1, 1, 1).cuda()
noise = noise.repeat(1, args.input_motion_length, args.motion_nfeat)
else:
noise = None
for step_index in range(n_steps):
sparse_per_batch = torch.cat(
sparse_splits[
step_index * num_per_batch : (step_index + 1) * num_per_batch
],
dim=0,
)
new_batch_size = sparse_per_batch.shape[0]
if model_type == "diffusion":
sample = sample_fn(
model,
(new_batch_size, args.input_motion_length, args.motion_nfeat),
sparse=sparse_per_batch,
clip_denoised=False,
model_kwargs=None,
skip_timesteps=0,
init_image=None,
progress=False,
dump_steps=None,
noise=noise,
const_noise=False,
)
elif model_type == "mlp":
sample = model(sparse_per_batch)
if flag_index is not None and step_index == n_steps - 1:
last_batch = sample[-1]
last_batch = last_batch[flag_index:]
sample = sample[:-1].reshape(-1, args.motion_nfeat)
sample = torch.cat([sample, last_batch], dim=0)
else:
sample = sample.reshape(-1, args.motion_nfeat)
if not args.no_normalization:
output_samples.append(dataset.inv_transform(sample.cpu().float()))
else:
output_samples.append(sample.cpu().float())
return output_samples, body_param, head_motion, filename
def overlapping_test(
args,
data,
sample_fn,
dataset,
model,
sld_wind_size=70,
model_type="diffusion",
):
assert (
model_type == "diffusion"
), "currently only diffusion model supports overlapping test!!!"
gt_data, sparse_original, body_param, head_motion, filename = (
data[0],
data[1],
data[2],
data[3],
data[4],
)
gt_data = gt_data.cuda().float()
sparse_original = sparse_original.cuda().float()
head_motion = head_motion.cuda().float()
num_frames = head_motion.shape[0]
output_samples = []
count = 0
sparse_splits = []
flag_index = None
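    # sliding-window split: consecutive windows start sld_wind_size frames apart, so each
    # window overlaps the previous one; tmp_idx stores how many of its leading frames were
    # already generated and should not be emitted again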
if num_frames < args.input_motion_length:
flag_index = args.input_motion_length - num_frames
tmp_init = sparse_original[:, :1].repeat(1, flag_index, 1).clone()
sub_sparse = torch.concat([tmp_init, sparse_original], dim=1)
sparse_splits = [sub_sparse]
else:
while count + args.input_motion_length <= num_frames:
if count == 0:
sub_sparse = sparse_original[
:, count : count + args.input_motion_length
]
tmp_idx = 0
else:
sub_sparse = sparse_original[
:, count : count + args.input_motion_length
]
tmp_idx = args.input_motion_length - sld_wind_size
sparse_splits.append([sub_sparse, tmp_idx])
count += sld_wind_size
if count < num_frames:
sub_sparse = sparse_original[:, -args.input_motion_length :]
tmp_idx = args.input_motion_length - (
num_frames - (count - sld_wind_size + args.input_motion_length)
)
sparse_splits.append([sub_sparse, tmp_idx])
memory = None # init memory
if args.fix_noise:
        # use a single fixed noise value, repeated over all frames and features and shared across windows
noise = torch.randn(1, 1, 1).cuda()
noise = noise.repeat(1, args.input_motion_length, args.motion_nfeat)
else:
noise = None
for step_index in range(len(sparse_splits)):
sparse_per_batch = sparse_splits[step_index][0]
memory_end_index = sparse_splits[step_index][1]
new_batch_size = sparse_per_batch.shape[0]
assert new_batch_size == 1
if memory is not None:
model_kwargs = {}
model_kwargs["y"] = {}
model_kwargs["y"]["inpainting_mask"] = torch.zeros(
(
new_batch_size,
args.input_motion_length,
args.motion_nfeat,
)
).cuda()
model_kwargs["y"]["inpainting_mask"][:, :memory_end_index, :] = 1
model_kwargs["y"]["inpainted_motion"] = torch.zeros(
(
new_batch_size,
args.input_motion_length,
args.motion_nfeat,
)
).cuda()
model_kwargs["y"]["inpainted_motion"][:, :memory_end_index, :] = memory[
:, -memory_end_index:, :
]
else:
model_kwargs = None
sample = sample_fn(
model,
(new_batch_size, args.input_motion_length, args.motion_nfeat),
sparse=sparse_per_batch,
clip_denoised=False,
model_kwargs=None,
skip_timesteps=0,
init_image=None,
progress=False,
dump_steps=None,
noise=noise,
const_noise=False,
)
memory = sample.clone().detach()
if flag_index is not None:
sample = sample[:, flag_index:].cpu().reshape(-1, args.motion_nfeat)
else:
sample = sample[:, memory_end_index:].reshape(-1, args.motion_nfeat)
if not args.no_normalization:
output_samples.append(dataset.inv_transform(sample.cpu().float()))
else:
output_samples.append(sample.cpu().float())
return output_samples, body_param, head_motion, filename
def evaluate_prediction(
args,
metrics,
sample,
body_model,
sample_index,
head_motion,
body_param,
fps,
filename,
):
motion_pred = sample.squeeze().cuda()
# Get the prediction from the model
model_rot_input = (
utils_transform.sixd2aa(motion_pred.reshape(-1, 6).detach())
.reshape(motion_pred.shape[0], -1)
.float()
)
T_head2world = head_motion.clone().cuda()
t_head2world = T_head2world[:, :3, 3].clone()
    # Get the offset between the head and the other joints using the forward kinematic model
body_pose_local = body_model(
{
"pose_body": model_rot_input[..., 3:66],
"root_orient": model_rot_input[..., :3],
}
).Jtr
    # Get the offset in the global coordinate system between the head and the body root.
t_head2root = -body_pose_local[:, 15, :]
t_root2world = t_head2root + t_head2world.cuda()
predicted_body = body_model(
{
"pose_body": model_rot_input[..., 3:66],
"root_orient": model_rot_input[..., :3],
"trans": t_root2world,
}
)
predicted_position = predicted_body.Jtr[:, :22, :]
# Get the predicted position and rotation
predicted_angle = model_rot_input
for k, v in body_param.items():
body_param[k] = v.squeeze().cuda()
body_param[k] = body_param[k][-predicted_angle.shape[0] :, ...]
# Get the ground truth position from the model
gt_body = body_model(body_param)
gt_position = gt_body.Jtr[:, :22, :]
# Create animation
if args.vis:
video_dir = args.output_dir
if not os.path.exists(video_dir):
os.makedirs(video_dir)
save_filename = filename.split(".")[0].replace("/", "-")
save_video_path = os.path.join(video_dir, save_filename + ".mp4")
utils_visualize.save_animation(
body_pose=predicted_body,
savepath=save_video_path,
bm=body_model.body_model,
fps=fps,
resolution=(800, 800),
)
save_video_path_gt = os.path.join(video_dir, save_filename + "_gt.mp4")
if not os.path.exists(save_video_path_gt):
utils_visualize.save_animation(
body_pose=gt_body,
savepath=save_video_path_gt,
bm=body_model.body_model,
fps=fps,
resolution=(800, 800),
)
gt_angle = body_param["pose_body"]
gt_root_angle = body_param["root_orient"]
predicted_root_angle = predicted_angle[:, :3]
predicted_angle = predicted_angle[:, 3:]
upper_index = [3, 6, 9, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
lower_index = [0, 1, 2, 4, 5, 7, 8, 10, 11]
eval_log = {}
for metric in metrics:
eval_log[metric] = (
get_metric_function(metric)(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
)
.cpu()
.numpy()
)
torch.cuda.empty_cache()
return eval_log
def load_diffusion_model(args):
print("Creating model and diffusion...")
args.arch = args.arch[len("diffusion_") :]
model, diffusion = create_model_and_diffusion(args)
print(f"Loading checkpoints from [{args.model_path}]...")
state_dict = torch.load(args.model_path, map_location="cpu")
load_model_wo_clip(model, state_dict)
model.to("cuda:0") # dist_util.dev())
model.eval() # disable random masking
return model, diffusion
def load_mlp_model(args):
model = PureMLP(
args.latent_dim,
args.input_motion_length,
args.layers,
args.sparse_dim,
args.motion_nfeat,
)
model.eval()
state_dict = torch.load(args.model_path, map_location="cpu")
model.load_state_dict(state_dict)
model.to("cuda:0")
return model, None
def main():
args = sample_args()
torch.backends.cudnn.benchmark = False
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
    fps = 60  # the preprocessed AMASS data is stored at 60 frames per second
body_model = BodyModel(args.support_dir)
print("Loading dataset...")
filename_list, all_info, mean, std = load_data(
args.dataset,
args.dataset_path,
"test",
)
dataset = TestDataset(
args.dataset,
mean,
std,
all_info,
filename_list,
)
log = {}
for metric in all_metrics:
log[metric] = 0
model_type = args.arch.split("_")[0]
if model_type == "diffusion":
model, diffusion = load_diffusion_model(args)
sample_fn = diffusion.p_sample_loop
elif model_type == "mlp":
model, _ = load_mlp_model(args)
sample_fn = None
else:
raise ValueError(f"Unknown model type {model_type}")
if not args.overlapping_test:
test_func = non_overlapping_test
# batch size in the case of non-overlapping testing
n_testframe = args.num_per_batch
else:
print("Overlapping testing...")
test_func = overlapping_test
# sliding window size in case of overlapping testing
n_testframe = args.sld_wind_size
for sample_index in tqdm(range(len(dataset))):
output, body_param, head_motion, filename = test_func(
args,
dataset[sample_index],
sample_fn,
dataset,
model,
n_testframe,
model_type=model_type,
)
sample = torch.cat(output, axis=0)
instance_log = evaluate_prediction(
args,
all_metrics,
sample,
body_model,
sample_index,
head_motion,
body_param,
fps,
filename,
)
for key in instance_log:
log[key] += instance_log[key]
# Print the value for all the metrics
print("Metrics for the predictions")
for metric in pred_metrics:
        print(metric, log[metric] / len(dataset) * metrics_coeffs[metric])
print("Metrics for the ground truth")
for metric in gt_metrics:
print(metric, log[metric] / len(dataset) * metrics_coeffs[metric])
if __name__ == "__main__":
main()
|
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import json
import os
import random
import numpy as np
import torch
from data_loaders.dataloader import get_dataloader, load_data, TrainDataset
from model.networks import PureMLP
from runner.train_mlp import train_step
from runner.training_loop import TrainLoop
from utils import dist_util
from utils.model_util import create_model_and_diffusion
from utils.parser_util import train_args
def train_diffusion_model(args, dataloader):
print("creating model and diffusion...")
args.arch = args.arch[len("diffusion_") :]
num_gpus = torch.cuda.device_count()
args.num_workers = args.num_workers * num_gpus
model, diffusion = create_model_and_diffusion(args)
if num_gpus > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
dist_util.setup_dist()
model = torch.nn.DataParallel(model).cuda()
print(
"Total params: %.2fM"
% (sum(p.numel() for p in model.module.parameters()) / 1000000.0)
)
else:
dist_util.setup_dist(args.device)
model.to(dist_util.dev())
print(
"Total params: %.2fM"
% (sum(p.numel() for p in model.parameters()) / 1000000.0)
)
print("Training...")
TrainLoop(args, model, diffusion, dataloader).run_loop()
print("Done.")
def train_mlp_model(args, dataloader):
print("creating MLP model...")
args.arch = args.arch[len("mlp_") :]
num_gpus = torch.cuda.device_count()
args.num_workers = args.num_workers * num_gpus
model = PureMLP(
args.latent_dim,
args.input_motion_length,
args.layers,
args.sparse_dim,
args.motion_nfeat,
)
model.train()
if num_gpus > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
dist_util.setup_dist()
model = torch.nn.DataParallel(model).cuda()
print(
"Total params: %.2fM"
% (sum(p.numel() for p in model.module.parameters()) / 1000000.0)
)
else:
dist_util.setup_dist(args.device)
model.to(dist_util.dev())
print(
"Total params: %.2fM"
% (sum(p.numel() for p in model.parameters()) / 1000000.0)
)
# initialize optimizer
optimizer = torch.optim.Adam(
model.parameters(), lr=args.lr, weight_decay=args.weight_decay
)
nb_iter = 0
avg_loss = 0.0
avg_lr = 0.0
while (nb_iter + 1) < args.num_steps:
for (motion_target, motion_input) in dataloader:
loss, optimizer, current_lr = train_step(
motion_input,
motion_target,
model,
optimizer,
nb_iter,
args.num_steps,
args.lr,
args.lr / 10.0,
dist_util.dev(),
args.lr_anneal_steps,
)
avg_loss += loss
avg_lr += current_lr
if (nb_iter + 1) % args.log_interval == 0:
avg_loss = avg_loss / args.log_interval
avg_lr = avg_lr / args.log_interval
print("Iter {} Summary: ".format(nb_iter + 1))
print(f"\t lr: {avg_lr} \t Training loss: {avg_loss}")
avg_loss = 0
avg_lr = 0
if (nb_iter + 1) == args.num_steps:
break
nb_iter += 1
with open(
os.path.join(args.save_dir, "model-iter-" + str(nb_iter + 1) + ".pth"),
"wb",
) as f:
torch.save(model.state_dict(), f)
def main():
args = train_args()
torch.backends.cudnn.benchmark = False
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.save_dir is None:
raise FileNotFoundError("save_dir was not specified.")
elif os.path.exists(args.save_dir) and not args.overwrite:
raise FileExistsError("save_dir [{}] already exists.".format(args.save_dir))
elif not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
args_path = os.path.join(args.save_dir, "args.json")
with open(args_path, "w") as fw:
json.dump(vars(args), fw, indent=4, sort_keys=True)
print("creating data loader...")
motions, sparses, mean, std = load_data(
args.dataset,
args.dataset_path,
"train",
input_motion_length=args.input_motion_length,
)
dataset = TrainDataset(
args.dataset,
mean,
std,
motions,
sparses,
args.input_motion_length,
args.train_dataset_repeat_times,
args.no_normalization,
)
dataloader = get_dataloader(
dataset, "train", batch_size=args.batch_size, num_workers=args.num_workers
)
# args.lr_anneal_steps = (
# args.lr_anneal_steps // args.train_dataset_repeat_times
# ) * len(
# dataloader
# ) # the input lr_anneal_steps is by epoch, here convert it to the number of steps
model_type = args.arch.split("_")[0]
if model_type == "diffusion":
train_diffusion_model(args, dataloader)
elif model_type == "mlp":
train_mlp_model(args, dataloader)
if __name__ == "__main__":
main()
|
# MIT License
# Copyright (c) 2021 OpenAI
#
# This code is based on https://github.com/openai/guided-diffusion
# MIT License
# Copyright (c) 2022 Guy Tevet
#
# This code is based on https://github.com/GuyTevet/motion-diffusion-model
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import functools
import os
import torch
from diffusion import logger
from diffusion.fp16_util import MixedPrecisionTrainer
from diffusion.resample import create_named_schedule_sampler, LossAwareSampler
from torch.optim import AdamW
from tqdm import tqdm
from utils import dist_util
class TrainLoop:
def __init__(self, args, model, diffusion, data):
self.args = args
self.dataset = args.dataset
self.model = model
self.diffusion = diffusion
self.data = data
self.batch_size = args.batch_size
self.lr = args.lr
self.log_interval = args.log_interval
self.save_interval = args.save_interval
self.resume_checkpoint = args.resume_checkpoint
self.load_optimizer = args.load_optimizer
self.use_fp16 = False
self.fp16_scale_growth = 1e-3
self.weight_decay = args.weight_decay
self.lr_anneal_steps = args.lr_anneal_steps
self.step = 0
self.resume_step = 0
self.global_batch = self.batch_size
self.num_steps = args.num_steps
self.num_epochs = self.num_steps // len(self.data) + 1
self.sync_cuda = torch.cuda.is_available()
self._load_and_sync_parameters()
self.mp_trainer = MixedPrecisionTrainer(
model=self.model,
use_fp16=self.use_fp16,
fp16_scale_growth=self.fp16_scale_growth,
)
self.save_dir = args.save_dir
self.overwrite = args.overwrite
self.opt = AdamW(
self.mp_trainer.master_params, lr=self.lr, weight_decay=self.weight_decay
)
if self.resume_step and self.load_optimizer:
self._load_optimizer_state()
self.device = torch.device("cpu")
if torch.cuda.is_available() and dist_util.dev() != "cpu":
self.device = torch.device(dist_util.dev())
self.schedule_sampler_type = "uniform"
self.schedule_sampler = create_named_schedule_sampler(
self.schedule_sampler_type, diffusion
)
self.eval_wrapper, self.eval_data, self.eval_gt_data = None, None, None
self.use_ddp = False
self.ddp_model = self.model
def _load_and_sync_parameters(self):
resume_checkpoint = self.resume_checkpoint
if resume_checkpoint:
self.resume_step = parse_resume_step_from_filename(resume_checkpoint)
logger.log(f"loading model from checkpoint: {resume_checkpoint}...")
self.model.load_state_dict(
dist_util.load_state_dict(
resume_checkpoint,
map_location=dist_util.dev(),
)
)
def _load_optimizer_state(self):
main_checkpoint = self.resume_checkpoint
opt_checkpoint = os.path.join(
os.path.dirname(main_checkpoint), f"opt{self.resume_step:09}.pt"
)
logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
assert os.path.exists(opt_checkpoint), "optimiser states does not exist."
state_dict = dist_util.load_state_dict(
opt_checkpoint, map_location=dist_util.dev()
)
self.opt.load_state_dict(state_dict)
def run_loop(self):
for epoch in range(self.num_epochs):
print(f"Starting epoch {epoch}")
for motion, cond in tqdm(self.data):
motion = motion.to(self.device)
cond = cond.to(self.device)
self.run_step(motion, cond)
self.step += 1
if epoch % self.save_interval == 0:
self.save()
if epoch % self.log_interval == 0:
for k, v in logger.get_current().name2val.items():
if k == "loss":
print("epoch[{}]: loss[{:0.5f}]".format(epoch, v))
print("lr:", self.lr)
# Save the last checkpoint if it wasn't already saved.
if (self.step - 1) % self.save_interval != 0:
self.save()
def run_step(self, batch, cond):
self.forward_backward(batch, cond)
self.mp_trainer.optimize(self.opt)
self._step_lr()
self.log_step()
def forward_backward(self, batch, cond):
self.mp_trainer.zero_grad()
t, weights = self.schedule_sampler.sample(batch.shape[0], dist_util.dev())
compute_losses = functools.partial(
self.diffusion.training_losses,
self.ddp_model,
batch,
t,
cond,
dataset=self.data.dataset,
)
losses = compute_losses()
if isinstance(self.schedule_sampler, LossAwareSampler):
self.schedule_sampler.update_with_local_losses(t, losses["loss"].detach())
loss = (losses["loss"] * weights).mean()
log_loss_dict(self.diffusion, t, {k: v * weights for k, v in losses.items()})
self.mp_trainer.backward(loss)
def _anneal_lr(self):
if not self.lr_anneal_steps:
return
frac_done = (self.step + self.resume_step) / self.lr_anneal_steps
lr = self.lr * (1 - frac_done)
for param_group in self.opt.param_groups:
param_group["lr"] = lr
def _step_lr(self):
        # one-time learning rate decay: once past lr_anneal_steps, divide the lr by 30
        # and disable further annealing
        if not self.lr_anneal_steps:
            return
        if (self.step + self.resume_step) > self.lr_anneal_steps:
            self.lr = self.lr / 30.0
            self.lr_anneal_steps = False
for param_group in self.opt.param_groups:
param_group["lr"] = self.lr
def log_step(self):
logger.logkv("step", self.step + self.resume_step)
logger.logkv("samples", (self.step + self.resume_step + 1) * self.global_batch)
def ckpt_file_name(self):
return f"model{(self.step+self.resume_step):09d}.pt"
def save(self):
def save_checkpoint(params):
state_dict = self.mp_trainer.master_params_to_state_dict(params)
logger.log("saving model...")
filename = self.ckpt_file_name()
if not os.path.exists(self.save_dir):
os.makedirs(self.save_dir)
with open(
os.path.join(self.save_dir, filename),
"wb",
) as f:
torch.save(state_dict, f)
save_checkpoint(self.mp_trainer.master_params)
with open(
os.path.join(self.save_dir, f"opt{(self.step+self.resume_step):09d}.pt"),
"wb",
) as f:
torch.save(self.opt.state_dict(), f)
def parse_resume_step_from_filename(filename):
"""
Parse filenames of the form path/to/modelNNNNNN.pt, where NNNNNN is the
checkpoint's number of steps.
"""
split = filename.split("model")
if len(split) < 2:
return 0
split1 = split[-1].split(".")[0]
try:
return int(split1)
except ValueError:
return 0
def log_loss_dict(diffusion, ts, losses):
for key, values in losses.items():
logger.logkv_mean(key, values.mean().item())
# Log the quantiles (four quartiles, in particular).
for sub_t, sub_loss in zip(ts.cpu().numpy(), values.detach().cpu().numpy()):
quartile = int(4 * sub_t / diffusion.num_timesteps)
logger.logkv_mean(f"{key}_q{quartile}", sub_loss)
|
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import torch
def update_lr_multistep(
nb_iter, total_iter, max_lr, min_lr, optimizer, lr_anneal_steps
):
if nb_iter > lr_anneal_steps:
current_lr = min_lr
else:
current_lr = max_lr
for param_group in optimizer.param_groups:
param_group["lr"] = current_lr
return optimizer, current_lr
def train_step(
motion_input,
motion_target,
model,
optimizer,
nb_iter,
total_iter,
max_lr,
min_lr,
device,
lr_anneal_steps,
):
motion_input = motion_input.to(device)
motion_target = motion_target.to(device)
motion_pred = model(motion_input)
loss = torch.mean(
torch.norm(
(motion_pred - motion_target).reshape(-1, 6),
2,
1,
)
)
optimizer.zero_grad()
loss.backward()
optimizer.step()
optimizer, current_lr = update_lr_multistep(
nb_iter, total_iter, max_lr, min_lr, optimizer, lr_anneal_steps
)
return loss.item(), optimizer, current_lr
|
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
# Metric functions with same inputs
import numpy as np
import torch
def pred_jitter(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
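    # jitter is the mean norm of the third-order finite difference of the joint positions
    # (an approximation of jerk), scaled by fps**3 to account for the frame spacing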
pred_jitter = (
(
(
predicted_position[3:]
- 3 * predicted_position[2:-1]
+ 3 * predicted_position[1:-2]
- predicted_position[:-3]
)
* (fps**3)
)
.norm(dim=2)
.mean()
)
return pred_jitter
def gt_jitter(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
gt_jitter = (
(
(
gt_position[3:]
- 3 * gt_position[2:-1]
+ 3 * gt_position[1:-2]
- gt_position[:-3]
)
* (fps**3)
)
.norm(dim=2)
.mean()
)
return gt_jitter
def mpjre(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
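    # mean per-joint rotation error: wrap the angle differences into [-pi, pi] before
    # averaging their absolute values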
diff = gt_angle - predicted_angle
diff[diff > np.pi] = diff[diff > np.pi] - 2 * np.pi
diff[diff < -np.pi] = diff[diff < -np.pi] + 2 * np.pi
rot_error = torch.mean(torch.absolute(diff))
return rot_error
def mpjpe(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
pos_error = torch.mean(
torch.sqrt(torch.sum(torch.square(gt_position - predicted_position), axis=-1))
)
return pos_error
def handpe(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
pos_error_hands = torch.mean(
torch.sqrt(torch.sum(torch.square(gt_position - predicted_position), axis=-1))[
..., [20, 21]
]
)
return pos_error_hands
def upperpe(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
upper_body_error = torch.mean(
torch.sqrt(torch.sum(torch.square(gt_position - predicted_position), axis=-1))[
..., upper_index
]
)
return upper_body_error
def lowerpe(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
lower_body_error = torch.mean(
torch.sqrt(torch.sum(torch.square(gt_position - predicted_position), axis=-1))[
..., lower_index
]
)
return lower_body_error
def rootpe(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
pos_error_root = torch.mean(
torch.sqrt(torch.sum(torch.square(gt_position - predicted_position), axis=-1))[
..., [0]
]
)
return pos_error_root
def mpjve(
predicted_position,
predicted_angle,
predicted_root_angle,
gt_position,
gt_angle,
gt_root_angle,
upper_index,
lower_index,
fps,
):
gt_velocity = (gt_position[1:, ...] - gt_position[:-1, ...]) * fps
predicted_velocity = (
predicted_position[1:, ...] - predicted_position[:-1, ...]
) * fps
vel_error = torch.mean(
torch.sqrt(torch.sum(torch.square(gt_velocity - predicted_velocity), axis=-1))
)
return vel_error
metric_funcs_dict = {
"mpjre": mpjre,
"mpjpe": mpjpe,
"mpjve": mpjve,
"handpe": handpe,
"upperpe": upperpe,
"lowerpe": lowerpe,
"rootpe": rootpe,
"pred_jitter": pred_jitter,
"gt_jitter": gt_jitter,
}
def get_metric_function(metric):
return metric_funcs_dict[metric]
|
import os
SMPL_DATA_PATH = "./body_models/smpl"
SMPL_KINTREE_PATH = os.path.join(SMPL_DATA_PATH, "kintree_table.pkl")
SMPL_MODEL_PATH = os.path.join(SMPL_DATA_PATH, "SMPL_NEUTRAL.pkl")
JOINT_REGRESSOR_TRAIN_EXTRA = os.path.join(SMPL_DATA_PATH, "J_regressor_extra.npy")
ROT_CONVENTION_TO_ROT_NUMBER = {
"legacy": 23,
"no_hands": 21,
"full_hands": 51,
"mitten_hands": 33,
}
GENDERS = ["neutral", "male", "female"]
NUM_BETAS = 10
|
# MIT License
# Copyright (c) 2022 Guy Tevet
#
# This code is based on https://github.com/GuyTevet/motion-diffusion-model
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
from diffusion import gaussian_diffusion as gd
from diffusion.respace import space_timesteps, SpacedDiffusion
from model.meta_model import MetaModel
def load_model_wo_clip(model, state_dict):
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
if len(unexpected_keys) != 0:
state_dict_new = {}
for key in state_dict.keys():
state_dict_new[key.replace("module.", "")] = state_dict[key]
missing_keys, unexpected_keys = model.load_state_dict(
state_dict_new, strict=False
)
assert len(unexpected_keys) == 0
assert all([k.startswith("clip_model.") for k in missing_keys])
def create_model_and_diffusion(args):
model = MetaModel(**get_model_args(args))
diffusion = create_gaussian_diffusion(args)
return model, diffusion
def get_model_args(args):
return {
"arch": args.arch,
"nfeats": args.motion_nfeat,
"latent_dim": args.latent_dim,
"sparse_dim": args.sparse_dim,
"num_layers": args.layers,
"dropout": 0.1,
"cond_mask_prob": args.cond_mask_prob,
"dataset": args.dataset,
"input_motion_length": args.input_motion_length,
}
def create_gaussian_diffusion(args):
predict_xstart = True
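    # the network predicts the clean sample x_0 directly (ModelMeanType.START_X below)
    # rather than the added noise epsilon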
steps = args.diffusion_steps # 1000
scale_beta = 1.0
timestep_respacing = args.timestep_respacing
learn_sigma = False
rescale_timesteps = False
betas = gd.get_named_beta_schedule(args.noise_schedule, steps, scale_beta)
loss_type = gd.LossType.MSE
if not timestep_respacing:
timestep_respacing = [steps]
return SpacedDiffusion(
dataset=args.dataset,
use_timesteps=space_timesteps(steps, timestep_respacing),
betas=betas,
model_mean_type=(
gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X
),
model_var_type=(
(
gd.ModelVarType.FIXED_LARGE
if not args.sigma_small
else gd.ModelVarType.FIXED_SMALL
)
if not learn_sigma
else gd.ModelVarType.LEARNED_RANGE
),
loss_type=loss_type,
rescale_timesteps=rescale_timesteps,
)
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# Check PYTORCH3D_LICENCE before use
import functools
from typing import Optional
import torch
import torch.nn.functional as F
"""
The transformation matrices returned from the functions in this file assume
the points on which the transformation will be applied are column vectors.
i.e. the R matrix is structured as
R = [
[Rxx, Rxy, Rxz],
[Ryx, Ryy, Ryz],
[Rzx, Rzy, Rzz],
] # (3, 3)
This matrix can be applied to column vectors by post multiplication
by the points e.g.
points = [[0], [1], [2]] # (3 x 1) xyz coordinates of a point
transformed_points = R * points
To apply the same matrix to points which are row vectors, the R matrix
can be transposed and pre multiplied by the points:
e.g.
points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point
transformed_points = points * R.transpose(1, 0)
"""
def quaternion_to_matrix(quaternions):
"""
Convert rotations given as quaternions to rotation matrices.
Args:
quaternions: quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
r, i, j, k = torch.unbind(quaternions, -1)
two_s = 2.0 / (quaternions * quaternions).sum(-1)
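    # two_s = 2 / |q|^2, so the matrix below is also correct for non-unit quaternions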
o = torch.stack(
(
1 - two_s * (j * j + k * k),
two_s * (i * j - k * r),
two_s * (i * k + j * r),
two_s * (i * j + k * r),
1 - two_s * (i * i + k * k),
two_s * (j * k - i * r),
two_s * (i * k - j * r),
two_s * (j * k + i * r),
1 - two_s * (i * i + j * j),
),
-1,
)
return o.reshape(quaternions.shape[:-1] + (3, 3))
def _copysign(a, b):
"""
    Return a tensor where each element has the absolute value taken from the
    corresponding element of a, with sign taken from the corresponding
element of b. This is like the standard copysign floating-point operation,
but is not careful about negative 0 and NaN.
Args:
a: source tensor.
b: tensor whose signs will be used, of the same shape as a.
Returns:
Tensor of the same shape as a with the signs of b.
"""
signs_differ = (a < 0) != (b < 0)
return torch.where(signs_differ, -a, a)
def _sqrt_positive_part(x):
"""
Returns torch.sqrt(torch.max(0, x))
but with a zero subgradient where x is 0.
"""
ret = torch.zeros_like(x)
positive_mask = x > 0
ret[positive_mask] = torch.sqrt(x[positive_mask])
return ret
def matrix_to_quaternion(matrix):
"""
Convert rotations given as rotation matrices to quaternions.
Args:
matrix: Rotation matrices as tensor of shape (..., 3, 3).
Returns:
quaternions with real part first, as tensor of shape (..., 4).
"""
if matrix.size(-1) != 3 or matrix.size(-2) != 3:
raise ValueError(f"Invalid rotation matrix shape f{matrix.shape}.")
m00 = matrix[..., 0, 0]
m11 = matrix[..., 1, 1]
m22 = matrix[..., 2, 2]
o0 = 0.5 * _sqrt_positive_part(1 + m00 + m11 + m22)
x = 0.5 * _sqrt_positive_part(1 + m00 - m11 - m22)
y = 0.5 * _sqrt_positive_part(1 - m00 + m11 - m22)
z = 0.5 * _sqrt_positive_part(1 - m00 - m11 + m22)
o1 = _copysign(x, matrix[..., 2, 1] - matrix[..., 1, 2])
o2 = _copysign(y, matrix[..., 0, 2] - matrix[..., 2, 0])
o3 = _copysign(z, matrix[..., 1, 0] - matrix[..., 0, 1])
return torch.stack((o0, o1, o2, o3), -1)
def _axis_angle_rotation(axis: str, angle):
"""
    Return the rotation matrices for rotations about a single coordinate axis
    (as used when composing Euler angle rotations), for each value of the angle given.
    Args:
        axis: Axis label "X" or "Y" or "Z".
angle: any shape tensor of Euler angles in radians
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
cos = torch.cos(angle)
sin = torch.sin(angle)
one = torch.ones_like(angle)
zero = torch.zeros_like(angle)
if axis == "X":
R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
if axis == "Y":
R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
if axis == "Z":
R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3))
def euler_angles_to_matrix(euler_angles, convention: str):
"""
Convert rotations given as Euler angles in radians to rotation matrices.
Args:
euler_angles: Euler angles in radians as tensor of shape (..., 3).
convention: Convention string of three uppercase letters from
{"X", "Y", and "Z"}.
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3:
raise ValueError("Invalid input euler angles.")
if len(convention) != 3:
raise ValueError("Convention must have 3 letters.")
if convention[1] in (convention[0], convention[2]):
raise ValueError(f"Invalid convention {convention}.")
for letter in convention:
if letter not in ("X", "Y", "Z"):
raise ValueError(f"Invalid letter {letter} in convention string.")
matrices = map(_axis_angle_rotation, convention, torch.unbind(euler_angles, -1))
return functools.reduce(torch.matmul, matrices)
def _angle_from_tan(
axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool
):
"""
Extract the first or third Euler angle from the two members of
the matrix which are positive constant times its sine and cosine.
Args:
        axis: Axis label "X", "Y", or "Z" for the angle we are finding.
        other_axis: Axis label "X", "Y", or "Z" for the middle axis in the
            convention.
data: Rotation matrices as tensor of shape (..., 3, 3).
horizontal: Whether we are looking for the angle for the third axis,
which means the relevant entries are in the same row of the
rotation matrix. If not, they are in the same column.
tait_bryan: Whether the first and third axes in the convention differ.
Returns:
Euler Angles in radians for each matrix in dataset as a tensor
of shape (...).
"""
i1, i2 = {"X": (2, 1), "Y": (0, 2), "Z": (1, 0)}[axis]
if horizontal:
i2, i1 = i1, i2
even = (axis + other_axis) in ["XY", "YZ", "ZX"]
if horizontal == even:
return torch.atan2(data[..., i1], data[..., i2])
if tait_bryan:
return torch.atan2(-data[..., i2], data[..., i1])
return torch.atan2(data[..., i2], -data[..., i1])
def _index_from_letter(letter: str):
if letter == "X":
return 0
if letter == "Y":
return 1
if letter == "Z":
return 2
def matrix_to_euler_angles(matrix, convention: str):
"""
Convert rotations given as rotation matrices to Euler angles in radians.
Args:
matrix: Rotation matrices as tensor of shape (..., 3, 3).
convention: Convention string of three uppercase letters.
Returns:
Euler angles in radians as tensor of shape (..., 3).
"""
if len(convention) != 3:
raise ValueError("Convention must have 3 letters.")
if convention[1] in (convention[0], convention[2]):
raise ValueError(f"Invalid convention {convention}.")
for letter in convention:
if letter not in ("X", "Y", "Z"):
raise ValueError(f"Invalid letter {letter} in convention string.")
if matrix.size(-1) != 3 or matrix.size(-2) != 3:
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
i0 = _index_from_letter(convention[0])
i2 = _index_from_letter(convention[2])
tait_bryan = i0 != i2
if tait_bryan:
central_angle = torch.asin(
matrix[..., i0, i2] * (-1.0 if i0 - i2 in [-1, 2] else 1.0)
)
else:
central_angle = torch.acos(matrix[..., i0, i0])
o = (
_angle_from_tan(
convention[0], convention[1], matrix[..., i2], False, tait_bryan
),
central_angle,
_angle_from_tan(
convention[2], convention[1], matrix[..., i0, :], True, tait_bryan
),
)
return torch.stack(o, -1)
def random_quaternions(
n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate random quaternions representing rotations,
i.e. versors with nonnegative real part.
Args:
n: Number of quaternions in a batch to return.
dtype: Type to return.
device: Desired device of returned tensor. Default:
uses the current device for the default tensor type.
requires_grad: Whether the resulting tensor should have the gradient
flag set.
Returns:
Quaternions as tensor of shape (N, 4).
"""
o = torch.randn((n, 4), dtype=dtype, device=device, requires_grad=requires_grad)
s = (o * o).sum(1)
o = o / _copysign(torch.sqrt(s), o[:, 0])[:, None]
return o
def random_rotations(
n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate random rotations as 3x3 rotation matrices.
Args:
n: Number of rotation matrices in a batch to return.
dtype: Type to return.
device: Device of returned tensor. Default: if None,
uses the current device for the default tensor type.
requires_grad: Whether the resulting tensor should have the gradient
flag set.
Returns:
Rotation matrices as tensor of shape (n, 3, 3).
"""
quaternions = random_quaternions(
n, dtype=dtype, device=device, requires_grad=requires_grad
)
return quaternion_to_matrix(quaternions)
def random_rotation(
dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate a single random 3x3 rotation matrix.
Args:
dtype: Type to return
device: Device of returned tensor. Default: if None,
uses the current device for the default tensor type
requires_grad: Whether the resulting tensor should have the gradient
flag set
Returns:
Rotation matrix as tensor of shape (3, 3).
"""
return random_rotations(1, dtype, device, requires_grad)[0]
def standardize_quaternion(quaternions):
"""
    Convert a unit quaternion to a standard form: one in which the real
    part is non-negative.
Args:
quaternions: Quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Standardized quaternions as tensor of shape (..., 4).
"""
return torch.where(quaternions[..., 0:1] < 0, -quaternions, quaternions)
def quaternion_raw_multiply(a, b):
"""
Multiply two quaternions.
Usual torch rules for broadcasting apply.
Args:
a: Quaternions as tensor of shape (..., 4), real part first.
b: Quaternions as tensor of shape (..., 4), real part first.
Returns:
The product of a and b, a tensor of quaternions shape (..., 4).
"""
aw, ax, ay, az = torch.unbind(a, -1)
bw, bx, by, bz = torch.unbind(b, -1)
ow = aw * bw - ax * bx - ay * by - az * bz
ox = aw * bx + ax * bw + ay * bz - az * by
oy = aw * by - ax * bz + ay * bw + az * bx
oz = aw * bz + ax * by - ay * bx + az * bw
return torch.stack((ow, ox, oy, oz), -1)
def quaternion_multiply(a, b):
"""
Multiply two quaternions representing rotations, returning the quaternion
representing their composition, i.e. the versor with nonnegative real part.
Usual torch rules for broadcasting apply.
Args:
a: Quaternions as tensor of shape (..., 4), real part first.
b: Quaternions as tensor of shape (..., 4), real part first.
Returns:
The product of a and b, a tensor of quaternions of shape (..., 4).
"""
ab = quaternion_raw_multiply(a, b)
return standardize_quaternion(ab)
def quaternion_invert(quaternion):
"""
Given a quaternion representing rotation, get the quaternion representing
its inverse.
Args:
quaternion: Quaternions as tensor of shape (..., 4), with real part
first, which must be versors (unit quaternions).
Returns:
The inverse, a tensor of quaternions of shape (..., 4).
"""
return quaternion * quaternion.new_tensor([1, -1, -1, -1])
def quaternion_apply(quaternion, point):
"""
Apply the rotation given by a quaternion to a 3D point.
Usual torch rules for broadcasting apply.
Args:
quaternion: Tensor of quaternions, real part first, of shape (..., 4).
point: Tensor of 3D points of shape (..., 3).
Returns:
Tensor of rotated points of shape (..., 3).
"""
if point.size(-1) != 3:
        raise ValueError(f"Points are not in 3D, {point.shape}.")
real_parts = point.new_zeros(point.shape[:-1] + (1,))
point_as_quaternion = torch.cat((real_parts, point), -1)
out = quaternion_raw_multiply(
quaternion_raw_multiply(quaternion, point_as_quaternion),
quaternion_invert(quaternion),
)
return out[..., 1:]
def axis_angle_to_matrix(axis_angle):
"""
Convert rotations given as axis/angle to rotation matrices.
Args:
axis_angle: Rotations given as a vector in axis angle form,
as a tensor of shape (..., 3), where the magnitude is
the angle turned anticlockwise in radians around the
vector's direction.
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
return quaternion_to_matrix(axis_angle_to_quaternion(axis_angle))
def matrix_to_axis_angle(matrix):
"""
Convert rotations given as rotation matrices to axis/angle.
Args:
matrix: Rotation matrices as tensor of shape (..., 3, 3).
Returns:
Rotations given as a vector in axis angle form, as a tensor
of shape (..., 3), where the magnitude is the angle
turned anticlockwise in radians around the vector's
direction.
"""
return quaternion_to_axis_angle(matrix_to_quaternion(matrix))
def axis_angle_to_quaternion(axis_angle):
"""
Convert rotations given as axis/angle to quaternions.
Args:
axis_angle: Rotations given as a vector in axis angle form,
as a tensor of shape (..., 3), where the magnitude is
the angle turned anticlockwise in radians around the
vector's direction.
Returns:
quaternions with real part first, as tensor of shape (..., 4).
"""
angles = torch.norm(axis_angle, p=2, dim=-1, keepdim=True)
half_angles = 0.5 * angles
eps = 1e-6
small_angles = angles.abs() < eps
sin_half_angles_over_angles = torch.empty_like(angles)
sin_half_angles_over_angles[~small_angles] = (
torch.sin(half_angles[~small_angles]) / angles[~small_angles]
)
# for x small, sin(x/2) is about x/2 - (x/2)^3/6
# so sin(x/2)/x is about 1/2 - (x*x)/48
sin_half_angles_over_angles[small_angles] = (
0.5 - (angles[small_angles] * angles[small_angles]) / 48
)
quaternions = torch.cat(
[torch.cos(half_angles), axis_angle * sin_half_angles_over_angles], dim=-1
)
return quaternions
def quaternion_to_axis_angle(quaternions):
"""
Convert rotations given as quaternions to axis/angle.
Args:
quaternions: quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Rotations given as a vector in axis angle form, as a tensor
of shape (..., 3), where the magnitude is the angle
turned anticlockwise in radians around the vector's
direction.
"""
norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True)
half_angles = torch.atan2(norms, quaternions[..., :1])
angles = 2 * half_angles
eps = 1e-6
small_angles = angles.abs() < eps
sin_half_angles_over_angles = torch.empty_like(angles)
sin_half_angles_over_angles[~small_angles] = (
torch.sin(half_angles[~small_angles]) / angles[~small_angles]
)
# for x small, sin(x/2) is about x/2 - (x/2)^3/6
# so sin(x/2)/x is about 1/2 - (x*x)/48
sin_half_angles_over_angles[small_angles] = (
0.5 - (angles[small_angles] * angles[small_angles]) / 48
)
return quaternions[..., 1:] / sin_half_angles_over_angles
def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor:
"""
Converts 6D rotation representation by Zhou et al. [1] to rotation matrix
using Gram--Schmidt orthogonalisation per Section B of [1].
Args:
d6: 6D rotation representation, of size (*, 6)
Returns:
batch of rotation matrices of size (*, 3, 3)
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
a1, a2 = d6[..., :3], d6[..., 3:]
b1 = F.normalize(a1, dim=-1)
b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1
b2 = F.normalize(b2, dim=-1)
b3 = torch.cross(b1, b2, dim=-1)
return torch.stack((b1, b2, b3), dim=-2)
def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor:
"""
Converts rotation matrices to 6D rotation representation by Zhou et al. [1]
by dropping the last row. Note that 6D representation is not unique.
Args:
matrix: batch of rotation matrices of size (*, 3, 3)
Returns:
6D rotation representation, of size (*, 6)
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
return matrix[..., :2, :].clone().reshape(*matrix.size()[:-2], 6)
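# A minimal, hedged sanity check (not part of the original module): it assumes
# only the conversion helpers defined above and a CPU build of torch, and it
# prints round-trip errors instead of asserting so that numerical edge cases
# (e.g. quaternions with a small real part) do not abort the run.
if __name__ == "__main__":
    R = random_rotations(4)
    q_err = (quaternion_to_matrix(matrix_to_quaternion(R)) - R).abs().max().item()
    e = matrix_to_euler_angles(R, "XYZ")
    e_err = (euler_angles_to_matrix(e, "XYZ") - R).abs().max().item()
    d6_err = (rotation_6d_to_matrix(matrix_to_rotation_6d(R)) - R).abs().max().item()
    print(f"round-trip errors: quat={q_err:.2e} euler={e_err:.2e} 6d={d6_err:.2e}")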
|
# MIT License
# Copyright (c) 2022 Guy Tevet
#
# This code is based on https://github.com/GuyTevet/motion-diffusion-model
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import argparse
import json
import os
from argparse import ArgumentParser
def parse_and_load_from_model(parser):
# args according to the loaded model
# do not try to specify them from cmd line since they will be overwritten
add_data_options(parser)
add_model_options(parser)
add_diffusion_options(parser)
args = parser.parse_args()
args_to_overwrite = []
for group_name in ["dataset", "model", "diffusion"]:
args_to_overwrite += get_args_per_group_name(parser, args, group_name)
# load args from model
model_path = get_model_path_from_args()
args_path = os.path.join(os.path.dirname(model_path), "args.json")
assert os.path.exists(args_path), "Arguments json file was not found!"
with open(args_path, "r") as fr:
model_args = json.load(fr)
for a in args_to_overwrite:
if a in model_args.keys():
# Use the chosen dataset, or use the dataset that is used to train the model
if a == "dataset":
if args.__dict__[a] is None:
args.__dict__[a] = model_args[a]
elif a == "input_motion_length":
continue
else:
args.__dict__[a] = model_args[a]
else:
print(
"Warning: was not able to load [{}], using default value [{}] instead.".format(
a, args.__dict__[a]
)
)
return args
def get_args_per_group_name(parser, args, group_name):
for group in parser._action_groups:
if group.title == group_name:
group_dict = {
a.dest: getattr(args, a.dest, None) for a in group._group_actions
}
return list(argparse.Namespace(**group_dict).__dict__.keys())
    raise ValueError("group_name was not found.")
def get_model_path_from_args():
try:
dummy_parser = ArgumentParser()
dummy_parser.add_argument("model_path")
dummy_args, _ = dummy_parser.parse_known_args()
return dummy_args.model_path
except Exception:
raise ValueError("model_path argument must be specified.")
def add_base_options(parser):
group = parser.add_argument_group("base")
group.add_argument(
"--cuda", default=True, type=bool, help="Use cuda device, otherwise use CPU."
)
group.add_argument("--device", default=0, type=int, help="Device id to use.")
group.add_argument("--seed", default=10, type=int, help="For fixing random seed.")
group.add_argument(
"--batch_size", default=64, type=int, help="Batch size during training."
)
group.add_argument(
"--timestep_respacing", default="", type=str, help="ddim timestep respacing."
)
def add_diffusion_options(parser):
group = parser.add_argument_group("diffusion")
group.add_argument(
"--noise_schedule",
default="cosine",
choices=["linear", "cosine"],
type=str,
help="Noise schedule type",
)
group.add_argument(
"--diffusion_steps",
default=1000,
type=int,
help="Number of diffusion steps (denoted T in the paper)",
)
group.add_argument(
"--sigma_small", default=True, type=bool, help="Use smaller sigma values."
)
def add_model_options(parser):
group = parser.add_argument_group("model")
group.add_argument(
"--arch",
default="DiffMLP",
type=str,
help="Architecture types as reported in the paper.",
)
group.add_argument(
"--motion_nfeat", default=132, type=int, help="motion feature dimension"
)
group.add_argument(
"--sparse_dim", default=54, type=int, help="sparse signal feature dimension"
)
group.add_argument("--layers", default=8, type=int, help="Number of layers.")
group.add_argument(
"--latent_dim", default=512, type=int, help="Transformer/GRU width."
)
group.add_argument(
"--cond_mask_prob",
default=0.0,
type=float,
help="The probability of masking the condition during training."
" For classifier-free guidance learning.",
)
group.add_argument(
"--input_motion_length",
default=196,
type=int,
help="Limit for the maximal number of frames.",
)
group.add_argument(
"--no_normalization",
action="store_true",
help="no data normalisation for the 6d motions",
)
def add_data_options(parser):
group = parser.add_argument_group("dataset")
group.add_argument(
"--dataset",
default=None,
choices=[
"amass",
],
type=str,
help="Dataset name (choose from list).",
)
group.add_argument(
"--dataset_path",
default="./dataset/AMASS/",
type=str,
help="Dataset path",
)
def add_training_options(parser):
group = parser.add_argument_group("training")
group.add_argument(
"--save_dir",
required=True,
type=str,
help="Path to save checkpoints and results.",
)
group.add_argument(
"--overwrite",
action="store_true",
        help="If set, allows reusing an already existing save_dir.",
)
group.add_argument(
"--train_platform_type",
default="NoPlatform",
choices=["NoPlatform", "ClearmlPlatform", "TensorboardPlatform"],
type=str,
help="Choose platform to log results. NoPlatform means no logging.",
)
group.add_argument("--lr", default=2e-4, type=float, help="Learning rate.")
group.add_argument(
"--weight_decay", default=0.0, type=float, help="Optimizer weight decay."
)
group.add_argument(
"--lr_anneal_steps",
default=0,
type=int,
help="Number of learning rate anneal steps.",
)
group.add_argument(
"--train_dataset_repeat_times",
default=1000,
type=int,
help="Repeat the training dataset to save training time",
)
group.add_argument(
"--eval_during_training",
action="store_true",
help="If True, will run evaluation during training.",
)
group.add_argument(
"--log_interval", default=100, type=int, help="Log losses each N steps"
)
group.add_argument(
"--save_interval",
default=5000,
type=int,
help="Save checkpoints and run evaluation each N steps",
)
group.add_argument(
"--num_steps",
default=6000000,
type=int,
help="Training will stop after the specified number of steps.",
)
group.add_argument(
"--resume_checkpoint",
default="",
type=str,
help="If not empty, will start from the specified checkpoint (path to model###.pt file).",
)
group.add_argument(
"--load_optimizer",
action="store_true",
help="If True, will also load the saved optimizer state for network initialization",
)
group.add_argument(
"--num_workers",
default=8,
type=int,
help="Number of dataloader workers.",
)
def add_sampling_options(parser):
group = parser.add_argument_group("sampling")
group.add_argument(
"--overlapping_test",
action="store_true",
help="enabling overlapping test",
)
group.add_argument(
"--num_per_batch",
default=256,
type=int,
help="the batch size of each split during non-overlapping testing",
)
group.add_argument(
"--sld_wind_size",
default=70,
type=int,
help="the sliding window size",
)
group.add_argument(
"--vis",
action="store_true",
help="visualize the output",
)
group.add_argument(
"--fix_noise",
action="store_true",
help="fix init noise for the output",
)
group.add_argument(
"--fps",
default=30,
type=int,
help="FPS",
)
group.add_argument(
"--model_path",
required=True,
type=str,
help="Path to model####.pt file to be sampled.",
)
group.add_argument(
"--output_dir",
default="",
type=str,
help="Path to results dir (auto created by the script). "
"If empty, will create dir in parallel to checkpoint.",
)
group.add_argument(
"--support_dir",
type=str,
        help="the directory containing your smplh and dmpls directories",
)
def add_evaluation_options(parser):
group = parser.add_argument_group("eval")
group.add_argument(
"--model_path",
required=True,
type=str,
help="Path to model####.pt file to be sampled.",
)
def train_args():
parser = ArgumentParser()
add_base_options(parser)
add_data_options(parser)
add_model_options(parser)
add_diffusion_options(parser)
add_training_options(parser)
return parser.parse_args()
def sample_args():
parser = ArgumentParser()
# args specified by the user: (all other will be loaded from the model)
add_base_options(parser)
add_sampling_options(parser)
return parse_and_load_from_model(parser)
def evaluation_parser():
parser = ArgumentParser()
# args specified by the user: (all other will be loaded from the model)
add_base_options(parser)
add_evaluation_options(parser)
return parse_and_load_from_model(parser)
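# Hedged usage sketch (not part of the original file): build the training
# parser explicitly and parse a synthetic argv, so nothing is read from the
# real command line; the save_dir value is a made-up example path.
if __name__ == "__main__":
    _parser = ArgumentParser()
    add_base_options(_parser)
    add_data_options(_parser)
    add_model_options(_parser)
    add_diffusion_options(_parser)
    add_training_options(_parser)
    _args = _parser.parse_args(["--save_dir", "./checkpoints/demo"])
    print(_args.arch, _args.diffusion_steps, _args.lr)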
|
# MIT License
# Copyright (c) 2022 ETH Sensing, Interaction & Perception Lab
#
# This code is based on https://github.com/eth-siplab/AvatarPoser
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import os
import cv2
import numpy as np
import trimesh
from body_visualizer.mesh.mesh_viewer import MeshViewer
from body_visualizer.tools.vis_tools import colors
from human_body_prior.tools.omni_tools import copy2cpu as c2c
from tqdm import tqdm
os.environ["PYOPENGL_PLATFORM"] = "egl"
class CheckerBoard:
def __init__(self, white=(247, 246, 244), black=(146, 163, 171)):
self.white = np.array(white) / 255.0
self.black = np.array(black) / 255.0
self.verts, self.faces, self.texts = None, None, None
self.offset = None
@staticmethod
def gen_checker_xy(black, white, square_size=0.5, xlength=50.0, ylength=50.0):
"""
        Generate a checkerboard parallel to the x-y plane,
        spanning (0, 0) to (xlength, ylength), in meters.
        return: trimesh.Trimesh
"""
xsquares = int(xlength / square_size)
ysquares = int(ylength / square_size)
verts, faces, texts = [], [], []
fcount = 0
for i in range(xsquares):
for j in range(ysquares):
p1 = np.array([i * square_size, j * square_size, 0])
p2 = np.array([(i + 1) * square_size, j * square_size, 0])
p3 = np.array([(i + 1) * square_size, (j + 1) * square_size, 0])
verts.extend([p1, p2, p3])
faces.append([fcount * 3, fcount * 3 + 1, fcount * 3 + 2])
fcount += 1
p1 = np.array([i * square_size, j * square_size, 0])
p2 = np.array([(i + 1) * square_size, (j + 1) * square_size, 0])
p3 = np.array([i * square_size, (j + 1) * square_size, 0])
verts.extend([p1, p2, p3])
faces.append([fcount * 3, fcount * 3 + 1, fcount * 3 + 2])
fcount += 1
if (i + j) % 2 == 0:
texts.append(black)
texts.append(black)
else:
texts.append(white)
texts.append(white)
# now compose as mesh
        mesh = trimesh.Trimesh(
            vertices=np.array(verts) + np.array([-5, -5, 0]),
            faces=np.array(faces),
            process=False,
            face_colors=np.array(texts),
        )
return mesh
"""
# --------------------------------
# Visualize avatar using body pose information and body model
# --------------------------------
"""
def save_animation(body_pose, savepath, bm, fps=60, resolution=(800, 800)):
imw, imh = resolution
mv = MeshViewer(width=imw, height=imh, use_offscreen=True)
faces = c2c(bm.f)
img_array = []
for fId in tqdm(range(body_pose.v.shape[0])):
body_mesh = trimesh.Trimesh(
vertices=c2c(body_pose.v[fId]),
faces=faces,
vertex_colors=np.tile(colors["purple"], (6890, 1)),
)
generator = CheckerBoard()
checker_mesh = generator.gen_checker_xy(generator.black, generator.white)
body_mesh.apply_transform(
trimesh.transformations.rotation_matrix(-90, (0, 0, 10))
)
body_mesh.apply_transform(
trimesh.transformations.rotation_matrix(30, (10, 0, 0))
)
body_mesh.apply_transform(trimesh.transformations.scale_matrix(0.5))
checker_mesh.apply_transform(
trimesh.transformations.rotation_matrix(-90, (0, 0, 10))
)
checker_mesh.apply_transform(
trimesh.transformations.rotation_matrix(30, (10, 0, 0))
)
checker_mesh.apply_transform(trimesh.transformations.scale_matrix(0.5))
mv.set_static_meshes([checker_mesh, body_mesh])
body_image = mv.render(render_wireframe=False)
body_image = body_image.astype(np.uint8)
body_image = cv2.cvtColor(body_image, cv2.COLOR_BGR2RGB)
img_array.append(body_image)
out = cv2.VideoWriter(savepath, cv2.VideoWriter_fourcc(*"DIVX"), fps, resolution)
for i in range(len(img_array)):
out.write(img_array[i])
out.release()
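# Hedged smoke test (not part of the original file): exercises only the
# checkerboard generator, which needs trimesh and numpy but no body model,
# renderer, or display.
if __name__ == "__main__":
    board = CheckerBoard()
    mesh = board.gen_checker_xy(
        board.black, board.white, square_size=0.5, xlength=5.0, ylength=5.0
    )
    print("checkerboard:", mesh.vertices.shape, mesh.faces.shape)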
|
# MIT License
# Copyright (c) 2022 ETH Sensing, Interaction & Perception Lab
#
# This code is based on https://github.com/eth-siplab/AvatarPoser
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import torch
from human_body_prior.tools import tgm_conversion as tgm
from human_body_prior.tools.rotation_tools import aa2matrot, matrot2aa
from torch.nn import functional as F
def bgs(d6s):
d6s = d6s.reshape(-1, 2, 3).permute(0, 2, 1)
bsz = d6s.shape[0]
b1 = F.normalize(d6s[:, :, 0], p=2, dim=1)
a2 = d6s[:, :, 1]
c = torch.bmm(b1.view(bsz, 1, -1), a2.view(bsz, -1, 1)).view(bsz, 1) * b1
b2 = F.normalize(a2 - c, p=2, dim=1)
b3 = torch.cross(b1, b2, dim=1)
return torch.stack([b1, b2, b3], dim=-1)
def matrot2sixd(pose_matrot):
"""
:param pose_matrot: Nx3x3
:return: pose_6d: Nx6
"""
pose_6d = torch.cat([pose_matrot[:, :3, 0], pose_matrot[:, :3, 1]], dim=1)
return pose_6d
def aa2sixd(pose_aa):
"""
    :param pose_aa: Nx3
:return: pose_6d: Nx6
"""
pose_matrot = aa2matrot(pose_aa)
pose_6d = matrot2sixd(pose_matrot)
return pose_6d
def sixd2matrot(pose_6d):
"""
:param pose_6d: Nx6
:return: pose_matrot: Nx3x3
"""
rot_vec_1 = pose_6d[:, :3]
rot_vec_2 = pose_6d[:, 3:6]
rot_vec_3 = torch.cross(rot_vec_1, rot_vec_2)
pose_matrot = torch.stack([rot_vec_1, rot_vec_2, rot_vec_3], dim=-1)
return pose_matrot
def sixd2aa(pose_6d, batch=False):
"""
:param pose_6d: Nx6
:return: pose_aa: Nx3
"""
if batch:
B, J, C = pose_6d.shape
pose_6d = pose_6d.reshape(-1, 6)
pose_matrot = sixd2matrot(pose_6d)
pose_aa = matrot2aa(pose_matrot)
if batch:
pose_aa = pose_aa.reshape(B, J, 3)
return pose_aa
def sixd2quat(pose_6d):
"""
:param pose_6d: Nx6
:return: pose_quaternion: Nx4
"""
pose_mat = sixd2matrot(pose_6d)
pose_mat_34 = torch.cat(
(pose_mat, torch.zeros(pose_mat.size(0), pose_mat.size(1), 1)), dim=-1
)
pose_quaternion = tgm.rotation_matrix_to_quaternion(pose_mat_34)
return pose_quaternion
def quat2aa(pose_quat):
"""
:param pose_quat: Nx4
:return: pose_aa: Nx3
"""
return tgm.quaternion_to_angle_axis(pose_quat)
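# Hedged sanity check (not part of the original file): bgs should produce
# orthonormal rotation matrices from arbitrary 6-D inputs; only torch is used,
# not the human_body_prior helpers imported above.
if __name__ == "__main__":
    d6 = torch.randn(4, 6)
    R = bgs(d6)
    err = (torch.matmul(R.transpose(1, 2), R) - torch.eye(3)).abs().max().item()
    print(f"max |R^T R - I| = {err:.2e}")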
|
# MIT License
# Copyright (c) 2021 OpenAI
#
# This code is based on https://github.com/openai/guided-diffusion
"""
Helpers for distributed training.
"""
import socket
import torch as th
import torch.distributed as dist
# Change this to reflect your cluster layout.
# The GPU for a given rank is (rank % GPUS_PER_NODE).
GPUS_PER_NODE = 8
SETUP_RETRY_COUNT = 3
used_device = 0
def setup_dist(device=0):
"""
    Set up the device used by the distributed helpers. No process group is
    created here; only the device index is recorded.
"""
global used_device
used_device = device
if dist.is_initialized():
return
def dev():
"""
Get the device to use for torch.distributed.
"""
global used_device
if th.cuda.is_available() and used_device >= 0:
return th.device(f"cuda:{used_device}")
return th.device("cpu")
def load_state_dict(path, **kwargs):
"""
    Load a PyTorch checkpoint file. This is a thin wrapper around th.load.
"""
return th.load(path, **kwargs)
def sync_params(params):
"""
Synchronize a sequence of Tensors across ranks from rank 0.
"""
for p in params:
with th.no_grad():
dist.broadcast(p, 0)
def _find_free_port():
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind(("", 0))
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return s.getsockname()[1]
    finally:
        s.close()
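# Hedged usage sketch (not part of the original file): single-process device
# selection as used by the training scripts.
if __name__ == "__main__":
    setup_dist(device=0)
    print("using device:", dev())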
|
# MIT License
# Copyright (c) 2021 OpenAI
#
# This code is based on https://github.com/openai/guided-diffusion
# MIT License
# Copyright (c) 2022 Guy Tevet
#
# This code is based on https://github.com/GuyTevet/motion-diffusion-model
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import numpy as np
import torch as th
from .diffusion_model import DiffusionModel
def space_timesteps(num_timesteps, section_counts):
"""
Create a list of timesteps to use from an original diffusion process,
given the number of timesteps we want to take from equally-sized portions
of the original process.
    For example, if there are 300 timesteps and the section counts are [10, 15, 20],
    then the first 100 timesteps are strided to be 10 timesteps, the second 100
    are strided to be 15 timesteps, and the final 100 are strided to be 20.
If the stride is a string starting with "ddim", then the fixed striding
from the DDIM paper is used, and only one section is allowed.
:param num_timesteps: the number of diffusion steps in the original
process to divide up.
:param section_counts: either a list of numbers, or a string containing
comma-separated numbers, indicating the step count
per section. As a special case, use "ddimN" where N
is a number of steps to use the striding from the
DDIM paper.
:return: a set of diffusion steps from the original process to use.
"""
if isinstance(section_counts, str):
if section_counts.startswith("ddim"):
desired_count = int(section_counts[len("ddim") :])
for i in range(1, num_timesteps):
if len(range(0, num_timesteps, i)) == desired_count:
return set(range(0, num_timesteps, i))
raise ValueError(
                f"cannot create exactly {desired_count} steps with an integer stride"
)
section_counts = [int(x) for x in section_counts.split(",")]
size_per = num_timesteps // len(section_counts)
extra = num_timesteps % len(section_counts)
start_idx = 0
all_steps = []
for i, section_count in enumerate(section_counts):
size = size_per + (1 if i < extra else 0)
if size < section_count:
raise ValueError(
f"cannot divide section of {size} steps into {section_count}"
)
if section_count <= 1:
frac_stride = 1
else:
frac_stride = (size - 1) / (section_count - 1)
cur_idx = 0.0
taken_steps = []
for _ in range(section_count):
taken_steps.append(start_idx + round(cur_idx))
cur_idx += frac_stride
all_steps += taken_steps
start_idx += size
return set(all_steps)
class SpacedDiffusion(DiffusionModel):
"""
A diffusion process which can skip steps in a base diffusion process.
:param use_timesteps: a collection (sequence or set) of timesteps from the
original diffusion process to retain.
:param kwargs: the kwargs to create the base diffusion process.
"""
def __init__(self, use_timesteps, **kwargs):
self.use_timesteps = set(use_timesteps)
self.timestep_map = []
self.original_num_steps = len(kwargs["betas"])
base_diffusion = DiffusionModel(**kwargs)
last_alpha_cumprod = 1.0
new_betas = []
for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
if i in self.use_timesteps:
new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
last_alpha_cumprod = alpha_cumprod
self.timestep_map.append(i)
kwargs["betas"] = np.array(new_betas)
super().__init__(**kwargs)
def p_mean_variance(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
def training_losses(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().training_losses(self._wrap_model(model), *args, **kwargs)
def condition_mean(self, cond_fn, *args, **kwargs):
return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
def condition_score(self, cond_fn, *args, **kwargs):
return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
def _wrap_model(self, model):
if isinstance(model, _WrappedModel):
return model
return _WrappedModel(
model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
)
def _scale_timesteps(self, t):
# Scaling is done by the wrapped model.
return t
class _WrappedModel:
def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
self.model = model
self.timestep_map = timestep_map
self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, sparse, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
if self.rescale_timesteps:
new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
return self.model(x, new_ts, sparse, **kwargs)
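# Hedged usage sketch (not part of the original file): only the timestep
# selection is exercised, since SpacedDiffusion itself needs the betas and
# other kwargs of the base diffusion; run this as a module (python -m ...)
# because of the relative import at the top of this file.
if __name__ == "__main__":
    ddim_steps = space_timesteps(1000, "ddim50")
    print(len(ddim_steps), sorted(ddim_steps)[:5])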
|
# MIT License
# Copyright (c) 2021 OpenAI
#
# This code is based on https://github.com/openai/guided-diffusion
from abc import ABC, abstractmethod
import numpy as np
import torch as th
import torch.distributed as dist
def create_named_schedule_sampler(name, diffusion):
"""
Create a ScheduleSampler from a library of pre-defined samplers.
:param name: the name of the sampler.
:param diffusion: the diffusion object to sample for.
"""
if name == "uniform":
return UniformSampler(diffusion)
elif name == "loss-second-moment":
return LossSecondMomentResampler(diffusion)
else:
raise NotImplementedError(f"unknown schedule sampler: {name}")
class ScheduleSampler(ABC):
"""
A distribution over timesteps in the diffusion process, intended to reduce
variance of the objective.
By default, samplers perform unbiased importance sampling, in which the
objective's mean is unchanged.
However, subclasses may override sample() to change how the resampled
terms are reweighted, allowing for actual changes in the objective.
"""
@abstractmethod
def weights(self):
"""
Get a numpy array of weights, one per diffusion step.
The weights needn't be normalized, but must be positive.
"""
def sample(self, batch_size, device):
"""
Importance-sample timesteps for a batch.
:param batch_size: the number of timesteps.
:param device: the torch device to save to.
:return: a tuple (timesteps, weights):
- timesteps: a tensor of timestep indices.
- weights: a tensor of weights to scale the resulting losses.
"""
w = self.weights()
p = w / np.sum(w)
indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
indices = th.from_numpy(indices_np).long().to(device)
weights_np = 1 / (len(p) * p[indices_np])
weights = th.from_numpy(weights_np).float().to(device)
return indices, weights
class UniformSampler(ScheduleSampler):
def __init__(self, diffusion):
self.diffusion = diffusion
self._weights = np.ones([diffusion.num_timesteps])
def weights(self):
return self._weights
class LossAwareSampler(ScheduleSampler):
def update_with_local_losses(self, local_ts, local_losses):
"""
Update the reweighting using losses from a model.
Call this method from each rank with a batch of timesteps and the
corresponding losses for each of those timesteps.
This method will perform synchronization to make sure all of the ranks
maintain the exact same reweighting.
:param local_ts: an integer Tensor of timesteps.
:param local_losses: a 1D Tensor of losses.
"""
batch_sizes = [
th.tensor([0], dtype=th.int32, device=local_ts.device)
for _ in range(dist.get_world_size())
]
dist.all_gather(
batch_sizes,
th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
)
# Pad all_gather batches to be the maximum batch size.
batch_sizes = [x.item() for x in batch_sizes]
max_bs = max(batch_sizes)
timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
dist.all_gather(timestep_batches, local_ts)
dist.all_gather(loss_batches, local_losses)
timesteps = [
x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
]
losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
self.update_with_all_losses(timesteps, losses)
@abstractmethod
def update_with_all_losses(self, ts, losses):
"""
Update the reweighting using losses from a model.
Sub-classes should override this method to update the reweighting
using losses from the model.
This method directly updates the reweighting without synchronizing
between workers. It is called by update_with_local_losses from all
ranks with identical arguments. Thus, it should have deterministic
behavior to maintain state across workers.
:param ts: a list of int timesteps.
:param losses: a list of float losses, one per timestep.
"""
class LossSecondMomentResampler(LossAwareSampler):
def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
self.diffusion = diffusion
self.history_per_term = history_per_term
self.uniform_prob = uniform_prob
self._loss_history = np.zeros(
[diffusion.num_timesteps, history_per_term], dtype=np.float64
)
        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)
def weights(self):
if not self._warmed_up():
return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
weights = np.sqrt(np.mean(self._loss_history**2, axis=-1))
weights /= np.sum(weights)
weights *= 1 - self.uniform_prob
weights += self.uniform_prob / len(weights)
return weights
def update_with_all_losses(self, ts, losses):
for t, loss in zip(ts, losses):
if self._loss_counts[t] == self.history_per_term:
# Shift out the oldest loss term.
self._loss_history[t, :-1] = self._loss_history[t, 1:]
self._loss_history[t, -1] = loss
else:
self._loss_history[t, self._loss_counts[t]] = loss
self._loss_counts[t] += 1
def _warmed_up(self):
return (self._loss_counts == self.history_per_term).all()
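# Hedged usage sketch (not part of the original file): a dummy object with a
# num_timesteps attribute is enough to exercise the uniform sampler on CPU.
if __name__ == "__main__":
    class _DummyDiffusion:
        num_timesteps = 1000
    sampler = create_named_schedule_sampler("uniform", _DummyDiffusion())
    ts, weights = sampler.sample(batch_size=4, device=th.device("cpu"))
    print(ts, weights)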
|
"""
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
"""
# MIT License
# Copyright (c) 2021 OpenAI
#
# This code is based on https://github.com/openai/guided-diffusion
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import datetime
import json
import os
import os.path as osp
import sys
import tempfile
import time
import warnings
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, "wt")
self.own_file = True
else:
assert hasattr(filename_or_file, "read"), (
"expected file or str, got %s" % filename_or_file
)
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, "__float__"):
valstr = "%-8.3g" % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print("WARNING: tried to write empty key-value dict")
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = "-" * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append(
"| %s%s | %s%s |"
% (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
)
lines.append(dashes)
self.file.write("\n".join(lines) + "\n")
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[: maxlen - 3] + "..." if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(" ")
self.file.write("\n")
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "wt")
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, "dtype"):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + "\n")
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "w+t")
self.keys = []
self.sep = ","
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
self.file.write(k)
self.file.write("\n")
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write("\n")
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write("\n")
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = "events"
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.core.util import event_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {"tag": k, "simple_value": float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = (
self.step
) # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=""):
os.makedirs(ev_dir, exist_ok=True)
if format == "stdout":
return HumanOutputFormat(sys.stdout)
elif format == "log":
return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
elif format == "json":
return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
elif format == "csv":
return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
elif format == "tensorboard":
return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
else:
raise ValueError("Unknown format specified: %s" % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = "wait_" + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
d = mpi_weighted_mean(
self.comm,
{
name: (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()
},
)
if self.comm.rank != 0:
d["dummy"] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
if varname in os.environ:
return int(os.environ[varname])
return 0
def mpi_weighted_mean(comm, local_name2valcount):
"""
Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn(
"WARNING: tried to compute mean on non-float {}={}".format(
name, val
)
)
else:
name2sum[name] += val * count
name2count[name] += count
return {name: name2sum[name] / name2count[name] for name in name2sum}
else:
return {}
def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv("OPENAI_LOGDIR")
if dir is None:
dir = osp.join(
tempfile.gettempdir(),
datetime.datetime.now().strftime("agrol-%Y-%m-%d-%H-%M-%S-%f"),
)
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout,log,csv").split(",")
else:
format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log("Logging to %s" % dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log("Reset logger")
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
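# Hedged usage sketch (not part of the original file): logs two scalars to a
# temporary directory (or $OPENAI_LOGDIR) using the stdout and csv writers.
if __name__ == "__main__":
    configure(format_strs=["stdout", "csv"])
    logkv("step", 1)
    logkv_mean("loss", 0.5)
    dumpkvs()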
|
"""
This code started out as a PyTorch port of Ho et al's diffusion models:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
"""
# MIT License
# Copyright (c) 2021 OpenAI
#
# This code is based on https://github.com/GuyTevet/motion-diffusion-model
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import torch
import torch as th
from diffusion.gaussian_diffusion import (
GaussianDiffusion,
LossType,
ModelMeanType,
ModelVarType,
)
class DiffusionModel(GaussianDiffusion):
def __init__(
self,
**kwargs,
):
super(DiffusionModel, self).__init__(
**kwargs,
)
def masked_l2(self, a, b):
bs, n, c = a.shape
loss = torch.mean(
torch.norm(
(a - b).reshape(-1, 6),
2,
1,
)
)
return loss
def training_losses(
self, model, x_start, t, sparse, model_kwargs=None, noise=None, dataset=None
):
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start, t, noise=noise)
terms = {}
if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
terms["loss"] = self._vb_terms_bpd(
model=model,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
model_kwargs=model_kwargs,
)["output"]
if self.loss_type == LossType.RESCALED_KL:
terms["loss"] *= self.num_timesteps
elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
model_output = model(x_t, self._scale_timesteps(t), sparse, **model_kwargs)
if self.model_var_type in [
ModelVarType.LEARNED,
ModelVarType.LEARNED_RANGE,
]:
B, C = x_t.shape[:2]
assert model_output.shape == (B, C * 2, *x_t.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
# Learn the variance using the variational bound, but don't let
# it affect our mean prediction.
frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
terms["vb"] = self._vb_terms_bpd(
model=lambda *args, r=frozen_out: r,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
)["output"]
if self.loss_type == LossType.RESCALED_MSE:
# Divide by 1000 for equivalence with initial implementation.
# Without a factor of 1/1000, the VB term hurts the MSE term.
terms["vb"] *= self.num_timesteps / 1000.0
target = {
ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)[0],
ModelMeanType.START_X: x_start,
ModelMeanType.EPSILON: noise,
}[self.model_mean_type]
assert model_output.shape == target.shape == x_start.shape
terms["rot_mse"] = self.masked_l2(
target,
model_output,
)
terms["loss"] = terms["rot_mse"] + terms.get("vb", 0.0)
else:
raise NotImplementedError(self.loss_type)
return terms
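# Hedged numeric sketch (not part of the original file): masked_l2 above is the
# mean, over all frames and joints, of the L2 norm of the 6-D rotation error;
# the same quantity computed standalone on random tensors.
if __name__ == "__main__":
    a = torch.randn(2, 8, 12)  # (batch, frames, 2 joints x 6-D features)
    b = torch.randn(2, 8, 12)
    print(torch.norm((a - b).reshape(-1, 6), 2, 1).mean().item())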
|
"""
Helpers for various likelihood-based losses. These are ported from the original
Ho et al. diffusion models codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/utils.py
"""
# MIT License
# Copyright (c) 2021 OpenAI
#
# This code is based on https://github.com/openai/guided-diffusion
import numpy as np
import torch as th
def normal_kl(mean1, logvar1, mean2, logvar2):
"""
Compute the KL divergence between two gaussians.
Shapes are automatically broadcasted, so batches can be compared to
scalars, among other use cases.
"""
tensor = None
for obj in (mean1, logvar1, mean2, logvar2):
if isinstance(obj, th.Tensor):
tensor = obj
break
assert tensor is not None, "at least one argument must be a Tensor"
# Force variances to be Tensors. Broadcasting helps convert scalars to
# Tensors, but it does not work for th.exp().
logvar1, logvar2 = [
x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
for x in (logvar1, logvar2)
]
return 0.5 * (
-1.0
+ logvar2
- logvar1
+ th.exp(logvar1 - logvar2)
+ ((mean1 - mean2) ** 2) * th.exp(-logvar2)
)
def approx_standard_normal_cdf(x):
"""
A fast approximation of the cumulative distribution function of the
standard normal.
"""
return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
"""
Compute the log-likelihood of a Gaussian distribution discretizing to a
given image.
:param x: the target images. It is assumed that this was uint8 values,
rescaled to the range [-1, 1].
:param means: the Gaussian mean Tensor.
:param log_scales: the Gaussian log stddev Tensor.
:return: a tensor like x of log probabilities (in nats).
"""
assert x.shape == means.shape == log_scales.shape
centered_x = x - means
inv_stdv = th.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
cdf_plus = approx_standard_normal_cdf(plus_in)
min_in = inv_stdv * (centered_x - 1.0 / 255.0)
cdf_min = approx_standard_normal_cdf(min_in)
log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
cdf_delta = cdf_plus - cdf_min
log_probs = th.where(
x < -0.999,
log_cdf_plus,
th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
)
assert log_probs.shape == x.shape
return log_probs
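# Hedged sanity check (not part of the original file): the KL divergence
# between identical Gaussians is zero, and broadcasting lets scalar parameters
# be compared against batched tensors.
if __name__ == "__main__":
    mean = th.zeros(3)
    logvar = th.zeros(3)
    print(normal_kl(mean, logvar, mean, logvar))
    print(normal_kl(mean, logvar, 0.0, 0.0))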
|
# MIT License
# Copyright (c) 2021 OpenAI
#
# This code is based on https://github.com/openai/guided-diffusion
"""
Helpers to train with 16-bit precision.
"""
import numpy as np
import torch as th
import torch.nn as nn
from diffusion import logger
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
INITIAL_LOG_LOSS_SCALE = 20.0
def convert_module_to_f16(l):
"""
Convert primitive modules to float16.
"""
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
def convert_module_to_f32(l):
"""
Convert primitive modules to float32, undoing convert_module_to_f16().
"""
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
l.weight.data = l.weight.data.float()
if l.bias is not None:
l.bias.data = l.bias.data.float()
def make_master_params(param_groups_and_shapes):
"""
Copy model parameters into a (differently-shaped) list of full-precision
parameters.
"""
master_params = []
for param_group, shape in param_groups_and_shapes:
master_param = nn.Parameter(
_flatten_dense_tensors(
[param.detach().float() for (_, param) in param_group]
).view(shape)
)
master_param.requires_grad = True
master_params.append(master_param)
return master_params
def model_grads_to_master_grads(param_groups_and_shapes, master_params):
"""
Copy the gradients from the model parameters into the master parameters
from make_master_params().
"""
for master_param, (param_group, shape) in zip(
master_params, param_groups_and_shapes
):
master_param.grad = _flatten_dense_tensors(
[param_grad_or_zeros(param) for (_, param) in param_group]
).view(shape)
def master_params_to_model_params(param_groups_and_shapes, master_params):
"""
Copy the master parameter data back into the model parameters.
"""
# Without copying to a list, if a generator is passed, this will
# silently not copy any parameters.
for master_param, (param_group, _) in zip(master_params, param_groups_and_shapes):
for (_, param), unflat_master_param in zip(
param_group, unflatten_master_params(param_group, master_param.view(-1))
):
param.detach().copy_(unflat_master_param)
def unflatten_master_params(param_group, master_param):
return _unflatten_dense_tensors(master_param, [param for (_, param) in param_group])
def get_param_groups_and_shapes(named_model_params):
named_model_params = list(named_model_params)
scalar_vector_named_params = (
[(n, p) for (n, p) in named_model_params if p.ndim <= 1],
(-1),
)
matrix_named_params = (
[(n, p) for (n, p) in named_model_params if p.ndim > 1],
(1, -1),
)
return [scalar_vector_named_params, matrix_named_params]
def master_params_to_state_dict(
model, param_groups_and_shapes, master_params, use_fp16
):
if use_fp16:
state_dict = model.state_dict()
for master_param, (param_group, _) in zip(
master_params, param_groups_and_shapes
):
for (name, _), unflat_master_param in zip(
param_group, unflatten_master_params(param_group, master_param.view(-1))
):
assert name in state_dict
state_dict[name] = unflat_master_param
else:
state_dict = model.state_dict()
for i, (name, _value) in enumerate(model.named_parameters()):
assert name in state_dict
state_dict[name] = master_params[i]
return state_dict
def state_dict_to_master_params(model, state_dict, use_fp16):
if use_fp16:
named_model_params = [
(name, state_dict[name]) for name, _ in model.named_parameters()
]
param_groups_and_shapes = get_param_groups_and_shapes(named_model_params)
master_params = make_master_params(param_groups_and_shapes)
else:
master_params = [state_dict[name] for name, _ in model.named_parameters()]
return master_params
def zero_master_grads(master_params):
for param in master_params:
param.grad = None
def zero_grad(model_params):
for param in model_params:
# Taken from https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.add_param_group
if param.grad is not None:
param.grad.detach_()
param.grad.zero_()
def param_grad_or_zeros(param):
if param.grad is not None:
return param.grad.data.detach()
else:
return th.zeros_like(param)
class MixedPrecisionTrainer:
def __init__(
self,
*,
model,
use_fp16=False,
fp16_scale_growth=1e-3,
initial_lg_loss_scale=INITIAL_LOG_LOSS_SCALE,
):
self.model = model
self.use_fp16 = use_fp16
self.fp16_scale_growth = fp16_scale_growth
self.model_params = list(self.model.parameters())
self.master_params = self.model_params
self.param_groups_and_shapes = None
self.lg_loss_scale = initial_lg_loss_scale
if self.use_fp16:
self.param_groups_and_shapes = get_param_groups_and_shapes(
self.model.named_parameters()
)
self.master_params = make_master_params(self.param_groups_and_shapes)
self.model.convert_to_fp16()
def zero_grad(self):
zero_grad(self.model_params)
def backward(self, loss: th.Tensor):
if self.use_fp16:
loss_scale = 2**self.lg_loss_scale
(loss * loss_scale).backward()
else:
loss.backward()
def optimize(self, opt: th.optim.Optimizer):
if self.use_fp16:
return self._optimize_fp16(opt)
else:
return self._optimize_normal(opt)
def _optimize_fp16(self, opt: th.optim.Optimizer):
logger.logkv_mean("lg_loss_scale", self.lg_loss_scale)
model_grads_to_master_grads(self.param_groups_and_shapes, self.master_params)
grad_norm, param_norm = self._compute_norms(grad_scale=2**self.lg_loss_scale)
if check_overflow(grad_norm):
self.lg_loss_scale -= 1
logger.log(f"Found NaN, decreased lg_loss_scale to {self.lg_loss_scale}")
zero_master_grads(self.master_params)
return False
logger.logkv_mean("grad_norm", grad_norm)
logger.logkv_mean("param_norm", param_norm)
self.master_params[0].grad.mul_(1.0 / (2**self.lg_loss_scale))
opt.step()
zero_master_grads(self.master_params)
master_params_to_model_params(self.param_groups_and_shapes, self.master_params)
self.lg_loss_scale += self.fp16_scale_growth
return True
def _optimize_normal(self, opt: th.optim.Optimizer):
grad_norm, param_norm = self._compute_norms()
logger.logkv_mean("grad_norm", grad_norm)
logger.logkv_mean("param_norm", param_norm)
opt.step()
return True
def _compute_norms(self, grad_scale=1.0):
grad_norm = 0.0
param_norm = 0.0
for p in self.master_params:
with th.no_grad():
param_norm += th.norm(p, p=2, dtype=th.float32).item() ** 2
if p.grad is not None:
grad_norm += th.norm(p.grad, p=2, dtype=th.float32).item() ** 2
return np.sqrt(grad_norm) / grad_scale, np.sqrt(param_norm)
def master_params_to_state_dict(self, master_params):
return master_params_to_state_dict(
self.model, self.param_groups_and_shapes, master_params, self.use_fp16
)
def state_dict_to_master_params(self, state_dict):
return state_dict_to_master_params(self.model, state_dict, self.use_fp16)
def check_overflow(value):
return (value == float("inf")) or (value == -float("inf")) or (value != value)
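# Editorial usage sketch (not part of the original module): a minimal fp32
# training loop with MixedPrecisionTrainer, assuming the module-level imports
# above (`th`, `np`, `logger`) are available. Names prefixed with `_demo` are
# illustrative only.
if __name__ == "__main__":
    from torch import nn, optim  # demo-only imports
    _demo_model = nn.Linear(4, 2)
    _demo_trainer = MixedPrecisionTrainer(model=_demo_model, use_fp16=False)
    _demo_opt = optim.AdamW(_demo_trainer.master_params, lr=1e-3)
    for _ in range(3):
        _demo_trainer.zero_grad()
        _demo_loss = _demo_model(th.randn(8, 4)).pow(2).mean()
        _demo_trainer.backward(_demo_loss)
        _demo_trainer.optimize(_demo_opt)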
|
"""
This code started out as a PyTorch port of Ho et al's diffusion models:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
"""
# MIT License
# Copyright (c) 2021 OpenAI
#
# This code is based on https://github.com/openai/guided-diffusion
# MIT License
# Copyright (c) 2022 Guy Tevet
#
# This code is based on https://github.com/GuyTevet/motion-diffusion-model
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import enum
import math
from copy import deepcopy
import numpy as np
import torch
import torch as th
from diffusion.losses import discretized_gaussian_log_likelihood, normal_kl
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape))))
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps, scale_betas=1.0):
"""
Get a pre-defined beta schedule for the given name.
The beta schedule library consists of beta schedules which remain similar
in the limit of num_diffusion_timesteps.
Beta schedules may be added, but should not be removed or changed once
they are committed to maintain backwards compatibility.
"""
if schedule_name == "linear":
# Linear schedule from Ho et al, extended to work for any number of
# diffusion steps.
scale = scale_betas * 1000 / num_diffusion_timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
return np.linspace(
beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
)
elif schedule_name == "cosine":
return betas_for_alpha_bar(
num_diffusion_timesteps,
lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
else:
raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas)
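# Editorial note (sketch, not part of the original API): for the "cosine"
# schedule above, each beta is the relative drop of the cumulative product
# between consecutive steps,
#   beta_i = 1 - alpha_bar((i + 1) / T) / alpha_bar(i / T),
# clipped at max_beta, so np.cumprod(1.0 - betas) closely tracks alpha_bar(t).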
class ModelMeanType(enum.Enum):
"""
Which type of output the model predicts.
"""
PREVIOUS_X = enum.auto() # the model predicts x_{t-1}
START_X = enum.auto() # the model predicts x_0
EPSILON = enum.auto() # the model predicts epsilon
class ModelVarType(enum.Enum):
"""
What is used as the model's output variance.
The LEARNED_RANGE option has been added to allow the model to predict
values between FIXED_SMALL and FIXED_LARGE, making its job easier.
"""
LEARNED = enum.auto()
FIXED_SMALL = enum.auto()
FIXED_LARGE = enum.auto()
LEARNED_RANGE = enum.auto()
class LossType(enum.Enum):
MSE = enum.auto() # use raw MSE loss (and KL when learning variances)
RESCALED_MSE = enum.auto()  # use raw MSE loss (with RESCALED_KL when learning variances)
KL = enum.auto() # use the variational lower-bound
RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB
def is_vb(self):
return self == LossType.KL or self == LossType.RESCALED_KL
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Ported directly from here, and then adapted over time for further experimentation.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
:param model_mean_type: a ModelMeanType determining what the model outputs.
:param model_var_type: a ModelVarType determining how variance is output.
:param loss_type: a LossType determining the loss function to use.
:param rescale_timesteps: if True, pass floating point timesteps into the
model so that they are always scaled like in the
original paper (0 to 1000).
"""
def __init__(
self,
*,
dataset,
betas,
model_mean_type,
model_var_type,
loss_type,
rescale_timesteps=False,
lambda_rcxyz=0.0,
lambda_vel=1.0,
lambda_pose=1.0,
lambda_orient=1.0,
lambda_loc=1.0,
data_rep="rot",
lambda_root_vel=0.0,
lambda_vel_rcxyz=0.0,
lambda_fc=0.0,
):
self.dataset = dataset
self.model_mean_type = model_mean_type
self.model_var_type = model_var_type
self.loss_type = loss_type
self.rescale_timesteps = rescale_timesteps
self.data_rep = data_rep
if data_rep != "rot_vel" and lambda_pose != 1.0:
raise ValueError(
"lambda_pose is relevant only when training on velocities!"
)
self.lambda_pose = lambda_pose
self.lambda_orient = lambda_orient
self.lambda_loc = lambda_loc
self.lambda_rcxyz = lambda_rcxyz
self.lambda_vel = lambda_vel
self.lambda_root_vel = lambda_root_vel
self.lambda_vel_rcxyz = lambda_vel_rcxyz
self.lambda_fc = lambda_fc
if (
self.lambda_rcxyz > 0.0
or self.lambda_vel > 0.0
or self.lambda_root_vel > 0.0
or self.lambda_vel_rcxyz > 0.0
or self.lambda_fc > 0.0
):
assert (
self.loss_type == LossType.MSE
), "Geometric losses are supported by MSE loss type only!"
# Use float64 for accuracy.
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert len(betas.shape) == 1, "betas must be 1-D"
assert (betas > 0).all() and (betas <= 1).all()
self.num_timesteps = int(betas.shape[0])
alphas = 1.0 - betas
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
# calculations for posterior q(x_{t-1} | x_t, x_0)
self.posterior_variance = (
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
# log calculation clipped because the posterior variance is 0 at the
# beginning of the diffusion chain.
self.posterior_log_variance_clipped = np.log(
np.append(self.posterior_variance[1], self.posterior_variance[1:])
)
self.posterior_mean_coef1 = (
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
self.posterior_mean_coef2 = (
(1.0 - self.alphas_cumprod_prev)
* np.sqrt(alphas)
/ (1.0 - self.alphas_cumprod)
)
def masked_l2(self, a, b):
pass
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
)
variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = _extract_into_tensor(
self.log_one_minus_alphas_cumprod, t, x_start.shape
)
return mean, variance, log_variance
def q_sample(self, x_start, t, noise=None):
"""
Diffuse the dataset for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial dataset batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
"""
if noise is None:
noise = th.randn_like(x_start)
assert noise.shape == x_start.shape
return (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
* noise
)
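# Editorial sketch (names are illustrative, not from the original code): given
# a GaussianDiffusion instance `gd` and a clean batch `x0`,
#   t = th.randint(0, gd.num_timesteps, (x0.shape[0],))
#   x_t = gd.q_sample(x0, t)
# draws x_t ~ N(sqrt(alpha_bar_t) * x0, (1 - alpha_bar_t) * I) in closed form.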
def q_posterior_mean_variance(self, x_start, x_t, t):
"""
Compute the mean and variance of the diffusion posterior:
q(x_{t-1} | x_t, x_0)
"""
assert x_start.shape == x_t.shape
posterior_mean = (
_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
+ _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x_t.shape
)
assert (
posterior_mean.shape[0]
== posterior_variance.shape[0]
== posterior_log_variance_clipped.shape[0]
== x_start.shape[0]
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(
self,
model,
x,
t,
sparse,
clip_denoised=True,
denoised_fn=None,
model_kwargs=None,
):
"""
Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
the initial x, x_0.
:param model: the model, which takes a signal and a batch of timesteps
as input.
:param x: the [N x C x ...] tensor at time t.
:param t: a 1-D Tensor of timesteps.
:param clip_denoised: if True, clip the denoised signal into [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample. Applies before
clip_denoised.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict with the following keys:
- 'mean': the model mean output.
- 'variance': the model variance output.
- 'log_variance': the log of 'variance'.
- 'pred_xstart': the prediction for x_0.
"""
B, C = x.shape[:2]
assert t.shape == (B,)
if model_kwargs is not None:
model_output = model(x, self._scale_timesteps(t), sparse, **model_kwargs)
else:
model_output = model(x, self._scale_timesteps(t), sparse)
if model_kwargs is not None:
if (
"inpainting_mask" in model_kwargs["y"].keys()
and "inpainted_motion" in model_kwargs["y"].keys()
):
inpainting_mask, inpainted_motion = (
model_kwargs["y"]["inpainting_mask"],
model_kwargs["y"]["inpainted_motion"],
)
assert (
self.model_mean_type == ModelMeanType.START_X
), "This feature supports only X_start pred for mow!"
assert (
model_output.shape
== inpainting_mask.shape
== inpainted_motion.shape
)
model_output = (model_output * (1 - inpainting_mask)) + (
inpainted_motion * inpainting_mask
)
if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
assert model_output.shape == (B, C * 2, *x.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
if self.model_var_type == ModelVarType.LEARNED:
model_log_variance = model_var_values
model_variance = th.exp(model_log_variance)
else:
min_log = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x.shape
)
max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
# The model_var_values is [-1, 1] for [min_var, max_var].
frac = (model_var_values + 1) / 2
model_log_variance = frac * max_log + (1 - frac) * min_log
model_variance = th.exp(model_log_variance)
else:
model_variance, model_log_variance = {
# for fixedlarge, we set the initial (log-)variance like so
# to get a better decoder log likelihood.
ModelVarType.FIXED_LARGE: (
np.append(self.posterior_variance[1], self.betas[1:]),
np.log(np.append(self.posterior_variance[1], self.betas[1:])),
),
ModelVarType.FIXED_SMALL: (
self.posterior_variance,
self.posterior_log_variance_clipped,
),
}[self.model_var_type]
model_variance = _extract_into_tensor(model_variance, t, x.shape)
model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
def process_xstart(x):
if denoised_fn is not None:
x = denoised_fn(x)
if clip_denoised:
# print('clip_denoised', clip_denoised)
return x.clamp(-1, 1)
return x
if self.model_mean_type == ModelMeanType.PREVIOUS_X:
pred_xstart = process_xstart(
self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
)
model_mean = model_output
elif self.model_mean_type in [
ModelMeanType.START_X,
ModelMeanType.EPSILON,
]: # THIS IS US!
if self.model_mean_type == ModelMeanType.START_X:
pred_xstart = process_xstart(model_output)
else:
pred_xstart = process_xstart(
self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
)
model_mean, _, _ = self.q_posterior_mean_variance(
x_start=pred_xstart, x_t=x, t=t
)
else:
raise NotImplementedError(self.model_mean_type)
assert (
model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
)
return {
"mean": model_mean,
"variance": model_variance,
"log_variance": model_log_variance,
"pred_xstart": pred_xstart,
}
def _predict_xstart_from_eps(self, x_t, t, eps):
assert x_t.shape == eps.shape
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
)
def _predict_xstart_from_xprev(self, x_t, t, xprev):
assert x_t.shape == xprev.shape
return ( # (xprev - coef2*x_t) / coef1
_extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
- _extract_into_tensor(
self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
)
* x_t
)
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- pred_xstart
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def _scale_timesteps(self, t):
if self.rescale_timesteps:
return t.float() * (1000.0 / self.num_timesteps)
return t
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
new_mean = (
p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
)
return new_mean
def condition_mean_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, t, p_mean_var, **model_kwargs)
new_mean = (
p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
)
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(
x, self._scale_timesteps(t), **model_kwargs
)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(
x_start=out["pred_xstart"], x_t=x, t=t
)
return out
def condition_score_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, p_mean_var, **model_kwargs)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(
x_start=out["pred_xstart"], x_t=x, t=t
)
return out
def p_sample(
self,
model,
x,
t,
sparse,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
const_noise=False,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
:param x: the current tensor at x_{t-1}.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
out = self.p_mean_variance(
model,
x,
t,
sparse,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
# print('const_noise', const_noise)
if const_noise:
noise = noise[[0]].repeat(x.shape[0], 1, 1, 1)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean(
cond_fn, out, x, t, model_kwargs=model_kwargs
)
# print('mean', out["mean"].shape, out["mean"])
# print('log_variance', out["log_variance"].shape, out["log_variance"])
# print('nonzero_mask', nonzero_mask.shape, nonzero_mask)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_with_grad(
self,
model,
x,
t,
sparse,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
:param x: the current tensor at x_{t-1}.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
with th.enable_grad():
x = x.detach().requires_grad_()
out = self.p_mean_variance(
model,
x,
t,
sparse,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean_with_grad(
cond_fn, out, x, t, model_kwargs=model_kwargs
)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"].detach()}
def p_sample_loop(
self,
model,
shape,
sparse=None,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
dump_steps=None,
const_noise=False,
):
"""
Generate samples from the model.
:param model: the model module.
:param shape: the shape of the samples, (N, C, H, W).
:param noise: if specified, the noise from the encoder to sample.
Should be of the same shape as `shape`.
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param device: if specified, the device to create the samples on.
If not specified, use a model parameter's device.
:param progress: if True, show a tqdm progress bar.
:param const_noise: If True, will noise all samples with the same noise throughout sampling
:return: a non-differentiable batch of samples.
"""
final = None
if dump_steps is not None:
dump = []
for i, sample in enumerate(
self.p_sample_loop_progressive(
model,
shape,
sparse=sparse,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
skip_timesteps=skip_timesteps,
init_image=init_image,
randomize_class=randomize_class,
cond_fn_with_grad=cond_fn_with_grad,
const_noise=const_noise,
)
):
if dump_steps is not None and i in dump_steps:
dump.append(deepcopy(sample["sample"]))
final = sample
if dump_steps is not None:
return dump
return final["sample"]
def p_sample_loop_progressive(
self,
model,
shape,
sparse=None,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
const_noise=False,
):
"""
Generate samples from the model and yield intermediate samples from
each timestep of diffusion.
Arguments are the same as p_sample_loop().
Returns a generator over dicts, where each dict is the return value of
p_sample().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
if skip_timesteps and init_image is None:
init_image = th.zeros_like(img)
indices = list(range(self.num_timesteps - skip_timesteps))[::-1]
if init_image is not None:
my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0]
img = self.q_sample(init_image, my_t, img)
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
if randomize_class and "y" in model_kwargs:
model_kwargs["y"] = th.randint(
low=0,
high=model.num_classes,
size=model_kwargs["y"].shape,
device=model_kwargs["y"].device,
)
with th.no_grad():
sample_fn = (
self.p_sample_with_grad if cond_fn_with_grad else self.p_sample
)
out = sample_fn(
model,
img,
t,
sparse,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
const_noise=const_noise,
)
yield out
img = out["sample"]
def ddim_sample(
self,
model,
x,
t,
sparse,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
out_orig = self.p_mean_variance(
model,
x,
t,
sparse,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(
cond_fn, out_orig, x, t, model_kwargs=model_kwargs
)
else:
out = out_orig
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta
* th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
* th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
# Equation 12.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev - sigma**2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out_orig["pred_xstart"]}
def ddim_sample_with_grad(
self,
model,
x,
t,
sparse,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
with th.enable_grad():
x = x.detach().requires_grad_()
out_orig = self.p_mean_variance(
model,
x,
t,
sparse,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score_with_grad(
cond_fn, out_orig, x, t, model_kwargs=model_kwargs
)
else:
out = out_orig
out["pred_xstart"] = out["pred_xstart"].detach()
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta
* th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
* th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
# Equation 12.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev - sigma**2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out_orig["pred_xstart"].detach()}
def ddim_reverse_sample(
self,
model,
x,
t,
sparse,
clip_denoised=True,
denoised_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t+1} from the model using DDIM reverse ODE.
"""
assert eta == 0.0, "Reverse ODE only for deterministic path"
out = self.p_mean_variance(
model,
x,
t,
sparse,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- out["pred_xstart"]
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
# Equation 12. reversed
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_next)
+ th.sqrt(1 - alpha_bar_next) * eps
)
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
self,
model,
shape,
sparse=None,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
dump_steps=None,
const_noise=False,
):
"""
Generate samples from the model using DDIM.
Same usage as p_sample_loop().
"""
if dump_steps is not None:
raise NotImplementedError()
if const_noise:
raise NotImplementedError()
final = None
for sample in self.ddim_sample_loop_progressive(
model,
shape,
sparse=sparse,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
eta=eta,
skip_timesteps=skip_timesteps,
init_image=init_image,
randomize_class=randomize_class,
cond_fn_with_grad=cond_fn_with_grad,
):
final = sample
return final["sample"]
def ddim_sample_loop_progressive(
self,
model,
shape,
sparse=None,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
):
"""
Use DDIM to sample from the model and yield intermediate samples from
each timestep of DDIM.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
if skip_timesteps and init_image is None:
init_image = th.zeros_like(img)
indices = list(range(self.num_timesteps - skip_timesteps))[::-1]
if init_image is not None:
my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0]
img = self.q_sample(init_image, my_t, img)
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
sample_fn = (
self.ddim_sample_with_grad
if cond_fn_with_grad
else self.ddim_sample
)
out = sample_fn(
model,
img,
t,
sparse,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
eta=eta,
)
yield out
img = out["sample"]
def plms_sample(
self,
model,
x,
t,
sparse=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
cond_fn_with_grad=False,
order=2,
old_out=None,
):
"""
Sample x_{t-1} from the model using Pseudo Linear Multistep.
Same usage as p_sample().
"""
if not int(order) or not 1 <= order <= 4:
raise ValueError("order is invalid (should be int from 1-4).")
def get_model_output(x, t):
with th.set_grad_enabled(cond_fn_with_grad and cond_fn is not None):
x = x.detach().requires_grad_() if cond_fn_with_grad else x
out_orig = self.p_mean_variance(
model,
x,
t,
sparse,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
if cond_fn_with_grad:
out = self.condition_score_with_grad(
cond_fn, out_orig, x, t, model_kwargs=model_kwargs
)
x = x.detach()
else:
out = self.condition_score(
cond_fn, out_orig, x, t, model_kwargs=model_kwargs
)
else:
out = out_orig
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
return eps, out, out_orig
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
eps, out, out_orig = get_model_output(x, t)
if order > 1 and old_out is None:
# Pseudo Improved Euler
old_eps = [eps]
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev) * eps
)
eps_2, _, _ = get_model_output(mean_pred, t - 1)
eps_prime = (eps + eps_2) / 2
pred_prime = self._predict_xstart_from_eps(x, t, eps_prime)
mean_pred = (
pred_prime * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev) * eps_prime
)
else:
# Pseudo Linear Multistep (Adams-Bashforth)
old_eps = old_out["old_eps"]
old_eps.append(eps)
cur_order = min(order, len(old_eps))
if cur_order == 1:
eps_prime = old_eps[-1]
elif cur_order == 2:
eps_prime = (3 * old_eps[-1] - old_eps[-2]) / 2
elif cur_order == 3:
eps_prime = (23 * old_eps[-1] - 16 * old_eps[-2] + 5 * old_eps[-3]) / 12
elif cur_order == 4:
eps_prime = (
55 * old_eps[-1]
- 59 * old_eps[-2]
+ 37 * old_eps[-3]
- 9 * old_eps[-4]
) / 24
else:
raise RuntimeError("cur_order is invalid.")
pred_prime = self._predict_xstart_from_eps(x, t, eps_prime)
mean_pred = (
pred_prime * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev) * eps_prime
)
if len(old_eps) >= order:
old_eps.pop(0)
nonzero_mask = (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
sample = mean_pred * nonzero_mask + out["pred_xstart"] * (1 - nonzero_mask)
return {
"sample": sample,
"pred_xstart": out_orig["pred_xstart"],
"old_eps": old_eps,
}
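# Editorial note: the cur_order branches above apply the standard
# Adams-Bashforth multistep coefficients to the model's epsilon predictions,
# e.g. for order 4:
#   eps' = (55 * eps_t - 59 * eps_{t-1} + 37 * eps_{t-2} - 9 * eps_{t-3}) / 24,
# as in the PLMS/PNDM sampler.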
def plms_sample_loop(
self,
model,
shape,
sparse=None,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
order=2,
):
"""
Generate samples from the model using Pseudo Linear Multistep.
Same usage as p_sample_loop().
"""
final = None
for sample in self.plms_sample_loop_progressive(
model,
shape,
sparse=sparse,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
skip_timesteps=skip_timesteps,
init_image=init_image,
randomize_class=randomize_class,
cond_fn_with_grad=cond_fn_with_grad,
order=order,
):
final = sample
return final["sample"]
def plms_sample_loop_progressive(
self,
model,
shape,
sparse=None,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
skip_timesteps=0,
init_image=None,
randomize_class=False,
cond_fn_with_grad=False,
order=2,
):
"""
Use PLMS to sample from the model and yield intermediate samples from each
timestep of PLMS.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
if skip_timesteps and init_image is None:
init_image = th.zeros_like(img)
indices = list(range(self.num_timesteps - skip_timesteps))[::-1]
if init_image is not None:
my_t = th.ones([shape[0]], device=device, dtype=th.long) * indices[0]
img = self.q_sample(init_image, my_t, img)
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
old_out = None
for i in indices:
t = th.tensor([i] * shape[0], device=device)
if randomize_class and "y" in model_kwargs:
model_kwargs["y"] = th.randint(
low=0,
high=model.num_classes,
size=model_kwargs["y"].shape,
device=model_kwargs["y"].device,
)
with th.no_grad():
out = self.plms_sample(
model,
img,
t,
sparse=sparse,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
cond_fn_with_grad=cond_fn_with_grad,
order=order,
old_out=old_out,
)
yield out
old_out = out
img = out["sample"]
def _vb_terms_bpd(
self, model, x_start, x_t, t, sparse=None, clip_denoised=True, model_kwargs=None
):
"""
Get a term for the variational lower-bound.
The resulting units are bits (rather than nats, as one might expect).
This allows for comparison to other papers.
:return: a dict with the following keys:
- 'output': a shape [N] tensor of NLLs or KLs.
- 'pred_xstart': the x_0 predictions.
"""
true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)
out = self.p_mean_variance(
model,
x_t,
t,
sparse=sparse,
clip_denoised=clip_denoised,
model_kwargs=model_kwargs,
)
kl = normal_kl(
true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
)
kl = mean_flat(kl) / np.log(2.0)
decoder_nll = -discretized_gaussian_log_likelihood(
x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
)
assert decoder_nll.shape == x_start.shape
decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
# At the first timestep return the decoder NLL,
# otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
output = th.where((t == 0), decoder_nll, kl)
return {"output": output, "pred_xstart": out["pred_xstart"]}
def training_losses(
self, model, x_start, t, sparse, model_kwargs=None, noise=None, dataset=None
):
pass
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(
mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
)
return mean_flat(kl_prior) / np.log(2.0)
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
"""
Compute the entire variational lower-bound, measured in bits-per-dim,
as well as other related quantities.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param clip_denoised: if True, clip denoised samples.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- total_bpd: the total variational lower-bound, per batch element.
- prior_bpd: the prior term in the lower-bound.
- vb: an [N x T] tensor of terms in the lower-bound.
- xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
- mse: an [N x T] tensor of epsilon MSEs for each timestep.
"""
device = x_start.device
batch_size = x_start.shape[0]
vb = []
xstart_mse = []
mse = []
for t in list(range(self.num_timesteps))[::-1]:
t_batch = th.tensor([t] * batch_size, device=device)
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
# Calculate VLB term at the current timestep
with th.no_grad():
out = self._vb_terms_bpd(
model,
x_start=x_start,
x_t=x_t,
t=t_batch,
clip_denoised=clip_denoised,
model_kwargs=model_kwargs,
)
vb.append(out["output"])
xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
mse.append(mean_flat((eps - noise) ** 2))
vb = th.stack(vb, dim=1)
xstart_mse = th.stack(xstart_mse, dim=1)
mse = th.stack(mse, dim=1)
prior_bpd = self._prior_bpd(x_start)
total_bpd = vb.sum(dim=1) + prior_bpd
return {
"total_bpd": total_bpd,
"prior_bpd": prior_bpd,
"vb": vb,
"xstart_mse": xstart_mse,
"mse": mse,
}
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
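# Editorial usage sketch (not part of the original module): construct a
# GaussianDiffusion from the cosine schedule above and apply one closed-form
# forward-noising step. Shapes are illustrative only.
if __name__ == "__main__":
    _betas = get_named_beta_schedule("cosine", 1000)
    _gd = GaussianDiffusion(
        dataset="amass",
        betas=_betas,
        model_mean_type=ModelMeanType.START_X,
        model_var_type=ModelVarType.FIXED_SMALL,
        loss_type=LossType.MSE,
    )
    _x0 = th.randn(2, 196, 132)  # [batch, frames, features]
    _t = th.randint(0, _gd.num_timesteps, (2,))
    _xt = _gd.q_sample(_x0, _t)
    print(_xt.shape)  # torch.Size([2, 196, 132])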
|
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import numpy as np
import torch
import torch.nn as nn
from model.networks import DiffMLP
class MetaModel(nn.Module):
def __init__(
self,
arch,
nfeats,
latent_dim=256,
num_layers=8,
dropout=0.1,
dataset="amass",
sparse_dim=54,
**kargs,
):
super().__init__()
self.arch = DiffMLP
self.dataset = dataset
self.input_feats = nfeats
self.latent_dim = latent_dim
self.num_layers = num_layers
self.dropout = dropout
self.sparse_dim = sparse_dim
self.cond_mask_prob = kargs.get("cond_mask_prob", 0.0)
self.input_process = nn.Linear(self.input_feats, self.latent_dim)
self.mlp = self.arch(
self.latent_dim, seq=kargs.get("input_motion_length"), num_layers=num_layers
)
self.embed_timestep = TimestepEmbeding(self.latent_dim)
self.sparse_process = nn.Linear(self.sparse_dim, self.latent_dim)
self.output_process = nn.Linear(self.latent_dim, self.input_feats)
def mask_cond_sparse(self, cond, force_mask=True):
bs, n, c = cond.shape
if force_mask:
return torch.zeros_like(cond)
elif self.training and self.cond_mask_prob > 0.0:
mask = torch.bernoulli(
torch.ones(bs, device=cond.device) * self.cond_mask_prob
).view(
bs, 1, 1
) # 1-> use null_cond, 0-> use real cond
return cond * (1.0 - mask)
else:
return cond
def forward(self, x, timesteps, sparse_emb, force_mask=False):
"""
x: [batch_size, nframes, nfeats], denoted x_t in the paper
sparse_emb: [batch_size, nframes, sparse_dim], the sparse features
timesteps: [batch_size] (int)
"""
emb = self.embed_timestep(timesteps)  # timestep embedding: [bs, 1, d]
# Pass the sparse signal to a FC
sparse_emb = self.sparse_process(
self.mask_cond_sparse(sparse_emb, force_mask=force_mask)
)
# Pass the input to a FC
x = self.input_process(x)
# Concat the sparse feature with input
x = torch.cat((sparse_emb, x), axis=-1)
output = self.mlp(x, emb)
# Pass the output to a FC and reshape the output
output = self.output_process(output)
return output
class TimestepEmbeding(nn.Module):
def __init__(self, d_model, max_len=5000):
super().__init__()
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, d_model, 2).float() * (-np.log(10000.0) / d_model)
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer("pe", pe)
def forward(self, timesteps):
return self.pe[timesteps]
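# Editorial usage sketch (not part of the original module): a shape check for
# MetaModel, with sizes borrowed from the data defaults used elsewhere in this
# repo (196 frames, 132 motion features, 54 sparse features); they are
# illustrative, not required.
if __name__ == "__main__":
    demo_model = MetaModel(
        arch="DiffMLP",  # note: the constructor currently hard-codes DiffMLP
        nfeats=132,
        latent_dim=256,
        num_layers=8,
        sparse_dim=54,
        input_motion_length=196,
    )
    demo_x = torch.randn(2, 196, 132)
    demo_sparse = torch.randn(2, 196, 54)
    demo_t = torch.randint(0, 1000, (2,))
    print(demo_model(demo_x, demo_t, demo_sparse).shape)  # torch.Size([2, 196, 132])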
|
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import torch.nn as nn
###############################
############ Layers ###########
###############################
class MLPblock(nn.Module):
def __init__(self, dim, seq0, seq1, first=False, w_embed=True):
super().__init__()
self.w_embed = w_embed
self.fc0 = nn.Conv1d(seq0, seq1, 1)
if self.w_embed:
if first:
self.conct = nn.Linear(dim * 2, dim)
else:
self.conct = nn.Identity()
self.emb_fc = nn.Linear(dim, dim)
self.fc1 = nn.Linear(dim, dim)
self.norm0 = nn.LayerNorm(dim)
self.norm1 = nn.LayerNorm(dim)
self.act = nn.SiLU()
def forward(self, inputs):
if self.w_embed:
x = inputs[0]
embed = inputs[1]
x = self.conct(x) + self.emb_fc(self.act(embed))
else:
x = inputs
x_ = self.norm0(x)
x_ = self.fc0(x_)
x_ = self.act(x_)
x = x + x_
x_ = self.norm1(x)
x_ = self.fc1(x_)
x_ = self.act(x_)
x = x + x_
if self.w_embed:
return x, embed
else:
return x
class BaseMLP(nn.Module):
def __init__(self, dim, seq, num_layers, w_embed=True):
super().__init__()
layers = []
for i in range(num_layers):
layers.append(
MLPblock(dim, seq, seq, first=i == 0 and w_embed, w_embed=w_embed)
)
self.mlps = nn.Sequential(*layers)
def forward(self, x):
x = self.mlps(x)
return x
###############################
########### Networks ##########
###############################
class DiffMLP(nn.Module):
def __init__(self, latent_dim=512, seq=98, num_layers=12):
super(DiffMLP, self).__init__()
self.motion_mlp = BaseMLP(dim=latent_dim, seq=seq, num_layers=num_layers)
def forward(self, motion_input, embed):
motion_feats = self.motion_mlp([motion_input, embed])[0]
return motion_feats
class PureMLP(nn.Module):
def __init__(
self, latent_dim=512, seq=98, num_layers=12, input_dim=54, output_dim=132
):
super(PureMLP, self).__init__()
self.input_fc = nn.Linear(input_dim, latent_dim)
self.motion_mlp = BaseMLP(
dim=latent_dim, seq=seq, num_layers=num_layers, w_embed=False
)
self.output_fc = nn.Linear(latent_dim, output_dim)
def forward(self, motion_input):
motion_feats = self.input_fc(motion_input)
motion_feats = self.motion_mlp(motion_feats)
motion_feats = self.output_fc(motion_feats)
return motion_feats
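# Editorial usage sketch (not part of the original module): quick shape checks
# for the two networks above; sizes are illustrative only.
if __name__ == "__main__":
    import torch  # demo-only import; the module itself only uses torch.nn
    demo_diff = DiffMLP(latent_dim=256, seq=196, num_layers=4)
    demo_x = torch.randn(2, 196, 512)  # concat of motion and sparse embeddings
    demo_emb = torch.randn(2, 1, 256)  # timestep embedding
    print(demo_diff(demo_x, demo_emb).shape)  # torch.Size([2, 196, 256])
    demo_pure = PureMLP(latent_dim=256, seq=196, num_layers=4, input_dim=54, output_dim=132)
    print(demo_pure(torch.randn(2, 196, 54)).shape)  # torch.Size([2, 196, 132])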
|
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
import glob
import os
import torch
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
class TrainDataset(Dataset):
def __init__(
self,
dataset,
mean,
std,
motions,
sparses,
input_motion_length=196,
train_dataset_repeat_times=1,
no_normalization=False,
):
self.dataset = dataset
self.mean = mean
self.std = std
self.motions = motions
self.sparses = sparses
self.train_dataset_repeat_times = train_dataset_repeat_times
self.no_normalization = no_normalization
self.input_motion_length = input_motion_length
def __len__(self):
return len(self.motions) * self.train_dataset_repeat_times
def inv_transform(self, data):
return data * self.std + self.mean
def __getitem__(self, idx):
motion = self.motions[idx % len(self.motions)]
sparse = self.sparses[idx % len(self.motions)]
seqlen = motion.shape[0]
if seqlen <= self.input_motion_length:
idx = 0
else:
idx = torch.randint(0, int(seqlen - self.input_motion_length), (1,))[0]
motion = motion[idx : idx + self.input_motion_length]
sparse = sparse[idx : idx + self.input_motion_length]
# Normalization
if not self.no_normalization:
motion = (motion - self.mean) / (self.std + 1e-8)
return motion.float(), sparse.float()
class TestDataset(Dataset):
def __init__(
self,
name,
mean,
std,
all_info,
filename_list,
normalize_sparse="none",
):
self.name = name
self.mean = mean
self.std = std
self.filename_list = filename_list
self.normalize_sparse = normalize_sparse
self.motions = []
self.sparses = []
self.body_params = []
self.head_motion = []
for i in all_info:
self.motions.append(i["rotation_local_full_gt_list"])
self.sparses.append(i["hmd_position_global_full_gt_list"])
self.body_params.append(i["body_parms_list"])
self.head_motion.append(i["head_global_trans_list"])
def __len__(self):
return len(self.motions)
def inv_transform(self, data):
return data * self.std + self.mean
def __getitem__(self, idx):
motion = self.motions[idx]
sparse = self.sparses[idx]
body_param = self.body_params[idx]
head_motion = self.head_motion[idx]
filename = self.filename_list[idx]
return (
motion,
sparse.unsqueeze(0),
body_param,
head_motion,
filename,
)
def get_mean_std_path(dataset):
return dataset + "_mean.pt", dataset + "_std.pt"
def get_motion(motion_list):
# rotation_local_full_gt_list : 6d rotation parameters
# hmd_position_global_full_gt_list : 3 joints(head, hands) 6d rotation/6d rotation velocity/global translation/global translation velocity
motions = [i["rotation_local_full_gt_list"] for i in motion_list]
sparses = [i["hmd_position_global_full_gt_list"] for i in motion_list]
return motions, sparses
def get_path(dataset_path, split):
data_list_path = []
parent_data_path = glob.glob(dataset_path + "/*")
for d in parent_data_path:
if os.path.isdir(d):
files = glob.glob(d + "/" + split + "/*pt")
data_list_path.extend(files)
return data_list_path
def load_data(dataset, dataset_path, split, **kwargs):
"""
Collect the data for the given split
Args:
- For test:
dataset : the name of the testing dataset
split : test or train
- For train:
dataset : the name of the training dataset
split : train or test
input_motion_length : the input motion length
Output:
- For test:
filename_list : List of all filenames in the dataset
motion_list : List containing N dictionaries, with
"hmd_position_global_full_gt_list" - sparse features of the 3 joints
"local_joint_parameters_gt_list" - body parameters Nx7[tx,ty,tz,rx,ry,rz] as the input of the human kinematic model
"head_global_trans_list" - Tx4x4 matrix which contains the global rotation and global translation of the head movement
mean : mean of train dataset
std : std of train dataset
- For train:
new_motions : list of motion sequences, each a rotation representation of the joints
new_sparses : list of sparse-feature sequences for the 3 joints (head, hands)
mean : mean of train dataset
std : std of train dataset
"""
if split == "test":
motion_list = get_path(dataset_path, split)
mean_path, std_path = get_mean_std_path(dataset)
filename_list = [
"-".join([i.split("/")[-3], i.split("/")[-1]]).split(".")[0]
for i in motion_list
]
motion_list = [torch.load(i) for i in tqdm(motion_list)]
mean = torch.load(os.path.join(dataset_path, mean_path))
std = torch.load(os.path.join(dataset_path, std_path))
return filename_list, motion_list, mean, std
assert split == "train"
assert (
"input_motion_length" in kwargs
), "Please specify the input_motion_length to load training dataset"
motion_list = get_path(dataset_path, split)
mean_path, std_path = get_mean_std_path(dataset)
input_motion_length = kwargs["input_motion_length"]
motion_list = [torch.load(i) for i in tqdm(motion_list)]
motions, sparses = get_motion(motion_list)
new_motions = []
new_sparses = []
for idx, motion in enumerate(motions):
if motion.shape[0] < input_motion_length: # Arbitrary choice
continue
new_sparses.append(sparses[idx])
new_motions.append(motions[idx])
if os.path.exists(os.path.join(dataset_path, mean_path)):
mean = torch.load(os.path.join(dataset_path, mean_path))
std = torch.load(os.path.join(dataset_path, std_path))
else:
tmp_data_list = torch.cat(new_motions, dim=0)
mean = tmp_data_list.mean(axis=0).float()
std = tmp_data_list.std(axis=0).float()
with open(os.path.join(dataset_path, mean_path), "wb") as f:
torch.save(mean, f)
with open(os.path.join(dataset_path, std_path), "wb") as f:
torch.save(std, f)
return new_motions, new_sparses, mean, std
def get_dataloader(
dataset,
split,
batch_size,
num_workers=32,
):
if split == "train":
shuffle = True
drop_last = True
num_workers = num_workers
else:
shuffle = False
drop_last = False
num_workers = 1
loader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
drop_last=drop_last,
persistent_workers=False,
)
return loader
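# Editorial usage sketch (not part of the original module): build a
# TrainDataset from synthetic tensors and wrap it with get_dataloader. Sizes
# mirror the defaults above but are otherwise arbitrary.
if __name__ == "__main__":
    demo_motions = [torch.randn(300, 132) for _ in range(4)]
    demo_sparses = [torch.randn(300, 54) for _ in range(4)]
    demo_mean, demo_std = torch.zeros(132), torch.ones(132)
    demo_set = TrainDataset(
        "amass", demo_mean, demo_std, demo_motions, demo_sparses, input_motion_length=196
    )
    demo_loader = get_dataloader(demo_set, "train", batch_size=2, num_workers=0)
    demo_motion, demo_sparse = next(iter(demo_loader))
    print(demo_motion.shape, demo_sparse.shape)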
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from setuptools import setup, find_packages
with open('README.md', 'r') as f:
long_description = f.read()
with open('requirements.txt', 'r') as f:
requirements = [line.strip() for line in f]
setup(
name='access',
version='0.2',
description='Controllable Sentence Simplification',
long_description=long_description,
long_description_content_type='text/markdown',
author='Louis Martin <[email protected]>',
url='https://github.com/facebookreasearch/access',
packages=find_packages(exclude=['resources']),
install_requires=requirements,
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import wraps
import multiprocessing
import random
import re
from joblib import Parallel, delayed
import torch
from access.text import to_words
from access.utils.helpers import (open_files, yield_lines, yield_lines_in_parallel, get_temp_filepath, delete_files,
get_temp_filepaths)
def apply_line_method_to_file(line_method, input_filepath):
output_filepath = get_temp_filepath()
with open(input_filepath, 'r') as input_file, open(output_filepath, 'w') as output_file:
for line in input_file:
transformed_line = line_method(line.rstrip('\n'))
if transformed_line is not None:
output_file.write(transformed_line + '\n')
return output_filepath
def replace_lrb_rrb(text):
text = re.sub(r'-lrb-', '(', text, flags=re.IGNORECASE)
text = re.sub(r'-rrb-', ')', text, flags=re.IGNORECASE)
text = re.sub(r'-lsb-', '[', text, flags=re.IGNORECASE)
text = re.sub(r'-rsb-', ']', text, flags=re.IGNORECASE)
text = re.sub(r'-lcb-', '{', text, flags=re.IGNORECASE)
text = re.sub(r'-rcb-', '}', text, flags=re.IGNORECASE)
return text
def replace_lrb_rrb_file(filepath):
return apply_line_method_to_file(replace_lrb_rrb, filepath)
def to_lrb_rrb(text):
# TODO: Very basic
text = re.sub(r'((^| ))\( ', r'\1-lrb- ', text)
text = re.sub(r' \)((^| ))', r' -rrb-\1', text)
return text
def replace_back_quotes(text):
return text.replace('`', "'")
def replace_double_quotes(text):
return text.replace("''", '"')
def normalize_quotes(text):
return replace_double_quotes(replace_back_quotes(text))
def to_lrb_rrb_file(input_filepath):
return apply_line_method_to_file(to_lrb_rrb, input_filepath)
def lowercase_file(filepath):
return apply_line_method_to_file(lambda line: line.lower(), filepath)
def concatenate_files(input_filepaths, output_filepath):
with open(output_filepath, 'w') as output_f:
for input_file in input_filepaths:
with open(input_file, 'r') as input_f:
for line in input_f:
output_f.write(line)
def split_file(input_filepath, output_filepaths, round_robin=False):
if not round_robin:
raise NotImplementedError('Splitting files is only implemented as round robin.')
with open_files(output_filepaths, 'w') as files:
# We write each line to a different file in a round robin fashion
for i, line in enumerate(yield_lines(input_filepath)):
files[i % len(output_filepaths)].write(line + '\n')
def merge_files(input_filepaths, output_filepath, round_robin=False):
if not round_robin:
return concatenate_files(input_filepaths, output_filepath)
with open(output_filepath, 'w') as f:
for lines in yield_lines_in_parallel(input_filepaths, strict=False):
for line in lines:
if line is None:
return
f.write(line + '\n')
def get_real_n_jobs(n_jobs):
n_cpus = multiprocessing.cpu_count()
if n_jobs < 0:
# Adopt same logic as joblib
n_jobs = n_cpus + 1 + n_jobs
if n_jobs > n_cpus:
print(f'Setting n_jobs={n_jobs} > n_cpus={n_cpus}, setting n_jobs={n_cpus}')
n_jobs = n_cpus
assert 0 < n_jobs <= n_cpus
return n_jobs
def get_parallel_file_pair_preprocessor(file_pair_preprocessor, n_jobs):
if n_jobs == 1:
return file_pair_preprocessor
n_jobs = get_real_n_jobs(n_jobs)
@wraps(file_pair_preprocessor)
def parallel_file_pair_preprocessor(complex_filepath, simple_filepath, output_complex_filepath,
output_simple_filepath):
temp_complex_filepaths = get_temp_filepaths(n_jobs)
temp_simple_filepaths = get_temp_filepaths(n_jobs)
split_file(complex_filepath, temp_complex_filepaths, round_robin=True)
split_file(simple_filepath, temp_simple_filepaths, round_robin=True)
preprocessed_temp_complex_filepaths = get_temp_filepaths(n_jobs)
preprocessed_temp_simple_filepaths = get_temp_filepaths(n_jobs)
tasks = [
delayed(file_pair_preprocessor)(*paths)
for paths in zip(temp_complex_filepaths, temp_simple_filepaths, preprocessed_temp_complex_filepaths,
preprocessed_temp_simple_filepaths)
]
Parallel(n_jobs=n_jobs)(tasks)
merge_files(preprocessed_temp_complex_filepaths, output_complex_filepath, round_robin=True)
merge_files(preprocessed_temp_simple_filepaths, output_simple_filepath, round_robin=True)
delete_files(temp_complex_filepaths)
delete_files(temp_simple_filepaths)
delete_files(preprocessed_temp_complex_filepaths)
delete_files(preprocessed_temp_simple_filepaths)
return parallel_file_pair_preprocessor
def word_shuffle(words, max_swap=3):
noise = torch.rand(len(words)).mul_(max_swap)
permutation = torch.arange(len(words)).float().add_(noise).sort()[1]
return [words[i] for i in permutation]
def word_dropout(words, dropout_prob=0.1):
keep = torch.rand(len(words))
dropped_out_words = [word for i, word in enumerate(words) if keep[i] > dropout_prob]
if len(dropped_out_words) == 0:
return [words[random.randint(0, len(words) - 1)]]
return dropped_out_words
def word_blank(words, blank_prob=0.1):
keep = torch.rand(len(words))
return [word if keep[i] > blank_prob else '<BLANK>' for i, word in enumerate(words)]
def add_noise(sentence):
words = to_words(sentence)
words = word_shuffle(words, max_swap=3)
words = word_dropout(words, dropout_prob=0.1)
words = word_blank(words, blank_prob=0.1)
return ' '.join(words)
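# Editorial usage sketch (not part of the original module): the word-level
# noise helpers above operate on plain token lists, independently of the rest
# of the pipeline.
if __name__ == '__main__':
    demo_words = 'the quick brown fox jumps over the lazy dog'.split()
    print(word_shuffle(demo_words, max_swap=3))
    print(word_dropout(demo_words, dropout_prob=0.1))
    print(word_blank(demo_words, blank_prob=0.1))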
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import wraps
from pathlib import Path
import shutil
import tempfile
from imohash import hashfile
from access.fairseq.base import fairseq_generate
from access.preprocessors import ComposedPreprocessor, load_preprocessors
from access.utils.helpers import count_lines
def memoize_simplifier(simplifier):
memo = {}
@wraps(simplifier)
def wrapped(complex_filepath, pred_filepath):
complex_filehash = hashfile(complex_filepath, hexdigest=True)
previous_pred_filepath = memo.get(complex_filehash)
if previous_pred_filepath is not None and Path(previous_pred_filepath).exists():
assert count_lines(complex_filepath) == count_lines(previous_pred_filepath)
# Reuse previous prediction
shutil.copyfile(previous_pred_filepath, pred_filepath)
else:
simplifier(complex_filepath, pred_filepath)
# Save prediction
memo[complex_filehash] = pred_filepath
return wrapped
def get_fairseq_simplifier(exp_dir, reload_preprocessors=False, **kwargs):
'''Method factory'''
@memoize_simplifier
def fairseq_simplifier(complex_filepath, output_pred_filepath):
# Trailing spaces for markdown formatting
print('simplifier_type="fairseq_simplifier" ')
print(f'exp_dir="{exp_dir}" ')
fairseq_generate(complex_filepath, output_pred_filepath, exp_dir, **kwargs)
preprocessors = None
if reload_preprocessors:
preprocessors = load_preprocessors(exp_dir)
if preprocessors is not None:
fairseq_simplifier = get_preprocessed_simplifier(fairseq_simplifier, preprocessors)
return fairseq_simplifier
def get_preprocessed_simplifier(simplifier, preprocessors):
composed_preprocessor = ComposedPreprocessor(preprocessors)
@memoize_simplifier
@wraps(simplifier)
def preprocessed_simplifier(complex_filepath, output_pred_filepath):
print(f'preprocessors={preprocessors}')
preprocessed_complex_filepath = tempfile.mkstemp()[1]
composed_preprocessor.encode_file(complex_filepath, preprocessed_complex_filepath)
preprocessed_output_pred_filepath = tempfile.mkstemp()[1]
simplifier(preprocessed_complex_filepath, preprocessed_output_pred_filepath)
composed_preprocessor.decode_file(preprocessed_output_pred_filepath,
output_pred_filepath,
encoder_filepath=complex_filepath)
preprocessed_simplifier.__name__ = f'{preprocessed_simplifier.__name__}_{composed_preprocessor.get_suffix()}'
return preprocessed_simplifier
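# Usage sketch (the experiment directory and file names below are hypothetical):
# simplifier = get_fairseq_simplifier('experiments/my_exp', reload_preprocessors=True)
# simplifier('complex.txt', 'pred.txt')
# With reload_preprocessors=True, the preprocessors pickled in the experiment directory are applied
# around generation (encode the input, decode the output). @memoize_simplifier additionally caches
# predictions per complex-file hash, so a second call on the same input just copies the previous
# prediction file.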
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import lru_cache
import Levenshtein
import numpy as np
from access.resources.paths import FASTTEXT_EMBEDDINGS_PATH
from access.resources.prepare import prepare_fasttext_embeddings
from access.text import (to_words, remove_punctuation_tokens, remove_stopwords, spacy_process)
from access.utils.helpers import yield_lines
@lru_cache(maxsize=1)
def get_word2rank(vocab_size=np.inf):
prepare_fasttext_embeddings()
# TODO: Decrease vocab size or load from smaller file
word2rank = {}
line_generator = yield_lines(FASTTEXT_EMBEDDINGS_PATH)
next(line_generator) # Skip the first line (header)
for i, line in enumerate(line_generator):
if (i + 1) > vocab_size:
break
word = line.split(' ')[0]
word2rank[word] = i
return word2rank
def get_rank(word):
return get_word2rank().get(word, len(get_word2rank()))
def get_log_rank(word):
return np.log(1 + get_rank(word))
def get_lexical_complexity_score(sentence):
words = to_words(remove_stopwords(remove_punctuation_tokens(sentence)))
words = [word for word in words if word in get_word2rank()]
if len(words) == 0:
return np.log(1 + len(get_word2rank())) # TODO: This is completely arbitrary
return np.quantile([get_log_rank(word) for word in words], 0.75)
def get_levenshtein_similarity(complex_sentence, simple_sentence):
return Levenshtein.ratio(complex_sentence, simple_sentence)
def get_dependency_tree_depth(sentence):
def get_subtree_depth(node):
if len(list(node.children)) == 0:
return 0
return 1 + max([get_subtree_depth(child) for child in node.children])
tree_depths = [get_subtree_depth(spacy_sentence.root) for spacy_sentence in spacy_process(sentence).sents]
if len(tree_depths) == 0:
return 0
return max(tree_depths)
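# Illustrative usage (return values depend on the downloaded fastText vectors and the spaCy model,
# so no exact numbers are given here):
# get_levenshtein_similarity('The cat sat on the mat.', 'The cat sat.')  # character-level ratio in [0, 1]
# get_lexical_complexity_score('The cat sat on the mat.')  # 75th percentile of the words' log ranks
# get_dependency_tree_depth('The cat sat on the mat.')  # depth of the deepest dependency parse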
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from abc import ABC
from functools import wraps, lru_cache
import hashlib
from pathlib import Path
import dill as pickle
import re
import shutil
from nevergrad.instrumentation import var
import numpy as np
import sentencepiece as spm
from access.feature_extraction import (get_lexical_complexity_score, get_levenshtein_similarity,
get_dependency_tree_depth)
from access.resources.paths import VARIOUS_DIR, get_data_filepath
from access.utils.helpers import (write_lines_in_parallel, yield_lines_in_parallel, add_dicts, get_default_args,
get_temp_filepath, safe_division, count_lines)
SPECIAL_TOKEN_REGEX = r'<[a-zA-Z\-_\d\.]+>'
PREPROCESSORS_REGISTRY = {}
def get_preprocessor_by_name(preprocessor_name):
return PREPROCESSORS_REGISTRY[preprocessor_name]
def get_preprocessors(preprocessor_kwargs):
preprocessors = []
for preprocessor_name, kwargs in preprocessor_kwargs.items():
preprocessors.append(get_preprocessor_by_name(preprocessor_name)(**kwargs))
return preprocessors
def extract_special_tokens(sentence):
    '''Split any number of special tokens off the beginning of the sentence; returns (special_tokens, rest)'''
match = re.match(fr'(^(?:{SPECIAL_TOKEN_REGEX} *)+) *(.*)$', sentence)
if match is None:
return '', sentence
special_tokens, sentence = match.groups()
return special_tokens.strip(), sentence
def remove_special_tokens(sentence):
return extract_special_tokens(sentence)[1]
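# Doctest-style illustration of the two helpers above:
# >>> extract_special_tokens('<LENGTHRATIO_0.8> <LEVENSHTEIN_0.75> The cat sat.')
# ('<LENGTHRATIO_0.8> <LEVENSHTEIN_0.75>', 'The cat sat.')
# >>> remove_special_tokens('<LENGTHRATIO_0.8> The cat sat.')
# 'The cat sat.'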
def store_args(constructor):
@wraps(constructor)
def wrapped(self, *args, **kwargs):
if not hasattr(self, 'args') or not hasattr(self, 'kwargs'):
# TODO: Default args are not overwritten if provided as args
self.args = args
self.kwargs = add_dicts(get_default_args(constructor), kwargs)
return constructor(self, *args, **kwargs)
return wrapped
def dump_preprocessors(preprocessors, dir_path):
with open(Path(dir_path) / 'preprocessors.pickle', 'wb') as f:
pickle.dump(preprocessors, f)
def load_preprocessors(dir_path):
path = Path(dir_path) / 'preprocessors.pickle'
if not path.exists():
return None
with open(path, 'rb') as f:
return pickle.load(f)
class AbstractPreprocessor(ABC):
def __init_subclass__(cls, **kwargs):
'''Register all children in registry'''
super().__init_subclass__(**kwargs)
PREPROCESSORS_REGISTRY[cls.__name__] = cls
def __repr__(self):
args = getattr(self, 'args', ())
kwargs = getattr(self, 'kwargs', {})
args_repr = [repr(arg) for arg in args]
kwargs_repr = [f'{k}={repr(v)}' for k, v in sorted(kwargs.items(), key=lambda kv: kv[0])]
args_kwargs_str = ', '.join(args_repr + kwargs_repr)
return f'{self.__class__.__name__}({args_kwargs_str})'
def get_hash_string(self):
return self.__class__.__name__
def get_hash(self):
return hashlib.md5(self.get_hash_string().encode()).hexdigest()
def get_nevergrad_variables(self):
return {}
@property
def prefix(self):
return self.__class__.__name__.replace('Preprocessor', '')
def fit(self, complex_filepath, simple_filepath):
pass
def encode_sentence(self, sentence, encoder_sentence=None):
raise NotImplementedError
def decode_sentence(self, sentence, encoder_sentence=None):
raise NotImplementedError
def encode_sentence_pair(self, complex_sentence, simple_sentence):
if complex_sentence is not None:
complex_sentence = self.encode_sentence(complex_sentence)
if simple_sentence is not None:
simple_sentence = self.encode_sentence(simple_sentence)
return complex_sentence, simple_sentence
def encode_file(self, input_filepath, output_filepath, encoder_filepath=None):
if encoder_filepath is None:
# We will use an empty temporary file which will yield None for each line
encoder_filepath = get_temp_filepath(create=True)
with open(output_filepath, 'w') as f:
for input_line, encoder_line in yield_lines_in_parallel([input_filepath, encoder_filepath], strict=False):
f.write(self.encode_sentence(input_line, encoder_line) + '\n')
def decode_file(self, input_filepath, output_filepath, encoder_filepath=None):
if encoder_filepath is None:
# We will use an empty temporary file which will yield None for each line
encoder_filepath = get_temp_filepath(create=True)
with open(output_filepath, 'w') as f:
for encoder_sentence, input_sentence in yield_lines_in_parallel([encoder_filepath, input_filepath],
strict=False):
decoded_sentence = self.decode_sentence(input_sentence, encoder_sentence=encoder_sentence)
f.write(decoded_sentence + '\n')
def encode_file_pair(self, complex_filepath, simple_filepath, output_complex_filepath, output_simple_filepath):
'''Jointly encode a complex file and a simple file (can be aligned or not)'''
with write_lines_in_parallel([output_complex_filepath, output_simple_filepath], strict=False) as output_files:
for complex_line, simple_line in yield_lines_in_parallel([complex_filepath, simple_filepath], strict=False):
output_files.write(self.encode_sentence_pair(complex_line, simple_line))
class ComposedPreprocessor(AbstractPreprocessor):
@store_args
def __init__(self, preprocessors, sort=False):
if preprocessors is None:
preprocessors = []
if sort:
# Make sure preprocessors are always in the same order
preprocessors = sorted(preprocessors, key=lambda preprocessor: preprocessor.__class__.__name__)
self.preprocessors = preprocessors
def get_hash_string(self):
preprocessors_hash_strings = [preprocessor.get_hash_string() for preprocessor in self.preprocessors]
return f'ComposedPreprocessor(preprocessors={preprocessors_hash_strings})'
def get_suffix(self):
return '_'.join([p.prefix.lower() for p in self.preprocessors])
    def fit(self, complex_filepath, simple_filepath):
        # No-op: every sub-preprocessor is already fitted on construction (e.g. SentencePiecePreprocessor
        # trains its model in __init__), so there is nothing left to do here.
        pass
def encode_sentence(self, sentence, encoder_sentence=None):
for preprocessor in self.preprocessors:
sentence = preprocessor.encode_sentence(sentence, encoder_sentence)
return sentence
def decode_sentence(self, sentence, encoder_sentence=None):
for preprocessor in self.preprocessors:
sentence = preprocessor.decode_sentence(sentence, encoder_sentence)
return sentence
def encode_file(self, input_filepath, output_filepath, encoder_filepath=None):
for preprocessor in self.preprocessors:
intermediary_output_filepath = get_temp_filepath()
preprocessor.encode_file(input_filepath, intermediary_output_filepath, encoder_filepath)
input_filepath = intermediary_output_filepath
shutil.copyfile(input_filepath, output_filepath)
def decode_file(self, input_filepath, output_filepath, encoder_filepath=None):
for preprocessor in self.preprocessors:
intermediary_output_filepath = get_temp_filepath()
preprocessor.decode_file(input_filepath, intermediary_output_filepath, encoder_filepath)
input_filepath = intermediary_output_filepath
shutil.copyfile(input_filepath, output_filepath)
def encode_file_pair(self, complex_filepath, simple_filepath, output_complex_filepath, output_simple_filepath):
for preprocessor in self.preprocessors:
intermediary_output_complex_filepath = get_temp_filepath()
intermediary_output_simple_filepath = get_temp_filepath()
preprocessor.encode_file_pair(complex_filepath, simple_filepath, intermediary_output_complex_filepath,
intermediary_output_simple_filepath)
complex_filepath = intermediary_output_complex_filepath
simple_filepath = intermediary_output_simple_filepath
shutil.copyfile(complex_filepath, output_complex_filepath)
shutil.copyfile(simple_filepath, output_simple_filepath)
def encode_sentence_pair(self, complex_sentence, simple_sentence):
for preprocessor in self.preprocessors:
complex_sentence, simple_sentence = preprocessor.encode_sentence_pair(complex_sentence, simple_sentence)
return complex_sentence, simple_sentence
class FeaturePreprocessor(AbstractPreprocessor):
    '''Prepend a bucketed feature token (e.g. '<LEVENSHTEIN_0.8>') to the sentence'''
@store_args
def __init__(self, feature_name, get_feature_value, get_target_feature_value, bucket_size=0.05, noise_std=0):
self.get_feature_value = get_feature_value
self.get_target_feature_value = get_target_feature_value
self.bucket_size = bucket_size
self.noise_std = noise_std
self.feature_name = feature_name.upper()
def get_hash_string(self):
return (f'{self.__class__.__name__}(feature_name={repr(self.feature_name)}, bucket_size={self.bucket_size},'
f'noise_std={self.noise_std})')
def bucketize(self, value):
'''Round value to bucket_size to reduce the number of different values'''
return round(round(value / self.bucket_size) * self.bucket_size, 10)
def add_noise(self, value):
return value + np.random.normal(0, self.noise_std)
def get_feature_token(self, feature_value):
return f'<{self.feature_name}_{feature_value}>'
def encode_sentence(self, sentence, encoder_sentence=None):
desired_feature = self.bucketize(self.get_target_feature_value(remove_special_tokens(sentence)))
return f'{self.get_feature_token(desired_feature)} {sentence}'
def decode_sentence(self, sentence, encoder_sentence=None):
return sentence
def encode_sentence_pair(self, complex_sentence, simple_sentence):
feature = self.bucketize(
self.add_noise(
self.get_feature_value(remove_special_tokens(complex_sentence),
remove_special_tokens(simple_sentence))))
return f'{self.get_feature_token(feature)} {complex_sentence}', simple_sentence
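# Example of the resulting control token (values illustrative): with feature_name='LEVENSHTEIN',
# bucket_size=0.05 and a computed similarity of 0.73, encode_sentence_pair prepends
# '<LEVENSHTEIN_0.75> ' to the complex sentence at training time; at inference time
# encode_sentence prepends the target value instead, e.g. '<LEVENSHTEIN_0.8>'.
# decode_sentence is a no-op because the token is only ever prepended to the complex side.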
class LevenshteinPreprocessor(FeaturePreprocessor):
@store_args
def __init__(self, target_ratio=0.8, bucket_size=0.05, noise_std=0):
self.target_ratio = target_ratio
super().__init__(self.prefix.upper(), self.get_feature_value, self.get_target_feature_value, bucket_size,
noise_std)
def get_nevergrad_variables(self):
return {'target_ratio': var.OrderedDiscrete(np.arange(0.4, 1 + 1e-6, self.bucket_size))}
def get_feature_value(self, complex_sentence, simple_sentence):
return get_levenshtein_similarity(complex_sentence, simple_sentence)
def get_target_feature_value(self, complex_sentence):
return self.target_ratio
class RatioPreprocessor(FeaturePreprocessor):
@store_args
def __init__(self, feature_extractor, target_ratio=0.8, bucket_size=0.05, noise_std=0):
self.feature_extractor = feature_extractor
self.target_ratio = target_ratio
super().__init__(self.prefix.upper(), self.get_feature_value, self.get_target_feature_value, bucket_size,
noise_std)
def get_nevergrad_variables(self):
return {'target_ratio': var.OrderedDiscrete(np.arange(0.4, 1.4 + 1e-6, self.bucket_size))}
def get_feature_value(self, complex_sentence, simple_sentence):
return min(safe_division(self.feature_extractor(simple_sentence), self.feature_extractor(complex_sentence)), 2)
def get_target_feature_value(self, complex_sentence):
return self.target_ratio
class LengthRatioPreprocessor(RatioPreprocessor):
@store_args
def __init__(self, *args, **kwargs):
super().__init__(len, *args, **kwargs)
class WordRankRatioPreprocessor(RatioPreprocessor):
@store_args
def __init__(self, *args, **kwargs):
super().__init__(get_lexical_complexity_score, *args, **kwargs)
class DependencyTreeDepthRatioPreprocessor(RatioPreprocessor):
@store_args
def __init__(self, *args, **kwargs):
super().__init__(get_dependency_tree_depth, *args, **kwargs)
class SentencePiecePreprocessor(AbstractPreprocessor):
@store_args
def __init__(self, vocab_size=10000, input_filepaths=None):
self.vocab_size = vocab_size
self.sentencepiece_model_path = VARIOUS_DIR / f'sentencepiece_model/sentencepiece_model_{self.vocab_size}.model'
self.input_filepaths = input_filepaths
if self.input_filepaths is None:
self.input_filepaths = [
get_data_filepath('wikilarge', 'train', 'complex'),
get_data_filepath('wikilarge', 'train', 'simple')
]
self.learn_sentencepiece()
@property
@lru_cache(maxsize=1)
def sp(self):
'''
        We need to use a property because SentencePieceProcessor cannot be pickled:
> pickle.dumps(spm.SentencePieceProcessor())
----> TypeError: can't pickle SwigPyObject objects
'''
sp = spm.SentencePieceProcessor()
sp.Load(str(self.sentencepiece_model_path))
return sp
def get_hash_string(self):
return f'{self.__class__.__name__}(vocab_size={self.vocab_size})'
def learn_sentencepiece(self):
if self.sentencepiece_model_path.exists():
return
self.sentencepiece_model_path.parent.mkdir(parents=True, exist_ok=True)
sentencepiece_model_prefix = self.sentencepiece_model_path.parent / self.sentencepiece_model_path.stem
args_str = ' '.join([
f'--input={",".join([str(path) for path in self.input_filepaths])}',
f'--model_prefix={sentencepiece_model_prefix}',
f'--vocab_size={self.vocab_size}',
])
max_lines = 10**6
if sum([count_lines(filepath) for filepath in self.input_filepaths]) > max_lines:
args_str += f' --input_sentence_size={max_lines} --shuffle_input_sentence=true'
spm.SentencePieceTrainer.Train(args_str)
def fit(self, complex_filepath, simple_filepath):
# Args are not used
self.learn_sentencepiece()
def encode_sentence(self, sentence, encoder_sentence=None):
        # TODO: Do we really need to extract the special tokens here?
special_tokens, sentence = extract_special_tokens(sentence)
encoded_sentence = ' '.join(self.sp.EncodeAsPieces(sentence))
if special_tokens != '':
encoded_sentence = f'{special_tokens} {encoded_sentence}'
return encoded_sentence
def decode_sentence(self, sentence, encoder_sentence=None):
return self.sp.DecodePieces(sentence.split(' '))
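# Sketch of how these preprocessors are typically composed (parameter values and file names are
# illustrative, not prescriptive):
# preprocessors = [
#     LengthRatioPreprocessor(target_ratio=0.95),
#     LevenshteinPreprocessor(target_ratio=0.75),
#     WordRankRatioPreprocessor(target_ratio=0.75),
#     DependencyTreeDepthRatioPreprocessor(target_ratio=0.8),
#     SentencePiecePreprocessor(vocab_size=10000),
# ]
# composed = ComposedPreprocessor(preprocessors)
# composed.encode_file_pair('complex.txt', 'simple.txt', 'complex.enc', 'simple.enc')
# dump_preprocessors(preprocessors, 'experiments/my_exp')  # so they can be reloaded at inference time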
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import lru_cache
import re
from string import punctuation
from nltk.tokenize.nist import NISTTokenizer
from nltk.corpus import stopwords as nltk_stopwords
import spacy
# TODO: #language_specific
stopwords = set(nltk_stopwords.words('english'))
@lru_cache(maxsize=100) # To speed up subsequent calls
def word_tokenize(sentence):
tokenizer = NISTTokenizer()
sentence = ' '.join(tokenizer.tokenize(sentence))
    # Rejoin special tokens that were split by the tokenizer by mistake, e.g. "< PERSON _ 1 >" -> "<PERSON_1>"
for match in re.finditer(r'< (?:[A-Z]+ _ )+\d+ >', sentence):
sentence = sentence.replace(match.group(), ''.join(match.group().split()))
return sentence
def to_words(sentence):
return sentence.split()
def remove_punctuation_characters(text):
return ''.join([char for char in text if char not in punctuation])
@lru_cache(maxsize=1000)
def is_punctuation(word):
return remove_punctuation_characters(word) == ''
@lru_cache(maxsize=100)
def remove_punctuation_tokens(text):
return ' '.join([w for w in to_words(text) if not is_punctuation(w)])
def remove_stopwords(text):
return ' '.join([w for w in to_words(text) if w.lower() not in stopwords])
@lru_cache(maxsize=1)
def get_spacy_model():
model = 'en_core_web_sm'
if not spacy.util.is_package(model):
spacy.cli.download(model)
spacy.cli.link(model, model, force=True, model_path=spacy.util.get_package_path(model))
    return spacy.load(model)  # the model can also be installed manually: python -m spacy download en_core_web_sm
@lru_cache(maxsize=10**6)
def spacy_process(text):
return get_spacy_model()(str(text))
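# Illustrative usage (exact tokenization depends on the installed NLTK data and spaCy model):
# word_tokenize('Hello, <PERSON_1>!')  # special tokens such as '<PERSON_1>' are re-joined after tokenization
# remove_punctuation_tokens('Hello , world !')  # -> 'Hello world'
# remove_stopwords('The cat sat on the mat')  # -> 'cat sat mat'
# spacy_process('The cat sat on the mat.').sents  # parsed sentences, as used for dependency tree depth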
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from itertools import product
from pathlib import Path
REPO_DIR = Path(__file__).resolve().parent.parent.parent
EXP_DIR = REPO_DIR / 'experiments'
RESOURCES_DIR = REPO_DIR / 'resources'
DATASETS_DIR = RESOURCES_DIR / 'datasets'
VARIOUS_DIR = RESOURCES_DIR / 'various'
FASTTEXT_EMBEDDINGS_PATH = VARIOUS_DIR / 'fasttext-vectors/wiki.en.vec'
MODELS_DIR = RESOURCES_DIR / 'models'
BEST_MODEL_DIR = MODELS_DIR / 'best_model'
LANGUAGES = ['complex', 'simple']
PHASES = ['train', 'valid', 'test']
def get_dataset_dir(dataset):
return DATASETS_DIR / dataset
def get_data_filepath(dataset, phase, language, i=None):
suffix = '' # Create suffix e.g. for multiple references
if i is not None:
suffix = f'.{i}'
filename = f'{dataset}.{phase}.{language}{suffix}'
return get_dataset_dir(dataset) / filename
def get_filepaths_dict(dataset):
return {(phase, language): get_data_filepath(dataset, phase, language)
for phase, language in product(PHASES, LANGUAGES)}
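# Minimal sketch of how the path helpers resolve, derived only from the constants above:
if __name__ == '__main__':
    # e.g. <repo>/resources/datasets/wikilarge/wikilarge.train.complex
    print(get_data_filepath('wikilarge', 'train', 'complex'))
    # Maps every (phase, language) combination of a dataset to its filepath
    print(get_filepaths_dict('wikilarge'))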
|