        tot_ep += 1
        print(f"Task success: {task_success} (wandb_step: {wandb.run.step}). Current successes: {sum(successes)}/{len(successes)}")

    # aggregate the per-episode metrics of this scene and log the scene-level means
    scene_logs = calculcate_metric_means({scene_id: episode_infos})
    wandb.log({f"{scene_id}_{k}": v for k, v in scene_logs[scene_id].items()})

    high_level_env.close()
    return episode_infos, tot_ep

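# Hypothetical stand-in for calculcate_metric_means(), which this script imports from
# elsewhere in the repository. The sketch below only illustrates the assumed contract of
# the call above -- mapping {scene_id: [per-episode info dicts]} to
# {scene_id: {metric: mean over episodes}} -- and is not the repository's implementation.
def _example_metric_means(episode_infos_by_scene):
    means = {}
    for scene, infos in episode_infos_by_scene.items():
        # only average metrics that are numeric in the episode info dicts
        numeric_keys = {k for info in infos for k, v in info.items()
                        if isinstance(v, (int, float, bool))}
        means[scene] = {k: float(np.mean([info[k] for info in infos if k in info]))
                        for k in numeric_keys}
    return means
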
def main():
    np.set_printoptions(precision=3, suppress=True)
    config_file = get_config("moma_llm.yaml")
    # NOTE: igibson will reload the config file, so changes here won't be reflected! Just for wandb logging
    cfg = parse_config(config_file)
    if cfg["seed"] > 0:
        np.random.seed(cfg["seed"])

    if cfg["datasplit"] == "train":
        scene_ids = TRAINING_SCENES
    elif cfg["datasplit"] == "test":
        scene_ids = TEST_SCENES
    else:
        raise ValueError(f"Unknown datasplit {cfg['datasplit']}")
    cfg.update({"scene_ids": scene_ids, "agent": cfg["agent"]})

    wandb.init(project="[scene-llm]",
               entity="robot-learning-lab",
               config=cfg,
               mode="online" if cfg["wandb"] else "disabled",
               # name=f"{agent}"
               )
    # copy config file to wandb run dir, so modifications to the main config file won't affect current runs
    new_config_file = Path(wandb.run.dir) / Path(config_file).name
    shutil.copy(config_file, new_config_file)
    config_file = str(new_config_file)

    # evaluate every scene in the chosen datasplit and collect its per-episode infos
    episode_infos = defaultdict(list)
    tot_ep = 0
    if isinstance(scene_ids, str):
        scene_ids = [scene_ids]
    for scene_id in scene_ids:
        infos, tot_ep = evaluate_scene(config_file=config_file, cfg=cfg, scene_id=scene_id, tot_ep=tot_ep)
        episode_infos[scene_id] = infos

    log_summary_table(episode_infos=episode_infos)
    plot_efficiency_curves(episode_infos=episode_infos, max_hl_steps=cfg["max_high_level_steps"])
    wandb.run.finish()
    print("Done!")


if __name__ == "__main__":
    main()
# <FILESEP>
from typing import List, Optional

from pydantic import BaseModel, Field


class VideoSegment(BaseModel):
    path: str = Field(description="Path to the video segment")
    start: float = Field(description="Start time of the video segment")
    end: float = Field(description="End time of the video segment")
    start_frame: Optional[int] = Field(default=None, description="Start frame of the video segment")
    end_frame: Optional[int] = Field(default=None, description="End frame of the video segment")

    def dimensions(self):
        # read the frame size from the video file via OpenCV
        import cv2
        cap = cv2.VideoCapture(self.path)
        width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        cap.release()
        return width, height

    def duration(self):
        return self.end - self.start

    def fps(self):
        # read the frame rate from the video file via OpenCV
        import cv2
        cap = cv2.VideoCapture(self.path)
        fps = cap.get(cv2.CAP_PROP_FPS)
        cap.release()
        return fps

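# Illustrative usage sketch (not part of the original module); "clip.mp4" and the
# timestamps are assumed placeholder values, and dimensions()/fps() only return
# meaningful values if the file actually exists on disk.
def _example_video_segment_usage():
    seg = VideoSegment(path="clip.mp4", start=2.0, end=5.5)
    print(seg.duration())    # 3.5 (seconds)
    print(seg.dimensions())  # (frame width, frame height) read via OpenCV
    print(seg.fps())         # frames per second of the underlying file
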

class Box(BaseModel):
    class_id: int = Field(description="Class ID of the subject")
    confidence: float = Field(description="Confidence of the subject")
    x1: int = Field(description="X1 coordinate of the bounding box")
    y1: int = Field(description="Y1 coordinate of the bounding box")
    x2: int = Field(description="X2 coordinate of the bounding box")
    y2: int = Field(description="Y2 coordinate of the bounding box")
    id: Optional[int] = Field(default=None, description="ID of the subject")
    metadata: Optional[dict] = Field(default=None, description="Metadata of the subject")

    def area(self):
        return (self.x2 - self.x1) * (self.y2 - self.y1)

    def center(self):
        return (self.x1 + self.x2) / 2, (self.y1 + self.y2) / 2
    def width(self):
        return self.x2 - self.x1
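
# Illustrative usage sketch (not part of the original module); the coordinates,
# class id, and confidence below are made-up values.
def _example_box_usage():
    box = Box(class_id=0, confidence=0.9, x1=10, y1=20, x2=110, y2=220)
    print(box.area())    # (110 - 10) * (220 - 20) = 20000
    print(box.center())  # (60.0, 120.0)
    print(box.width())   # 100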