Fix a number of tests
This commit is contained in:
parent
1dfb2cd6fb
commit
1f5b918b10
|
@ -95,15 +95,17 @@ def main(random_selection=False, headless=False, short_exec=False):
|
||||||
orientation=T.euler2quat([-math.pi / 2, 0, 0]),
|
orientation=T.euler2quat([-math.pi / 2, 0, 0]),
|
||||||
)
|
)
|
||||||
|
|
||||||
input("The knife will fall on the apple and dice it. Press [ENTER] to continue.")
|
if short_exec == False:
|
||||||
|
input("The knife will fall on the apple and dice it. Press [ENTER] to continue.")
|
||||||
|
|
||||||
# Step simulation for a bit so that apple is diced
|
# Step simulation for a bit so that apple is diced
|
||||||
for i in range(1000):
|
for _ in range(1000):
|
||||||
env.step(th.empty(0))
|
env.step(th.empty(0))
|
||||||
|
|
||||||
input("Apple has been diced! Press [ENTER] to terminate the demo.")
|
if short_exec == False:
|
||||||
|
input("Apple has been diced! Press [ENTER] to terminate the demo.")
|
||||||
|
|
||||||
# Always close environment at the end
|
# Always close simulator at the end
|
||||||
env.close()
|
env.close()
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -8,7 +8,7 @@ from omnigibson.macros import gm
|
||||||
gm.ENABLE_OBJECT_STATES = True
|
gm.ENABLE_OBJECT_STATES = True
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main(random_selection=False, headless=False, short_exec=False):
|
||||||
# Create the scene config to load -- empty scene with a stove object added
|
# Create the scene config to load -- empty scene with a stove object added
|
||||||
cfg = {
|
cfg = {
|
||||||
"scene": {
|
"scene": {
|
||||||
|
@ -56,7 +56,8 @@ def main():
|
||||||
assert not heat_source_state
|
assert not heat_source_state
|
||||||
|
|
||||||
# Toggle on stove, notify user
|
# Toggle on stove, notify user
|
||||||
input("Heat source will now turn ON: Press ENTER to continue.")
|
if not short_exec:
|
||||||
|
input("Heat source will now turn ON: Press ENTER to continue.")
|
||||||
stove.states[object_states.ToggledOn].set_value(True)
|
stove.states[object_states.ToggledOn].set_value(True)
|
||||||
|
|
||||||
assert stove.states[object_states.ToggledOn].get_value()
|
assert stove.states[object_states.ToggledOn].get_value()
|
||||||
|
@ -71,20 +72,23 @@ def main():
|
||||||
env.step(th.empty(0))
|
env.step(th.empty(0))
|
||||||
|
|
||||||
# Toggle off stove, notify user
|
# Toggle off stove, notify user
|
||||||
input("Heat source will now turn OFF: Press ENTER to continue.")
|
if not short_exec:
|
||||||
|
input("Heat source will now turn OFF: Press ENTER to continue.")
|
||||||
stove.states[object_states.ToggledOn].set_value(False)
|
stove.states[object_states.ToggledOn].set_value(False)
|
||||||
assert not stove.states[object_states.ToggledOn].get_value()
|
assert not stove.states[object_states.ToggledOn].get_value()
|
||||||
for _ in range(200):
|
for _ in range(200):
|
||||||
env.step(th.empty(0))
|
env.step(th.empty(0))
|
||||||
|
|
||||||
# Move stove, notify user
|
# Move stove, notify user
|
||||||
input("Heat source is now moving: Press ENTER to continue.")
|
if not short_exec:
|
||||||
|
input("Heat source is now moving: Press ENTER to continue.")
|
||||||
stove.set_position(th.tensor([0, 1.0, 0.61]))
|
stove.set_position(th.tensor([0, 1.0, 0.61]))
|
||||||
for i in range(100):
|
for i in range(100):
|
||||||
env.step(th.empty(0))
|
env.step(th.empty(0))
|
||||||
|
|
||||||
# Toggle on stove again, notify user
|
# Toggle on stove again, notify user
|
||||||
input("Heat source will now turn ON: Press ENTER to continue.")
|
if not short_exec:
|
||||||
|
input("Heat source will now turn ON: Press ENTER to continue.")
|
||||||
stove.states[object_states.ToggledOn].set_value(True)
|
stove.states[object_states.ToggledOn].set_value(True)
|
||||||
assert stove.states[object_states.ToggledOn].get_value()
|
assert stove.states[object_states.ToggledOn].get_value()
|
||||||
for i in range(500):
|
for i in range(500):
|
||||||
|
|
|
@ -8,7 +8,7 @@ from omnigibson.macros import gm
|
||||||
gm.ENABLE_OBJECT_STATES = True
|
gm.ENABLE_OBJECT_STATES = True
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main(random_selection=False, headless=False, short_exec=False):
|
||||||
# Define object configurations for objects to load -- we want to load a light and three bowls
|
# Define object configurations for objects to load -- we want to load a light and three bowls
|
||||||
obj_configs = []
|
obj_configs = []
|
||||||
|
|
||||||
|
@ -70,8 +70,9 @@ def main():
|
||||||
print("==== Initial state ====")
|
print("==== Initial state ====")
|
||||||
report_states(objs)
|
report_states(objs)
|
||||||
|
|
||||||
# Notify user that we're about to heat the object
|
if not short_exec:
|
||||||
input("Objects will be heated, and steam will slowly rise. Press ENTER to continue.")
|
# Notify user that we're about to heat the object
|
||||||
|
input("Objects will be heated, and steam will slowly rise. Press ENTER to continue.")
|
||||||
|
|
||||||
# Heated.
|
# Heated.
|
||||||
for obj in objs:
|
for obj in objs:
|
||||||
|
@ -83,7 +84,8 @@ def main():
|
||||||
# After a while, objects will be below the Steam temperature threshold.
|
# After a while, objects will be below the Steam temperature threshold.
|
||||||
print("==== Objects are now heated... ====")
|
print("==== Objects are now heated... ====")
|
||||||
print()
|
print()
|
||||||
for _ in range(2000):
|
max_iterations = 2000 if not short_exec else 100
|
||||||
|
for _ in range(max_iterations):
|
||||||
env.step(th.empty(0))
|
env.step(th.empty(0))
|
||||||
# Also print temperatures
|
# Also print temperatures
|
||||||
temps = [f"{obj.states[object_states.Temperature].get_value():>7.2f}" for obj in objs]
|
temps = [f"{obj.states[object_states.Temperature].get_value():>7.2f}" for obj in objs]
|
||||||
|
@ -94,8 +96,10 @@ def main():
|
||||||
print("==== Objects are no longer heated... ====")
|
print("==== Objects are no longer heated... ====")
|
||||||
report_states(objs)
|
report_states(objs)
|
||||||
|
|
||||||
# Close environment at the end
|
if not short_exec:
|
||||||
input("Demo completed. Press ENTER to shutdown environment.")
|
# Close environment at the end
|
||||||
|
input("Demo completed. Press ENTER to shutdown environment.")
|
||||||
|
|
||||||
env.close()
|
env.close()
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -11,7 +11,7 @@ gm.USE_GPU_DYNAMICS = True
|
||||||
gm.ENABLE_HQ_RENDERING = True
|
gm.ENABLE_HQ_RENDERING = True
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main(random_selection=False, headless=False, short_exec=False):
|
||||||
# Create the scene config to load -- empty scene plus a cabinet
|
# Create the scene config to load -- empty scene plus a cabinet
|
||||||
cfg = {
|
cfg = {
|
||||||
"scene": {
|
"scene": {
|
||||||
|
@ -87,38 +87,45 @@ def main():
|
||||||
report_states()
|
report_states()
|
||||||
|
|
||||||
# Notify user that we're about to freeze the object, and then freeze the object
|
# Notify user that we're about to freeze the object, and then freeze the object
|
||||||
input("\nObject will be frozen. Press ENTER to continue.")
|
if not short_exec:
|
||||||
|
input("\nObject will be frozen. Press ENTER to continue.")
|
||||||
obj.states[object_states.Temperature].set_value(-50)
|
obj.states[object_states.Temperature].set_value(-50)
|
||||||
report_states()
|
report_states()
|
||||||
|
|
||||||
# Notify user that we're about to cook the object, and then cook the object
|
# Notify user that we're about to cook the object, and then cook the object
|
||||||
input("\nObject will be cooked. Press ENTER to continue.")
|
if not short_exec:
|
||||||
|
input("\nObject will be cooked. Press ENTER to continue.")
|
||||||
obj.states[object_states.Temperature].set_value(100)
|
obj.states[object_states.Temperature].set_value(100)
|
||||||
report_states()
|
report_states()
|
||||||
|
|
||||||
# Notify user that we're about to burn the object, and then burn the object
|
# Notify user that we're about to burn the object, and then burn the object
|
||||||
input("\nObject will be burned. Press ENTER to continue.")
|
if not short_exec:
|
||||||
|
input("\nObject will be burned. Press ENTER to continue.")
|
||||||
obj.states[object_states.Temperature].set_value(250)
|
obj.states[object_states.Temperature].set_value(250)
|
||||||
report_states()
|
report_states()
|
||||||
|
|
||||||
# Notify user that we're about to reset the object to its default state, and then reset state
|
# Notify user that we're about to reset the object to its default state, and then reset state
|
||||||
input("\nObject will be reset to default state. Press ENTER to continue.")
|
if not short_exec:
|
||||||
|
input("\nObject will be reset to default state. Press ENTER to continue.")
|
||||||
obj.states[object_states.Temperature].set_value(macros.object_states.temperature.DEFAULT_TEMPERATURE)
|
obj.states[object_states.Temperature].set_value(macros.object_states.temperature.DEFAULT_TEMPERATURE)
|
||||||
obj.states[object_states.MaxTemperature].set_value(macros.object_states.temperature.DEFAULT_TEMPERATURE)
|
obj.states[object_states.MaxTemperature].set_value(macros.object_states.temperature.DEFAULT_TEMPERATURE)
|
||||||
report_states()
|
report_states()
|
||||||
|
|
||||||
# Notify user that we're about to soak the object, and then soak the object
|
# Notify user that we're about to soak the object, and then soak the object
|
||||||
input("\nObject will be saturated with water. Press ENTER to continue.")
|
if not short_exec:
|
||||||
|
input("\nObject will be saturated with water. Press ENTER to continue.")
|
||||||
obj.states[object_states.Saturated].set_value(env.scene.get_system("water"), True)
|
obj.states[object_states.Saturated].set_value(env.scene.get_system("water"), True)
|
||||||
report_states()
|
report_states()
|
||||||
|
|
||||||
# Notify user that we're about to unsoak the object, and then unsoak the object
|
# Notify user that we're about to unsoak the object, and then unsoak the object
|
||||||
input("\nObject will be unsaturated with water. Press ENTER to continue.")
|
if not short_exec:
|
||||||
|
input("\nObject will be unsaturated with water. Press ENTER to continue.")
|
||||||
obj.states[object_states.Saturated].set_value(env.scene.get_system("water"), False)
|
obj.states[object_states.Saturated].set_value(env.scene.get_system("water"), False)
|
||||||
report_states()
|
report_states()
|
||||||
|
|
||||||
# Close environment at the end
|
# Close environment at the end
|
||||||
input("Demo completed. Press ENTER to shutdown environment.")
|
if not short_exec:
|
||||||
|
input("Demo completed. Press ENTER to shutdown environment.")
|
||||||
env.close()
|
env.close()
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -68,6 +68,7 @@ def main(random_selection=False, headless=False, short_exec=False):
|
||||||
while steps != max_steps:
|
while steps != max_steps:
|
||||||
print(f"Overlaid {carpet.states[Overlaid].get_value(breakfast_table)} ", end="\r")
|
print(f"Overlaid {carpet.states[Overlaid].get_value(breakfast_table)} ", end="\r")
|
||||||
env.step(th.empty(0))
|
env.step(th.empty(0))
|
||||||
|
steps += 1
|
||||||
|
|
||||||
# Shut down env at the end
|
# Shut down env at the end
|
||||||
env.close()
|
env.close()
|
||||||
|
|
|
@ -76,7 +76,9 @@ def main(random_selection=False, headless=False, short_exec=False):
|
||||||
if "3d" not in bbox_modality:
|
if "3d" not in bbox_modality:
|
||||||
from omnigibson.utils.deprecated_utils import colorize_bboxes
|
from omnigibson.utils.deprecated_utils import colorize_bboxes
|
||||||
|
|
||||||
colorized_img = colorize_bboxes(bboxes_2d_data=obs[bbox_modality], bboxes_2d_rgb=obs["rgb"], num_channels=4)
|
colorized_img = colorize_bboxes(
|
||||||
|
bboxes_2d_data=obs[bbox_modality], bboxes_2d_rgb=obs["rgb"].cpu().numpy(), num_channels=4
|
||||||
|
)
|
||||||
fpath = f"{bbox_modality}_img.png"
|
fpath = f"{bbox_modality}_img.png"
|
||||||
plt.imsave(fpath, colorized_img)
|
plt.imsave(fpath, colorized_img)
|
||||||
og.log.info(f"Saving modality [{bbox_modality}] image to: {fpath}")
|
og.log.info(f"Saving modality [{bbox_modality}] image to: {fpath}")
|
||||||
|
|
|
@ -327,7 +327,7 @@ class BaseObject(EntityPrim, Registerable, metaclass=ABCMeta):
|
||||||
}
|
}
|
||||||
material.enable_emission = True if enabled else self._highlight_cached_values[material]["enable_emission"]
|
material.enable_emission = True if enabled else self._highlight_cached_values[material]["enable_emission"]
|
||||||
material.emissive_color = (
|
material.emissive_color = (
|
||||||
m.HIGHLIGHT_RGB if enabled else self._highlight_cached_values[material]["emissive_color"]
|
m.HIGHLIGHT_RGB if enabled else self._highlight_cached_values[material]["emissive_color"].tolist()
|
||||||
)
|
)
|
||||||
material.emissive_intensity = (
|
material.emissive_intensity = (
|
||||||
m.HIGHLIGHT_INTENSITY if enabled else self._highlight_cached_values[material]["emissive_intensity"]
|
m.HIGHLIGHT_INTENSITY if enabled else self._highlight_cached_values[material]["emissive_intensity"]
|
||||||
|
|
|
@ -184,7 +184,11 @@ class PrimitiveObject(StatefulObject):
|
||||||
raise ValueError("Prim type must either be PrimType.RIGID or PrimType.CLOTH for loading a primitive object")
|
raise ValueError("Prim type must either be PrimType.RIGID or PrimType.CLOTH for loading a primitive object")
|
||||||
|
|
||||||
visual_geom_prim.color = self._load_config["color"]
|
visual_geom_prim.color = self._load_config["color"]
|
||||||
visual_geom_prim.opacity = self._load_config["opacity"].item()
|
visual_geom_prim.opacity = (
|
||||||
|
self._load_config["opacity"].item()
|
||||||
|
if isinstance(self._load_config["opacity"], th.Tensor)
|
||||||
|
else self._load_config["opacity"]
|
||||||
|
)
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def radius(self):
|
def radius(self):
|
||||||
|
@ -326,10 +330,13 @@ class PrimitiveObject(StatefulObject):
|
||||||
for attr in (geom.GetPointsAttr(), geom.GetNormalsAttr()):
|
for attr in (geom.GetPointsAttr(), geom.GetNormalsAttr()):
|
||||||
# Scale all three axes by the scaling factor
|
# Scale all three axes by the scaling factor
|
||||||
vals = th.tensor(attr.Get()).double() * scaling_factor
|
vals = th.tensor(attr.Get()).double() * scaling_factor
|
||||||
attr.Set(lazy.pxr.Vt.Vec3fArray([lazy.pxr.Gf.Vec3f(*v) for v in vals]))
|
attr.Set(lazy.pxr.Vt.Vec3fArray([lazy.pxr.Gf.Vec3f(*v.tolist()) for v in vals]))
|
||||||
geom.GetExtentAttr().Set(
|
geom.GetExtentAttr().Set(
|
||||||
lazy.pxr.Vt.Vec3fArray(
|
lazy.pxr.Vt.Vec3fArray(
|
||||||
[lazy.pxr.Gf.Vec3f(*(-self._extents / 2.0)), lazy.pxr.Gf.Vec3f(*(self._extents / 2.0))]
|
[
|
||||||
|
lazy.pxr.Gf.Vec3f(*(-self._extents / 2.0).tolist()),
|
||||||
|
lazy.pxr.Gf.Vec3f(*(self._extents / 2.0).tolist()),
|
||||||
|
]
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -294,7 +294,8 @@ class VisionSensor(BaseSensor):
|
||||||
# All segmentation modalities return uint32 warp arrays, but PyTorch doesn't support it
|
# All segmentation modalities return uint32 warp arrays, but PyTorch doesn't support it
|
||||||
if modality in ["seg_semantic", "seg_instance", "seg_instance_id"]:
|
if modality in ["seg_semantic", "seg_instance", "seg_instance_id"]:
|
||||||
obs[modality] = obs[modality].view(lazy.warp.int32)
|
obs[modality] = obs[modality].view(lazy.warp.int32)
|
||||||
obs[modality] = lazy.warp.to_torch(obs[modality])
|
if not modality in ["bbox_2d_tight", "bbox_2d_loose", "bbox_3d"]:
|
||||||
|
obs[modality] = lazy.warp.to_torch(obs[modality])
|
||||||
if modality == "seg_semantic":
|
if modality == "seg_semantic":
|
||||||
id_to_labels = raw_obs["info"]["idToLabels"]
|
id_to_labels = raw_obs["info"]["idToLabels"]
|
||||||
obs[modality], info[modality] = self._remap_semantic_segmentation(obs[modality], id_to_labels)
|
obs[modality], info[modality] = self._remap_semantic_segmentation(obs[modality], id_to_labels)
|
||||||
|
@ -341,7 +342,7 @@ class VisionSensor(BaseSensor):
|
||||||
replicator_mapping[key] = categories[0]
|
replicator_mapping[key] = categories[0]
|
||||||
|
|
||||||
assert (
|
assert (
|
||||||
replicator_mapping[key] in semantic_class_id_to_name(self._scene).values()
|
replicator_mapping[key] in semantic_class_id_to_name().values()
|
||||||
), f"Class {val['class']} does not exist in the semantic class name to id mapping!"
|
), f"Class {val['class']} does not exist in the semantic class name to id mapping!"
|
||||||
|
|
||||||
image_keys = th.unique(img)
|
image_keys = th.unique(img)
|
||||||
|
@ -349,9 +350,7 @@ class VisionSensor(BaseSensor):
|
||||||
set(replicator_mapping.keys())
|
set(replicator_mapping.keys())
|
||||||
), "Semantic segmentation image does not match the original id_to_labels mapping."
|
), "Semantic segmentation image does not match the original id_to_labels mapping."
|
||||||
|
|
||||||
return VisionSensor.SEMANTIC_REMAPPER.remap(
|
return VisionSensor.SEMANTIC_REMAPPER.remap(replicator_mapping, semantic_class_id_to_name(), img, image_keys)
|
||||||
replicator_mapping, semantic_class_id_to_name(self._scene), img, image_keys
|
|
||||||
)
|
|
||||||
|
|
||||||
def _remap_instance_segmentation(self, img, id_to_labels, semantic_img, semantic_labels, id=False):
|
def _remap_instance_segmentation(self, img, id_to_labels, semantic_img, semantic_labels, id=False):
|
||||||
"""
|
"""
|
||||||
|
|
|
@ -163,6 +163,7 @@ def get_all_system_categories():
|
||||||
og_categories_path = os.path.join(og_dataset_path, "systems")
|
og_categories_path = os.path.join(og_dataset_path, "systems")
|
||||||
|
|
||||||
categories = [f for f in os.listdir(og_categories_path) if not is_dot_file(f)]
|
categories = [f for f in os.listdir(og_categories_path) if not is_dot_file(f)]
|
||||||
|
categories.append("cloth")
|
||||||
return sorted(categories)
|
return sorted(categories)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -11,7 +11,7 @@ import torch as th
|
||||||
|
|
||||||
import omnigibson as og
|
import omnigibson as og
|
||||||
from omnigibson.macros import gm
|
from omnigibson.macros import gm
|
||||||
from omnigibson.utils.asset_utils import get_all_object_categories, get_og_avg_category_specs
|
from omnigibson.utils.asset_utils import get_all_object_categories, get_all_system_categories
|
||||||
|
|
||||||
MAX_INSTANCE_COUNT = th.iinfo(th.int32).max
|
MAX_INSTANCE_COUNT = th.iinfo(th.int32).max
|
||||||
MAX_CLASS_COUNT = th.iinfo(th.int32).max
|
MAX_CLASS_COUNT = th.iinfo(th.int32).max
|
||||||
|
@ -170,7 +170,7 @@ UNDER_OBJECTS = [
|
||||||
|
|
||||||
|
|
||||||
@cache
|
@cache
|
||||||
def semantic_class_name_to_id(scene):
|
def semantic_class_name_to_id():
|
||||||
"""
|
"""
|
||||||
Get mapping from semantic class name to class id
|
Get mapping from semantic class name to class id
|
||||||
|
|
||||||
|
@ -178,8 +178,8 @@ def semantic_class_name_to_id(scene):
|
||||||
dict: class name to class id
|
dict: class name to class id
|
||||||
"""
|
"""
|
||||||
categories = get_all_object_categories()
|
categories = get_all_object_categories()
|
||||||
|
systems = get_all_system_categories()
|
||||||
|
|
||||||
systems = sorted(scene.system_registry.object_names)
|
|
||||||
all_semantics = sorted(set(categories + systems + ["background", "unlabelled", "object", "light", "agent"]))
|
all_semantics = sorted(set(categories + systems + ["background", "unlabelled", "object", "light", "agent"]))
|
||||||
|
|
||||||
# Assign a unique class id to each class name with hashing, the upper limit here is the max of int32
|
# Assign a unique class id to each class name with hashing, the upper limit here is the max of int32
|
||||||
|
@ -189,11 +189,11 @@ def semantic_class_name_to_id(scene):
|
||||||
|
|
||||||
|
|
||||||
@cache
|
@cache
|
||||||
def semantic_class_id_to_name(scene):
|
def semantic_class_id_to_name():
|
||||||
"""
|
"""
|
||||||
Get mapping from semantic class id to class name
|
Get mapping from semantic class id to class name
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
dict: class id to class name
|
dict: class id to class name
|
||||||
"""
|
"""
|
||||||
return {v: k for k, v in semantic_class_name_to_id(scene).items()}
|
return {v: k for k, v in semantic_class_name_to_id().items()}
|
||||||
|
|
|
@ -10,6 +10,7 @@ import omni
|
||||||
import omni.graph.core as ogc
|
import omni.graph.core as ogc
|
||||||
import omni.timeline
|
import omni.timeline
|
||||||
import omni.usd as ou
|
import omni.usd as ou
|
||||||
|
import numpy as np
|
||||||
import torch
|
import torch
|
||||||
import torch as th
|
import torch as th
|
||||||
import warp as wp
|
import warp as wp
|
||||||
|
@ -681,10 +682,10 @@ def colorize_bboxes(bboxes_2d_data, bboxes_2d_rgb, num_channels=3):
|
||||||
for bbox_2d in bboxes_2d_data:
|
for bbox_2d in bboxes_2d_data:
|
||||||
semantic_id_list.append(bbox_2d["semanticId"])
|
semantic_id_list.append(bbox_2d["semanticId"])
|
||||||
bbox_2d_list.append(bbox_2d)
|
bbox_2d_list.append(bbox_2d)
|
||||||
semantic_id_list_np = th.unique(th.tensor(semantic_id_list))
|
semantic_id_list_np = np.unique(np.array(semantic_id_list))
|
||||||
color_list = random_colours(len(semantic_id_list_np.tolist()), True, num_channels)
|
color_list = random_colours(len(semantic_id_list_np.tolist()), True, num_channels)
|
||||||
for bbox_2d in bbox_2d_list:
|
for bbox_2d in bbox_2d_list:
|
||||||
index = th.where(semantic_id_list_np == bbox_2d["semanticId"])[0][0]
|
index = np.where(semantic_id_list_np == bbox_2d["semanticId"])[0][0]
|
||||||
bbox_color = color_list[index]
|
bbox_color = color_list[index]
|
||||||
outline = (bbox_color[0], bbox_color[1], bbox_color[2])
|
outline = (bbox_color[0], bbox_color[1], bbox_color[2])
|
||||||
if num_channels == 4:
|
if num_channels == 4:
|
||||||
|
@ -697,5 +698,5 @@ def colorize_bboxes(bboxes_2d_data, bboxes_2d_rgb, num_channels=3):
|
||||||
rgb_img_draw.rectangle(
|
rgb_img_draw.rectangle(
|
||||||
[(bbox_2d["x_min"], bbox_2d["y_min"]), (bbox_2d["x_max"], bbox_2d["y_max"])], outline=outline, width=2
|
[(bbox_2d["x_min"], bbox_2d["y_min"]), (bbox_2d["x_max"], bbox_2d["y_max"])], outline=outline, width=2
|
||||||
)
|
)
|
||||||
bboxes_2d_rgb = th.tensor(rgb_img)
|
bboxes_2d_rgb = np.array(rgb_img)
|
||||||
return bboxes_2d_rgb
|
return bboxes_2d_rgb
|
||||||
|
|
|
@ -153,7 +153,7 @@ class Remapper:
|
||||||
f"We do not have semantic information about bounding box semantic id {semantic_id} yet. Marking as unlabelled."
|
f"We do not have semantic information about bounding box semantic id {semantic_id} yet. Marking as unlabelled."
|
||||||
)
|
)
|
||||||
self.warning_printed.add(semantic_id)
|
self.warning_printed.add(semantic_id)
|
||||||
return semantic_class_name_to_id(scene)["unlabelled"]
|
return semantic_class_name_to_id()["unlabelled"]
|
||||||
return self.key_array[semantic_id]
|
return self.key_array[semantic_id]
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -17,7 +17,10 @@ EXAMPLES_TO_SKIP = [
|
||||||
"learning.navigation_policy_demo",
|
"learning.navigation_policy_demo",
|
||||||
"teleoperation.robot_teleoperate_demo",
|
"teleoperation.robot_teleoperate_demo",
|
||||||
# TODO: Temporarily skip the following examples
|
# TODO: Temporarily skip the following examples
|
||||||
"robots.all_robots_visualizer",
|
"robots.all_robots_visualizer", # waiting for base link bug to be fixed
|
||||||
|
"object_states.attachment_demo", # seg fualt??
|
||||||
|
"environments.behavior_env_demo", # This only works with pre-sampled cached BEHAVIOR activity scene
|
||||||
|
"robots.advanced.ik_example", # waiting for base link bug to be fixed; Fetch is fix base in this example
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue