Data generation code

This commit is contained in:
hzyjerry 2017-09-23 01:17:00 -07:00
parent 223080dadb
commit a82c8e20b5
27 changed files with 28626 additions and 1 deletion

1
.gitignore vendored

@@ -3,7 +3,6 @@
*.so
*.egg-info/
.DS_Store
data/
*.pth
src/
Dockerfile

3
data/.gitignore vendored Normal file

@@ -0,0 +1,3 @@
node_modules/
11HB6XZSh1Q/
__pycache__/

8
data/activate_env.py Normal file

@@ -0,0 +1,8 @@
import os
import subprocess
python_path = subprocess.check_output(["which", "python"]).decode("utf-8")
virenv_path = python_path[:python_path.index("/bin")]
add_on_path = os.path.join(virenv_path, "python3.5", "site-packages")
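# 'add_on_path' is the virtualenv's site-packages directory; the Blender scripts
# in this directory import it and append it to sys.path so that packages installed
# in the active virtualenv are importable from Blender's bundled Python.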

182
data/create_images_utils.py Normal file

@@ -0,0 +1,182 @@
"""
Name: create_images_utils.py
Author: Sasha Sax, CVGL
Desc: Contains utilities which can be used to run the image creation scripts
Usage:
blender -b -noaudio --enable-autoexec --python create_normal_images.py --
"""
# Import these two first so that we can import other packages
import os
import sys
sys.path.append( os.path.dirname( os.path.realpath(__file__) ) )
from activate_env import add_on_path
sys.path.append(add_on_path)
from load_settings import settings
# Import remaining packages
import bpy
import bpy_extras.mesh_utils
import glob
import io_utils
import json
import logging
import math
from mathutils import Vector, Euler
import numpy as np
import random
import shutil # Temporary dir
import time
import tempfile # Temporary dir
import utils
import uuid as uu
from utils import Profiler
def start_logging():
''' Creates a logger for this module, seeds the RNG, and returns (logger, basepath). '''
# global logger
logger = io_utils.create_logger( __name__ )
utils.set_random_seed()
basepath = os.getcwd()
return logger, basepath
def setup_rendering( setup_scene_fn, setup_nodetree_fn, logger, save_dir, apply_texture=None ):
''' Sets up everything required to render a scene
Args:
setup_scene_fn: A function which configures render settings for the scene
setup_nodetree_fn: A function which builds the compositor node tree and returns the save path
logger: A logger to write information out to
save_dir: The directory in which rendered images will be saved
apply_texture: Optional function which applies a material/texture to the scene
Returns:
render_save_path: A path where rendered images will be saved (single file)
'''
scene=bpy.context.scene
if apply_texture:
apply_texture( scene=bpy.context.scene )
setup_scene_fn( scene )
render_save_path = setup_nodetree_fn( scene, save_dir )
return render_save_path
def setup_and_render_image( task_name, basepath, view_number, view_dict, camera_poses, execute_render_fn, logger=None, clean_up=True ):
''' Mutates the given camera and uses it to render the image called for in
'view_dict'
Args:
task_name: task name + subdirectory to save images
basepath: model directory
view_number: The index of the current view
view_dict: A 'view_dict' for a point/view
camera_poses: A dict of camera_uuid -> camera_pose
execute_render_fn: A function which renders the desired image
logger: A logger to write information out to
clean_up: Whether to delete cameras after use
Returns:
None (Renders image)
'''
scene = bpy.context.scene
camera_uuid = view_dict[ "camera_uuid" ]
point_uuid = view_dict[ "point_uuid" ]
camera, camera_data, scene = utils.create_camera(
location=camera_poses[ camera_uuid ][ "position" ],
rotation=view_dict[ "camera_original_rotation" ],
field_of_view=view_dict[ "field_of_view_rads" ],
scene=scene,
camera_name='RENDER_CAMERA' )
if settings.CREATE_PANOS:
utils.make_camera_data_pano( camera_data )
save_path = io_utils.get_file_name_for(
dir=get_save_dir( basepath, task_name ),
point_uuid=camera_uuid,
view_number=settings.PANO_VIEW_NAME,
camera_uuid=camera_uuid,
task=task_name,
ext=io_utils.img_format_to_ext[ settings.PREFERRED_IMG_EXT.lower() ] )
#camera.rotation_euler = Euler( view_dict["camera_original_rotation"], settings.EULER_ROTATION_ORDER )
camera.rotation_euler = Euler( view_dict["camera_original_rotation"] )
camera.rotation_euler.rotate( Euler( view_dict[ "rotation_from_original_to_point" ] ) )
execute_render_fn( scene, save_path )
else:
if settings.CREATE_NONFIXATED:
save_path = io_utils.get_file_name_for(
dir=get_save_dir( basepath, task_name ),
point_uuid=point_uuid,
view_number=view_number,
camera_uuid=camera_uuid,
task=task_name + "_nonfixated",
ext=io_utils.img_format_to_ext[ settings.PREFERRED_IMG_EXT.lower() ] )
camera.rotation_euler = Euler( view_dict["camera_original_rotation"], settings.EULER_ROTATION_ORDER )
camera.rotation_euler.rotate( Euler( view_dict[ "rotation_from_original_to_nonfixated" ] , settings.EULER_ROTATION_ORDER ) )
execute_render_fn( scene, save_path )
if settings.CREATE_FIXATED:
save_path = io_utils.get_file_name_for(
dir=get_save_dir( basepath, task_name ),
point_uuid=point_uuid,
view_number=view_number,
camera_uuid=camera_uuid,
task=task_name + "_fixated",
ext=io_utils.img_format_to_ext[ settings.PREFERRED_IMG_EXT.lower() ] )
# Aim camera at target by rotating a known amount
camera.rotation_euler = Euler( view_dict["camera_original_rotation"] )
camera.rotation_euler.rotate( Euler( view_dict[ "rotation_from_original_to_point" ] ) )
execute_render_fn( scene, save_path )
if clean_up:
utils.delete_objects_starting_with( "RENDER_CAMERA" ) # Clean up
def get_save_dir( basepath, task_name ):
if settings.CREATE_PANOS:
return os.path.join( basepath, 'pano', task_name )
else:
return os.path.join( basepath, task_name )
def get_number_imgs( point_infos ):
if settings.CREATE_PANOS:
return len( point_infos )
else:
n_imgs = 0
if settings.CREATE_FIXATED:
n_imgs += sum( [len( pi ) for pi in point_infos] )
if settings.CREATE_NONFIXATED:
n_imgs += sum( [len( pi ) for pi in point_infos] )
return n_imgs
def run( setup_scene_fn, setup_nodetree_fn, model_dir, task_name, apply_texture_fn=None ):
''' Runs image generation given some render helper functions
Args:
setup_scene_fn: A function which configures render settings for the scene
setup_nodetree_fn: A function which builds the compositor node tree and returns the save path
model_dir: The model directory to render from
task_name: Task name, also the subdirectory in which to save images
apply_texture_fn: Optional function which applies a material/texture to the mesh '''
utils.set_random_seed()
logger = io_utils.create_logger( __name__ )
with Profiler( "Setup", logger ) as prf:
save_dir = os.path.join( model_dir, 'pano', task_name )
model_info = io_utils.load_model_and_points( model_dir, typ='LEGO' )
scene = bpy.context.scene
if apply_texture_fn:
apply_texture_fn( scene=bpy.context.scene )
execute_render = utils.make_render_fn( setup_scene_fn, setup_nodetree_fn, logger=logger) # takes (scene, save_dir)
debug_at = ( settings.DEBUG_AT_POINT, settings.DEBUG_AT_VIEW )
n_imgs = get_number_imgs( model_info[ 'point_infos' ] )
with Profiler( 'Render', logger ) as pflr:
img_number = 0
for point_number, point_info in enumerate( model_info[ 'point_infos' ] ):
for view_number, view_dict in enumerate( point_info ):
if settings.CREATE_PANOS and view_number != 1:
continue # we only want to create 1 pano per camera
img_number += 1
if debug_at[0] is not None:
if debug_at != ( point_number, view_number ):
continue
setup_and_render_image( task_name, model_dir,
camera_poses=model_info[ 'camera_poses' ],
clean_up=debug_at == (None, None),
execute_render_fn=execute_render,
logger=logger,
view_dict=view_dict,
view_number=view_number )
pflr.step( 'finished img {}/{}'.format( img_number, n_imgs ) )
if debug_at == ( point_number, view_number ):
return
return

105
data/create_mist_images.py Normal file

@@ -0,0 +1,105 @@
"""
Name: create_mist_images.py
Author: Sasha Sax, CVGL
Modified by: Zhiyang He
Desc: Creates mist versions of standard RGB images by using the Matterport models.
This reads in all the point#.json files and renders the corresponding images in depth,
where depth is defined relative to the center of the camera sensor.
Usage:
blender -b -noaudio --enable-autoexec --python create_mist_images.py --
"""
import bpy
import os
import sys
sys.path.append( os.path.dirname( os.path.realpath(__file__) ) )
from activate_env import add_on_path
sys.path.append(add_on_path)
from load_settings import settings
import create_images_utils
import utils
TASK_NAME = 'mist'
def main():
apply_texture_fn = None
create_images_utils.run(
set_render_settings,
setup_nodetree_for_render,
model_dir=os.getcwd(),
task_name=TASK_NAME,
apply_texture_fn=apply_texture_fn )
def set_render_settings( scene ):
"""
Sets the render settings for speed.
Args:
scene: The scene to be rendered
"""
if settings.CREATE_PANOS:
scene.render.engine = 'CYCLES'
else:
scene.render.engine = 'BLENDER_RENDER'
utils.set_preset_render_settings( scene, presets=[ 'BASE', 'NON-COLOR' ] )
# Simplifying assumptions for depth
scene.render.layers[ "RenderLayer" ].use_pass_combined = False
scene.render.layers[ "RenderLayer" ].use_pass_z = False
scene.render.layers[ "RenderLayer" ].use_pass_mist = True
# Use mist to simulate depth
world = bpy.data.worlds["World"]
world.horizon_color = (1.,1.,1.)
world.ambient_color = (0,0,0)
world.mist_settings.use_mist = True
world.mist_settings.start = 0. # min range
world.mist_settings.depth = settings.MIST_MAX_DISTANCE_METERS # max range
world.mist_settings.intensity = 0. # minimum mist level
world.mist_settings.height = 0. # mist is prevalent at all z-values
world.mist_settings.falloff = 'LINEAR'
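# With LINEAR falloff, start = 0 and depth = MIST_MAX_DISTANCE_METERS, the mist
# factor ramps linearly from 0 at the camera to 1 at MIST_MAX_DISTANCE_METERS,
# which is what lets the rendered mist pass stand in for metric depth.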
def setup_nodetree_for_render( scene, outdir ):
"""
Creates the scene so that a depth image will be saved.
Args:
scene: The scene that will be rendered
outdir: The directory to save raw renders to
Returns:
save_path: The path to which the image will be saved
"""
# Use node rendering for python control
scene.use_nodes = True
tree = scene.node_tree
links = tree.links
# Make sure there are no existing nodes
for node in tree.nodes:
tree.nodes.remove( node )
# Set up a renderlayer and plug it into our remapping layer
inp = tree.nodes.new('CompositorNodeRLayers')
mist_output = 16 # index 16 is the mist pass
if scene.render.engine == 'CYCLES':
image_data = inp.outputs[ mist_output ]
elif scene.render.engine == 'BLENDER_RENDER':
inv = tree.nodes.new('CompositorNodeInvert')
links.new( inp.outputs[ mist_output ], inv.inputs[ 1 ] )
image_data = inv.outputs[ 0 ]
save_path = utils.create_output_node( tree, image_data, outdir,
color_mode='BW',
file_format=settings.PREFERRED_IMG_EXT,
color_depth=settings.DEPTH_BITS_PER_CHANNEL )
return save_path
if __name__=="__main__":
with utils.Profiler( "create_mist_images.py" ):
main()

179
data/create_normal_images.py Normal file

@@ -0,0 +1,179 @@
"""
Name: create_normal_images.py
Author: Sasha Sax, CVGL
Modified by: Zhiyang He
Desc: Creates surface normals versions of standard RGB images by using the Matterport models.
This reads in all the point#.json files and renders the corresponding images with surface normals.
Usage:
blender -b -noaudio --enable-autoexec --python create_normal_images.py --
"""
# Import these two first so that we can import other packages
import os
import sys
import bpy
# Import remaining packages
sys.path.append( os.path.dirname( os.path.realpath(__file__) ) )
from activate_env import add_on_path
sys.path.append(add_on_path)
from load_settings import settings
import create_images_utils
import utils
TASK_NAME = 'normal'
def main():
apply_texture_fn = None
if settings.CREATE_PANOS:
apply_texture_fn = apply_normals_texture
create_images_utils.run(
set_scene_render_settings,
setup_nodetree_for_render,
model_dir=os.getcwd(),
task_name=TASK_NAME,
apply_texture_fn=apply_texture_fn )
def apply_normals_texture( scene ):
if not settings.CREATE_PANOS:
raise EnvironmentError( 'Only panoramic normal images need a texture, but settings.CREATE_PANOS is False' )
render = bpy.context.scene.render
render.engine = 'CYCLES'
# Create material
mat = bpy.data.materials.new( 'normals' )
mat.use_nodes = True
tree = mat.node_tree
links = tree.links
# Make sure there are no existing nodes
for node in tree.nodes:
tree.nodes.remove( node )
nodes = tree.nodes
# Use bump map to get normals
bump = nodes.new( 'ShaderNodeBump' )
# Map to new color
map_node = nodes.new("ShaderNodeMapping")
map_node.translation[0] = 0.5
map_node.translation[1] = 0.5
map_node.translation[2] = 0.5
map_node.scale[0] = 0.5
map_node.scale[1] = 0.5
map_node.scale[2] = -0.5
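# The 0.5 scale and 0.5 offset remap each normal component from [-1, 1] to [0, 1]
# (with the Z component flipped), so the normals can be stored as colors.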
links.new( bump.outputs[ 0 ], map_node.inputs[ 0 ] )
split_node = nodes.new("ShaderNodeSeparateRGB")
links.new( map_node.outputs[ 0 ], split_node.inputs[ 0 ] )
combine_node = nodes.new("ShaderNodeCombineRGB")
links.new( split_node.outputs[ 0 ], combine_node.inputs[ 0 ] ) # R
links.new( split_node.outputs[ 1 ], combine_node.inputs[ 2 ] ) # G
links.new( split_node.outputs[ 2 ], combine_node.inputs[ 1 ] ) # B
# Make the material emit that color (so it's visible in render)
emit_node = nodes.new("ShaderNodeEmission")
links.new( combine_node.outputs[ 0 ], emit_node.inputs[ 0 ] )
# Now output that color
out_node = nodes.new("ShaderNodeOutputMaterial")
links.new( emit_node.outputs[ 0 ], out_node.inputs[ 0 ] )
mat.use_shadeless = True
# Now apply this material to the mesh
mesh = utils.get_mesh()
bpy.context.scene.objects.active = mesh
bpy.ops.object.material_slot_add()
mesh.material_slots[ 0 ].material = mat
def set_scene_render_settings( scene ):
"""
Sets the render settings for speed.
Args:
scene: The scene to be rendered
"""
utils.set_preset_render_settings( scene, presets=['BASE', 'NON-COLOR'] )
# Set passes
scene.render.layers[ "RenderLayer" ].use_pass_combined = True
scene.render.layers[ "RenderLayer" ].use_pass_z = False
scene.render.layers[ "RenderLayer" ].use_pass_normal = True
def setup_nodetree_for_render( scene, tmpdir ):
"""
Creates the scene so that a surface normals image will be saved.
Note that this method works, but not for Blender 2.69 which is
the version that exists on Napoli. Therefore, prefer the other
method 'setup_scene_for_normals_render_using_matcap'
Args:
scene: The scene that will be rendered
tmpdir: The directory to save raw renders to
Returns:
save_path: The path to which the image will be saved
"""
# Use node rendering for python control
scene.use_nodes = True
tree = scene.node_tree
links = tree.links
# Make sure there are no existing nodes
for node in tree.nodes:
tree.nodes.remove( node )
# We want to use the normals pass in blender, but Blender uses its own
# special colors for normals. So we need to map from Blender's colors
# to the standard ones.
# Set up a renderlayer and plug it into our remapping layer
inp = tree.nodes.new('CompositorNodeRLayers')
if settings.CREATE_PANOS: # Panos get the normals from texture
image_data = inp.outputs[ 0 ]
bpy.data.worlds["World"].horizon_color = (0.5, 0.5, 0.5)
else: # Other images get the normals from the scene
# Remap Blender colors to std
grey = ( 0.5, 0.5, 0.5, 1 ) #BCBCBC
mix1 = tree.nodes.new('CompositorNodeMixRGB')
mix1.blend_type = 'MULTIPLY'
mix1.inputs[ 2 ].default_value = grey
links.new( inp.outputs[ 3 ], mix1.inputs[ 1 ] ) # inp.outputs[ 3 ] is the normals socket
mix2 = tree.nodes.new('CompositorNodeMixRGB')
mix2.blend_type = 'ADD'
mix2.inputs[ 2 ].default_value = grey
links.new( mix1.outputs[ 0 ], mix2.inputs[ 1 ] )
split = tree.nodes.new('CompositorNodeSepRGBA')
links.new( mix2.outputs[ 0 ], split.inputs[ 0 ] )
inv = tree.nodes.new('CompositorNodeInvert')
links.new( split.outputs[ 0 ], inv.inputs[ 1 ] )
combine = tree.nodes.new('CompositorNodeCombRGBA')
links.new( inv.outputs[ 0 ], combine.inputs[ 0 ] ) # R
links.new( split.outputs[ 1 ], combine.inputs[ 1 ] ) # G
links.new( split.outputs[ 2 ], combine.inputs[ 2 ] ) # B
links.new( split.outputs[ 3 ], combine.inputs[ 3 ] ) # A
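# Net effect of the mix/invert chain above: each normal component n is remapped
# to 0.5 * n + 0.5, and the R channel is additionally inverted (1 - value).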
image_data = combine.outputs[ 0 ]
# Now save out the normals image and return the path
save_path = utils.create_output_node( tree, image_data, tmpdir,
color_mode='RGB',
file_format=settings.PREFERRED_IMG_EXT )
return save_path
if __name__=="__main__":
with utils.Profiler( "create_normal_images.py" ):
main()

421
data/create_rgb_images.py Normal file

@@ -0,0 +1,421 @@
"""
Name: create_rgb_images.py
Author: Sasha Sax, CVGL
Modified by: Zhiyang He
Desc: Creates RGB images of points in the points/ directory. Currently creates
fixated and nonfixated views.
Usage:
blender -b -noaudio --enable-autoexec --python create_rgb_images.py --
"""
import os
import sys
sys.path.append( os.path.dirname( os.path.realpath(__file__) ) )
from activate_env import add_on_path
sys.path.append(add_on_path)
from load_settings import settings
from create_images_utils import get_number_imgs
import io_utils
import utils
from utils import Profiler
import bmesh
import bpy
from collections import defaultdict
import glob
import json
import math
from mathutils import Vector, Euler
from mathutils.geometry import intersect_ray_tri
from PIL import Image
import re
import shutil
ORIGIN = (0.0, 0.0, 0.0)
DEFAULT_ROTATION_FOR_INITIAL_SKYBOX = Euler( (0,0,0), settings.EULER_ROTATION_ORDER) # ( math.pi / 2, 0, math.pi / 2 ) for 'XYZ' ordering
DEFAULT_ROTATION_FOR_INITIAL_SKYBOX.rotate_axis( 'Z', math.pi / 2 )
DEFAULT_ROTATION_FOR_INITIAL_SKYBOX.rotate_axis( 'X', math.pi / 2 )
TASK = "rgb"
CUBE_SIZE = 1.0 # Sasha: I think this shouldn't matter
basepath = os.getcwd()
utils.set_random_seed()
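# Overview: for each sweep, the six {uuid}_skybox{i}.jpg images are wrapped onto
# the inside faces of a cube centered at the origin, and views are rendered from
# a camera placed at the cube's center using the rotations and field of view
# recorded in each point/view dict.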
def main():
global logger
logger = io_utils.create_logger( __name__ )
utils.delete_all_objects_in_context()
# Create the cube
create_cube( radius=CUBE_SIZE, location=ORIGIN )
obj = bpy.context.object
mesh = obj.data
scene = bpy.context.scene
camera_poses = io_utils.collect_camera_poses_from_csvfile( io_utils.get_camera_pose_file( basepath ) )
# Create the camera and target
empty = utils.create_empty( 'Empty', Vector( ORIGIN ) )
# Load points
point_infos = io_utils.load_saved_points_of_interest( basepath )
debug_at = ( settings.DEBUG_AT_POINT, settings.DEBUG_AT_VIEW )
# Choose the render engine based on whether we need panos
if settings.CREATE_PANOS:
scene.render.engine = 'CYCLES'
else:
scene.render.engine='BLENDER_RENDER'
n_images = get_number_imgs( point_infos )
image_number = 1
with Profiler( "Render", logger=logger ) as pflr:
for point_number, point_info in enumerate( point_infos ):
for view_num, view_of_point in enumerate( point_info ):
if debug_at[0] is not None:
if debug_at != ( point_number, view_num ):
continue
camera, camera_data, _ = utils.create_camera( ORIGIN,
rotation=DEFAULT_ROTATION_FOR_INITIAL_SKYBOX,
field_of_view=view_of_point[ "field_of_view_rads" ] )
initial_camera_rotation_in_real_world = Euler( view_of_point[ 'camera_original_rotation' ],
settings.EULER_ROTATION_ORDER )
rgb_cube_model_offset = utils.get_euler_rotation_between(
initial_camera_rotation_in_real_world,
DEFAULT_ROTATION_FOR_INITIAL_SKYBOX )
wrap_material_around_cube( view_of_point[ "camera_uuid" ], mesh, os.path.join("img", "high"), ".jpg" )
if settings.CREATE_PANOS:
utils.make_camera_data_pano( camera_data )
save_path = io_utils.get_file_name_for(
dir=os.path.join( basepath, 'pano', TASK ),
point_uuid=view_of_point[ "camera_uuid" ],
view_number=settings.PANO_VIEW_NAME,
camera_uuid=view_of_point[ "camera_uuid" ],
task=TASK,
ext=io_utils.img_format_to_ext[ settings.PREFERRED_IMG_EXT.lower() ] )
set_render_settings( scene, save_path )
quiet_render( image_number, n_images, pflr )
image_number += 1
break # Only want one pano/sweep
if settings.CREATE_NONFIXATED:
# Aim camera at target by rotating a known amount
camera.rotation_euler = initial_camera_rotation_in_real_world
camera.rotation_euler.rotate( Euler( view_of_point[ 'rotation_from_original_to_nonfixated' ],
settings.EULER_ROTATION_ORDER ) )
camera.rotation_euler.rotate( rgb_cube_model_offset )
# Create non-fixated image and optionally add X on pixel
rgb_non_fixated_path = io_utils.get_file_name_for(
dir=os.path.join( basepath, TASK ),
point_uuid=view_of_point[ "point_uuid" ],
view_number=view_num,
camera_uuid=view_of_point[ "camera_uuid" ],
task=TASK + "_nonfixated",
ext=io_utils.img_format_to_ext[ settings.PREFERRED_IMG_EXT.lower() ] )
set_render_settings( scene, rgb_non_fixated_path )
quiet_render( image_number, n_images, pflr )
image_number += 1
if settings.CREATE_FIXATED: # Render fixated image
# Point camera at correct location
# Aim camera at target by rotating a known amount
camera.rotation_euler = initial_camera_rotation_in_real_world
camera.rotation_euler.rotate( Euler( view_of_point[ 'rotation_from_original_to_point' ], settings.EULER_ROTATION_ORDER ) )
camera.rotation_euler.rotate( rgb_cube_model_offset )
# Render the image
rgb_render_path = io_utils.get_file_name_for(
dir=os.path.join( basepath, TASK ),
point_uuid=view_of_point[ "point_uuid" ],
view_number=view_num,
camera_uuid=view_of_point[ "camera_uuid" ],
task=TASK + "_fixated",
ext=io_utils.img_format_to_ext[ settings.PREFERRED_IMG_EXT.lower() ] )
set_render_settings( scene, rgb_render_path )
quiet_render( image_number, n_images, pflr )
image_number += 1
if debug_at == ( point_number, view_num ):
return
utils.delete_objects_starting_with( "Camera" ) # Clean up
# ------------------------------------------
# Copied from original
# ------------------------------------------
def _legacy_point_camera_at_target():
""" Do not use this method for pointing a camera at the target. It will point the
camera in the right direction, but Blender forces an axis to point in the global
'up' direction which may not align with the camera's initial up direction. If
the two do not align, then the aimed camera will be rotated from the correct
camera extrinsics. This will cause reconstruction error down the line.
The legacy code is here for reference ONLY.
"""
# else: # Using projection method, which works but takes a dependency on the pixel coords
# cube_face_idx = utils.skybox_img_idx_to_cube_face_idx[ view_of_point[ "skybox_img" ] ]
# vertices = [ mesh.vertices[idx].co for idx in mesh.polygons[ cube_face_idx ].vertices ]
# locked_axis = verts_from_three_to_two( vertices )
# pixel_range = ( 0, settings.RESOLUTION )
# pixel_coords = io_utils.get_pixel_in_skybox_for_point_from_view_dict( view_of_point )
# vertex = translate_pixel_to_cube_verts( cube_face_idx, pixel_range, pixel_coords, locked_axis )
# empty.location = vertex
# utils.point_camera_at_target( camera, empty, align_with_global_up_axis=True )
pass
def verts_from_three_to_two(vertices):
""" By Farhan """
first_vert = vertices[0]
if all(v[0] == first_vert[0] for v in vertices):
return (0, first_vert[0])
elif all(v[1] == first_vert[1] for v in vertices):
return (1, first_vert[1])
else:
return (2, first_vert[2])
def translate_pixel_to_cube_verts(i, pixel_range, pixel_coords, locked_axis):
""" By Farhan + Sasha """
axis, value = locked_axis
if i == 0 or i == 1:
x_or_y = pixel_to_cube(pixel_range, (-CUBE_SIZE, CUBE_SIZE), pixel_coords[0])
y_or_z = pixel_to_cube(pixel_range, (CUBE_SIZE, -CUBE_SIZE), pixel_coords[1])
elif i == 2 or i == 3 or i == 4:
x_or_y = pixel_to_cube(pixel_range, (CUBE_SIZE, -CUBE_SIZE), pixel_coords[0])
y_or_z = pixel_to_cube(pixel_range, (CUBE_SIZE, -CUBE_SIZE), pixel_coords[1])
else:
x_or_y = pixel_to_cube(pixel_range, (-CUBE_SIZE, CUBE_SIZE), pixel_coords[0])
y_or_z = pixel_to_cube(pixel_range, (CUBE_SIZE, -CUBE_SIZE), pixel_coords[1])
if axis == 0:
return (value, x_or_y, y_or_z)
elif axis == 1:
return (x_or_y, value, y_or_z)
else:
return (x_or_y, y_or_z, value)
def pixel_to_cube(old_range, new_range, pixel_coord):
""" By Farhan """
from_min, from_max = old_range
to_min, to_max = new_range
x = pixel_coord if to_max > to_min else from_max-pixel_coord
new_range_len = abs(to_max - to_min)
divisor = from_max / new_range_len
return (x / divisor) - to_max if to_max > to_min else (x / divisor) - to_min
# ---------------------------------------------------------
def adjust_texture_mapping_for_face( tex_mapping, cube_face_idx ):
if cube_face_idx == 0: # Front
tex_mapping.mapping_x = 'Y'
tex_mapping.mapping_y = 'X'
elif cube_face_idx == 1: # Right
tex_mapping.mapping_x = 'Y'
tex_mapping.mapping_y = 'X'
elif cube_face_idx == 2: # Back
tex_mapping.mapping_x = 'Y'
tex_mapping.mapping_y = 'X'
elif cube_face_idx == 3: # Left
tex_mapping.mapping_x = 'Y'
tex_mapping.mapping_y = 'X'
elif cube_face_idx == 4: # Bottom
if bpy.context.scene.render.engine == 'BLENDER_RENDER':
tex_mapping.scale[1] = -1
elif cube_face_idx == 5: # Top
if bpy.context.scene.render.engine == 'BLENDER_RENDER':
tex_mapping.scale[0] = -1
else:
pass
def create_cube( radius, location=ORIGIN ):
"""
Creates a cube at the origin
"""
bpy.ops.mesh.primitive_cube_add(radius=radius, location=location, enter_editmode=True)
bpy.ops.uv.unwrap() # cube_project(cube_size=1.0/radius)
bpy.ops.object.mode_set(mode='OBJECT')
def create_texture_from_img(filepath):
"""
Creates a texture of the given image.
Args:
filepath: A string that contains the path to the Image
Returns:
texture: A texture that contains the given Image
"""
texture = bpy.data.textures.new("ImageTexture", type='IMAGE')
img = bpy.data.images.load(filepath)
texture.image = img
# To bleed the img over the seams
texture.extension = 'EXTEND'
# For sharp edges
texture.use_mipmap = False
# texture.use_interpolation = False
# texture.filter_type = 'BOX'
texture.filter_size = 0.80
return texture
def flip_over_axis(texture, axis):
"""
Creates a new texture that is the old one flipped over the given axis. Saves a copy with the extension '.flipped.jpg' appended.
Args:
texture: A Blender texture
axis: One of [ Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM ]
Returns:
texture: the same texture with the pattern flipped over the given axis
"""
img_path = texture.image.filepath
print( img_path )
new_img = Image.open(img_path)
new_img = new_img.transpose(axis)
new_img.save(img_path + ".flipped.jpg")
new_img_flipped = bpy.data.images.load(img_path + ".flipped.jpg")
texture.image = new_img_flipped
return texture
def get_or_create_image_material( uuid, img_dir, ext, cube_face_idx ):
img_idx = utils.cube_face_idx_to_skybox_img_idx[ cube_face_idx ]
material_name = "Mat_{0}_{1}".format( uuid, img_idx )
img_path = os.path.join(img_dir, uuid + "_skybox" + str(img_idx) + ext)
if material_name in bpy.data.materials:
return bpy.data.materials[ material_name ]
else:
if bpy.context.scene.render.engine == 'BLENDER_RENDER':
texture = create_texture_from_img( img_path )
# To appear in a render, the image must be a texture which is on a material which applied to a face
material = utils.create_material_with_texture( texture, name=material_name )
# adjust_material_for_face( material, cube_face_idx )
adjust_texture_mapping_for_face( material.texture_slots[0], cube_face_idx )
elif bpy.context.scene.render.engine == 'CYCLES':
# Create material
material = bpy.data.materials.new( material_name )
material.use_nodes = True
tree = material.node_tree
links = tree.links
# Make sure there are no existing nodes
for node in tree.nodes:
tree.nodes.remove( node )
nodes = tree.nodes
inp = nodes.new( type="ShaderNodeTexCoord" )
# Set img as texture
tex = nodes.new( type="ShaderNodeTexImage" )
tex.image = bpy.data.images.load( img_path )
tex.extension = 'EXTEND'
tex.interpolation = 'Closest'
# Adjust the faces (using increasingly convoluted methods)
if cube_face_idx == 4:
links.new( inp.outputs[ 0 ], tex.inputs[ 0 ] )
elif cube_face_idx == 5:
obj = bpy.context.object
mesh = obj.data
for face_idx, face in enumerate( mesh.polygons ):
if face_idx != cube_face_idx: continue
for vert_idx, loop_idx in zip(face.vertices, face.loop_indices):
uv_coords = mesh.uv_layers.active.data[loop_idx].uv
if vert_idx==1:
uv_coords.x = 0
uv_coords.y = 1
elif vert_idx==3:
uv_coords.x = 0
uv_coords.y = 0
elif vert_idx==5:
uv_coords.x = 1
uv_coords.y = 1
elif vert_idx==7:
uv_coords.x = 1
uv_coords.y = 0
else:
adjust_texture_mapping_for_face( tex.texture_mapping, cube_face_idx )
# Make the material emit the image (so it's visible in render)
emit_node = nodes.new("ShaderNodeEmission")
links.new( tex.outputs[ 0 ], emit_node.inputs[ 0 ] )
# Now output that img
out_node = nodes.new("ShaderNodeOutputMaterial")
links.new( emit_node.outputs[ 0 ], out_node.inputs[ 0 ] )
return material
def set_render_settings( scene, rgb_render_path ):
"""
Sets the render settings for speed.
Args:
scene: The scene to be rendered
"""
utils.set_preset_render_settings( scene, presets=['BASE'] )
# Quality settings
scene.render.resolution_percentage = 100
scene.render.tile_x = settings.TILE_SIZE
scene.render.tile_y = settings.TILE_SIZE
scene.render.filepath = rgb_render_path
scene.render.image_settings.color_mode = 'RGB'
scene.render.image_settings.color_depth = settings.COLOR_BITS_PER_CHANNEL
scene.render.image_settings.file_format = settings.PREFERRED_IMG_EXT.upper()
def wrap_material_around_cube(uuid, mesh, img_dir, ext):
"""
This will create images on the inside of the cube that correspond to the skybox images in the model.
Args:
uuid: A string that contains the uuid of the skybox
mesh: The Blender mesh for the cube.
img_dir: A string that contains the dir where {uuid}_skybox{i}.{ext} can be found
ext: The ext for skybox images. In our case, .jpg
"""
cube = bpy.data.objects["Cube"]
# We need to make the cube the active object so that we can add materials
bpy.context.scene.objects.active = cube
while len(cube.material_slots) < 6:
bpy.ops.object.material_slot_add()
# bpy.ops.object.mode_set(mode='OBJECT')
for cube_face_idx, f in enumerate(mesh.polygons):
material = get_or_create_image_material( uuid, img_dir, ext, cube_face_idx )
cube.material_slots[cube_face_idx].material = material
f.material_index = cube_face_idx
def quiet_render( img_number, n_images, pflr ):
# redirect output to log file
logfile = 'blender_render.log'
open(logfile, 'a').close()
old = os.dup(1)
sys.stdout.flush()
os.close(1)
os.open(logfile, os.O_WRONLY)
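# fd 1 (stdout) now points at the logfile, so Blender's low-level render output
# is captured there instead of the console; it is restored from 'old' below.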
# do the rendering
bpy.ops.render.render(write_still=True)
pflr.step( 'finished img {}/{}'.format( img_number, n_images ) )
# disable output redirection
os.close(1)
os.dup(old)
os.close(old)
os.remove(logfile)
if __name__ == "__main__":
with Profiler( "create_rgb_images.py" ):
main()

62
data/datatasks.py Normal file

@@ -0,0 +1,62 @@
from subprocess import call
import os
class DataTasks:
code_dir = ""
blender_py = "blender -b -noaudio --enable-autoexec --python"
def __init__(self, code_dir, model_root, model_id):
self.code_dir = os.path.abspath(code_dir)
self.model_root = os.path.abspath(model_root)
self.model_path = os.path.join(model_root, model_id)
self.model_id = model_id
def generate_points(self, num_needed, min_view, max_view):
os.chdir(self.model_path)
code_point = os.path.abspath(os.path.join(self.code_dir, "generate_points.py"))
command_str = "%s %s --" % (self.blender_py, code_point)
command_list = command_str.split()
call(command_list + ["--NUM_POINTS_NEEDED", str(num_needed), "--MIN_VIEWS", str(min_view), "--MAX_VIEWS", str(max_view)])
os.chdir(self.code_dir)
print("Finished: %s point generation" % self.model_id)
return True
def create_obj_file(self):
os.chdir(self.model_path)
code_obj = os.path.abspath(os.path.join(self.code_dir, 'decode', 'decode.js'))
command_str = "node %s --rootdir=%s --model=%s" % (code_obj, self.model_root, self.model_id)
command_list = command_str.split()
call(command_list)
os.chdir(self.code_dir)
print("Finished: %s object creation" % self.model_id)
return True
def create_rgb_images(self):
os.chdir(self.model_path)
code_rgb = os.path.abspath(os.path.join(self.code_dir, "create_rgb_images.py"))
command_str = "%s %s --" % (self.blender_py, code_rgb)
command_list = command_str.split()
call(command_list)
os.chdir(self.code_dir)
print("Finished: %s create rgb images" % self.model_id)
return True
def create_mist_images(self):
os.chdir(self.model_path)
code_mist = os.path.abspath(os.path.join(self.code_dir, "create_mist_images.py"))
command_str = "%s %s --" % (self.blender_py, code_mist)
command_list = command_str.split()
call(command_list)
os.chdir(self.code_dir)
print("Finished: %s create mist images" % self.model_id)
return True
def create_normal_images(self):
os.chdir(self.model_path)
code_normal = os.path.abspath(os.path.join(self.code_dir, "create_normal_images.py"))
command_str = "%s %s --" % (self.blender_py, code_normal)
command_list = command_str.split()
call(command_list)
os.chdir(self.code_dir)
print("Finished: %s create normal images" % self.model_id)
return True
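# Illustrative usage sketch (an assumption, not part of the original pipeline
# driver); the model_root path and the argument values below are placeholders.
if __name__ == "__main__":
    dt = DataTasks(code_dir=os.path.dirname(os.path.abspath(__file__)),
                   model_root="/path/to/models",  # placeholder root containing decoded models
                   model_id="11HB6XZSh1Q")        # example model id (see data/.gitignore)
    dt.create_obj_file()                          # decode the .dam mesh into out_res.obj
    dt.generate_points(num_needed=20, min_view=3, max_view=10)
    dt.create_rgb_images()
    dt.create_mist_images()
    dt.create_normal_images()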

168
data/decode/DamLoader.js Normal file

@@ -0,0 +1,168 @@
/**
* Customized DamLoader
* Input: .dam input file path, .obj output path
* Output: boolean, whether loading succeeded. If it succeeds, the
* .obj file will be stored at the given path
*/
var THREE = require("./Three");
var PROTOBUF = require("./Protobuf.js");
var fs = require("fs");
require('./helpers.js')
var LOW = 'LOW'
var MID = 'MID'
var HIGH = 'HIGH'
function DamLoader(inputDBPath, outputDBPath, quality=HIGH) {
this.inputDB = inputDBPath,
this.outputDB = outputDBPath,
this.quality = quality,
this.builder = PROTOBUF.loadProto(protoToken),
this.decoder = this.builder.build("binary_mesh"),
this.mtlName = "complete.mtl"
}
DamLoader.prototype = {
constructor: DamLoader,
load: function(id, mtlName, meshUrl) {
self = this;
var damPath = self.inputDB + '/' + id + '/dam';
fs.readFile(self.selectDam(damPath), function read(err, data) {
if (err) {
throw err;
}
// Invoke the next step here however you like
// console.log(content); // Put all of the code here (not the best solution)
self.parse(data, id, meshUrl)
});
if (mtlName) {
this.mtlName = mtlName;
}
},
selectDam: function(dir) {
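// Quality selects which decimation of the mesh to decode:
// LOW -> *_10k.dam, MID -> *_50k.dam, HIGH -> the full-resolution .dam file.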
var selected;
var allFiles = [];
var files = fs.readdirSync(dir)
files.forEach(file => {
allFiles.push(file)
});
if (this.quality == LOW) {
selected = allFiles.filter(function(f) {
return f.endsWith('_10k.dam')
})
} else if (this.quality == MID) {
selected = allFiles.filter(function(f) {
return f.endsWith('_50k.dam')
})
} else {
// High quality
selected = allFiles.filter(function(f) {
return (!f.endsWith('v2.dam')) && (!f.endsWith('_10k.dam'))
&& (!f.endsWith('_50k.dam')) && (f !== 'tesselate.dam') && f.endsWith('.dam')
})
}
return dir + '/' + selected
},
parse: function(e, id, t) {
var o = this.readProtobuf(e);
try {
// c.time("convert to webgl"),
this.convertProtobufToSceneObject(o, id, t)
// c.timeEnd("convert to webgl")
} catch (e) {
console.log("failed parsing .dam");
throw new Error();
// return c.error("failed parsing .dam"),
// c.error(e.message),
}
},
readProtobuf: function(e) {
var t;
try {
// c.time("parse proto"),
t = this.decoder.decode(e)
// c.timeEnd("parse proto")
} catch (e) {
console.log("failed parsing proto for .dam");
throw new Error();
// return c.error("failed parsing proto for .dam"),
// c.error(e.message),
// null
}
return t
},
convertProtobufToSceneObject: function(e, id, t) {
self = this;
function a(e) {
var i = new THREE.BufferGeometry;
return i.addAttribute("position", new THREE.BufferAttribute(new Float32Array(e.vertices.xyz,0,3),3)),
e.vertices.uv.length > 0 && i.addAttribute("uv", new THREE.BufferAttribute(new Float32Array(e.vertices.uv,0,2),2)),
i.setIndex(new THREE.BufferAttribute(new Uint32Array(e.faces.faces,0,1),1)),
i.applyMatrix(s),
i.computeBoundingBox(),
new CHUNK({
geometry: i,
textureName: e.material_name,
name: e.chunk_name,
meshUrl: t
})
}
if (0 === e.chunk.length) {
console.log("No chunks in damfile...");
return ;
}
var s = new THREE.Matrix4;
s.set(1, 0, 0, 0, 0, 0, 1, 0, 0, -1, 0, 0, 0, 0, 0, 1);
var l = e.chunk.map(a);
// 3D Model Scraping & Saving
var exporter = new THREE.OBJExporter();
if (document.mergeModels) {
for (var index = 0; index < l.length; index++) {
l[0].geometry.merge(l[index].geometry);
}
self.saveFile(exporter.parse(l[0]), id,
"out_res.obj");
console.log("Success (merged): " + id);
} else {
// for (var index = 0; index < l.length; index++) {
// self.saveFile(exporter.parse(l[index]), id,
// id + "_" + self.quality + "_" + index + ".obj");
//}
this.mtlName = id + '.mtl'; // needs to be commented out for Gates 2nd floor
self.saveFile(exporter.parse(l, this.mtlName), id,
"out_res.obj");
self.saveFile(exporter.generateMtl(l), id, this.mtlName);
};
if (l) {
return ;
} else {
console.log(".dam protobuf came out with no chunks...")
throw new Error()
}
},
saveFile: function(content, id, name) {
if (self.outputDB !== "") {
if (!fs.existsSync(self.outputDB + '/' + id)){
fs.mkdirSync(self.outputDB + '/' + id);
}
var filePath = self.outputDB + '/' + id + '/modeldata/' + name
fs.writeFileSync(filePath, content)
}
}
}
module.exports = DamLoader

1
data/decode/Protobuf.js Executable file

@@ -0,0 +1 @@
module.exports = require("./protobuf/protobuf.js");

424
data/decode/Three.js Normal file

@@ -0,0 +1,424 @@
/**
* Three JS
* Add in customized THREE.OBJExporter
*/
var THREE = require("./three_src.js");
THREE.OBJExporter = function () {};
THREE.OBJExporter.prototype = {
constructor: THREE.OBJExporter,
parse: function ( objectArray, mtlName ) {
var outputMTLFile = mtlName? 'mtllib ' + mtlName: 'mtllib tesselate.mtl';
var outputVertex = '';
var outputVertexUvs = '';
var outputVertexN = '';
var outputFace = '';
var output = '';
var indexVertex = 0;
var indexVertexUvs = 0;
var indexNormals = 0;
var vertex = new THREE.Vector3();
var normal = new THREE.Vector3();
var uv = new THREE.Vector2();
var i, j, l, m, face = [];
var parseMesh = function ( mesh ) {
var nbVertex = 0;
var nbNormals = 0;
var nbVertexUvs = 0;
var geometry = mesh.geometry;
var normalMatrixWorld = new THREE.Matrix3();
if ( geometry instanceof THREE.Geometry ) {
geometry = new THREE.BufferGeometry().setFromObject( mesh );
}
if ( geometry instanceof THREE.BufferGeometry ) {
// shortcuts
var vertices = geometry.getAttribute( 'position' );
var normals = geometry.getAttribute( 'normal' );
var uvs = geometry.getAttribute( 'uv' );
var indices = geometry.getIndex();
// name of the mesh object
outputFace += '\nusemtl ' + mesh.textureName + '\n';
outputFace += 'o ' + mesh.name + '\n';
// vertices
if( vertices !== undefined ) {
for ( i = 0, l = vertices.count; i < l; i ++, nbVertex++ ) {
vertex.x = vertices.getX( i );
vertex.y = vertices.getY( i );
vertex.z = vertices.getZ( i );
// transform the vertex to world space
vertex.applyMatrix4( mesh.matrixWorld );
// transform the vertex to export format
outputVertex += 'v ' + vertex.x + ' ' + vertex.y + ' ' + vertex.z + '\n';
}
}
// uvs
if( uvs !== undefined ) {
for ( i = 0, l = uvs.count; i < l; i ++, nbVertexUvs++ ) {
uv.x = uvs.getX( i );
uv.y = uvs.getY( i );
// transform the uv to export format
outputVertexUvs += 'vt ' + uv.x + ' ' + uv.y + '\n';
}
}
// normals
if( normals !== undefined ) {
normalMatrixWorld.getNormalMatrix( mesh.matrixWorld );
for ( i = 0, l = normals.count; i < l; i ++, nbNormals++ ) {
normal.x = normals.getX( i );
normal.y = normals.getY( i );
normal.z = normals.getZ( i );
// transform the normal to world space
normal.applyMatrix3( normalMatrixWorld );
// transform the normal to export format
outputVertexN += 'vn ' + normal.x + ' ' + normal.y + ' ' + normal.z + '\n';
}
}
// faces
if( indices !== null ) {
for ( i = 0, l = indices.count; i < l; i += 3 ) {
for( m = 0; m < 3; m ++ ){
j = indices.getX( i + m ) + 1;
face[ m ] = ( indexVertex + j ) + '/' + ( uvs ? ( indexVertexUvs + j ) : '' ) + '/' + ( indexNormals + j );
}
// transform the face to export format
outputFace += 'f ' + face.join( ' ' ) + "\n";
}
} else {
for ( i = 0, l = vertices.count; i < l; i += 3 ) {
for( m = 0; m < 3; m ++ ){
j = i + m + 1;
face[ m ] = ( indexVertex + j ) + '/' + ( uvs ? ( indexVertexUvs + j ) : '' ) + '/' + ( indexNormals + j );
}
// transform the face to export format
outputFace += 'f ' + face.join( ' ' ) + "\n";
}
}
} else {
console.warn( 'THREE.OBJExporter.parseMesh(): geometry type unsupported', geometry );
}
// update index
indexVertex += nbVertex;
indexVertexUvs += nbVertexUvs;
indexNormals += nbNormals;
};
var parseLine = function( line ) {
var nbVertex = 0;
var geometry = line.geometry;
var type = line.type;
if ( geometry instanceof THREE.Geometry ) {
geometry = new THREE.BufferGeometry().setFromObject( line );
}
if ( geometry instanceof THREE.BufferGeometry ) {
// shortcuts
var vertices = geometry.getAttribute( 'position' );
var indices = geometry.getIndex();
// name of the line object
output += 'o ' + line.name + '\n';
if( vertices !== undefined ) {
for ( i = 0, l = vertices.count; i < l; i ++, nbVertex++ ) {
vertex.x = vertices.getX( i );
vertex.y = vertices.getY( i );
vertex.z = vertices.getZ( i );
// transform the vertex to world space
vertex.applyMatrix4( line.matrixWorld );
// transform the vertex to export format
output += 'v ' + vertex.x + ' ' + vertex.y + ' ' + vertex.z + '\n';
}
}
if ( type === 'Line' ) {
output += 'l ';
for ( j = 1, l = vertices.count; j <= l; j++ ) {
output += ( indexVertex + j ) + ' ';
}
output += '\n';
}
if ( type === 'LineSegments' ) {
for ( j = 1, k = j + 1, l = vertices.count; j < l; j += 2, k = j + 1 ) {
output += 'l ' + ( indexVertex + j ) + ' ' + ( indexVertex + k ) + '\n';
}
}
} else {
console.warn('THREE.OBJExporter.parseLine(): geometry type unsupported', geometry );
}
// update index
indexVertex += nbVertex;
};
for (o = 0; o < objectArray.length; o += 1){
var child = objectArray[o]
if ( child instanceof THREE.Mesh ) {
parseMesh( child );
}
output = outputMTLFile + '\n' + outputVertex + '\n' + outputVertexN + '\n' +
outputVertexUvs + '\n' + outputFace;
}
/*
object.traverse = function ( callback ) {
callback( object );
var children = object.children;
for ( var i = 0, l = children.length; i < l; i ++ ) {
children[ i ].traverse( callback );
}
},
object.traverse( function ( child ) {
if ( child instanceof THREE.Mesh ) {
parseMesh( child );
}
if ( child instanceof THREE.Line ) {
parseLine( child );
}
} );
*/
return output;
},
generateMtl: function ( objectArray) {
var output = '';
var generatedTexture = [];
var parseMeshMtl = function ( mesh ) {
var geometry = mesh.geometry;
var normalMatrixWorld = new THREE.Matrix3();
if ( geometry instanceof THREE.Geometry ) {
geometry = new THREE.BufferGeometry().setFromObject( mesh );
}
if ( geometry instanceof THREE.BufferGeometry ) {
// name of the mesh object
if (!generatedTexture.includes(mesh.textureName)) {
output += 'newmtl ' + mesh.textureName + '\n';
output += 'map_Ka ' + mesh.textureName + '\n';
output += 'map_Kd ' + mesh.textureName + '\n\n';
generatedTexture.push(mesh.textureName);
}
} else {
console.warn( 'THREE.OBJExporter.parseMesh(): geometry type unsupported', geometry );
}
};
for (o = 0; o < objectArray.length; o += 1){
var child = objectArray[o]
if ( child instanceof THREE.Mesh ) {
parseMeshMtl( child );
}
}
return output;
}
};
/*
3D Model Scraping:
Helper function for merging multiple 3D elements
*/
THREE.BufferGeometry.prototype.merge = function ( geometry ) {
if ( geometry instanceof THREE.BufferGeometry === false ) {
console.error( 'THREE.BufferGeometry.merge(): geometry not an instance of THREE.BufferGeometry.', geometry );
return;
}
var attributes = this.attributes;
if( this.index ){
var indices = geometry.index.array;
var offset = attributes[ 'position' ].count;
for( var i = 0, il = indices.length; i < il; i++ ) {
indices[i] = offset + indices[i];
}
this.index.array = Uint32ArrayConcat( this.index.array, indices );
}
for ( var key in attributes ) {
if ( geometry.attributes[ key ] === undefined ) continue;
attributes[ key ].array = Float32ArrayConcat( attributes[ key ].array, geometry.attributes[ key ].array );
}
return this;
/***
* @param {Float32Array} first
* @param {Float32Array} second
* @returns {Float32Array}
* @constructor
*/
function Float32ArrayConcat(first, second)
{
var firstLength = first.length,
result = new Float32Array(firstLength + second.length);
result.set(first);
result.set(second, firstLength);
return result;
}
/**
* @param {Uint32Array} first
* @param {Uint32Array} second
* @returns {Uint32Array}
* @constructor
*/
function Uint32ArrayConcat(first, second)
{
var firstLength = first.length,
result = new Uint32Array(firstLength + second.length);
result.set(first);
result.set(second, firstLength);
return result;
}
};
module.exports = THREE

Binary file not shown.

12
data/decode/decode.js Normal file

@@ -0,0 +1,12 @@
var DamLoader = require("./DamLoader.js")
var argv = require('optimist').argv
document = {};
document.mergeModels = false;
rootDir = argv.rootdir
modelId = argv.model
loader = new DamLoader(rootDir, rootDir)
loader.load(modelId)
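// Example invocation (this mirrors the command built in data/datatasks.py):
//   node decode.js --rootdir=/path/to/models --model=11HB6XZSh1Q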

572
data/decode/helpers.js Normal file

@@ -0,0 +1,572 @@
var THREE = require("./Three");
var PROTOBUF = require("./Protobuf.js");
var fs = require("fs");
var buffer = require("buffer");
var CoreViewModes = {
PANORAMA: "panorama",
DOLLHOUSE: "dollhouse",
FLOORPLAN: "floorplan"
}
var Viewmode = {
MESH: "mesh",
OUTDOOR: "outdoor",
TRANSITIONING: "transitioning",
toInt: function(e) {
switch (e) {
case this.PANORAMA:
return 1;
case this.DOLLHOUSE:
return 2;
case this.FLOORPLAN:
return 3;
case this.OUTDOOR:
return 4;
case this.TRANSITIONING:
return -1
}
},
fromInt: function(e) {
switch (e) {
case "1":
case 1:
return this.PANORAMA;
case "2":
case 2:
return this.DOLLHOUSE;
case "3":
case 3:
return this.FLOORPLAN;
case "4":
case 4:
return this.OUTDOOR
}
},
convertWorkshopModeInt: function(e) {
switch (e) {
case "0":
case 0:
return this.PANORAMA;
case "1":
case 1:
return this.FLOORPLAN;
case "2":
case 2:
return this.DOLLHOUSE;
case "3":
case 3:
return this.MESH
}
}
};
var COLORS = {
newBlue: new THREE.Color(4967932),
altBlue: new THREE.Color(47355),
classicBlue: new THREE.Color(53759),
mpYellow: new THREE.Color(16502016),
mpOrange: new THREE.Color(16428055),
mpBlue: new THREE.Color(12096),
mpLtGrey: new THREE.Color(13751252),
mpDkGrey: new THREE.Color(10000019),
mpRed: new THREE.Color(12525854),
mpOrangeDesat: new THREE.Color(16764529),
mpBlueDesat: new THREE.Color(4034734),
mpRedDesat: new THREE.Color(14705505),
white: new THREE.Color(16777215),
black: new THREE.Color(0),
_desat: function(e, t) {
var i = t || .3
, r = (new THREE.Color).copy(e).getHSL();
return (new THREE.Color).setHSL(r.h, r.s * (1 - i), r.l)
},
_darken: function(e, t) {
var i = t || .2
, r = (new THREE.Color).copy(e).getHSL();
return (new THREE.Color).setHSL(r.h, r.s, r.l * (1 - i))
}
}
var o = "precision highp float;\nprecision highp int;\n\nuniform mat4 modelMatrix;\nuniform mat4 modelViewMatrix;\nuniform mat4 projectionMatrix;\nuniform mat4 viewMatrix;\nuniform mat3 normalMatrix;\nuniform vec3 cameraPosition;\nattribute vec3 position;\nattribute vec3 normal;\nattribute vec2 uv;\n"
var a = "precision highp float;\nprecision highp int;\n\nuniform mat4 viewMatrix;\nuniform vec3 cameraPosition;\n";
var SHADERS = {
basicTextured: {
uniforms: {
tDiffuse: {
type: "t",
value: null
},
alpha: {
type: "f",
value: 1
}
},
vertexShader: "varying vec2 vUv;\nvoid main() {\n vUv = uv;\n gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);\n}",
fragmentShader: "varying vec2 vUv;\nuniform float alpha;\nuniform sampler2D tDiffuse;\nvoid main() {\n vec4 texColor = texture2D(tDiffuse, vUv);\n gl_FragColor = vec4(texColor.rgb, texColor.a * alpha);\n}"
},
copyCubeMap: {
uniforms: {
tDiffuse: {
type: "t",
value: null
},
alpha: {
type: "f",
value: 1
}
},
vertexShader: "varying vec3 vWorldPos;\nvoid main() {\n vWorldPos = vec3(-position.x, -position.y, position.z);\n gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0);\n}",
fragmentShader: "varying vec3 vWorldPos;\nuniform float alpha;\nuniform samplerCube tDiffuse;\nvoid main() {\n vec4 texColor = textureCube(tDiffuse, vWorldPos);\n gl_FragColor = vec4(texColor.rgb, texColor.a * alpha);\n}"
},
cube: {
uniforms: {
map: {
type: "t",
value: null
},
opacity: {
type: "f",
value: 1
}
},
vertexShader: o + "varying vec3 vWorldPosition;\n\nvoid main() {\n vWorldPosition = position;\n gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );\n}\n",
fragmentShader: a + "uniform samplerCube map;\nuniform float opacity;\n\nvarying vec3 vWorldPosition;\n\nvoid main() {\n vec4 color = textureCube( map, vec3( -vWorldPosition.x, vWorldPosition.yz ) );\n gl_FragColor = vec4(color.rgb, opacity);\n}\n"
},
model: {
uniforms: {
map: {
type: "t",
value: null
},
modelAlpha: {
type: "f",
value: 1 // r.modelAlpha
},
opacity: {
type: "f",
value: 1
},
progress: {
type: "f",
value: 0
},
blackout: {
type: "i",
value: 0
},
pano0Map: {
type: "t",
value: null
},
pano0Position: {
type: "v3",
value: new THREE.Vector3
},
pano0Matrix: {
type: "m4",
value: new THREE.Matrix4
},
pano1Map: {
type: "t",
value: null
},
pano1Position: {
type: "v3",
value: new THREE.Vector3
},
pano1Matrix: {
type: "m4",
value: new THREE.Matrix4
}
},
vertexShader: o + "uniform vec3 pano0Position;\nuniform mat4 pano0Matrix;\n\nuniform vec3 pano1Position;\nuniform mat4 pano1Matrix;\n\nvarying vec2 vUv;\nvarying vec3 vWorldPosition0;\nvarying vec3 vWorldPosition1;\n\nvoid main() {\n\n vUv = uv;\n vec4 worldPosition = modelMatrix * vec4(position, 1.0);\n\n vec3 positionLocalToPanoCenter0 = worldPosition.xyz - pano0Position;\n vWorldPosition0 = (vec4(positionLocalToPanoCenter0, 1.0) * pano0Matrix).xyz;\n vWorldPosition0.x *= -1.0;\n\n vec3 positionLocalToPanoCenter1 = worldPosition.xyz - pano1Position;\n vWorldPosition1 = (vec4(positionLocalToPanoCenter1, 1.0) * pano1Matrix).xyz;\n vWorldPosition1.x *= -1.0;\n\n gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );\n\n}\n",
fragmentShader: a + "uniform sampler2D map;\nuniform float modelAlpha;\nuniform float opacity;\nuniform float progress;\nuniform int blackout;\n\nuniform vec3 pano0Position;\nuniform samplerCube pano0Map;\n\nuniform vec3 pano1Position;\nuniform samplerCube pano1Map;\n\nvarying vec2 vUv;\nvarying vec3 vWorldPosition0;\nvarying vec3 vWorldPosition1;\n\nvoid main() {\n\tconst vec4 BLACK = vec4(0.0, 0.0, 0.0, 1.0);\n\tconst vec4 GREY = vec4(0.5, 0.5, 0.5, 1.0);\n\n\tvec4 colorFromPanos;\n\tvec4 colorFromPano0 = textureCube( pano0Map, vWorldPosition0.xyz);\n\tvec4 colorFromPano1 = textureCube( pano1Map, vWorldPosition1.xyz);\n\n\tif (blackout == 0) {\n\t\tcolorFromPanos = mix(colorFromPano0, colorFromPano1, progress);\n\t} else if (blackout == 1) {\n\t\tcolorFromPanos = mix(colorFromPano0, BLACK, min(1.0, progress*2.0));\n\t\tcolorFromPanos = mix(colorFromPanos, colorFromPano1, max(0.0, progress * 2.0 - 1.0));\n\t} else if (blackout == 2) {\n\t\tcolorFromPanos = mix(colorFromPano0, BLACK, progress);\n\t} else if (blackout == 3) {\n\t\tcolorFromPanos = mix(BLACK, colorFromPano1, max(0.0, progress * 2.0 - 1.0));\n\t} \n\n\tvec4 colorFromTexture = texture2D( map, vUv );\n\tcolorFromPanos = mix(colorFromPanos, colorFromTexture, modelAlpha);\n\n\tfloat whiteness = 1.0 - smoothstep(0.1, 0.2, opacity);\n\tcolorFromPanos = mix(colorFromPanos, GREY, whiteness);\n\tgl_FragColor = vec4(colorFromPanos.rgb, opacity);\n}\n"
},
modelOutside: {
uniforms: {
map: {
type: "t",
value: null
},
opacity: {
type: "f",
value: 1
}
},
vertexShader: o + "varying vec2 vUv;\n\nvoid main() {\n\n vUv = uv;\n gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );\n\n}\n",
fragmentShader: a + "uniform sampler2D map;\nuniform float opacity;\nvarying vec2 vUv;\n\nvec4 white = vec4(0.5, 0.5, 0.5, 1.0);\n\nvoid main() {\n\n vec4 colorFromTexture = texture2D( map, vUv );\n float whiteness = 1.0 - smoothstep(0.1, 0.2, opacity);\n colorFromTexture = mix(colorFromTexture, white, whiteness);\n gl_FragColor = vec4(colorFromTexture.rgb, opacity);\n\n}\n"
},
ribbon: {
uniforms: {
map: {
type: "t",
value: null
},
opacity: {
type: "f",
value: 1
},
color: {
type: "c",
value: new THREE.Color(COLORS.newBlue) // r.path.color)
}
},
vertexShader: o + "varying vec2 vUv;\nvarying vec3 vN;\nvarying vec4 vP;\n\nvoid main() {\n\n vUv = uv;\n vN= normalMatrix * normal;\n vP = modelViewMatrix * vec4( position, 1.0 );\n gl_Position = projectionMatrix * vP;\n}\n",
fragmentShader: a + "uniform sampler2D map;\nuniform float opacity;\nvarying vec2 vUv;\nuniform vec3 color;\nvarying vec3 vN; // show-1182\nvarying vec4 vP; // show-1182\n\nvoid main() {\n\t// TODO add scroll-in and pulsing behaviors\n\tvec3 vNn = normalize(vN);\n\tvec3 vPn = normalize(vP.xyz);\n\tfloat f = pow(1.0-abs(dot(vNn,vPn)),0.2);\n vec4 colorFromTexture = texture2D( map, vUv );\n colorFromTexture.a *= f;\n gl_FragColor = vec4((color.rgb*colorFromTexture.rgb),\n \t\t\t\t\t\t(opacity*colorFromTexture.a));\n}\n"
},
waypoint: {
uniforms: {
map: {
type: "t",
value: null
},
opacity: {
type: "f",
value: 1
},
pulse: {
type: "f",
value: 1
},
nearFade: {
type: "v2",
value: new THREE.Vector2(2 * 0.1, 2 * 0.24)
// value: new THREE.Vector2(2 * r.insideNear,2 * r.path.waypointIndoorRadius)
},
color: {
type: "c",
value: new THREE.Color(COLORS.newBlue) // r.reticuleColor)
}
},
vertexShader: o + "varying vec2 vUv;\nvarying vec4 vPointView;\n\nvoid main() {\n\n vUv = uv;\n vPointView = modelViewMatrix * vec4( position, 1.0 );\n gl_Position = projectionMatrix * vPointView;\n\n}\n",
fragmentShader: a + "uniform sampler2D map;\nuniform float opacity;\nuniform float pulse; // another opacity, with a different clock\nuniform vec2 nearFade;\nvarying vec2 vUv;\nvarying vec4 vPointView;\nuniform vec3 color;\n\nvoid main() {\n\t// TODO add scroll-in and pulsing behaviors\n\tfloat depthFade = min(1.0, (abs(vPointView.z)-nearFade.x)/(nearFade.y-nearFade.x));\n vec4 colorFromTexture = texture2D( map, vUv );\t\t// we only use the alpha!\n gl_FragColor = vec4(color.rgb,\n \t\t\t\t\t\t(pulse*opacity*colorFromTexture.a * depthFade));\n}\n"
},
modelDebug: {
uniforms: {
map: {
type: "t",
value: null
},
modelAlpha: {
type: "f",
value: 1 // r.modelAlpha
},
depthmapRatio: {
type: "f",
value: 0
},
opacity: {
type: "f",
value: 1
},
progress: {
type: "f",
value: 0
},
considerOcclusion: {
type: "i",
value: !1 // r.fancierTransition
},
highlightPanoSelection: {
type: "i",
value: 0
},
useThirdPano: {
type: "i",
value: null // r.useThirdPano
},
pano0Map: {
type: "t",
value: null
},
pano0Depth: {
type: "t",
value: null
},
pano0Position: {
type: "v3",
value: new THREE.Vector3
},
pano0Matrix: {
type: "m4",
value: new THREE.Matrix4
},
pano0Weight: {
type: "f",
value: null // r.transition.pano0Weight
},
pano1Map: {
type: "t",
value: null
},
pano1Depth: {
type: "t",
value: null
},
pano1Position: {
type: "v3",
value: new THREE.Vector3
},
pano1Matrix: {
type: "m4",
value: new THREE.Matrix4
},
pano1Weight: {
type: "f",
value: null // r.transition.pano1Weight
},
pano2Map: {
type: "t",
value: null
},
pano2Depth: {
type: "t",
value: null
},
pano2Position: {
type: "v3",
value: new THREE.Vector3
},
pano2Matrix: {
type: "m4",
value: new THREE.Matrix4
},
pano2Weight: {
type: "f",
value: null // r.transition.pano2Weight
}
},
vertexShader: o + "uniform vec3 pano0Position;\nuniform mat4 pano0Matrix;\n\nuniform vec3 pano1Position;\nuniform mat4 pano1Matrix;\n\nuniform vec3 pano2Position;\nuniform mat4 pano2Matrix;\n\nvarying vec2 vUv;\nvarying vec3 vWorldPosition0;\nvarying vec3 vWorldPosition1;\nvarying vec3 vWorldPosition2;\n\nvarying vec4 worldPosition;\n\nvoid main() {\n\n vUv = uv;\n worldPosition = modelMatrix * vec4(position, 1.0);\n\n vec3 positionLocalToPanoCenter0 = worldPosition.xyz - pano0Position;\n vWorldPosition0 = (vec4(positionLocalToPanoCenter0, 1.0) * pano0Matrix).xyz;\n vWorldPosition0.x *= -1.0;\n\n vec3 positionLocalToPanoCenter1 = worldPosition.xyz - pano1Position;\n vWorldPosition1 = (vec4(positionLocalToPanoCenter1, 1.0) * pano1Matrix).xyz;\n vWorldPosition1.x *= -1.0;\n\n vec3 positionLocalToPanoCenter2 = worldPosition.xyz - pano2Position;\n vWorldPosition2 = (vec4(positionLocalToPanoCenter2, 2.0) * pano2Matrix).xyz;\n vWorldPosition2.x *= -1.0;\n\n gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );\n\n}\n",
fragmentShader: a + "uniform sampler2D map;\nuniform float depthmapRatio;\nuniform float modelAlpha;\nuniform float opacity;\nuniform float progress;\nuniform int considerOcclusion;\nuniform int highlightPanoSelection;\nuniform int useThirdPano;\n\nuniform vec3 pano0Position;\nuniform samplerCube pano0Map;\nuniform samplerCube pano0Depth;\nuniform float pano0Weight;\n\nuniform vec3 pano1Position;\nuniform samplerCube pano1Map;\nuniform samplerCube pano1Depth;\nuniform float pano1Weight;\n\nuniform vec3 pano2Position;\nuniform samplerCube pano2Map;\nuniform samplerCube pano2Depth;\nuniform float pano2Weight;\n\nvarying vec2 vUv;\nvarying vec3 vWorldPosition0;\nvarying vec3 vWorldPosition1;\nvarying vec3 vWorldPosition2;\n\nvarying vec4 worldPosition;\n\nvoid main() {\n\n vec4 depthFromPano0 = textureCube( pano0Depth, vWorldPosition0.xyz );\n vec4 depthFromPano1 = textureCube( pano1Depth, vWorldPosition1.xyz );\n vec4 depthFromPano2 = textureCube( pano2Depth, vWorldPosition2.xyz );\n\n vec4 colorFromPano0 = textureCube( pano0Map, vWorldPosition0.xyz );\n vec4 colorFromPano1 = textureCube( pano1Map, vWorldPosition1.xyz );\n vec4 colorFromPano2 = textureCube( pano2Map, vWorldPosition2.xyz );\n\n float distanceToPano0 = distance(worldPosition.xyz, pano0Position);\n float distanceToPano1 = distance(worldPosition.xyz, pano1Position);\n float distanceToPano2 = distance(worldPosition.xyz, pano2Position);\n\n float cameraToPano0 = distance(cameraPosition.xyz, pano0Position);\n float cameraToPano1 = distance(cameraPosition.xyz, pano1Position);\n float cameraToPano2 = distance(cameraPosition.xyz, pano2Position);\n\n float contributionFromPano0 = cameraToPano0 == 0.0 ? 1000.0 : pano0Weight / cameraToPano0;\n float contributionFromPano1 = cameraToPano1 == 0.0 ? 1000.0 : pano1Weight / cameraToPano1;\n float contributionFromPano2 = cameraToPano2 == 0.0 ? 
1000.0 : pano2Weight / cameraToPano2;\n\n contributionFromPano0 *= 1.0 / distanceToPano0;\n contributionFromPano1 *= 1.0 / distanceToPano1;\n contributionFromPano2 *= 1.0 / distanceToPano2;\n\n if(considerOcclusion == 1) {\n bool occludedFromPano0 = distanceToPano0 / 10.0 > 1.01 - depthFromPano0.x;\n bool occludedFromPano1 = distanceToPano1 / 10.0 > 1.01 - depthFromPano1.x;\n bool occludedFromPano2 = distanceToPano2 / 10.0 > 1.01 - depthFromPano2.x;\n\n if(occludedFromPano0){contributionFromPano0 *= 0.1;}\n if(occludedFromPano1){contributionFromPano1 *= 0.1;}\n if(occludedFromPano2){contributionFromPano2 *= 0.1;}\n //if(occludedFromPano0 && occludedFromPano1 && !occludedFromPano2) { contributionFromPano2 += 0.5; }\n }\n\n float contributionSum = contributionFromPano0 + contributionFromPano1 + contributionFromPano2;\n contributionFromPano0 /= contributionSum;\n contributionFromPano1 /= contributionSum;\n contributionFromPano2 /= contributionSum;\n\n vec4 colorFromPanos = colorFromPano0 * contributionFromPano0;\n colorFromPanos += colorFromPano1 * contributionFromPano1;\n colorFromPanos += colorFromPano2 * contributionFromPano2;\n\n vec4 depthFromPanos = depthFromPano0 * contributionFromPano0;\n depthFromPanos += depthFromPano1 * contributionFromPano1;\n depthFromPanos += depthFromPano2 * contributionFromPano2;\n\n vec4 colorFromTexture = texture2D( map, vUv );\n colorFromPanos = mix(colorFromPanos, colorFromTexture, modelAlpha);\n\n if(highlightPanoSelection == 1) {\n colorFromPanos.r = contributionFromPano0;\n colorFromPanos.g = contributionFromPano1;\n colorFromPanos.b = contributionFromPano2;\n }\n\n gl_FragColor = vec4(mix(colorFromPanos, depthFromPanos, depthmapRatio).rgb, opacity);\n\n}\n"
},
customDepth: {
uniforms: {
panoPosition: {
type: "v3",
value: new THREE.Vector3
}
},
vertexShader: o + "varying vec4 worldPosition;\n\nvoid main() {\n\n worldPosition = modelMatrix * vec4(position, 1.0);\n gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );\n\n}\n",
fragmentShader: a + "uniform vec3 panoPosition;\nvarying vec4 worldPosition;\n\nvoid main() {\n\n float depth = distance(worldPosition.xyz, panoPosition);\n float color = 1.0 - depth / 10.0;\n gl_FragColor = vec4(color, color, color, 1.0);\n\n}\n"
},
skysphere: {
uniforms: {
radius: {
type: "f",
value: 0
}
},
vertexShader: o + "varying vec4 worldPosition;\n\nvoid main() {\n\n worldPosition = modelMatrix * vec4(position, 1.0);\n gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );\n\n}\n",
fragmentShader: a + "varying vec4 worldPosition;\nuniform float radius;\n\nvoid main() {\n\n vec4 topColor = vec4(0.094, 0.102, 0.11, 1.0);\n vec4 bottomColor = vec4(0.2, 0.216, 0.235, 1.0);\n float normalizedHeight = (worldPosition.y + radius) / (radius * 2.0);\n float ratio = smoothstep(0.0, 0.5, normalizedHeight);\n gl_FragColor = mix(bottomColor, topColor, ratio);\n\n}\n"
}
}
var UTILCOMMON = {
delayOneFrame: function(e) {
window.setTimeout(e, 1)
},
normalizeUrl: function(e) {
return e.replace("https://", "http://")
},
domainFromUrl: function(e) {
var t = /^([^:]*:\/\/)?(www\.)?([^\/]+)/.exec(e);
return t ? t[3] : e
},
average: function(e, t) {
if (0 === e.length)
return null;
for (var i = 0, n = 0, r = 0; r < e.length; r++) {
var o = t ? e[r][t] : e[r];
i += o,
n++
}
return i / n
},
countUnique: function(e) {
for (var t = {}, i = 0; i < e.length; i++)
t[e[i]] = 1 + (t[e[i]] || 0);
return Object.keys(t).length
},
averageVectors: function(e, t) {
var i = new THREE.Vector3;
if (0 === e.length)
return i;
for (var r = 0, o = 0; o < e.length; o++) {
var a = t ? e[o][t] : e[o];
i.add(a),
r++
}
return i.divideScalar(r)
},
equalLists: function(e, t) {
if (e.length !== t.length)
return !1;
for (var i = 0; i < e.length; i++)
if (e[i] !== t[i])
return !1;
return !0
},
lowerMedian: function(e, t) {
if (0 === e.length)
return null;
t = t || 2,
e.sort(function(e, t) {
return e - t
});
var i = Math.floor(e.length / t);
return e[i]
},
stableSort: function(e, t) {
return e.map(function(e, t) {
return {
value: e,
index: t
}
}).sort(function(e, i) {
var n = t(e.value, i.value);
return 0 !== n ? n : e.index - i.index
}).map(function(e) {
return e.value
})
},
filterAll: function(e, t) {
return e.filter(function(e) {
return t.every(function(t) {
return t(e)
})
})
},
formatDate: function(e) {
return [e.getFullYear(), e.getMonth() + 1, e.getDate()].join("-")
},
formatDatetime: function(e) {
return [e.getFullYear(), e.getMonth() + 1, e.getDate(), e.getHours(), e.getMinutes()].join("-")
},
randomString: function(e) {
for (var t = "", i = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", n = 0; n < e; n++)
t += i.charAt(Math.floor(Math.random() * i.length));
return t
},
nth: function(e) {
return e %= 10,
1 === e ? e + "st" : 2 === e ? e + "nd" : 3 === e ? e + "rd" : e + "th"
},
extendObject: function(e, t) {
return Object.keys(t).forEach(function(i) {
e[i] = t[i]
}),
e
},
deepExtend: function e(t) {
t = t || {};
for (var i = 1; i < arguments.length; i++) {
var n = arguments[i];
if (n)
for (var r in n)
n.hasOwnProperty(r) && ("object" == typeof n[r] ? t[r] = e(t[r], n[r]) : t[r] = n[r])
}
return t
},
inherit: function(e, t) {
e.prototype = Object.create(t.prototype),
e.prototype.constructor = e
},
extend: function(e, t) {
for (var i in t.prototype)
e.prototype[i] = t.prototype[i]
}
}
UTILCOMMON.extendObject(Viewmode, CoreViewModes)
Math.sign = function(e) {
return e < 0 ? -1 : 1
}
var ModelTextureMaterial = function(e) {
e = e || {},
THREE.RawShaderMaterial.call(this, UTILCOMMON.extendObject({
fragmentShader: SHADERS.model.fragmentShader,
vertexShader: SHADERS.model.vertexShader,
uniforms: THREE.UniformsUtils.clone(SHADERS.model.uniforms),
name: "ModelTextureMaterial"
}, e))
}
ModelTextureMaterial.prototype = Object.create(THREE.RawShaderMaterial.prototype),
ModelTextureMaterial.prototype.constructor = ModelTextureMaterial,
ModelTextureMaterial.prototype.setProjectedPanos = function(e, t, i) {
i && (this.uniforms.progress.value = 0),
e.tiled || (e.skybox.loaded || (e.skybox.needsUpdate = !0),
e.skybox.loaded = !0),
this.uniforms.pano0Map.value = e.skybox,
this.uniforms.pano0Position.value.copy(e.position),
this.uniforms.pano0Matrix.value.copy(e.skyboxMesh.matrixWorld),
t.tiled || (t.skybox.loaded || (t.skybox.needsUpdate = !0),
t.skybox.loaded = !0),
this.uniforms.pano1Map.value = t.skybox,
this.uniforms.pano1Position.value.copy(t.position),
this.uniforms.pano1Matrix.value.copy(t.skyboxMesh.matrixWorld)
}
CHUNK = function(e) {
this.materialInside = new ModelTextureMaterial({
side: THREE.DoubleSide
});
var t = THREE.UniformsUtils.clone(SHADERS.modelOutside.uniforms);
this.materialOutside = new THREE.RawShaderMaterial({
fragmentShader: SHADERS.modelOutside.fragmentShader,
vertexShader: SHADERS.modelOutside.vertexShader,
uniforms: t,
side: THREE.FrontSide,
name: "chunkOut"
}),
THREE.Mesh.call(this, e.geometry, this.materialInside),
this.name = e.name || "",
this.textureName = e.textureName,
this.meshUrl = e.meshUrl
}
CHUNK.prototype = Object.create(THREE.Mesh.prototype),
CHUNK.prototype.setTextureMap = function(e) {
this.materialInside.uniforms.map.value = e,
this.materialOutside.uniforms.map.value = e
}
CHUNK.prototype.setMode = function(e) {
var t = (e === Viewmode.DOLLHOUSE || e === Viewmode.FLOORPLAN) ? this.materialOutside : this.materialInside;
t.side = e === Viewmode.PANORAMA ? THREE.DoubleSide : THREE.FrontSide,
t.transparent = this.material.transparent,
t.uniforms.opacity.value = this.material.uniforms.opacity.value,
this.material = t
}
protoToken = new Buffer("bWVzc2FnZSBiaW5hcnlfbWVzaCB7CglyZXBlYXRlZCBjaHVua19zaW1wbGUgY2h1bmsgPSAxOwoJcmVwZWF0ZWQgY2h1bmtfcXVhbnRpemVkIHF1YW50aXplZF9jaHVuayA9IDI7Cn0KCi8vIERlZmluaXRpb24gb2YgdmVydGljZXM6IDNEIGNvb3JkaW5hdGVzLCBhbmQgMkQgdGV4dHVyZSBjb29yZGluYXRlcy4KbWVzc2FnZSB2ZXJ0aWNlc19zaW1wbGUgewoJcmVwZWF0ZWQgZmxvYXQgeHl6ID0gMSBbcGFja2VkPXRydWVdOyAgLy8geF8wLHlfMCx6XzAsIHhfMSx5XzEsel8xLCAuLi4KCXJlcGVhdGVkIGZsb2F0IHV2ID0gMiBbcGFja2VkPXRydWVdOyAgLy8gdV8wLHZfMCwgdV8xLHZfMSwgLi4uCn0KCi8vIEluZGV4ZXMgb2YgdmVydGljZXMgb2YgZmFjZXMKbWVzc2FnZSBmYWNlc19zaW1wbGUgewoJcmVwZWF0ZWQgdWludDMyIGZhY2VzID0gMSBbcGFja2VkPXRydWVdOyAvLyBpMDAsaTAxLGkwMiwgaTEwLGkxMSxpMTIsIC4uLgp9CgovLyBBIHNpbXBseSBlbmNvZGVkIGNodW5rLgovLyBUT0RPOiBhZGQgY2h1bmsgcHJvcGVyaXRlcyAoc3VjaCBhcyAicmVmbGVjdGl2ZSIpCm1lc3NhZ2UgY2h1bmtfc2ltcGxlIHsKCW9wdGlvbmFsIHZlcnRpY2VzX3NpbXBsZSB2ZXJ0aWNlcyA9IDE7CglvcHRpb25hbCBmYWNlc19zaW1wbGUgZmFjZXMgPSAyOwoJb3B0aW9uYWwgc3RyaW5nIGNodW5rX25hbWUgPSAzOwoJb3B0aW9uYWwgc3RyaW5nIG1hdGVyaWFsX25hbWUgPSA0Owp9CgovLyBRdWFudGl6ZWQgdmVyc2lvbnMgZm9sbG93OgptZXNzYWdlIHZlcnRpY2VzX3F1YW50aXplZCB7CglvcHRpb25hbCBmbG9hdCBxdWFudGl6YXRpb24gPSAxOwoJcmVwZWF0ZWQgZmxvYXQgdHJhbnNsYXRpb24gPSAyOwoJcmVwZWF0ZWQgc2ludDMyIHggPSAzIFtwYWNrZWQ9dHJ1ZV07CglyZXBlYXRlZCBzaW50MzIgeSA9IDQgW3BhY2tlZD10cnVlXTsKCXJlcGVhdGVkIHNpbnQzMiB6ID0gNSBbcGFja2VkPXRydWVdOwp9CgptZXNzYWdlIHV2X3F1YW50aXplZCB7CglvcHRpb25hbCBzdHJpbmcgbmFtZSA9IDE7CglvcHRpb25hbCBmbG9hdCBxdWFudGl6YXRpb24gPSAyOwoJcmVwZWF0ZWQgc2ludDMyIHUgPSAzIFtwYWNrZWQ9dHJ1ZV07CglyZXBlYXRlZCBzaW50MzIgdiA9IDQgW3BhY2tlZD10cnVlXTsKfQoKLy8gSW5kZXhlcyBvZiB2ZXJ0aWNlcyBvZiBmYWNlcwptZXNzYWdlIGZhY2VzX2NvbXByZXNzZWQgewoJcmVwZWF0ZWQgc2ludDMyIGZhY2VzID0gMSBbcGFja2VkPXRydWVdOyAvLyBpMDAsaTAxLGkwMiwgaTEwLGkxMSxpMTIsIC4uLgp9CgptZXNzYWdlIGNodW5rX3F1YW50aXplZCB7CglvcHRpb25hbCBzdHJpbmcgY2h1bmtfbmFtZSA9IDE7CglvcHRpb25hbCBzdHJpbmcgbWF0ZXJpYWxfbmFtZSA9IDI7CglvcHRpb25hbCB2ZXJ0aWNlc19xdWFudGl6ZWQgdmVydGljZXMgPSAzOwoJcmVwZWF0ZWQgdXZfcXVhbnRpemVkIHV2cyA9IDQ7CglvcHRpb25hbCBmYWNlc19zaW1wbGUgZmFjZXMgPSA1Owp9Cg==", "base64");
module.exports = {
CoreViewModes: CoreViewModes,
Viewmode: Viewmode,
COLORS: COLORS,
SHADERS: SHADERS,
UTILCOMMON: UTILCOMMON,
ModelTextureMaterial: ModelTextureMaterial,
CHUNK: CHUNK,
protoToken: protoToken
}
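The protoToken blob above is a base64-encoded .proto schema; it decodes to messages such as binary_mesh, vertices_simple, chunk_simple and chunk_quantized. A minimal sketch for inspecting it offline (Python; the file name proto_token.b64 is a hypothetical place to stash the payload, not something this commit provides):

import base64

# Hypothetical: proto_token.b64 holds the same base64 payload as protoToken above.
with open("proto_token.b64") as f:
    schema_text = base64.b64decode(f.read()).decode("utf-8")
print(schema_text)  # prints the .proto source; it starts with "message binary_mesh { ... }"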

27
data/decode/package.json Executable file
View File

@ -0,0 +1,27 @@
{
"name": "decodejs",
"version": "0.1.0",
"description": "Transforming matterport .dam file to .obj format",
"contributors": [
"Zhiyang He <hzyjerry@gmail.com>"
],
"homepage": "https://github.com/dcodeIO/protobuf.js",
"keywords": [
"buffer",
"protobuf",
"bytebuffer"
],
"dependencies": {
"bytebuffer": "~5"
},
"license": "Apache-2.0",
"engines": {
"node": ">=0.8"
},
"browser": {
"fs": false,
"path": false
},
"scripts": {
}
}

5223
data/decode/protobuf/protobuf.js Executable file

File diff suppressed because it is too large

10
data/decode/rename.py Normal file
View File

@ -0,0 +1,10 @@
# Reformat texture file names
# Incorrect: 73e85207dc844aa4b814c73120c3b53f_texture_jpg_high%2F73e85207dc844aa4b814c73120c3b53f_005.jpg
# Correct : 73e85207dc844aa4b814c73120c3b53f_005.jpg
import os
for filename in os.listdir("."):
if not '_50k' in filename and ".jpg" in filename:
start = filename.index('_texture')
end = len(filename) - filename[::-1].index('_') - 1
os.rename(filename, filename[:start] + filename[end:])
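A minimal standalone check of the slicing logic above, run on the example filename from the header comment (illustrative, not part of rename.py):

name = "73e85207dc844aa4b814c73120c3b53f_texture_jpg_high%2F73e85207dc844aa4b814c73120c3b53f_005.jpg"
start = name.index('_texture')
end = len(name) - name[::-1].index('_') - 1
assert name[:start] + name[end:] == "73e85207dc844aa4b814c73120c3b53f_005.jpg"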

19190
data/decode/three_src.js Normal file

File diff suppressed because one or more lines are too long

50
data/generate_data.sh Normal file
View File

@ -0,0 +1,50 @@
#!/bin/bash
root=/cvgl/group/taskonomy/raw/*
src=$PWD
for model in $root
do
echo "processing $model"
folder=$(basename $model)
if [ -e $src/$folder ]; then
echo $src/$folder exists
elif [ ! -f $model/modeldata/out_res.ply ] || [ ! -f $model/modeldata/sweep_locations.csv ]; then
echo "$folder does not contain model"
elif [ -e $model/pano ] || [ -e $model/points ]; then
echo points or pano already exists
else
mkdir $src/$folder
cp $model/modeldata/sweep_locations.csv $src/$folder/
#skyboxes=$model/img/low/*skybox0*
#for skybox in $skyboxes
#do
# name=$(basename ${skybox})
# name=../../../..$model/img/low/${name::-5}
# echo $name
#
# while [ $(($(ps -e | grep blender | wc -l)+$(ps -e | grep cube2sphere | wc -l))) -gt 20 ]; do
# sleep 0.3
# echo waiting
# done
# (cube2sphere ${name}1.jpg ${name}3.jpg ${name}4.jpg ${name}2.jpg ${name}0.jpg ${name}5.jpg -r 512 256 -fPNG -o $folder/$(basename ${skybox} .jpg) )&
#done
cd $model
/cvgl/software/blender-2.78a/blender -b -noaudio --enable-autoexec --python /cvgl2/u/feixia/representation-learning-data/scripts/generate_points.py -- --NUM_POINTS_NEEDED 2 --MIN_VIEWS 1 --MAX_VIEWS 1
/cvgl/software/blender-2.78a/blender -b -noaudio --enable-autoexec --python /cvgl2/u/feixia/representation-learning-data/scripts/create_rgb_images.py --
/cvgl/software/blender-2.78a/blender -b -noaudio --enable-autoexec --python /cvgl2/u/feixia/representation-learning-data/scripts/create_normal_images.py --
/cvgl/software/blender-2.78a/blender -b -noaudio --enable-autoexec --python /cvgl2/u/feixia/representation-learning-data/scripts/create_mist_images.py --
#wait
mv points pano
mv pano $src/$folder/
fi
done

374
data/generate_points.py Normal file
View File

@ -0,0 +1,374 @@
"""
Name: generate_points.py
Author: Sasha Sax, CVGL
Modified by: Zhiyang He
Desc: Selects points that have at least a given number of views and saves information useful for loading them.
Will also mark the point as a pixel on point_{uuid}__view_{num}__markedskybox.{ext} if MARK_POINT_WITH_X is
enabled in settings.py
Usage:
blender -b -noaudio --enable-autoexec --python generate_points.py -- --NUM_POINTS_NEEDED <n> --MIN_VIEWS <n> --MAX_VIEWS <n>
"""
# Import these two first so that we can import other packages
import os
import sys
sys.path.append( os.path.dirname( os.path.realpath(__file__) ) )
from activate_env import add_on_path
sys.path.append(add_on_path)
from load_settings import settings
import io_utils
# Import remaining packages
import argparse
import bpy
import bpy_extras.mesh_utils
import glob
import json
import math
from mathutils import Vector, Euler
import numpy as np
import random
import time
import utils
from utils import Profiler, create_empty
import uuid
utils.set_random_seed()
parser = argparse.ArgumentParser()
parser.add_argument('--NUM_POINTS_NEEDED', type=int, required=True,
help='The number of points to generate')
parser.add_argument('--MIN_VIEWS', type=int, required=True,
help='The minimum number of views per point')
parser.add_argument('--MAX_VIEWS', type=int, required=True,
help='The maximum number of views per point (-1 to disable)')
basepath = os.getcwd()
TASK_NAME = 'points'
def parse_local_args( args ):
local_args = args[ args.index( '--' ) + 1: ]
return parser.parse_known_args( local_args )
def main():
global args, logger
args, remaining_args = parse_local_args( sys.argv )
logger = io_utils.create_logger( __name__ )
# io_utils.load_settings( remaining_args )
# utils.validate_blender_settings( settings )
assert(args.NUM_POINTS_NEEDED > 1)
utils.delete_all_objects_in_context()
if settings.VERBOSITY >= settings.VERBOSITY_LEVELS[ 'INFO' ]:
print( "Num points: {0} | Min views: {1} | Max views: {2}".format( args.NUM_POINTS_NEEDED, args.MIN_VIEWS, args.MAX_VIEWS ) )
# Get camera locations and optionally filter by enabled
camera_poses = io_utils.collect_camera_poses_from_csvfile( io_utils.get_camera_pose_file( basepath ) )
if settings.USE_ONLY_ENABLED_CAMERAS:
camera_poses = io_utils.filter_camera_sweeps_by_enabled_in_file( camera_poses, io_utils.get_camera_is_enabled_file( basepath ) )
# valid_cameras = [ 'fab20a57533646ce8da7ced527766b93', '1d12cda3bb31406ab49646bf27376d6a' ] # 'e1071efa828c432087a60ecb7b498453',
# valid_cameras = [ '1d12cda3bb31406ab49646bf27376d6a' ] # 'e1071efa828c432087a60ecb7b498453',
# camera_poses = { k:cp for k, cp in camera_poses.items() if k in valid_cameras }
# Load the model
model = io_utils.import_mesh( basepath )
if not os.path.isdir( os.path.join( basepath, TASK_NAME ) ):
os.mkdir( os.path.join( basepath, TASK_NAME ) )
# Generate the points
if settings.POINT_TYPE == 'SWEEP':
generate_points_from_each_sweep( camera_poses, basepath )
elif settings.POINT_TYPE == 'CORRESPONDENCES':
generate_point_correspondences( model, camera_poses, basepath )
else:
raise NotImplementedError( 'Unknown settings.POINT_TYPE: ' + settings.POINT_TYPE )
def generate_points_from_each_sweep( camera_poses, basepath ):
''' Generates and saves points into basepath. Each point file corresponds to one camera and
contains an array of different view_dicts for that camera. These view_dicts are distinct from
the ones created by generate_point_correspondences since these views do not share a target point.
Args:
camera_poses: A Dict of camera_uuids -> camera extrinsics
basepath: The directory in which to save points
Returns:
None (saves points)
'''
def sample( sample_i ):
if settings.CREATE_PANOS:
if sample_i == 0: # Top
return math.pi, math.pi / 2, settings.FIELD_OF_VIEW_MATTERPORT_RADS
elif sample_i == 1: # Front
return 0.0, 0.0, settings.FIELD_OF_VIEW_MATTERPORT_RADS
elif sample_i == 2: # Right
return math.pi / 2, 0.0, settings.FIELD_OF_VIEW_MATTERPORT_RADS
elif sample_i == 3: # Back
return math.pi , 0.0, settings.FIELD_OF_VIEW_MATTERPORT_RADS
elif sample_i == 4: # Left
return -math.pi / 2., 0.0, settings.FIELD_OF_VIEW_MATTERPORT_RADS
elif sample_i == 5: # Bottom
return math.pi, -math.pi/2, settings.FIELD_OF_VIEW_MATTERPORT_RADS
else:
raise ValueError( 'Too many samples for a panorama! (Max 6)')
else:
# How to generate samples from a camera sweep
yaw = np.random.uniform( low=-math.pi, high=math.pi )
pitch = settings.MAX_ANGLE_OF_CAMERA_BASE_FROM_PLANE_OF_ROTATION + 1
while abs( pitch ) > settings.MAX_ANGLE_OF_CAMERA_BASE_FROM_PLANE_OF_ROTATION:
pitch = np.random.normal( loc=0.0, scale=math.radians( 15. ) )
# FOV
z_val = 2
while z_val > 1:
z_val = np.random.normal( loc=0.0, scale=1. )
z_val = np.abs( z_val )
fov = settings.FIELD_OF_VIEW_MAX_RADS - z_val * ( settings.FIELD_OF_VIEW_MAX_RADS - settings.FIELD_OF_VIEW_MIN_RADS )
return yaw, pitch, fov
# Generate random points for each camera:
for camera_uuid in camera_poses.keys():
save_point_from_camera_sweep( sample, camera_uuid, camera_poses, basepath )
def generate_point_correspondences( model, camera_poses, basepath ):
''' Generates and saves points into basepath. These points are generated as correspondences
where each point_uuid.json is an array of view_dicts, or information about a camera which
has line-of-sight to the desired point. Each view_dict includes information about the
target point, too.
Args:
model: A Blender mesh that will be used to propose points
camera_poses: A Dict of camera_uuids -> camera extrinsics
basepath: The directory in which to save points
Returns:
None (saves points)
'''
n_generated = 0
while n_generated < args.NUM_POINTS_NEEDED:
utils.delete_objects_starting_with( "Camera" )
with Profiler( "Generate point", logger ):
point_uuid = str( uuid.uuid4() ) # Can also use hardcoded "TEST"
if settings.FILE_NAMING_CONVENTION == 'DEBUG':
point_uuid = str( n_generated )
next_point, visible_cameras, obliquenesses_dict = get_viable_point_and_corresponding_cameras(
model,
camera_poses,
min_views=args.MIN_VIEWS,
point_num=n_generated)
save_point_from_correspondence( visible_cameras, next_point, point_uuid, obliquenesses_dict, basepath )
n_generated += 1
def save_point_from_camera_sweep( sampling_fn, camera_uuid, camera_poses, basepath ):
'''
Args:
sampling_fn: A function which takes in (sample_number) and returns (yaw, pitch, fov)
camera_uuid: The key of this camera inside camera_poses
camera_poses: All of the camera extrinsics for all cameras
basepath: The directory to save this point in
Returns:
None (samples point and saves it in basepath)
'''
with Profiler( "Save point", logger ):
point_data = []
point_uuid = str( uuid.uuid4() ) # Can also use hardcoded "TEST"
if settings.FILE_NAMING_CONVENTION == 'DEBUG':
point_uuid = str( camera_uuid )
# Save each sampled camera position into point_data
for sample_i in range( args.NUM_POINTS_NEEDED ):
yaw, pitch, fov = sampling_fn( sample_i )
print("Get yaw, pitch, fov", yaw, pitch, fov, sample_i)
view_dict = io_utils.get_save_info_for_sweep(
fov, pitch, yaw, point_uuid, camera_uuid, camera_poses, settings.RESOLUTION )
point_data.append( view_dict )
# Save result out
outfile_path = os.path.join( basepath, TASK_NAME, "point_" + point_uuid + ".json" )
with open( outfile_path, 'w' ) as outfile:
json.dump( point_data, outfile )
def save_point_from_correspondence( visible_cameras, next_point, point_uuid, obliquenesses_dict, basepath ):
''' Saves out a CORRESPONDENCE-type point to a file in basepath.
Each point_uuid.json is an array of view_dicts, or information about a camera which
has line-of-sight to the desired point. Each view_dict includes information about the
target point, too.
Args:
visible_cameras: A list of all camera_poses which have line-of-sight to next_point
next_point: A 3-tuple of the XYZ coordinates of the target_point
point_uuid: A uuid to call this point. Defines the filename.
obliquenesses_dict: A dict of camera_uuid -> obliqueness of the face relative to camera
basepath: Directory under which to save point information
Returns:
None (Save a point file under basepath)
'''
with Profiler( "Save point" ):
empty = utils.create_empty( "Empty", next_point )
point_data = []
# So that we're not just using the same camera for each point
shuffled_views = list( visible_cameras )
random.shuffle( shuffled_views )
for view_number, camera_uuid in enumerate( shuffled_views ):
point_normal, obliqueness_angle = obliquenesses_dict[ camera_uuid ]
next_point_data = io_utils.get_save_info_for_correspondence( empty,
point=next_point,
point_uuid=point_uuid,
point_normal=tuple( point_normal ),
camera_uuid=camera_uuid,
cameras=visible_cameras,
obliqueness_angle=obliqueness_angle,
resolution=settings.RESOLUTION )
point_data.append( next_point_data )
if view_number == int( args.MAX_VIEWS ):
break
if view_number == settings.STOP_VIEW_NUMBER:
break
outfile_path = os.path.join(basepath, TASK_NAME, "point_" + point_uuid + ".json")
with open( outfile_path, 'w' ) as outfile:
json.dump( point_data, outfile )
def get_random_point_from_mesh( num_points, model ):
"""
Generates a given number of random points from the mesh
"""
# return [ Vector( ( -1, 0, 0 ) ) ] # Sink
me = model.data
me.calc_tessface() # recalculate tessfaces
tessfaces_select = [f for f in me.tessfaces if f.select]
random.shuffle( tessfaces_select )
multiplier = 1 if len(tessfaces_select) >= num_points else num_points // len(tessfaces_select)
return bpy_extras.mesh_utils.face_random_points(multiplier, tessfaces_select[:num_points])
def get_viable_point_and_corresponding_cameras( model, camera_locations, min_views=3, point_num=None ):
"""
Keeps randomly sampling points from the mesh until it gets one that is viewable from at least
'min_views' camera locations.
Args:
model: A Blender mesh object
min_views: The minimum viable number of views
camera_locations: A list of dicts which have information about the camera location
point_num: The index of the point in test_assets/points_to_generate.json - needs to be
specified iff settings.MODE == 'TEST'
Returns:
point: A point that has at least 'min_views' cameras with line-of-sight on point
visible: A Dict of visible cameras---camera_uuid -> extrinsics
obliqueness: A Dict of camera_uuid -> ( point_normal, obliqueness_angle )
"""
count = 0
while True:
# Generate point and test
if settings.MODE == 'TEST':
with open( "test_assets/points_to_generate.json", 'r' ) as fp:
candidate_point_tuple = io_utils.get_point_loc_in_model_from_view_dict( json.load( fp )[ point_num ] )
candidate_point = Vector( candidate_point_tuple )
else:
candidate_point = get_random_point_from_mesh( 1, model )[0]
# candidate_point = Vector( (-1.8580, -0.9115, 3.6539) )
cameras_with_view_of_candidate = {}
obliquenesses_dict = {}
n_views_with_los = 0
for camera_uuid, camera_extrinsics in camera_locations.items():
camera_rotation_euler = Euler( camera_extrinsics[ 'rotation' ], settings.EULER_ROTATION_ORDER )
camera_location = camera_extrinsics[ 'position' ]
camera, _, scene = utils.create_camera( location=camera_location, rotation=camera_rotation_euler,
field_of_view=settings.FIELD_OF_VIEW_MATTERPORT_RADS,
camera_name="viable_point_camera" )
# Compute whether to use this view
los_normal_and_obliquness = try_get_line_of_sight_obliqueness( camera.location, candidate_point )
contains_base = is_view_of_point_containing_camera_base( camera, candidate_point )
# Debug logging
if settings.VERBOSITY >= settings.VERBOSITY_LEVELS[ 'DEBUG' ]:
print( "\nCamera name:", camera_uuid )
print( "\tCamera position:", tuple( camera.location ) )
print( "\tCamera initial rotation:", tuple( camera.rotation_euler ) )
print( "\tPitch:", math.degrees( io_utils.get_pitch_of_point( camera, candidate_point ) ) )
print( "\tPoint:", tuple( candidate_point ) )
print( "\tLine of Sight: {0} | Contains Base: {1}".format( los_normal_and_obliquness, contains_base ) )
# Count the number of cameras with a view
if los_normal_and_obliquness:
n_views_with_los += 1
# if use viable view, save it for this point
if los_normal_and_obliquness and not contains_base:
point_normal, obliquness = los_normal_and_obliquness
cameras_with_view_of_candidate[ camera_uuid ] = camera_extrinsics
obliquenesses_dict[ camera_uuid ] = ( point_normal, obliquness )
# Decide whether to continue looking for points
count += 1
if settings.VERBOSITY >= settings.VERBOSITY_LEVELS[ 'INFO' ]:
print( "N views: {0} | line of sight: {1} ".format( len( cameras_with_view_of_candidate ), n_views_with_los ) )
if len( cameras_with_view_of_candidate ) >= min_views:
break
if count % 100 == 0: # Don't look more than 100 times
if settings.VERBOSITY >= settings.VERBOSITY_LEVELS[ 'INFO' ]:
print( "Finding a point taking a long time... {0} iters".format( count ) )
break
utils.delete_objects_starting_with( "viable_point_camera" ) # Clean up
return candidate_point, cameras_with_view_of_candidate, obliquenesses_dict
def is_view_of_point_containing_camera_base( camera, point ):
"""
Checks whether a view of the point from the given camera would contain the blurry
camera base, i.e. whether the point sits more than
MAX_ANGLE_OF_CAMERA_BASE_FROM_PLANE_OF_ROTATION above or below the camera's plane of rotation.
Args:
camera: A Blender camera
point: A 3-tuple of coordinates of the target point
Returns:
bool
"""
angle_to_plane = io_utils.get_pitch_of_point( camera=camera, point=point )
return abs( angle_to_plane ) > settings.MAX_ANGLE_OF_CAMERA_BASE_FROM_PLANE_OF_ROTATION
def try_get_line_of_sight_obliqueness( start, end, scene=bpy.context.scene ):
"""
Casts a ray in the direction of start to end and returns the surface
normal of the face containing 'end', and also the angle between the
normal and the cast ray. If the cast ray does not hit 'end' before
hitting anything else, it returns None.
Args:
start: A Vector
end: A Vector
scene: A Blender scene
Returns:
( normal_of_end, obliqueness_angle )
normal_of_end: A Vector normal to the face containing end
obliqueness_angle: A scalar in rads
"""
scene = bpy.context.scene
if ( bpy.app.version[1] >= 75 ):
direction = end - Vector(start)
(ray_hit, location, normal, index, obj, matrix) = scene.ray_cast( start, direction )
else:
direction = end - Vector(start) # We need to double the distance since otherwise
farther_end = end + direction # The ray might stop short of the target
(ray_hit, obj, matrix, location, normal) = scene.ray_cast( start, farther_end )
if not ray_hit or (location - end).length > settings.LINE_OF_SITE_HIT_TOLERANCE:
return None
obliqueness_angle = min( direction.angle( normal ), direction.angle( -normal ) )
return normal, obliqueness_angle
if __name__=='__main__':
with Profiler( "generate_points.py" ):
main()

View File

@ -0,0 +1,76 @@
from __future__ import print_function
import argparse
import os
import csv
from shutil import copyfile
def restore_all_identities(model_dirs):
for model in model_dirs:
sweep_origin = os.path.join(".", model, "sweep_locations_origin.csv")
sweep_restore = os.path.join(".", model, "sweep_locations.csv")
sweep_modeldata = os.path.join(".", model, "modeldata", "sweep_locations.csv")
if os.path.isfile(sweep_restore):
os.remove(sweep_restore)
if os.path.isfile(sweep_origin):
os.rename(sweep_origin, sweep_restore)
copyfile(sweep_restore, sweep_modeldata)
def create_identities(model_dirs):
for model in model_dirs:
sweep_origin = os.path.join(".", model, "sweep_locations_origin.csv")
sweep_default = os.path.join(".", model, "sweep_locations.csv")
sweep_uuid = os.path.join(".", model, "camera_uuids.csv")
sweep_modeldata = os.path.join(".", model, "modeldata", "sweep_locations.csv")
print(sweep_origin)
if os.path.isfile(sweep_default):
os.rename(sweep_default, sweep_origin)
write_to_identity(sweep_default, sweep_uuid, sweep_origin)
copyfile(sweep_default, sweep_modeldata)
def write_to_identity(dest_path, uuid_path, pose_path):
dest_file = open(dest_path, 'w')
uuid_file = open(uuid_path, 'r')
pose_file = open(pose_path, 'r')
uuid_reader = csv.reader(uuid_file, delimiter=",")
pose_reader = csv.reader(pose_file, delimiter=",")
pose_writer = csv.writer(dest_file, delimiter=",")
uuid_line = next(uuid_reader)
while uuid_line:
pose_line = next(pose_reader)
uuid = uuid_line[0]
pose_position = pose_line[1:4]
pose_identity = [uuid] + pose_position + [0.44443511962890625, 0.3106224536895752, -0.7182869911193848, 0.4359528422355652] + [0, 0]
pose_writer.writerow(pose_identity)
try:
uuid_line = next(uuid_reader)
except StopIteration:
break
dest_file.close()
uuid_file.close()
pose_file.close()
if __name__ == "__main__":
all_models = []
for model in os.listdir('.'):
if os.path.isfile(os.path.join(".", model, "sweep_locations.csv")) or os.path.isfile(os.path.join(".", model, "sweep_locations_origin.csv")):
all_models.append(model)
parser = argparse.ArgumentParser()
parser.add_argument('--restore', action='store_true', help="Delete created identity files and restore the originals (pass this flag to clean up)")
opt = parser.parse_args()
if opt.restore:
restore_all_identities(all_models)
else:
create_identities(all_models)

719
data/io_utils.py Normal file
View File

@ -0,0 +1,719 @@
"""
Name: io_utils.py
Author: Sasha Sax, CVGL
Modified by: Zhiyang He
Desc: Contains utilities for saving and loading information
Usage: for import only
"""
import sys
import os
sys.path.append( os.path.dirname( os.path.realpath(__file__) ) )
from activate_env import add_on_path
sys.path.append(add_on_path)
from load_settings import settings
try:
import bpy
from mathutils import Vector, Matrix, Quaternion, Euler
import utils
from utils import create_camera, axis_and_positive_to_cube_face, cube_face_idx_to_skybox_img_idx
except:
if settings.VERBOSITY >= settings.VERBOSITY_LEVELS[ 'WARNING' ]:
print( "Can't import Blender-dependent libraries in io_utils.py. Proceeding, and assuming this is kosher...")
import ast
import csv
import glob
import json
import logging
import math
# import numpy as np
import os
import time
axis_and_positive_to_skybox_idx = {
( "X", True ): 1,
( "X", False ): 3,
( "Y", True ): 0,
( "Y", False ): 5,
( "Z", True ): 2,
( "Z", False ): 4
}
skybox_number_to_axis_and_rotation = { 5: ('X', -math.pi / 2),
0: ('X', math.pi / 2),
4: ('Y', 0.0),
3: ('Y', math.pi / 2),
2: ('Y', math.pi),
1: ('Y', -math.pi / 2) }
img_format_to_ext = { "png": 'png', "jpeg": "jpg", "jpg": "jpg" }
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel( logging.INFO )
def collect_camera_poses_from_csvfile( infile ):
"""
Reads the camera uuids and locations from the given file
Returns:
points: A Dict of the camera locations from uuid -> position, rotation, and quaternion.
Quaternions are wxyz ordered
"""
points = {}
with open(infile) as csvfile:
reader = csv.reader(csvfile)
for row in reader:
uuid = row[0]
position = (float(row[1]), float(row[2]), float(row[3]))
quaternion_wxyz = (float(row[7]), float(row[4]), float(row[5]), float(row[6]))
if settings.VERBOSITY >= settings.VERBOSITY_LEVELS[ 'DEBUG' ]:
print( "Camera: {0}, rotation: {1}".format( uuid, quaternion_wxyz ) )
# quaternion_xyzw = (float(row[4]), float(row[5]), float(row[6]), float(row[7]))
rotation = convert_quaternion_to_euler(quaternion_wxyz)
points[uuid] = (position, rotation, quaternion_wxyz)
points[uuid] = { 'position': position, 'rotation': rotation, 'quaternion': quaternion_wxyz }
csvfile.close()
return points
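# Illustrative row layout implied by the indexing above (made-up values):
#   camera_uuid, x, y, z, qx, qy, qz, qw, ...
#   "fab20a57533646ce8da7ced527766b93,1.2,0.4,1.5,0.0,0.0,0.0,1.0,0,0"
#   -> position = (1.2, 0.4, 1.5), quaternion_wxyz = (1.0, 0.0, 0.0, 0.0)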
def convert_quaternion_to_euler(quaternion):
blender_quat = Quaternion( quaternion )
result = blender_quat.to_euler( settings.EULER_ROTATION_ORDER )
# levels the quaternion onto the plane images were taken at
result.rotate_axis( 'X', math.pi/2 )
# result[0] = result[0] + (math.pi / 2)
return result
def create_logger( logger_name ):
logging.basicConfig()
logger = logging.getLogger(logger_name)
logger.setLevel( settings.LOGGING_LEVEL )
return logger
def delete_materials():
''' Deletes all materials in the scene. This can be useful for standardizing meshes. '''
# https://blender.stackexchange.com/questions/27190/quick-way-to-remove-thousands-of-materials-from-an-object
C = bpy.context
for i in range(1,len(C.object.material_slots)):
C.object.active_material_index = 1
bpy.ops.object.material_slot_remove()
bpy.ops.object.mode_set(mode = 'EDIT')
bpy.ops.mesh.select_all(action = 'SELECT')
bpy.ops.object.material_slot_assign()
bpy.ops.object.mode_set(mode = 'OBJECT')
def filter_camera_sweeps_by_enabled_in_file( camera_sweeps, infile ):
"""
Keeps the points which are enabled in jsonfile
"""
with open(infile) as jsonfile:
data = json.load(jsonfile)
return {k: v for k, v in camera_sweeps.items() if data['sweeps'][k]['enabled']}
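# Expected shape of the camera-is-enabled JSON, inferred from the lookup above:
#   { "sweeps": { "<camera_uuid>": { "enabled": true, ... }, ... } }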
def get_2d_point_from_3d_point(three_d_point, K, RT):
''' By Farhan '''
P = K * RT
product = P*Vector(three_d_point)
two_d_point = (product[0] / product[2], product[1] / product[2])
return two_d_point
def get_2d_point_and_decision_vector_from_3d_point(camera_data, location, rotation, target):
''' By Farhan '''
K = get_calibration_matrix_K_from_blender(camera_data)
RT = get_3x4_RT_matrix_from_blender(Vector(location), rotation)
P = K*RT
decision_vector = P*Vector(target)
x, y = get_2d_point_from_3d_point(target, K, RT)
return (x, y, decision_vector)
def get_3x4_RT_matrix_from_blender(location, rotation):
''' By Farhan '''
# bcam stands for blender camera
R_bcam2cv = Matrix(
((1, 0, 0),
(0, -1, 0),
(0, 0, -1)))
# Transpose since the rotation is object rotation,
# and we want coordinate rotation
# R_world2bcam = rotation.to_matrix().transposed()
# T_world2bcam = -1*R_world2bcam * location
#
# Use matrix_world instead to account for all constraints
R_world2bcam = rotation.to_matrix()
#R_world2bcam.invert()
# Convert camera location to translation vector used in coordinate changes
# T_world2bcam = -1*R_world2bcam*cam.location
# Use location from matrix_world to account for constraints:
T_world2bcam = location
# Build the coordinate transform matrix from world to computer vision camera
R_world2cv = R_world2bcam #R_bcam2cv*R_world2bcam
T_world2cv = T_world2bcam #R_bcam2cv*T_world2bcam
# put into 3x4 matrix
RT = Matrix((
R_world2cv[0][:] + (T_world2cv[0],),
R_world2cv[1][:] + (T_world2cv[1],),
R_world2cv[2][:] + (T_world2cv[2],),
(0, 0, 0, 1)
))
return RT
def get_calibration_matrix_K_from_blender(camd):
''' By Farhan '''
f_in_mm = camd.lens
scene = bpy.context.scene
resolution_x_in_px = scene.render.resolution_x
resolution_y_in_px = scene.render.resolution_y
# scale = scene.render.resolution_percentage / 100
scale = 1
sensor_width_in_mm = camd.sensor_width
sensor_height_in_mm = camd.sensor_height
pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
f_in_px = (f_in_mm * resolution_x_in_px) / sensor_width_in_mm
if (camd.sensor_fit == 'VERTICAL'):
# the sensor height is fixed (sensor fit is vertical),
# the sensor width is effectively changed with the pixel aspect ratio
s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
s_v = resolution_y_in_px * scale / sensor_height_in_mm
else: # 'HORIZONTAL' and 'AUTO'
# the sensor width is fixed (sensor fit is horizontal),
# the sensor height is effectively changed with the pixel aspect ratio
pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
s_u = resolution_x_in_px * scale / sensor_width_in_mm
s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm
# Parameters of intrinsic calibration matrix K
u_0 = resolution_x_in_px*scale / 2
v_0 = resolution_y_in_px*scale / 2
skew = 0 # only use rectangular pixels
K = Matrix(
((f_in_px, skew, u_0),
( 0 , f_in_px, v_0),
( 0 , 0, 1 )))
return K
def get_camera_is_enabled_file( dir ):
return os.path.join( dir, settings.CAMERA_IS_ENABLED_FILE )
def get_camera_pose_file( dir ):
return os.path.join( dir, "modeldata", settings.CAMERA_POSE_FILE )
def get_file_name_for( dir, point_uuid, view_number, camera_uuid, task, ext ):
"""
Returns the filename for the given point, view, and task
Args:
dir: The parent directory for the model
task: A string defining the task name
point_uuid: The point identifier
view_number: This is the nth view of the point
camera_uuid: An identifier for the camera
ext: The file extension to use
"""
view_specifier = view_number
# if settings.FILE_NAMING_CONVENTION == 'DEBUG':
# view_specifier = camera_uuid
filename = "point_{0}_view_{1}_domain_{2}.{3}".format( point_uuid, view_specifier, task, ext )
return os.path.join( dir, filename )
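# Example (illustrative values):
#   get_file_name_for( '/out/rgb', point_uuid='abc123', view_number=3,
#                      camera_uuid='cam0', task='rgb', ext='png' )
#   -> '/out/rgb/point_abc123_view_3_domain_rgb.png'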
def get_model_file( dir, typ='RAW' ):
if typ == 'RAW':
model_file = settings.MODEL_FILE
elif typ == 'SEMANTIC':
model_file = settings.SEMANTIC_MODEL_FILE
elif typ == 'SEMANTIC_PRETTY':
model_file = settings.SEMANTIC_PRETTY_MODEL_FILE
elif typ == 'LEGO':
model_file = settings.LEGO_MODEL_FILE
else:
raise ValueError( 'Unknown type of model file: {0}'.format( typ ) )
return os.path.join( dir, "modeldata", model_file )
def get_point_loc_in_model_from_view_dict( view_info ):
""" Returns the location (3-tuple) of the point in the model given the loaded view_dict """
return ( view_info['model_x'], view_info['model_y'], view_info['model_z'] )
def get_pitch_of_point( camera, point ):
"""
Args:
camera: A Blender camera
point: A 3-tuple of coordinates of the target point
Returns:
pitch: A float
"""
# Just check whether the direction of the target point is within pi / 12 of the plane of rotation
point_in_local_coords = camera.matrix_world.inverted() * Vector( point )
angle_to_normal = Vector( (0,1,0) ).angle( point_in_local_coords )
angle_to_plane = math.pi / 2. - angle_to_normal
return angle_to_plane
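# Sanity check of the geometry above: a point along the camera's local +Y axis gives
# angle_to_normal = 0 and therefore pitch = pi/2, while a point in the camera's local
# XZ plane gives angle_to_normal = pi/2 and therefore pitch = 0.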
def get_pixel_in_skybox_for_point_from_view_dict( view_info ):
""" Returns the pixel location (pair) of the point in the skybox image given the loaded view_dict """
return ( view_info['skybox_pixel_x'], view_info['skybox_pixel_y'] )
def get_save_info_for_correspondence( empty, point, point_uuid, point_normal, camera_uuid, cameras, obliqueness_angle, resolution ):
"""
Creates info for a point and camera that allows easy loading of a camera in Blender
Args:
empty: An Empty located at the point
point: The xyz coordinates of the point to create the save info for
point_uuid: The uuid pertaining to this point
point_normal: The normal of the face the point lies on
camera_uuid: The uuid of the camera for which we will be creating info for
cameras: This a dict of many cameras for which camera_uuid is a key
obliqueness_angle: Angle formed between the point_normal and camera->point_location, in rads
resolution: Skybox camera resolution
Returns:
save_dict: A Dict of useful information. Currently its keys are
camera_distance: The distance from the camera to the point in meters
camera_location: The location of the camera in the 3d model
camera_original_rotation: The rotation_euler of the camera in the 3d model
img_path: The path to the unique image for this uuid that has line-of-sight on the point
model_x: The x coordinate of the point in the model
model_y: The y coordinate of the point in the model
model_z: The z coordinate of the point in the model
nonfixated_pixel_x:
nonfixated_pixel_y:
obliqueness_angle: Angle formed between the point_normal and camera->point_location, in rads
point_normal: The normal of the face the point lies on
rotation_of_skybox: The Euler rotation that, when the camera is set to inside the cube, will provide the skybox image
rotation_from_original_to_point: Apply to camera_original_rotation to aim camera at target
skybox_img: The unique skybox image number that has line-of-sight on the point
skybox_pixel_x: The exact x pixel in the skybox image where the point will be
skybox_pixel_y: The exact y pixel in the skybox image where the point will be
uuid: The uuid of this camera
"""
# TODO(sasha): The arguments are ugly
point_data = {}
# Save basic info
point_data[ 'model_x' ] = point[0]
point_data[ 'model_y' ] = point[1]
point_data[ 'model_z' ] = point[2]
point_data[ 'camera_uuid' ] = camera_uuid
point_data[ 'point_uuid' ] = point_uuid
point_data[ 'field_of_view_rads' ] = settings.FIELD_OF_VIEW_RADS
# Unpack the camera extrinsics
camera_extrinsics = cameras[ camera_uuid ]
location = camera_extrinsics['position']
rotation_euler = camera_extrinsics['rotation']
point_data[ 'camera_distance' ] = ( Vector( location ) - Vector( point ) ).magnitude
point_data[ 'camera_location' ] = location
point_data[ 'obliqueness_angle' ] = obliqueness_angle
point_data[ 'point_normal' ] = point_normal
# rotation_euler = Euler( rotation_euler ) # This is for debugging camera movement
# rotation_euler.rotate_axis( 'Z', math.pi / 4 )
quaternion = Quaternion( camera_extrinsics['quaternion'] )
## SKYBOX
# Find and save skybox number
camera, camera_data, scene = create_camera( location, rotation_euler,
field_of_view=settings.FIELD_OF_VIEW_MATTERPORT_RADS,
camera_name="Camera_save_point_1" )
skybox_number = get_skybox_img_number_containing_point( location, rotation_euler, empty )
point_data[ 'camera_original_rotation' ] = tuple( rotation_euler )
point_data[ 'skybox_img' ] = skybox_number
point_data[ 'img_path' ] = os.path.join("./img/high", "{0}_skybox{1}.jpg".format( camera_uuid, skybox_number ) )
point_data[ 'point_pitch' ] = get_pitch_of_point( camera, point )
# Save the rotation_euler for the camera to point at the skybox image in the model
new_camera, new_camera_data, scene = create_camera( location, rotation_euler,
resolution=settings.MATTERPORT_SKYBOX_RESOLUTION,
field_of_view=settings.FIELD_OF_VIEW_MATTERPORT_RADS,
camera_name="Camera_save_point_2" )
axis_of_rotation, rotation_from_start = skybox_number_to_axis_and_rotation[ skybox_number ]
new_camera.rotation_euler.rotate_axis( axis_of_rotation, rotation_from_start )
if skybox_number == 0: # Need to rotate top and bottom images
new_camera.rotation_euler.rotate_axis( 'Z', math.pi / 2 )
if skybox_number == 5: # Need to rotate top and bottom images
new_camera.rotation_euler.rotate_axis( 'Z', -math.pi / 2 ) # Not sure if this is correct, but we should never have this img
scene.update()
# And save the x, y pixel coordinates for the skybox image
x, y, _ = get_2d_point_and_decision_vector_from_3d_point(
new_camera_data, location, new_camera.rotation_euler, point )
point_data[ 'rotation_of_skybox' ] = tuple( new_camera.rotation_euler )
point_data[ 'skybox_pixel_x' ] = int( round( x ) )
point_data[ 'skybox_pixel_y' ] = int( round( y ) )
## FIXATED
# Now save the rotation needed to point at the target
new_camera, new_camera_data, scene = create_camera( location, rotation_euler,
resolution=settings.RESOLUTION,
field_of_view=settings.FIELD_OF_VIEW_RADS,
camera_name="Camera_save_point_3" )
utils.point_camera_at_target( new_camera, empty )
point_data[ 'rotation_from_original_to_point' ] = tuple(
utils.get_euler_rotation_between(
camera.rotation_euler,
new_camera.rotation_euler ) )
# other_calculated_normal = camera.matrix_world.to_quaternion() * Vector( (0,1,0) )
# centered_camera_dir = new_camera.matrix_world.to_quaternion() * Vector( (0,0,-1) )
# other_calculated_pitch = math.pi/2 - centered_camera_dir.angle( other_calculated_normal )
# Local coords method
# point_in_local_coords = ( camera.matrix_world.inverted() * point ).normalized()
# angle_to_normal_local_method = Vector( (0,1,0) ).angle( point_in_local_coords )
# print( "-----------pitch (manual method):", math.degrees( other_calculated_pitch ) )
# print("\tnormal_dir", other_calculated_normal)
# print("\tcamera_dir:", centered_camera_dir)
# print("\tpoint_dir:", (Vector( point ) - camera.location ).normalized() )
# print("\tpoint_dir unnormalized:", (Vector( point ) - camera.location ) )
# print("\tangle_to_normal: {0} degrees".format( math.degrees( centered_camera_dir.angle( other_calculated_normal ) ) ) )
# print( "-----------pitch (local method):", math.degrees( math.pi / 2 - angle_to_normal_local_method ) )
# print("\tpoint_dir_in_local_coords:", ( camera.matrix_world.inverted() * point ).normalized() )
## NONFIXATED
# # Generate nonfixated image
# x_jitter = np.random.uniform( -settings.FIELD_OF_VIEW_RADS / 2., settings.FIELD_OF_VIEW_RADS / 2. )
# new_camera.rotation_euler.rotate_axis( axis_of_rotation, x_jitter )
# new_camera.rotation_euler.rotate_axis( 'X', -point_data[ 'point_pitch' ] ) # Back into the plane
# And save the x, y pixel coordinates the nonfixated image
x, y, _ = get_2d_point_and_decision_vector_from_3d_point(
new_camera_data, location, new_camera.rotation_euler, point )
point_data[ 'rotation_from_original_to_nonfixated' ] = tuple(
utils.get_euler_rotation_between(
camera.rotation_euler,
new_camera.rotation_euler ) )
point_data[ 'nonfixated_pixel_x' ] = int( round( x ) )
point_data[ 'nonfixated_pixel_y' ] = int( round( y ) )
utils.delete_objects_starting_with( "Camera_save_point_1" ) # Clean up
utils.delete_objects_starting_with( "Camera_save_point_2" ) # Clean up
utils.delete_objects_starting_with( "Camera_save_point_3" ) # Clean up
# utils.delete_objects_starting_with( "Camera" ) # Clean up
return point_data
def get_save_info_for_sweep( fov, pitch, yaw, point_uuid, camera_uuid, cameras, resolution ):
"""
Creates info for a point and camera that allows easy loading of a camera in Blender
Args:
fov: The field of view of the camera
pitch: The pitch of the camera relative to its plane of rotation
yaw: The yaw of the camera compared to its initial Euler coords
point_uuid: The uuid pertaining to this point
camera_uuid: The uuid of the camera for which we will be creating info for
cameras: This a dict of many cameras for which camera_uuid is a key
resolution: Skybox camera resolution
Returns:
save_dict: A Dict of useful information. Currently its keys are
{
"camera_k_matrix": # The 3x3 camera K matrix. Stored as a list-of-lists,
"field_of_view_rads": # The Camera's field of view, in radians,
"camera_original_rotation": # The camera's initial XYZ-Euler rotation in the .obj,
"rotation_from_original_to_point":
# Apply this to the original rotation in order to orient the camera for the corresponding picture,
"point_uuid": # alias for camera_uuid,
"camera_location": # XYZ location of the camera,
"frame_num": # The frame_num in the filename,
"camera_rt_matrix": # The 4x3 camera RT matrix, stored as a list-of-lists,
"final_camera_rotation": # The camera Euler in the corresponding picture,
"camera_uuid": # The globally unique identifier for the camera location,
"room": # The room that this camera is in. Stored as roomType_roomNum_areaNum
}
"""
# TODO(sasha): The arguments are ugly
point_data = {}
# Save basic info
point_data[ 'camera_uuid' ] = camera_uuid
point_data[ 'point_uuid' ] = point_uuid
# Unpack the camera extrinsics
camera_extrinsics = cameras[ camera_uuid ]
location = camera_extrinsics['position']
rotation_euler = camera_extrinsics['rotation']
point_data[ 'camera_original_rotation' ] = tuple( rotation_euler )
point_data[ 'camera_location' ] = location
# Save initial camera location
camera, camera_data, scene = create_camera( location, rotation_euler,
field_of_view=settings.FIELD_OF_VIEW_MATTERPORT_RADS,
camera_name="Camera_save_point_1" )
# Save the rotation_euler for the camera to point at the skybox image in the model
new_camera, new_camera_data, scene = create_camera( location, rotation_euler,
resolution=settings.RESOLUTION,
field_of_view=fov,
camera_name="Camera_save_point_2" )
new_camera.rotation_euler.rotate_axis( 'Y', yaw )
new_camera.rotation_euler.rotate_axis( 'X', pitch )
point_data[ 'rotation_from_original_to_point' ] = tuple(
utils.get_euler_rotation_between(
camera.rotation_euler,
new_camera.rotation_euler ) )
point_data[ 'final_camera_rotation' ] = tuple( new_camera.rotation_euler )
point_data[ 'field_of_view_rads' ] = fov
def matrix_to_list_of_lists( mat ):
lst_of_lists = list( mat )
lst_of_lists = [ list( vec ) for vec in lst_of_lists ]
return lst_of_lists
point_data[ 'camera_rt_matrix' ] = matrix_to_list_of_lists(
get_3x4_RT_matrix_from_blender( Vector( location ), new_camera.rotation_euler ) )
point_data[ 'camera_k_matrix' ] = matrix_to_list_of_lists(
get_calibration_matrix_K_from_blender( new_camera_data ) )
utils.delete_objects_starting_with( "Camera_save_point_1" ) # Clean up
utils.delete_objects_starting_with( "Camera_save_point_2" ) # Clean up
# utils.delete_objects_starting_with( "Camera" ) # Clean up
return point_data
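# Illustrative shape of the dict returned above (made-up numbers), as it ends up inside point_<uuid>.json:
#   { 'camera_uuid': '...', 'point_uuid': '...',
#     'camera_original_rotation': (rx, ry, rz), 'camera_location': (x, y, z),
#     'rotation_from_original_to_point': (drx, dry, drz), 'final_camera_rotation': (rx2, ry2, rz2),
#     'field_of_view_rads': 1.07, 'camera_rt_matrix': [[...], ...], 'camera_k_matrix': [[...], ...] }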
def get_skybox_img_number_containing_point( camera_location, camera_rotation_euler, empty_at_target ):
"""
This gets the image index of the skybox image.
It works by finding the direction of the empty from the camera and then by rotating that vector into a
canonical orientation. Then we can use the dimension with the greatest magnitude, and the sign of that
coordinate in order to determine the face of the cube that the empty projects onto.
"""
empty_direction = ( empty_at_target.location - Vector( camera_location ) ).normalized()
empty_direction.normalize()
empty_direction.rotate( camera_rotation_euler.to_matrix().inverted() )
# The trick to finding the cube face here is that, once the direction is rotated into the camera frame,
# the face is determined by the axis with the largest absolute component and its sign.
max_axis, coord_val = max( enumerate( empty_direction ), key=lambda x: abs( x[1] ) ) # Find the dim with the largest magnitude
sign = ( coord_val >= 0.0 )
max_axis = ["X", "Y", "Z"][ max_axis ] # Just make it more readable
return axis_and_positive_to_skybox_idx[ ( max_axis, sign ) ]
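# Worked example: if the rotated direction is (0.1, 0.9, 0.2), the largest-magnitude
# component is +Y, so this returns axis_and_positive_to_skybox_idx[('Y', True)] == 0.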
def get_task_image_fpath( directory, point_data, view_num, task ):
''' Builds and returns a standardized filepath for a point/image '''
view_dict = point_data[ view_num ]
if task == 'skybox':
return os.path.join( directory, view_dict[ 'img_path' ] )
elif 'depth' in task:
directory = os.path.join( directory, 'depth')
elif 'normals' in task:
directory = os.path.join( directory, 'normals')
elif 'rgb' in task:
directory = os.path.join( directory, 'rgb')
preferred_ext = img_format_to_ext[ settings.PREFERRED_IMG_EXT.lower() ]
fname = get_file_name_for( directory,
point_uuid=view_dict[ 'point_uuid' ],
view_number=view_num,
camera_uuid=view_dict[ 'camera_uuid' ],
task=task,
ext=preferred_ext )
return fname  # get_file_name_for() already joined the directory
def import_mesh( dir, typ='RAW' ):
''' Imports a mesh with the appropriate processing beforehand.
Args:
dir: The dir from which to import the mesh. The actual filename is given from settings.
typ: The type of mesh to import. Must be one of ['RAW', 'SEMANTIC', 'SEMANTIC_PRETTY', 'LEGO']
Importing a raw model will remove all materials and textures.
Returns:
mesh: The imported mesh.
'''
model_fpath = get_model_file( dir, typ=typ )
if '.obj' in model_fpath:
bpy.ops.import_scene.obj( filepath=model_fpath )
model = join_meshes() # OBJs often come in many pieces
for img in bpy.data.images: # remove all images
bpy.data.images.remove(img, do_unlink=True)
bpy.context.scene.objects.active = model
if typ == 'SEMANTIC' or typ == 'SEMANTIC_PRETTY':
return model
#model.matrix_world *= Matrix.Rotation(-math.pi/2., 4, 'Z')
if typ == 'LEGO':
return model
delete_materials()
elif '.ply' in model_fpath:
bpy.ops.import_mesh.ply( filepath=model_fpath )
model = bpy.context.object
return model
def join_meshes():
''' Takes all meshes in the scene and joins them into a single mesh.
Args:
None
Returns:
mesh: The single, combined, mesh
'''
# https://blender.stackexchange.com/questions/13986/how-to-join-objects-with-python
scene = bpy.context.scene
obs = []
for ob in scene.objects:
# whatever objects you want to join...
if ob.type == 'MESH':
obs.append(ob)
ctx = bpy.context.copy()
# one of the objects to join
#ctx['active_object'] = obs[0]
#ctx['selected_objects'] = obs
# we need the scene bases as well for joining
#ctx['selected_editable_bases'] = [scene.object_bases[ob.name] for ob in obs]
#print('finished join mesh selection')
#bpy.ops.object.join(ctx)
#for ob in scene.objects:
# print(ob)
# whatever objects you want to join...
# if ob.type == 'MESH':
# return ob
scene.objects.active = obs[0]
bpy.ops.object.join()
return obs[0]
def load_camera_poses( dir, enabled_only ):
"""
Loads the cameras from disk.
Args:
dir: Parent directory of the model. E.g. '/path/to/model/u8isYTAK3yP'
enabled_only: Whether to load only enabled cameras
Returns:
camera_poses: A dict of camera_uuid -> { position:, quaternion:, rotation: }
"""
camera_locations = collect_camera_poses_from_csvfile( get_camera_pose_file( dir ) )
if enabled_only:
camera_locations = filter_camera_sweeps_by_enabled_in_file( camera_locations, get_camera_is_enabled_file( dir ) )
logger.info("Loaded {0} cameras.".format( len( camera_locations ) ) )
return camera_locations
def load_saved_points_of_interest( dir ):
"""
Loads all the generated points that have multiple views.
Args:
dir: Parent directory of the model. E.g. '/path/to/model/u8isYTAK3yP'
Returns:
point_infos: A list where each element is the parsed json file for a point
"""
point_files = glob.glob( os.path.join( dir, "points", "point_*.json" ) )
point_files.sort()
point_infos = []
for point_file in point_files:
with open( point_file ) as f:
point_infos.append( json.load( f ) )
logger.info( "Loaded {0} points of interest.".format( len( point_infos ) ) )
return point_infos
def load_model_and_points( basepath, typ='RAW' ):
''' Loads the model and points
Args:
basepath: The model path
Returns:
A Dict:
'camera_poses':
'point_infos':
'model': The blender mesh
'''
utils.delete_all_objects_in_context()
camera_poses = load_camera_poses( basepath, settings.USE_ONLY_ENABLED_CAMERAS )
point_infos = load_saved_points_of_interest( basepath )
model = import_mesh( basepath, typ=typ )
return { 'camera_poses': camera_poses, 'point_infos': point_infos, 'model': model }
def parse_semantic_label( label ):
''' Parses a semantic label string into
semantic_class, instance_num, roomtype, roomnum, area_num
Args:
label: A string to be parsed
Returns:
semantic_class, instance_num, roomtype, roomnum, area_num
'''
toks = label.split('_')
clazz, instance_num, roomtype, roomnum, area_num = toks[0], toks[1], toks[2], toks[3], toks[4]
return clazz, instance_num, roomtype, roomnum, area_num
def track_empty_with_axis_lock(cam, lock_axis, track_axis, empty):
''' Turns the camera along its axis of rotation in order to point (as much as possible) at the
target empty.
Args:
cam: A Blender camera
lock_axis: The axis to rotate about
track_axis: The axis of the camera which should point (as much as possible) at the empty: Use 'NEGATIVE_Z'
empty: The empty to point 'track_axis' at
Returns:
None (points the camera at the empty)
'''
constraint = cam.constraints.new(type='LOCKED_TRACK')
constraint.lock_axis = lock_axis
constraint.track_axis = track_axis
constraint.target = empty
bpy.ops.object.select_all(action='DESELECT')
cam.select = True
bpy.ops.object.visual_transform_apply()
cam.constraints.remove(constraint)
def try_get_data_dict( point_datas, point_uuid ):
point_data = [ p for p in point_datas if p[0][ 'point_uuid' ] == point_uuid ]
if not point_data:
raise KeyError( "Point uuid {0} not found".format( point_uuid ) )
return point_data[0]
def try_get_task_image_fpaths( directory, point_uuid, point_datas=None, task='markedskybox' ):
if task == 'rgb_nonfixated':
if not point_datas:
raise ValueError( "If using rgb_nonfixated then point_datas must be specified" )
return sorted( [ os.path.join( directory, view_dict[ 'img_path' ] )
for view_dict in try_get_data_dict( point_datas, point_uuid ) ] )
else:
bash_regex = "point_{0}__view_*__{1}.png".format( point_uuid, task )
return sorted( glob.glob( os.path.join( directory, bash_regex ) ) )
if __name__=='__main__':
import argparse
args = argparse.Namespace()
# import settings
# settings.__dict__['DEPTH_BITS_PER_CHANNEL'] = 1000000000000
# print( settings.DEPTH_BITS_PER_CHANNEL )
load_settings( args )

133
data/load_settings.py Normal file
View File

@ -0,0 +1,133 @@
"""
Name: load_settings.py
Author: Sasha Sax, CVGL
Modified by: Zhiyang He
Desc: Loads runtime settings and exposes them via 'settings'
Usage: for import only
"""
import bpy
import ast
import os
import logging
import sys
sys.path.append( os.path.dirname( os.path.realpath(__file__) ) )
from activate_env import add_on_path
sys.path.append(add_on_path)
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel( logging.INFO )
def load_settings( args ):
''' Will set the global 'settings' namespace with, in order of priority,
1. Any command-line arguments passed in through 'args'
2. Any key-value pairs that are stored in a dict called 'override_settings'
in any file called 'override_settings.py' which is located in the cwd or a parent.
Files that are closer to the cwd have higher priority than files which
are further away.
3. The default settings from 'import settings'
Args:
args: Command line arguments. A Namespace which has key-value pairs that
override settings
Returns
settings: The mutated namespace.
'''
arg_dict = parse_cmdline_args( args )
settings_come_from = {}
def override_settings_dict_with( settings_dict, override_dict, settings_come_from, settings_location ):
for k, v in override_dict.items():
if k.startswith( '__' ): continue
if k in settings_dict:
settings_come_from[ k ] = settings_location
if type( v ) != type( settings_dict[ k ] ):
settings_dict[ k ] = ast.literal_eval( v )
else:
settings_dict[ k ] = v
return settings_dict
# Get default settings
import settings
settings_dict = settings.__dict__ #Namespace()
for k, v in settings_dict.items():
settings_come_from[ k ] = '"import settings"'
# Override settings if there is a settings.py file in the current directory
def get_directory_hierarchy():
''' returns all directories up to root, root first. '''
current_dir = os.getcwd()
dir_order = [ ]
last_dir = ''
while current_dir != last_dir:
dir_order.append( current_dir )
last_dir = current_dir
current_dir = os.path.dirname( current_dir )
dir_order.reverse()
return dir_order
# update the settings with local overrides
for directory in get_directory_hierarchy():
settingspath = os.path.join( directory, 'override_settings.py' )
if os.path.isfile( settingspath ):
sys.path.insert( 0, directory )
from override_settings import override_settings
override_settings_dict_with( settings_dict, override_settings, settings_come_from, settings_location=directory )
del sys.path[ 0 ]
del override_settings
del sys.modules["override_settings"]
# Override settings with command-line arguments
override_settings_dict_with( settings_dict, arg_dict, settings_come_from, settings_location='CMDLINE' )
settings.LOGGING_LEVEL = settings.VERBOSTITY_LEVEL_TO_NAME[ settings.VERBOSITY ]
settings.LOGGER = logger
logger.setLevel( settings.LOGGING_LEVEL )
for setting, location in sorted( settings_come_from.items() ):
if setting.startswith( '__' ) or setting.upper() != setting: continue
logger.debug( "Using {} from {}".format( setting, location ) )
# validate_blender_settings( settings )
return settings
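# Example of the priority order described above (a minimal sketch; the file
# contents and command-line value are hypothetical):
#   override_settings.py in a parent of the cwd:  override_settings = { 'RESOLUTION': 512 }
#   command line flag:                            --RESOLUTION=256
# Defaults from 'import settings' lose to the file override, which loses to the flag.
def _example_settings_priority():
    return load_settings( [ '--RESOLUTION=256' ] )   # RESOLUTION ends up as 256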
def parse_cmdline_args( argv ):
''' Parses all --OPTION_NAME=val into OPTION_NAME->val
'''
argsdict = {}
for farg in argv:
if farg.startswith('--') and '=' in farg:
(arg, val) = farg.split("=", 1)
arg = arg[2:]
argsdict[arg] = val
return argsdict
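# Example (a minimal sketch; the tokens below are hypothetical). All values come
# back as strings; type coercion happens later in override_settings_dict_with.
def _example_parse_cmdline():
    return parse_cmdline_args( [ '--RESOLUTION=512', '--MODE=TEST' ] )
    # -> { 'RESOLUTION': '512', 'MODE': 'TEST' }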
def validate_blender_settings( settings_ns ):
''' Checks all settings for internal consistency, and makes sure that the running
version of Blender is appropriate
Args:
settings: A namespace that contains the parameters from settings.py
Raises:
RuntimeError: Depends, but describes the problem
'''
s = settings_ns
# Check version number
logger.debug( "Python version: {}".format( sys.version ) )
logger.debug( "Blender version: {}".format( bpy.app.version ) )
if ( bpy.app.version[1] != 79 ):
raise RuntimeError( 'Blender version must be 2.79, but is {}.'.format( bpy.app.version ) )
if ( settings_ns.CREATE_PANOS and settings_ns.CREATE_FIXATED ) or \
( settings_ns.CREATE_PANOS and settings_ns.CREATE_NONFIXATED ):
raise RuntimeError( 'Cannot create both panos and non-panos in the same run. Either turn off panos or turn off (non)fixated!' )
if ( s.DEBUG_AT_POINT is None and s.DEBUG_AT_VIEW is not None ) or \
( s.DEBUG_AT_POINT is not None and s.DEBUG_AT_VIEW is None ):
raise RuntimeError( 'If debugging a point/view, then both must be specified.' )
global settings
settings = load_settings( sys.argv )
validate_blender_settings( settings )

25
data/package-lock.json generated Normal file
View File

@ -0,0 +1,25 @@
{
"requires": true,
"lockfileVersion": 1,
"dependencies": {
"minimist": {
"version": "0.0.10",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz",
"integrity": "sha1-3j+YVD2/lggr5IrRoMfNqDYwHc8="
},
"optimist": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/optimist/-/optimist-0.6.1.tgz",
"integrity": "sha1-2j6nRob6IaGaERwybpDrFaAZZoY=",
"requires": {
"minimist": "0.0.10",
"wordwrap": "0.0.3"
}
},
"wordwrap": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.3.tgz",
"integrity": "sha1-o9XabNXAvAAI03I0u68b7WMFkQc="
}
}
}

84
data/settings.py Normal file
View File

@ -0,0 +1,84 @@
"""
Name: settings.py
Author: Sasha Sax, CVGL
Desc: Contains all the settings that our scripts will use.
Usage: for import only
"""
from math import pi
import math
import sys
# Images
CREATE_FIXATED = False
CREATE_NONFIXATED = False
CREATE_PANOS = True
USE_ONLY_ENABLED_CAMERAS = False
POINT_TYPE = 'SWEEP' # 'CORRESPONDENCES' # The basis for how points are generated
#POINT_TYPE = 'CORRESPONDENCES' # The basis for how points are generated
# File paths
BLENDER_PATH = '/Applications/Blender/blender.app/Contents/MacOS/blender'
CAMERA_IS_ENABLED_FILE = "ActiveSweeps.json"
CAMERA_POSE_FILE = "sweep_locations.csv"
FILE_NAMING_CONVENTION = 'DEBUG' # Or 'STANDARD'
LOG_FILE = sys.stderr # Use sys.stderr to avoid Blender garbage
LEGO_MODEL_FILE = "out_res.obj" # "out_res.obj"
SEMANTIC_MODEL_FILE = "semantic_lookup.obj" # "out_res.obj"
SEMANTIC_PRETTY_MODEL_FILE = "semantic.obj" # "out_res.obj"
MODEL_FILE = "out_res.ply" # "out_res.obj"
PANO_VIEW_NAME = 'equirectangular'
PREFERRED_IMG_EXT = 'PNG' # PNG, JPEG
POINTS_DIR = "points"
# Render settings
RESOLUTION = 1080
SENSOR_DIM = 20 # 20
STOP_VIEW_NUMBER = -1 #2 # Generate up to (and including) this many views. -1 to disable.
DEBUG_AT_POINT = None
DEBUG_AT_VIEW = None
TILE_SIZE = 128
PANO_RESOLUTION = (2048, 1024)
# Color depth
COLOR_BITS_PER_CHANNEL = '8' # bits per channel. PNG allows 8, 16.
DEPTH_BITS_PER_CHANNEL = '16' # bits per channel. PNG allows 8, 16.
DEPTH_MAX_DISTANCE_METERS = 128. # With 128m and 16-bit channel, has sensitivity 1/512m (128 / 2^16)
MIST_MAX_DISTANCE_METERS = 128. # With 128m and 16-bit channel, has sensitivity 1/512m (128 / 2^16)
# Field of view and camera-angle settings
BLUR_ANGLE_FROM_PLANE_OF_ROTATION = math.radians( 60 ) # 60 degrees; use pi/2 for testing
FIELD_OF_VIEW_RADS = math.radians( 60 )
FIELD_OF_VIEW_MIN_RADS = math.radians( 45 )
FIELD_OF_VIEW_MAX_RADS = math.radians( 75 )
FIELD_OF_VIEW_MATTERPORT_RADS = math.radians( 90 )
LINE_OF_SITE_HIT_TOLERANCE = 0.001 # Matterport has 1 unit = 1 meter, so 0.001 is 1mm
MODE = 'DEBUG' # DEBUG, TEST, PRODUCTION
# Debugging
VERBOSITY_LEVELS = { 'ERROR': 0, # Everything >= VERBOSITY will be printed
'WARNING': 20,
'STANDARD': 50,
'INFO': 90,
'DEBUG': 100 }
VERBOSTITY_LEVEL_TO_NAME = { v: k for k, v in VERBOSITY_LEVELS.items()}
VERBOSITY = VERBOSITY_LEVELS[ 'INFO' ]
RANDOM_SEED = 42 # None to disable
# TEST_SETTINGS
NORMALS_SIG_FIGS_TOLERANCE = 4
FLOAT_PLACES_TOLERANCE = 4
# DO NOT CHANGE -- effectively hardcoded
CYCLES_DEVICE = 'GPU' # Not yet implemented!
EULER_ROTATION_ORDER = 'XYZ' # Not yet implemented!
MATTERPORT_SKYBOX_RESOLUTION = 1024
# NOT YET IMPLEMENTED -- changing it won't make a difference
MAX_PITCH_SIGMA = 3 # how many sigma = the max pitch
# AUTOMATICALLY CALCULATED SETTINGS:
MAX_ANGLE_OF_CAMERA_BASE_FROM_PLANE_OF_ROTATION = float( BLUR_ANGLE_FROM_PLANE_OF_ROTATION ) - ( FIELD_OF_VIEW_MAX_RADS / 2.) #pi / 12 TODO: Set this back when not testing
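# Worked check of the arithmetic in the comments above (a hedged sanity check,
# not required by the pipeline): 128 m over a 16-bit channel gives
# 128 / 2**16 = 0.001953125 m per step, i.e. exactly 1/512 m, and the camera
# base may tilt at most BLUR_ANGLE minus half of the maximum field of view.
assert DEPTH_MAX_DISTANCE_METERS / 2 ** int( DEPTH_BITS_PER_CHANNEL ) == 1. / 512
assert MAX_ANGLE_OF_CAMERA_BASE_FROM_PLANE_OF_ROTATION == BLUR_ANGLE_FROM_PLANE_OF_ROTATION - FIELD_OF_VIEW_MAX_RADS / 2.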

77
data/start.py Normal file
View File

@ -0,0 +1,77 @@
## Data generation entry point for real environment
## Output:
## /dataroot/model_id
## /pano
## /points: json formatted view point data
## /mist : mist panorama
## /rgb : rgb panorama
## /normal: surface normal panorama
## How to run
## cd /dataroot
## source activate xxx (blender virtual env)
## python start.py
## Requirements:
## (1) Have Blender installed (v2.78 or v2.79)
## Have python3 environment
## Packages:
## npm install optimist
##
## (2) Have the following initial model files
## /dataroot/model_id
## modeldata/sweep_locations.csv
## modeldata/out_res.obj (if use obj)
## modeldata/out_res.ply (if use ply)
## modeldata/img/high
## modeldata/img/low
from __future__ import print_function
from datatasks import DataTasks
import argparse
import shutil
import os
def model_finished(model_path, model_id):
#check pano/points, pano/rgb, pano/mist, pano/normal
#check file counts
#check point.json
return False
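# A possible implementation of the checks listed above (a hedged sketch; the
# expected subfolders follow the output layout documented at the top of this
# file, and the "non-empty folder" completion criterion is an assumption):
def model_finished_sketch(model_path, model_id):
    pano_dir = os.path.join(model_path, "pano")
    for subdir in ("points", "rgb", "mist", "normal"):
        task_dir = os.path.join(pano_dir, subdir)
        if not os.path.isdir(task_dir) or not os.listdir(task_dir):
            return False
    return True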
def model_process(model_path, model_id):
pano_dir = os.path.join(model_path, "pano")
dt = DataTasks(".", model_path, model_id)
dt.generate_points(2, 1, 1)
dt.create_obj_file()
dt.create_rgb_images()
dt.create_mist_images()
dt.create_normal_images()
shutil.move(os.path.join(model_path, "points"), os.path.join(model_path, "pano", "points"))
## move point folder
return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dataroot", type = str,default = ".", help = "root of model folders")
## Assert blender version
## Assert node js installed
opt = parser.parse_args()
models = []
for root_file in os.listdir(opt.dataroot):
if (os.path.isdir(os.path.join(opt.dataroot, root_file)) and "__" not in root_file and "decode" not in root_file and "node" not in root_file):
models.append(root_file)
for model in models:
print("Processing model: %s" % model)
model_path = os.path.join(opt.dataroot, model)
if model_finished(model_path, model):
print("\tModel %s finished" %model)
else:
model_process(model_path, model)

501
data/utils.py Normal file
View File

@ -0,0 +1,501 @@
"""
Name: utils.py
Author: Sasha Sax, CVGL
Modified by: Zhiyang He
Desc: Contains Blender and Matterport utility functions
Usage: for import only
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from load_settings import settings
import os
import sys
sys.path.append( os.path.dirname( os.path.realpath(__file__) ) )
from activate_env import add_on_path
sys.path.append(add_on_path)
import _cycles
import bpy
import logging
import math
from mathutils import Euler
import numpy as np
import os
import random
import shutil # Temporary dir
import time
import uuid as uu
cube_face_idx_to_skybox_img_idx = { 0: 4, 1: 1, 2: 2, 3: 3, 4: 5, 5: 0 }
img_format_to_ext = { "png": 'png', "jpeg": "jpg", "jpg": "jpg" }
skybox_img_idx_to_cube_face_idx = { v: k for k, v in cube_face_idx_to_skybox_img_idx.items() }
# order of axes is XYZ
axis_and_positive_to_cube_face = { (0, False): 4,
(0, True): 2,
(1, False): 3,
(1, True): 5,
(2, False): 0,
(2, True): 1}
class Profiler(object):
def __init__(self, name, logger=None, level=logging.INFO):
self.name = name
self.logger = logger
self.level = level
def step( self, name ):
""" Returns the duration and stepname since last step/start """
self.summarize_step( start=self.step_start, step_name=name, level=self.level )
now = time.time()
self.step_start = now
def __enter__( self ):
self.start = time.time()
self.step_start = time.time()
return self
def __exit__( self, exception_type, exception_value, traceback ):
self.summarize_step( self.start )
def summarize_step( self, start, step_name="", level=None ):
duration = time.time() - start
step_semicolon = ':' if step_name else ""
if self.logger:
level = level or self.level
self.logger.log( self.level, "{name}{step}: {secs} seconds".format( name=self.name, step=step_semicolon + step_name, secs=duration) )
return duration
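# Example usage (a minimal sketch; the logger argument is optional and the
# sleeps stand in for real work):
def _example_profiler_usage(logger=None):
    with Profiler("ExampleTask", logger) as prf:
        time.sleep(0.01)
        prf.step("first half")      # logs time since __enter__
        time.sleep(0.01)
        prf.step("second half")     # logs time since the previous step
    # __exit__ logs the total duration of the block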
def change_shading( color, factor, lighter=True ):
old_color = np.array( color )
if lighter:
return tuple( old_color + (1. - old_color) * factor )
else:
return tuple( old_color * (1. - factor) )
def create_camera( location, rotation,
field_of_view=settings.FIELD_OF_VIEW_RADS,
sensor_dim=settings.SENSOR_DIM,
resolution=settings.RESOLUTION,
scene=bpy.context.scene,
camera_name="Camera" ):
"""
Creates a camera in the context scene
Args:
location: The origin of the create_camera
rotation: Rotation to start the camera in, relative to global (?)
field_of_view: The camera's field of view, in radians
sensor_dim: Width/height of the (square) camera sensor
resolution: Sets render x and y sizes
Returns:
(camera, camera_data, scene):
camera: The camera from bpy.data.objects.new
camera_data: The camera data from bpy.data.cameras.new
scene: The scene passed in (?)
"""
scene.render.resolution_x = resolution
scene.render.resolution_y = resolution
camera_data = bpy.data.cameras.new( camera_name )
camera = bpy.data.objects.new( name=camera_name, object_data=camera_data )
scene.objects.link(camera)
scene.camera = camera
scene.camera.location = location
scene.camera.rotation_mode = settings.EULER_ROTATION_ORDER
scene.camera.rotation_euler = rotation
# From https://en.wikibooks.org/wiki/Blender_3D:_Noob_to_Pro/Understanding_the_Camera#Specifying_the_Field_of_View
camera_data.lens_unit = 'FOV'
focal_length = sensor_dim / ( 2 * math.tan( field_of_view / 2. ) )
camera_data.lens = focal_length
camera_data.sensor_width = sensor_dim
camera_data.sensor_height = sensor_dim
scene.update()
return (camera, camera_data, scene)
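# Worked example of the focal-length formula above (a sketch using the default
# SENSOR_DIM = 20 and a 60-degree field of view):
def _example_focal_length(sensor_dim=20, field_of_view=math.radians(60)):
    # 20 / (2 * tan(30 deg)) ~= 17.32, the value assigned to camera_data.lens
    return sensor_dim / (2 * math.tan(field_of_view / 2.))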
def create_empty(name, location):
"""
Creates an empty at the given location.
Args:
name: Name for the Empty in Blender.
location: Location of the empty in the Blender model.
Returns:
A reference to the created Empty
"""
scene = bpy.context.scene
empty = bpy.data.objects.new(name, None)
scene.objects.link(empty)
empty.location = location
return empty
def create_material_with_texture(texture, name="material"):
"""
Creates a new material in blender and applies the given texture to it using UV mapping
Args:
texture: A Blender texture
Returns:
material: A Blender material with the texture applied
"""
material = bpy.data.materials.new( name )
material.use_shadeless = True
m_texture = material.texture_slots.add()
m_texture.texture = texture
m_texture.texture_coords = 'UV'
m_texture.use_map_color_diffuse = True
m_texture.use_map_color_emission = True
m_texture.emission_color_factor = 0.5
m_texture.use_map_density = True
m_texture.mapping = 'FLAT'
return material
def create_material_with_color( rgb, name="material", engine='BI' ):
"""
Creates a new material in Blender with the given solid color
Args:
rgb: A 3-tuple of the RGB values that the material will have
Returns:
material: A Blender material with the color applied
"""
material = bpy.data.materials.new( name )
if engine=='BI':
material.use_shadeless = True
# material.use_shadows = False
# material.use_cast_shadows = False
# material.use_mist = False
# material.use_raytrace = False
material.diffuse_color = rgb
elif engine=='CYCLES':
# Create material
material.use_nodes = True
tree = material.node_tree
links = tree.links
# Make sure there are no existing nodes
for node in tree.nodes:
tree.nodes.remove( node )
nodes = tree.nodes
# Create an RGB node that holds the flat color
color_input = nodes.new("ShaderNodeRGB")
color_input.outputs[0].default_value = list(rgb) + [1.0]
# Make the material emit that color (so it's visible in render)
emit_node = nodes.new("ShaderNodeEmission")
links.new( color_input.outputs[ 0 ], emit_node.inputs[ 0 ] )
# Now output that color
out_node = nodes.new("ShaderNodeOutputMaterial")
links.new( emit_node.outputs[ 0 ], out_node.inputs[ 0 ] )
material.use_shadeless = True
return material
def create_render_nodetree( scene ):
''' Clears and creates a render nodetree for a scene
Args:
scene: The Blender scene to render
Returns:
tree, links
'''
scene.use_nodes = True
tree = scene.node_tree
links = tree.links
# Make sure there are no existing nodes
for node in tree.nodes:
tree.nodes.remove( node )
return tree, links
def create_output_node( tree, output_data, tmpdir=None, color_mode='RGB', color_depth=settings.COLOR_BITS_PER_CHANNEL, file_format=settings.PREFERRED_IMG_EXT.upper() ):
''' Creates an output node for a scene render nodetree
Args:
tree: The scene's nodetree
output_data: This will be fed into the input slot of the output node
tmpdir: Dir to save the file. If None, the fn will instead create a Composite output node.
color_mode: In ['RGB', 'BW' ]
file_format: The format to save the image as.
Returns:
save_path: The path where the image will be saved.
'''
links = tree.links
if tmpdir:
out = tree.nodes.new('CompositorNodeOutputFile')
ident = str(uu.uuid4())
out.file_slots[0].path = ident
out.base_path = tmpdir
out.format.color_mode = color_mode
out.format.color_depth = color_depth
out.format.file_format = file_format.upper()
links.new( output_data, out.inputs[ 0 ] )
# Blender peculiarly names its files with 0001 (frame #) at the end
ext = img_format_to_ext[ file_format.lower() ]
temp_filename = "{0}0001.{1}".format( ident, ext )
return os.path.join( tmpdir, temp_filename )
else:
out = tree.nodes.new('CompositorNodeComposite')
links.new( output_data, out.inputs[ 0 ] )
return None
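# Example usage (a hedged sketch; the render-layers node and the tempfile
# directory are assumptions about how callers typically wire this up):
def _example_output_node(scene):
    import tempfile
    tree, links = create_render_nodetree(scene)
    render_layers = tree.nodes.new('CompositorNodeRLayers')
    save_path = create_output_node(tree, render_layers.outputs['Image'],
                                   tmpdir=tempfile.mkdtemp())
    # After bpy.ops.render.render(), the frame appears at save_path ("<uuid>0001.png")
    return save_path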
def delete_all_objects_in_context():
""" Selects all objects in context scene and deletest them. """
for obj in bpy.context.scene.objects:
obj.select = True
bpy.ops.object.delete()
def delete_objects_starting_with( prefix ):
""" Removes all objects whose Blender name begins with the prefix """
old_mode = bpy.context.mode
bpy.ops.object.mode_set(mode='OBJECT')
for obj in bpy.data.objects:
if obj.name.startswith( prefix ):
obj.select = True
bpy.context.scene.objects.unlink( obj )
bpy.data.objects.remove( obj )
else:
obj.select = False
bpy.ops.object.delete()
bpy.ops.object.mode_set( mode=old_mode )
def delete_cameras_and_empties():
# oldMode = bpy.context.mode
# bpy.ops.object.mode_set(mode='OBJECT')
for obj in bpy.data.objects:
if obj.name.startswith("Camera") or obj.name.startswith("Empty"):
obj.select = True
bpy.context.scene.objects.unlink(obj)
bpy.data.objects.remove(obj)
else:
obj.select = False
bpy.ops.object.delete()
# bpy.ops.object.mode_set(mode=oldMode)
def get_euler_rotation_between( start, end ):
"""
Returns the Euler rotation so that start.rotate( get_euler_rotation_between( start, end ) ) == end
Args:
start: An Euler
end: An Euler with the same ordering
Returns:
An Euler
"""
# Gets the rotation by converting Euler angles to rotation matrices and composing
# return end.to_quaternion().rotation_difference( start.to_quaternion() ).to_euler()
return ( end.to_matrix() * start.to_matrix().inverted() ).to_euler()
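# Example of the property promised above (a sketch; the angles are arbitrary):
def _example_rotation_between():
    start = Euler((0.1, 0.2, 0.3), 'XYZ')
    end = Euler((0.4, 0.5, 0.6), 'XYZ')
    diff = get_euler_rotation_between(start, end)
    check = start.copy()
    check.rotate(diff)      # 'check' should now represent the same rotation as 'end'
    return check, end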
def get_mesh():
scene = bpy.context.scene
for ob in scene.objects:
# whatever objects you want to join...
if ob.type == 'MESH':
return ob
def make_camera_data_pano( camera_data ):
render = bpy.context.scene.render
render.engine = 'CYCLES'
camera_data.type = 'PANO'
camera_data.cycles.panorama_type = 'EQUIRECTANGULAR'
render.resolution_x, render.resolution_y = settings.PANO_RESOLUTION
def point_camera_at_target_OLD( camera, target ):
"""
Points the given camera at the target. If the target moves, so will the camera.
This will leave only the camera selected.
"""
constraint = camera.constraints.new(type="TRACK_TO") # Works via adding a constraint to camera
constraint.target = target
constraint.track_axis = 'TRACK_NEGATIVE_Z' # Points the local negative z axis (lens) at target
constraint.up_axis = 'UP_Y' # Keeps the local y axis pointing in the global positive z direction. for orientation
bpy.ops.object.select_all(action='DESELECT') # Make sure that only the camera is transformed
camera.select = True
bpy.ops.object.visual_transform_apply()
camera.constraints.remove( constraint )
def point_camera_at_target( camera, target, align_with_global_up_axis=False, lock_pitch=False ):
"""
Points the given camera at the target. If the target moves, so will the camera.
This will leave only the camera selected.
"""
# constraint = camera.constraints.new(type="TRACK_TO") # Works via adding a constraint to camera
# constraint.target = target
# constraint.track_axis = 'TRACK_NEGATIVE_Z' # Points the local negative z axis (lens) at target
# constraint.up_axis = 'UP_Y' # Keeps the local y axis pointing in the global positive z direction. for orientation
# bpy.ops.object.select_all(action='DESELECT') # Make sure that only the camera is transformed
# camera.select = True
# bpy.ops.object.visual_transform_apply()
# camera.constraints.remove( constraint )
target_old_rotation_euler = target.rotation_euler.copy()
target.rotation_euler = camera.rotation_euler
target.rotation_euler.rotate_axis( "X", -math.pi / 2) # Since the empty's axes are ordered differently than the camera's
if not lock_pitch: # Use unlocked track
constraint = camera.constraints.new(type="TRACK_TO") # Works via adding a constraint to camera
constraint.target = target
constraint.track_axis = 'TRACK_NEGATIVE_Z' # Points the local negative z axis (lens) at target
constraint.up_axis = 'UP_Y' # Keeps the local y axis pointing in the global positive z direction. for orientation
if not align_with_global_up_axis:
# target.rotation_euler = camera.rotation_euler
constraint.use_target_z = True # Use the target's local Z axis, not the global Z axis, as the up direction
else:
constraint = camera.constraints.new(type="LOCKED_TRACK") # Works via adding a constraint to camera
constraint.target = target
constraint.track_axis = 'TRACK_NEGATIVE_Z' # Points the local negative z axis (lens) at target
constraint.lock_axis = 'LOCK_Y' # Keeps the local y axis pointing in the global positive z direction. for orientation
bpy.ops.object.select_all(action='DESELECT') # Make sure that only the camera is transformed
camera.select = True
bpy.ops.object.visual_transform_apply()
camera.constraints.remove( constraint )
# camera.rotation_euler = camera.rotation_euler
# target.rotation_euler = target_old_rotation_euler
def make_render_fn( setup_scene_fn, setup_nodetree_fn, logger=None ):
""" Renders a scene
Args:
setup_scene_fn: A function which accepts (scene)
setup_nodetree_fn: A function which accepts (scene, output_dir)
Returns:
A function which accepts( scene, save_path ) and renders the scene
to that save path, using the given nodetree function.
"""
def render_fn( scene, save_path ):
"""
Renders an image from the POV of the camera and saves it out
Args:
scene: A Blender scene that the camera will render
save_path: Where to save the image
"""
outdir, _ = os.path.split( save_path )
with Profiler( "Render", logger ) as prf:
setup_scene_fn( scene )
render_save_path = setup_nodetree_fn( scene, outdir )
prf.step( "Setup" )
# bpy.ops.render.render()
quiet_render()
prf.step( "Render" )
with Profiler( "Saving", logger ) as prf:
shutil.move( render_save_path, save_path )
return render_fn
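# Example usage (a hedged sketch; the two setup functions are hypothetical
# stand-ins for the task-specific scene/nodetree setup in the create_*_images scripts):
def _example_make_render_fn(setup_task_scene, setup_task_nodetree, logger=None):
    render_fn = make_render_fn(setup_task_scene, setup_task_nodetree, logger)
    render_fn(bpy.context.scene, "/tmp/example_task.png")   # hypothetical save path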
def quiet_render():
''' Sends the noisy Blender render output to a scratch log file, which is deleted afterwards '''
# redirect output to log file
logfile = 'blender_render.log'
open(logfile, 'a').close()
old = os.dup(1)
sys.stdout.flush()
os.close(1)
os.open(logfile, os.O_WRONLY)
# do the rendering
bpy.ops.render.render(write_still=True)
# disable output redirection
os.close(1)
os.dup(old)
os.close(old)
os.remove(logfile)
def set_use_of_gpu_to_render_scene( use=True, compute_device='CUDA_0' ):
"""
Enables or disables GPU when using cycles
Args:
use: Whether to use GPU
compute_device: Which device to use for cycles. Usually one of ['CUDA_MULTI_0', 'CUDA_0', 'CUDA_1', ...]
"""
bpy.context.scene.cycles.device = 'GPU' if use else 'CPU'
bpy.context.user_preferences.system.compute_device_type = 'CUDA'
bpy.context.user_preferences.system.compute_device = compute_device
print("Available compute devices: " + str(_cycles.available_devices()))
print("Default CUDA device: " + bpy.context.user_preferences.system.compute_device)
print("Default cycles device: " + bpy.context.scene.cycles.device)
if settings.VERBOSITY >= settings.VERBOSITY_LEVELS[ 'DEBUG' ]:
print("Default CUDA device: " + bpy.context.user_preferences.system.compute_device)
print("Render engine: " + scene.render.engine )
def set_preset_render_settings( scene, presets=[ 'BASE' ] ):
""" Sets Blender render settings to common preset.
Many of the tasks don't require sampling in cycles, and don't
require antialiasing. This function disables such features.
Args:
scene: The scene for which to set settings.
preset: The types of preset to use. Allowable types:
[ 'BASE', 'RAW' ]
"""
if 'BASE' in presets:
# If using cycles, don't sample.
scene.cycles.samples = 1
scene.cycles.max_bounces = 1
scene.cycles.min_bounces = 1
# Quality settings
scene.render.resolution_percentage = 100
scene.render.tile_x = settings.TILE_SIZE
scene.render.tile_y = settings.TILE_SIZE
# Turn off all but the first renderlayer
for i in range( len( scene.layers ) ):
scene.layers[ i ] = ( i == 0 )
render_layer = scene.render.layers["RenderLayer"]
bpy.types.WorldLighting.indirect_bounces = 1
scene.render.layers[0].use_all_z = True
# We don't need raytracing or shadows
render_layer.use_edge_enhance = False
scene.render.use_sss = False
scene.render.use_envmaps = False
scene.render.use_raytrace = False
scene.render.use_shadows = False
scene.render.use_simplify = True
# Antialiasing leads to incorrect values
scene.render.use_antialiasing = False
if 'NON-COLOR' in presets: # Save as non-color data
scene.view_settings.view_transform = 'Raw'
def set_random_seed():
# Set seeds
if settings.RANDOM_SEED:
np.random.seed( settings.RANDOM_SEED )
random.seed( settings.RANDOM_SEED )