add Gazebo_Distributed_MPI document

This commit is contained in:
zhangshuai 2019-04-18 10:27:54 +08:00
parent 093964e078
commit a2732f8bbd
3328 changed files with 860205 additions and 0 deletions

2
Gazebo_Distributed_MPI/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
build
.vscode

View File

@ -0,0 +1,2 @@
Nate Koenig <nkoenig@osrfoundation.org>
John Hsu <hsu@osrfoundation.org>

View File

@ -0,0 +1,428 @@
# Top-level build configuration for Gazebo (Distributed MPI variant).
cmake_minimum_required(VERSION 2.8.6 FATAL_ERROR)
if(COMMAND CMAKE_POLICY)
# Opt in to NEW behavior for CMP0003 (no implicit linker search paths for
# libraries given by full path) and CMP0004 (no stray whitespace in library
# names passed to target_link_libraries).
CMAKE_POLICY(SET CMP0003 NEW)
CMAKE_POLICY(SET CMP0004 NEW)
endif(COMMAND CMAKE_POLICY)
project (Gazebo)
# Lower/upper-case variants of the project name, used for file naming and the
# cmake config-file package name later in this file.
string (TOLOWER ${PROJECT_NAME} PROJECT_NAME_LOWER)
string (TOUPPER ${PROJECT_NAME} PROJECT_NAME_UPPER)
set (GAZEBO_MAJOR_VERSION 7)
set (GAZEBO_MINOR_VERSION 14)
# The patch version may have been bumped for prerelease purposes; be sure to
# check gazebo-release/ubuntu/debian/changelog@default to determine what the
# next patch version should be for a regular release.
set (GAZEBO_PATCH_VERSION 0)
# GAZEBO_VERSION is <major>.<minor>; GAZEBO_VERSION_FULL appends the patch.
set (GAZEBO_VERSION ${GAZEBO_MAJOR_VERSION}.${GAZEBO_MINOR_VERSION})
set (GAZEBO_VERSION_FULL ${GAZEBO_MAJOR_VERSION}.${GAZEBO_MINOR_VERSION}.${GAZEBO_PATCH_VERSION})
message (STATUS "${PROJECT_NAME} version ${GAZEBO_VERSION_FULL}")
# Directory holding the project's helper CMake modules, included throughout
# this file.
set (gazebo_cmake_dir ${PROJECT_SOURCE_DIR}/cmake CACHE PATH "Location of CMake scripts")
########################################
# Package Creation:
include (${gazebo_cmake_dir}/gazebo_cpack.cmake)
set (CPACK_PACKAGE_VERSION "${GAZEBO_VERSION_FULL}")
set (CPACK_PACKAGE_VERSION_MAJOR "${GAZEBO_MAJOR_VERSION}")
set (CPACK_PACKAGE_VERSION_MINOR "${GAZEBO_MINOR_VERSION}")
set (CPACK_PACKAGE_VERSION_PATCH "${GAZEBO_PATCH_VERSION}")
# Only wire up CPack when a generator was requested (e.g. -DCPACK_GENERATOR=DEB).
if (CPACK_GENERATOR)
message(STATUS "Found CPack generators: ${CPACK_GENERATOR}")
# GAZEBO_CPACK_CFG_FILE is presumably defined by gazebo_cpack.cmake above —
# NOTE(review): confirm against that module.
configure_file("${gazebo_cmake_dir}/cpack_options.cmake.in" ${GAZEBO_CPACK_CFG_FILE} @ONLY)
set(CPACK_PROJECT_CONFIG_FILE ${GAZEBO_CPACK_CFG_FILE})
include (CPack)
endif()
# If we're configuring only to package source, stop here
if (PACKAGE_SOURCE_ONLY)
message(WARNING "Configuration was done in PACKAGE_SOURCE_ONLY mode. You can build a tarball (make package_source), but nothing else.")
return()
endif()
# Documentation
add_subdirectory(doc)
# Configure documentation uploader script into the build tree.
# @ONLY restricts substitution to @VAR@ placeholders in the template.
configure_file("${CMAKE_SOURCE_DIR}/cmake/upload_doc.sh.in"
${CMAKE_BINARY_DIR}/upload_doc.sh @ONLY)
# If we're configuring only to build docs, stop here
if (DOC_ONLY)
message(WARNING "Configuration was done in DOC_ONLY mode. You can build documentation (make doc), but nothing else.")
return()
endif()
enable_testing()
# Use GNUInstallDirs to get canonical paths (CMAKE_INSTALL_LIBDIR etc.)
include(GNUInstallDirs)
# with -fPIC
if(UNIX AND NOT WIN32)
# Default install prefix for UNIX; a user-supplied value wins over this cache default.
set (CMAKE_INSTALL_PREFIX "/usr" CACHE STRING "Install Prefix")
find_program(CMAKE_UNAME uname /bin /usr/bin /usr/local/bin )
if(CMAKE_UNAME)
# Detect the host processor via `uname -m`. NOTE(review): exec_program is
# deprecated in newer CMake; kept here for the 2.8.6 minimum version.
exec_program(uname ARGS -m OUTPUT_VARIABLE CMAKE_SYSTEM_PROCESSOR)
set(CMAKE_SYSTEM_PROCESSOR ${CMAKE_SYSTEM_PROCESSOR} CACHE INTERNAL
"processor type (i386 and x86_64)")
# Position-independent code flag for 64-bit builds.
if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
ADD_DEFINITIONS(-fPIC)
endif(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
endif(CMAKE_UNAME)
endif()
set (CMAKE_INCLUDE_DIRECTORIES_PROJECT_BEFORE ON)
# developer's option to cache PKG_CONFIG_PATH and
# LD_LIBRARY_PATH for local installs
if(PKG_CONFIG_PATH)
set (ENV{PKG_CONFIG_PATH} ${PKG_CONFIG_PATH}:$ENV{PKG_CONFIG_PATH})
endif()
if(LD_LIBRARY_PATH)
set (ENV{LD_LIBRARY_PATH} ${LD_LIBRARY_PATH}:$ENV{LD_LIBRARY_PATH})
endif()
# Install locations; headers go under a versioned gazebo-<major> directory.
set (INCLUDE_INSTALL_DIR "${CMAKE_INSTALL_INCLUDEDIR}/gazebo-${GAZEBO_MAJOR_VERSION}/gazebo")
set (LIB_INSTALL_DIR ${CMAKE_INSTALL_LIBDIR})
set (BIN_INSTALL_DIR ${CMAKE_INSTALL_BINDIR})
set (BUILD_GAZEBO ON CACHE INTERNAL "Build Gazebo" FORCE)
# Accumulators for configure-time diagnostics, reported near the end of this
# file; FORCE clears them on every re-configure.
set (build_errors "" CACHE INTERNAL "build errors" FORCE)
set (build_warnings "" CACHE INTERNAL "build warnings" FORCE)
# Minimum versions for third-party dependencies — presumably consumed by
# SearchForStuff.cmake (included below); verify there.
set (MIN_OGRE_VERSION 1.7.4 CACHE INTERNAL "Ogre version requirement" FORCE)
set (MIN_BOOST_VERSION 1.40.0 CACHE INTERNAL "Boost min version requirement" FORCE)
set (FREEIMAGE_MAJOR_VERSION 3 CACHE INTERNAL "FreeImage major version requirement" FORCE)
set (FREEIMAGE_MINOR_VERSION 9 CACHE INTERNAL "FreeImage minor version requirement" FORCE)
set (MIN_FREEIMAGE_VERSION ${FREEIMAGE_MAJOR_VERSION}.${FREEIMAGE_MINOR_VERSION}.0 CACHE INTERNAL "FreeImage version requirement" FORCE)
include (${gazebo_cmake_dir}/DissectVersion.cmake)
#####################################
# Check for low memory version to use in some tests.
# USE_LOW_MEMORY_TESTS defaults to FALSE; defining it on the command line
# (-DUSE_LOW_MEMORY_TESTS=...) selects the low-memory test variants.
if(NOT DEFINED USE_LOW_MEMORY_TESTS)
  set (USE_LOW_MEMORY_TESTS FALSE)
  message (STATUS "High memory tests: enabled")
else()
  # Bug fix: this previously set USE_LOW_MEMORY_TEST (missing trailing "S"),
  # a dead variable, so USE_LOW_MEMORY_TESTS was never normalized to TRUE.
  set (USE_LOW_MEMORY_TESTS TRUE)
  message (STATUS "High memory tests: disabled, low memory versions will be used")
endif()
######################################
# Enable screen tests by default
# (tests that need a display; can be disabled with -DENABLE_SCREEN_TESTS=FALSE)
if(NOT DEFINED ENABLE_SCREEN_TESTS)
set(ENABLE_SCREEN_TESTS TRUE)
endif()
# Locate all third-party dependencies; see cmake/SearchForStuff.cmake.
message (STATUS "\n\n====== Finding 3rd Party Packages ======")
include (${gazebo_cmake_dir}/SearchForStuff.cmake)
message (STATUS "----------------------------------------\n")
#####################################
# Define some variables that are going to be used in two places:
# 1. In CMake code to pass preprocessor definitions to certain source files
# (especially in common/CMakeLists.txt).
# 2. In the generation of cmake/setup.sh from cmake/setup.sh.in
set(GAZEBO_DEFAULT_MASTER_HOST localhost)
set(GAZEBO_DEFAULT_MASTER_PORT 11345)
set(GAZEBO_PLUGIN_PATH ${CMAKE_INSTALL_PREFIX}/${LIB_INSTALL_DIR}/gazebo-${GAZEBO_MAJOR_VERSION}/plugins)
set(GAZEBO_MODEL_PATH ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DATAROOTDIR}/gazebo-${GAZEBO_MAJOR_VERSION}/models)
set(GAZEBO_RESOURCE_PATH ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DATAROOTDIR}/gazebo-${GAZEBO_MAJOR_VERSION})
set(GAZEBO_MODEL_DATABASE_URI http://models.gazebosim.org)
# OGRE_PLUGINDIR is presumably set by SearchForStuff.cmake — verify there.
set(OGRE_RESOURCE_PATH ${OGRE_PLUGINDIR})
# Seems that OGRE_PLUGINDIR can end in a newline, which will cause problems when
# we pass it to the compiler later.
string(REPLACE "\n" "" OGRE_RESOURCE_PATH ${OGRE_RESOURCE_PATH})
# Check for DRI capable Display
include (${gazebo_cmake_dir}/CheckDRIDisplay.cmake)
#####################################
MESSAGE(STATUS "Checking gazebo build type")
# Set the default build type
if (NOT CMAKE_BUILD_TYPE)
set (CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING
"Choose the type of build, options are: Debug Release RelWithDebInfo Profile Check" FORCE)
endif (NOT CMAKE_BUILD_TYPE)
# TODO: still convert to uppercase to keep backwards compatibility with
# uppercase old supported and deprecated modes
string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPERCASE)
if (NOT DEFINED HDF5_INSTRUMENT)
set (HDF5_INSTRUMENT FALSE)
endif()
# One boolean per recognized build type; the dispatch below flips exactly one
# of these to TRUE (or records a build error for an unknown type).
set (BUILD_TYPE_PROFILE FALSE)
set (BUILD_TYPE_RELEASE FALSE)
set (BUILD_TYPE_RELWITHDEBINFO FALSE)
set (BUILD_TYPE_DEBUG FALSE)
set (BUILD_TYPE_COVERAGE FALSE)
if ("${CMAKE_BUILD_TYPE_UPPERCASE}" STREQUAL "PROFILE")
set (BUILD_TYPE_PROFILE TRUE)
elseif ("${CMAKE_BUILD_TYPE_UPPERCASE}" STREQUAL "RELEASE")
set (BUILD_TYPE_RELEASE TRUE)
elseif ("${CMAKE_BUILD_TYPE_UPPERCASE}" STREQUAL "RELWITHDEBINFO")
set (BUILD_TYPE_RELWITHDEBINFO TRUE)
elseif ("${CMAKE_BUILD_TYPE_UPPERCASE}" STREQUAL "DEBUG")
set (BUILD_TYPE_DEBUG TRUE)
elseif ("${CMAKE_BUILD_TYPE_UPPERCASE}" STREQUAL "COVERAGE")
# Coverage builds disable Simbody and DART (see referenced issues) and add
# a `coverage` target via CodeCoverage.cmake.
BUILD_WARNING ("Simbody physics engine not supported during Coverage builds (issue #1849).")
set (HAVE_SIMBODY FALSE)
BUILD_WARNING ("DART physics engine not supported during Coverage builds (issue #1160).")
set (HAVE_DART FALSE)
set (BUILD_TYPE_COVERAGE TRUE)
include (${gazebo_cmake_dir}/CodeCoverage.cmake)
setup_target_for_coverage(coverage coverage)
else()
build_error("CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE} unknown. Valid options are: Debug Release RelWithDebInfo Profile Check")
endif()
#####################################
# Handle CFlags
# USE_UPSTREAM_CFLAGS (default TRUE)
if(NOT DEFINED USE_UPSTREAM_CFLAGS)
  set (USE_UPSTREAM_CFLAGS True)
endif()
# Drop any cached aggregate so flags are recomputed on this configure run.
unset (CMAKE_C_FLAGS_ALL CACHE)
# USE_HOST_CFLAGS (default TRUE)
# Will check building host machine for proper cflags
if(NOT DEFINED USE_HOST_CFLAGS OR USE_HOST_CFLAGS)
  message(STATUS "Enable host CFlags")
  include (${gazebo_cmake_dir}/HostCFlags.cmake)
endif()
# Will use predefined gazebo developers cflags
# this needs to be called after HostCFlags
if(USE_UPSTREAM_CFLAGS)
  # use gazebo own set of flags
  unset (CMAKE_CXX_FLAGS CACHE)
  message(STATUS "Enable upstream CFlags")
  include(${gazebo_cmake_dir}/DefaultCFlags.cmake)
endif()
# Check if warning options are available for the compiler and return WARNING_CXX_FLAGS variable
# Fixed: -Wno-unused-value was listed five times; once is sufficient.
filter_valid_compiler_flags(-Wall -Wextra -Wno-long-long -Wno-unused-value
                            -Wfloat-equal -Wshadow
                            -Winit-self -Wswitch-default -Wmissing-include-dirs -pedantic)
# Check and add visibility hidden by default. Only in UNIX
# Windows and MacosX does not handled properly the hidden compilation
if (UNIX AND NOT APPLE)
  filter_valid_compiler_flags(-fvisibility=hidden -fvisibility-inlines-hidden)
endif()
if (MSVC)
  # Unable to be filtered flags (failing due to limitations in filter_valid_compiler_warnings)
  # Handling exceptions rightly
  set(UNFILTERED_FLAGS "/EHsc")
endif()
# Visual Studio enables c++11 support by default
if (NOT MSVC)
  set(UNFILTERED_FLAGS "-std=c++11")
endif()
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${VALID_CXX_FLAGS} ${UNFILTERED_FLAGS}")
#################################################
# OS Specific initialization
if (UNIX)
  gz_setup_unix()
else ()
  # Fixed legacy "else (WIN32)": arguments to else() are ignored by CMake, so
  # the old form misleadingly suggested a WIN32 test — this branch actually
  # runs for ANY non-UNIX platform.
  gz_setup_windows()
endif()
if(APPLE)
  gz_setup_apple()
endif()
# Main includes for compilation
include_directories(${PROJECT_SOURCE_DIR} ${PROJECT_BINARY_DIR})
# SYSTEM suppresses warnings originating from third-party headers.
include_directories(SYSTEM
  ${IGNITION-MATH_INCLUDE_DIRS}
)
#################################################
# Configure 3rd Party Packages after OS Specific initialization
message (STATUS "\n\n====== Configuring 3rd Party Packages ======")
add_subdirectory(deps)
message (STATUS "----------------------------------------\n")
#################################################
# Print warnings and errors
# build_warnings is the cache list accumulated (via BUILD_WARNING) during
# configuration; dump it here so problems are visible in one place.
if ( build_warnings )
message(STATUS "BUILD WARNINGS")
foreach (msg ${build_warnings})
message(STATUS ${msg})
endforeach ()
message(STATUS "END BUILD WARNINGS\n")
endif (build_warnings)
########### Add uninstall target ###############
# Generate cmake_uninstall.cmake in the build tree and expose it as a
# "make uninstall" target that runs it in script mode (-P).
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in"
"${CMAKE_CURRENT_BINARY_DIR}/cmake/cmake_uninstall.cmake"
IMMEDIATE @ONLY)
add_custom_target(uninstall
"${CMAKE_COMMAND}" -P "${CMAKE_CURRENT_BINARY_DIR}/cmake/cmake_uninstall.cmake")
# Abort configuration if any build_error() was recorded earlier; otherwise
# generate config headers, environment scripts, and descend into subprojects.
if (build_errors)
message(STATUS "BUILD ERRORS: These must be resolved before compiling.")
foreach (msg ${build_errors})
message(STATUS ${msg})
endforeach ()
message(STATUS "END BUILD ERRORS\n")
message (FATAL_ERROR "Errors encountered in build. Please see the BUILD ERRORS above.")
else (build_errors)
########################################
# Write the config.h file
configure_file (${gazebo_cmake_dir}/gazebo_config.h.in ${PROJECT_BINARY_DIR}/gazebo/gazebo_config.h)
gz_install_includes("" ${PROJECT_BINARY_DIR}/gazebo/gazebo_config.h)
# Environment setup script for users (sourced to set plugin/model paths).
configure_file(${CMAKE_SOURCE_DIR}/cmake/setup.sh.in ${PROJECT_BINARY_DIR}/setup.sh @ONLY)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/setup.sh DESTINATION ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DATAROOTDIR}/gazebo-${GAZEBO_MAJOR_VERSION}/)
# Also install the setup.sh in an unversioned location
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/setup.sh DESTINATION ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DATAROOTDIR}/gazebo/)
if (DEFINED CMAKE_CXX_FLAGS)
message (STATUS "Custom CFlags:${CMAKE_CXX_FLAGS}")
else()
message (STATUS "Use default CFlags")
endif()
message (STATUS "Build Type: ${CMAKE_BUILD_TYPE}")
message (STATUS "Install path: ${CMAKE_INSTALL_PREFIX}")
if (BUILD_GAZEBO)
set(TEST_TYPE "UNIT")
add_subdirectory(gazebo)
add_subdirectory(media)
add_subdirectory(test)
add_subdirectory(tools)
add_subdirectory(plugins)
add_subdirectory(interfaces)
add_subdirectory(worlds)
add_subdirectory(models)
endif (BUILD_GAZEBO)
########################################
# Make the package config files
set (pkgconfig_files gazebo_ode gazebo_transport gazebo)
# set boost pkgconfig cflags
# Convert the Boost include-dir list into "-I<dir> -I<dir> ..." form.
set (Boost_PKGCONFIG_CFLAGS ${Boost_INCLUDE_DIRS})
if (NOT "${Boost_PKGCONFIG_CFLAGS}" STREQUAL "")
set (Boost_PKGCONFIG_CFLAGS "-I${Boost_PKGCONFIG_CFLAGS}")
endif (NOT "${Boost_PKGCONFIG_CFLAGS}" STREQUAL "")
string (REPLACE ";" " -I" Boost_PKGCONFIG_CFLAGS "${Boost_PKGCONFIG_CFLAGS}")
# set boost pkgconfig libs
# Same transformation for library dirs: "-L<dir> -L<dir> ...".
set (Boost_PKGCONFIG_LIBS ${Boost_LIBRARY_DIRS})
if (NOT "${Boost_PKGCONFIG_LIBS}" STREQUAL "")
set (Boost_PKGCONFIG_LIBS "-L${Boost_PKGCONFIG_LIBS}")
endif(NOT "${Boost_PKGCONFIG_LIBS}" STREQUAL "")
string (REPLACE ";" " -L" Boost_PKGCONFIG_LIBS "${Boost_PKGCONFIG_LIBS}")
# Append one -l<name> per Boost library file, stripping the "lib" prefix.
foreach (b ${Boost_LIBRARIES})
get_filename_component(bname ${b} NAME_WE)
# Prefix always -l
set (bname "-l${bname}")
# Remove the prefix lib (not always present, like in pthread)
string (REPLACE "lib" "" bname "${bname}")
set (Boost_PKGCONFIG_LIBS "${Boost_PKGCONFIG_LIBS} ${bname}")
endforeach(b)
# Generate and install one .pc file per entry in pkgconfig_files.
foreach (pkgconfig ${pkgconfig_files})
configure_file(${CMAKE_SOURCE_DIR}/cmake/pkgconfig/${pkgconfig}.in ${CMAKE_CURRENT_BINARY_DIR}/cmake/pkgconfig/${pkgconfig}.pc @ONLY)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/cmake/pkgconfig/${pkgconfig}.pc DESTINATION ${LIB_INSTALL_DIR}/pkgconfig COMPONENT pkgconfig)
endforeach()
########################################
# Make the cmake config files
set(PKG_NAME ${PROJECT_NAME_UPPER})
# Order is important, if A depends on B, please add B after A.
# The list should have at the very end the libraries
# without internal interdependencies
set(PKG_LIBRARIES
gazebo
gazebo_client
gazebo_gui
gazebo_sensors
gazebo_rendering
)
if (INCLUDE_PLAYER)
set(PKG_LIBRARIES ${PKG_LIBRARIES} gazebo_player)
endif()
set(PKG_LIBRARIES ${PKG_LIBRARIES}
gazebo_physics
gazebo_ode
)
set(PKG_LIBRARIES ${PKG_LIBRARIES}
gazebo_transport
gazebo_msgs
gazebo_util
gazebo_common)
# No other internal dependencies:
set(PKG_LIBRARIES ${PKG_LIBRARIES}
gazebo_gimpact
gazebo_opcode
gazebo_opende_ou
gazebo_math
)
# Ship the bundled ccd library only when a system one was not found.
if (NOT CCD_FOUND)
set(PKG_LIBRARIES ${PKG_LIBRARIES} gazebo_ccd)
endif()
set(PKG_DEPENDS Boost Protobuf SDFormat OGRE)
set(cmake_conf_file "cmake/gazebo-config.cmake")
set(cmake_conf_version_file "cmake/gazebo-config-version.cmake")
configure_file("${CMAKE_CURRENT_SOURCE_DIR}/${cmake_conf_file}.in" "${CMAKE_CURRENT_BINARY_DIR}/${cmake_conf_file}" @ONLY)
# Use write_basic_package_version_file to generate a ConfigVersion file that
# allow users of gazebo to specify the API or version to depend on
# TODO: keep this instruction until deprecate Ubuntu/Precise and update with
# https://github.com/Kitware/CMake/blob/v2.8.8/Modules/CMakePackageConfigHelpers.cmake
include(WriteBasicConfigVersionFile)
write_basic_config_version_file(
${CMAKE_CURRENT_BINARY_DIR}/${cmake_conf_version_file}
VERSION "${GAZEBO_VERSION_FULL}"
COMPATIBILITY SameMajorVersion)
# Install both config files for find_package(gazebo) consumers.
install(FILES
${CMAKE_CURRENT_BINARY_DIR}/${cmake_conf_file}
${CMAKE_CURRENT_BINARY_DIR}/${cmake_conf_version_file}
DESTINATION
${LIB_INSTALL_DIR}/cmake/${PROJECT_NAME_LOWER}/
COMPONENT cmake)
########################################
# If present, load platform-specific build hooks. This system is used,
# for example, by the Ubuntu overlay (in the gazebo-release repo), to
# arrange for installation of Ubuntu-specific application-launching
# configuration.
if (EXISTS ${PROJECT_SOURCE_DIR}/cmake/packager-hooks/CMakeLists.txt)
message(STATUS "Loading packager build hooks from cmake/packager-hooks")
add_subdirectory(cmake/packager-hooks)
endif()
message(STATUS "Configuration successful. Type make to compile gazebo")
endif(build_errors)

View File

@ -0,0 +1,341 @@
# Introduction
Interested in developing new features, fixing bugs, or making any other
kind of contribution which touches Gazebo's codebase? Read on to find out how!
## Development process
We follow a development process designed to reduce errors, encourage
collaboration, and make high quality code. The process may seem rigid and
tedious, but every step is worth the effort.
### Steps to follow
1. Are you sure?
Run through this mental checklist before getting started.
1. Has your idea already been done, or maybe someone is already working on it?
Check [answers.gazebosim.org](http://answers.gazebosim.org) and the [issue tracker](https://bitbucket.org/osrf/gazebo/issues).
1. Get feedback from the Gazebo core team.
Send an email to the
[mailing list](https://groups.google.com/a/osrfoundation.org/forum/#!forum/gazebo),
post a question on [Gazebo Answers](http://answers.gazebosim.org), or use the
[issue tracker](https://bitbucket.org/osrf/gazebo/issues) to get feedback from
Gazebo developers.
1. [Fork Gazebo](https://bitbucket.org/osrf/gazebo/fork)
This will create your own personal copy of Gazebo. All of your development
should take place in your fork.
1. Choose a base branch
If your changes will break API or ABI, then base your new branch off of
`default`. If your changes don't break API/ABI and you would like them
to be released to an existing gazebo release with major version `N`,
then use branch `gazeboN` as the base.
1. Work out of a branch
Always work out of a new branch, never off of the base branch. This is a
good habit to get in, and will make your life easier.
1. Write your code.
This is the fun part.
1. Write tests
In most cases, a pull request will only be accepted if it has tests. See the
"Write tests" section below for more information.
1. Compiler warnings
Code must have zero compile warnings, or at least make sure your pull
request is not adding new warnings.
1. Style
Static code checking analyzes your code for bugs, such as potential memory
leaks, and style. Gazebo's static code checker uses `cppcheck`, and a
modified `cpplint`. You'll need to install cppcheck on your system. Ubuntu
users can install via:
sudo apt-get install cppcheck
To check your code, run the following script from the root of the Gazebo
sources:
sh tools/code_check.sh
It takes a few minutes to run. Fix all errors and warnings until the output
looks like:
Total errors found: 0
The tool does not catch all style errors. See the "Style" section below for
more information.
1. Tests pass
There must be no failing tests. You can check by running `make test` in
your build directory.
Running all tests in Gazebo may take a long time. If you expect your changes
to only affect a few different tests, it should be enough to run each one
of them individually from the build directory, for example:
./test/integration/INTEGRATION_world
1. Documentation.
Document all your code. Every class, function, member variable must have
doxygen comments. All code in source files must have documentation that
describes the functionality. This will help reviewers and future developers.
1. Review your code.
Before submitting your code through a pull request, take some time to
review everything line-by-line. The review process will go much faster if
you make sure everything is perfect before other people look at your code.
There is a bit of the human-condition involved here. Folks are less likely
to spend time reviewing your code if it's sloppy.
1. Small pull requests
A large pull request is hard to review, and will take a long time. It is
worth your time to split a large pull request into multiple smaller pull
requests. For reference, here are a few examples:
* [Small, very nice](https://bitbucket.org/osrf/gazebo/pull-request/1732)
* [Medium, still okay](https://bitbucket.org/osrf/gazebo/pull-request/1700/)
* [Too large](https://bitbucket.org/osrf/gazebo/pull-request/30)
1. [Make a pull request](https://bitbucket.org/osrf/gazebo/pull-request/new)
Submit a pull request when you're ready.
1. Continuous integration
The moment you make a pull request, a few jobs in our
[continuous integration](http://build.osrfoundation.org/)
server will be started. These jobs will build your branch on Linux, Mac and
Windows, run all tests and check for warnings.
Your pull request will be updated with the status of these builds. Take some
time to check these builds and see if you've introduced test failures,
warnings or broke some build. If you did and know how to fix it, do so. If
you don't know, speak up and someone may try to help you.
1. Review
At least two other people have to approve your pull request before it can
be merged. Please be responsive to any questions and comments.
1. Done, phew.
Once you have met all the requirements, your code will be merged. Thanks
for improving Gazebo!
### Internal Developers
This section is targeted mostly at people who have commit access to the main
repositories.
In addition to the general development process, please follow these steps
before submitting a pull request. Each step is pass/fail, where the test or
check must pass before continuing to the next step.
1. Run the style checker on your personal computer
1. Run all, or only relevant, tests on your personal computer
1. Run your branch through a Jenkins Linux no-gpu build
1. Run your branch through a Jenkins Linux Nvidia build
1. Run your branch through a Jenkins Homebrew build
1. Run your branch through a Jenkins Windows build
1. Run your branch through the ABI/API checker if targeting a release branch
1. Submit the pull request, and make sure the following are included
(a set of jenkins jobs will run automatically once the pull request is created,
if not included automatically, you must add them manually):
1. Link to a coverage report
1. Link to a passing Homebrew build
1. Link to a passing Linux no-gpu build
1. Link to a passing Linux Nvidia build
1. Link to a passing Windows build
1. Link to a passing ABI/API report if the pull request is targeted at a release branch
# Style
In general, we follow [Google's style guide](https://google.github.io/styleguide/cppguide.html). However, we add in some extras.
1. **This pointer**
> All class attributes and member functions must be accessed using the `this->` pointer. Here is an [example](https://bitbucket.org/osrf/gazebo/src/default/gazebo/physics/Base.cc#cl-40).
1. **Underscore function parameters**
> All function parameters must start with an underscore. Here is an [example](https://bitbucket.org/osrf/gazebo/src/default/gazebo/physics/Base.cc#cl-77).
1. **Do not cuddle braces**
> All braces must be on their own line. Here is an [example](https://bitbucket.org/osrf/gazebo/src/default/gazebo/physics/Base.cc#cl-131).
1. **Multi-line code blocks**
> If a block of code spans multiple lines and is part of a flow control statement, such as an `if`, then it must be wrapped in braces. Here is an [example](https://bitbucket.org/osrf/gazebo/src/default/gazebo/physics/Base.cc#cl-249)
1. **++ operator**
> This occurs mostly in `for` loops. Prefix the `++` operator, which is [slightly more efficient than postfix in some cases](http://programmers.stackexchange.com/questions/59880/avoid-postfix-increment-operator).
1. **PIMPL/Opaque pointer**
> If you are writing a new class, it must use a private data pointer. Here is an [example](https://bitbucket.org/osrf/gazebo/src/default/gazebo/physics/World.hh?at=default#cl-479), and you can read more [here](https://en.wikipedia.org/wiki/Opaque_pointer).
1. **const functions**
> Any class function that does not change a member variable should be marked as `const`. Here is an [example](https://bitbucket.org/osrf/gazebo/src/default/gazebo/physics/Entity.cc?at=default#cl-175).
1. **const parameters**
> All parameters that are not modified by a function should be marked as `const`. This applies to parameters that are passed by reference, pointer, and value. Here is an [example](https://bitbucket.org/osrf/gazebo/src/default/gazebo/physics/Entity.cc?at=default#cl-217).
1. **Pointer and reference variables**
> Place the `*` and `&` next to the variable name, not next to the type. For example: `int &variable` is good, but `int& variable` is not. Here is an [example](https://bitbucket.org/osrf/gazebo/src/default/gazebo/physics/Entity.cc?at=default#cl-217).
1. **Camel case**
> In general, everything should use camel case. Exceptions include SDF element names, and protobuf variable names. Here is an [example](https://bitbucket.org/osrf/gazebo/src/default/gazebo/physics/Entity.cc?at=default#cl-217).
1. **Class function names**
> Class functions must start with a capital letter, and capitalize every word.
>
> `void MyFunction();` : Good
>
> `void myFunction();` : Bad
>
> `void my_function();` : Bad
1. **Variable names**
> Variables must start with a lower case letter, and capitalize every word thereafter.
>
> `int myVariable;` : Good
>
> `int myvariable;` : Bad
>
> `int my_variable;` : Bad
1. **No inline comments**
> `//` style comments may not be placed on the same line as code.
>
> `speed *= 0.44704; // miles per hour to meters per second` : Bad
# Write tests
Gazebo uses [GTest](http://code.google.com/p/googletest) for general testing
and [QTest](http://doc.qt.io/qt-5/qtest.html) for GUI tests. There are a few
kinds of tests:
1. Unit tests: all classes should have corresponding unit tests. These live
in the same directory as the source code and are prefixed by `_TEST`.
1. Integration tests: tests which verify how many classes are working together
go under the `tests/integration` directory.
1. Regression tests: tests which fix broken features go under `tests/regression`
and are prefixed by the issue number on Gazebo's
[issue tracker](https://bitbucket.org/osrf/gazebo/issues).
Before creating a new integration or regression test file, check the current
test files. If one closely matches the topic of your new code, simply add a new
test function to the file. Otherwise, create a new test file, and write your
test.
## Test coverage
The goal is to achieve 100% line and branch coverage. However, this is not
always possible due to complexity issues, analysis tools misreporting
coverage, and time constraints. Try to write as complete of a test suite as
possible, and use the coverage analysis tools as guide. If you have trouble
writing a test please ask for help in your pull request.
Gazebo has a build target called `make coverage` that will produce a code
coverage report. You'll need to have
[lcov](http://ltp.sourceforge.net/coverage/lcov.php) and
[gcov](https://gcc.gnu.org/onlinedocs/gcc/Gcov.html) installed.
1. In your `build` folder, compile Gazebo with `-DCMAKE_BUILD_TYPE=Coverage`
cmake -DCMAKE_BUILD_TYPE=Coverage ..
make
1. Run a single test, or all the tests
make test
1. Make the coverage report
make coverage
1. View the coverage report
firefox coverage/index.html
## Gazebo assertions
### What is an assertion?
An assertion is a check, which always produces a boolean result, that
developers place in the code when they want to be sure that the check is always true.
They are aimed to detect programming errors and should check for impossible
situations in the code. If the assertion check failed, the assertion will
stop the program immediately.
Object * p = some_crazy_function()
GZ_ASSERT(p != nullptr, "Object from some_crazy_function should never point to NULL")
p->run()
### Gazebo runtime assertions: GZ_ASSERT
In Gazebo, the GZ_ASSERT macro is designed to handle all our runtime assertions
GZ_ASSERT(<condition to check>,<fail msg>)
* `condition-to-check`: anything returning a boolean value that should always be true.
* `fail msg`: message displayed when assertion is thrown
### Benefits of the assertions
Some of the benefits of using the assertions:
* They are really useful for not having to debug all kind of weird and unexpected errors, especially in runtime. Exact failure point appears when pass by an assertion.
* Developer can be sure that some conditions are met at a given code point. Code turns more reliable.
* Help to detect not-so-obvious errors (affecting performance, for example)
### Difference between Assertion and Exception
While assertions are aimed at impossible situations generated from
programming errors, exceptions handle all kind of expected errors and unusual
but logically possible code situations.
Lets review an example: suppose we are writing a math library and created a really fast method to calculate square roots but it only works for positive numbers. Something declared as:
double sqrt_for_positives(double number)
So what could be an assertion and what an exception for our revolutionary function?
* Exception: if the incoming number is negative (our function only accepts positive numbers), then we will throw an exception. It was an error by the user but we should consider it a possible scenario since we are offering a public interface.
* Assertion: our square root should never return a negative number. This is not a logical error, it's a completely unexpected error.
## Debugging Gazebo
### Meaningful backtraces
In order to provide meaningful backtraces when using a debugger, such as GDB, Gazebo should be compiled with debugging support enabled. When using the ubuntu packages, especially the ''-dbg'' package, this support is limited but could be enough in most situations. These are the three levels of traces which can be obtained:
**Maximum level of debugging support**
:This only can be obtained compiling Gazebo from source and setting the `CMAKE_BUILD_TYPE` to `DEBUG`. This will set up no optimizations and debugging symbols. It can be required by developers in situations specially difficult to reproduce.
**Medium level of debugging support**
:This can be obtained installing the ''gazebo-dbg'' package (since 1.4 version) or compiling Gazebo from source using the `RELWITHDEBINFO` `CMAKE_BUILD_TYPE` mode (which is the default if no mode is provided). This will set up ''-O2'' optimization level but provide debugging symbols. This should be the default when firing up gdb to explore errors and submit traces.
**Minimum level of debugging support**
:This one is present in package versions previous to 1.4 (no ''-dbg'' package present) or compiling Gazebo from source using the `RELEASE` `CMAKE_BUILD_TYPE` option. This will set up the maximum level of optimizations and does not provide any debugging symbol information. These traces are particularly difficult to follow.

View File

@ -0,0 +1,178 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,15 @@
Software License Agreement (Apache License)
Copyright (C) 2012 Open Source Robotics Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

File diff suppressed because it is too large Load Diff

View File

View File

@ -0,0 +1,49 @@
Gazebo - A dynamic multi-robot simulator
----------------------------------------
This is the Gazebo simulator. Gazebo simulates multiple robots in a
3D environment, with extensive dynamic interaction between objects.
http://gazebosim.org
Installation
------------
Instructions are located at
http://gazebosim.org/install
Gazebo cmake parameters available at configuring time:
- ENABLE_DIAGNOSTICS
If this is defined, it will enable diagnostic timers using the macros
from Diagnostics.hh (see also the standalone diagnostics example):
DIAG_TIMER_START("name")
DIAG_TIMER_LAP("name")
DIAG_TIMER_STOP("name")
- USE_HOST_CFLAGS (bool) [default True]
Check the building machine for supported compiler optimizations and use
them to build the software.
- USE_UPSTREAM_CFLAGS (bool) [default True]
Use the recommended Gazebo developers' compiler optimization flags
- ENABLE_TESTS_COMPILATION (bool) [default True]
Enable or disable the test suite compilation.
- USE_LOW_MEMORY_TEST (bool) [default False]
Use reduced version of tests which need less quantity of RAM memory
available.
- FORCE_GRAPHIC_TESTS_COMPILATION (bool) [default False]
Ignore system checks to look for graphic and acceleration support and
compile all the test suite.
- ENABLE_SCREEN_TESTS (bool) [default True]
Enable or disable tests that need screen rendering to run properly.
Headless machines or machines with the screen turned off should set this to
False
Uninstallation
--------------
Read the uninstallation instructions (http://gazebosim.org/uninstall)
in the online manual for generic instructions. For most people, the following sequence will suffice:
$ make uninstall (inside the gazebo-trunk/build directory)
- Nate Koenig

View File

@ -0,0 +1,77 @@
# FindDRI support
# Probes for a usable X display and, if one exists, for direct rendering
# (DRI) support, checked first via glxinfo and then via pyopengl.
# Results: VALID_DISPLAY and VALID_DRI_DISPLAY (TRUE/FALSE).
message(STATUS "Looking for display capabilities")

# An explicit override enables everything without probing.
if((DEFINED FORCE_GRAPHIC_TESTS_COMPILATION) AND (${FORCE_GRAPHIC_TESTS_COMPILATION}))
  set(VALID_DISPLAY TRUE)
  set(VALID_DRI_DISPLAY TRUE)
  message(STATUS " + Force requested. All capabilities on without checking")
  return()
endif()

set(VALID_DISPLAY FALSE)
set(VALID_DRI_DISPLAY FALSE)
set(CHECKER_ERROR "(no glxinfo or pyopengl)")

if((DEFINED ENV{DISPLAY}) AND NOT ("$ENV{DISPLAY}" STREQUAL ""))
  # xwininfo succeeding confirms the X server is actually reachable.
  execute_process(
    COMMAND xwininfo -root
    RESULT_VARIABLE DISPLAY_FAIL_RESULT
    ERROR_QUIET
    OUTPUT_QUIET)
  if(NOT DISPLAY_FAIL_RESULT)
    message(STATUS " + found a display available ($DISPLAY is set)")
    set(VALID_DISPLAY TRUE)

    # Continue check for DRI support in the display.
    # Locate glxinfo; the variable stays empty when it is absent.
    find_program(GLXINFO glxinfo)
    if(GLXINFO)
      # Another grep pattern: "direct rendering:[[:space:]]*Yes[[:space:]]*"
      execute_process(
        COMMAND glxinfo
        COMMAND grep "direct rendering:[[:space:]]*Yes[[:space:]]*"
        ERROR_QUIET
        OUTPUT_VARIABLE GLX)
      if(GLX)
        message(STATUS " + found a valid dri display (glxinfo)")
        set(VALID_DRI_DISPLAY TRUE)
      else()
        set(CHECKER_ERROR "using glxinfo")
      endif()
    else()
      # Fall back to the bundled pyopengl-based probe.
      execute_process(
        # RESULT_VARIABLE is stored in a FAIL variable since the command
        # returns 0 if ok and 1 on error (the inverse of cmake if())
        COMMAND ${PROJECT_SOURCE_DIR}/tools/gl-test.py
        RESULT_VARIABLE GL_FAIL_RESULT
        ERROR_VARIABLE GL_ERROR
        OUTPUT_QUIET)
      if(NOT GL_FAIL_RESULT)
        message(STATUS " + found a valid dri display (pyopengl)")
        set(VALID_DRI_DISPLAY TRUE)
      else()
        # Check the error string: a missing python module means no pyopengl.
        string(FIND ${GL_ERROR}
               "ImportError: No module named OpenGL.GLUT" ERROR_POS)
        # -1 implies pyopengl is present but the real DRI test failed.
        if("${ERROR_POS}" STREQUAL "-1")
          set(CHECKER_ERROR "using pyopengl")
        endif()
      endif()
    endif()
  endif()
endif()

if(NOT VALID_DISPLAY)
  message(STATUS " ! valid display not found")
endif()
if(NOT VALID_DRI_DISPLAY)
  message(STATUS " ! valid dri display not found ${CHECKER_ERROR}")
endif()

View File

@ -0,0 +1,53 @@
# Verify that the tooling needed for coverage reports is available.
find_program(LCOV_PATH lcov)
find_program(GENHTML_PATH genhtml)

if(NOT CMAKE_COMPILER_IS_GNUCXX)
  # Clang 3.0.0 and newer can also emit gcov data; older versions cannot.
  message(WARNING "Compiler is not GNU gcc! Clang Version 3.0.0 and greater supports gcov as well, but older versions don't.")
  if(NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
    message(FATAL_ERROR "Compiler is not GNU gcc! Aborting...")
  endif()
endif()

# Coverage figures taken from an optimized build can be deceptive.
if(NOT CMAKE_BUILD_TYPE STREQUAL "Debug" AND
   NOT CMAKE_BUILD_TYPE STREQUAL "Coverage")
  message(WARNING "Code coverage results with an optimized (non-Debug) "
                  "build may be misleading")
endif()
# setup_target_for_coverage: define a custom target that resets the lcov
# counters, captures coverage data from the gazebo build tree, and renders
# an HTML report with genhtml.
#
# Param _targetname The name of the new custom make target
# Param _outputname lcov output is generated as _outputname.info
#                   HTML report is generated in _outputname/index.html
function(setup_target_for_coverage _targetname _outputname)
  if(NOT LCOV_PATH)
    message(FATAL_ERROR "lcov not found! Aborting...")
  endif()
  if(NOT GENHTML_PATH)
    message(FATAL_ERROR "genhtml not found! Aborting...")
  endif()

  # Setup target
  add_custom_target(${_targetname}
    # Fixed: was ${PROJECT_BINARY_PATH}, an undefined variable; the build
    # tree lives under ${PROJECT_BINARY_DIR}, as the capture step below uses.
    COMMAND ${LCOV_PATH} -q --zerocounters --directory ${PROJECT_BINARY_DIR}/gazebo
    # Capturing lcov counters and generating report
    COMMAND ${LCOV_PATH} --quiet --no-checksum --directory ${PROJECT_BINARY_DIR}/gazebo --capture --output-file ${_outputname}.info 2>/dev/null
    # Drop third-party and system code from the report.
    COMMAND ${LCOV_PATH} -q --remove ${_outputname}.info 'include/sdformat*' 'include/SimTK*' 'include/simbody*' 'include/simmath*' 'deps/*' 'build/*' 'test/*' '/usr/*' --output-file ${_outputname}.info.cleaned
    COMMAND ${GENHTML_PATH} -q --legend -o ${_outputname} ${_outputname}.info.cleaned
    COMMAND ${CMAKE_COMMAND} -E remove ${_outputname}.info
            ${_outputname}.info.cleaned
    WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
    COMMENT "Resetting code coverage counters to zero.\n"
            "Processing code coverage counters and generating report."
  )

  # Show info where to find the report. And cleanup
  add_custom_command(TARGET ${_targetname} POST_BUILD
    COMMAND ;
    COMMENT "Open ./${_outputname}/index.html in your browser to view the coverage report.")
endfunction() # SETUP_TARGET_FOR_COVERAGE

View File

@ -0,0 +1,68 @@
# Build type link flags.
# Each CMAKE_LINK_FLAGS_<TYPE> below is selected into the global linker
# flag variables at the bottom of this file via CMAKE_BUILD_TYPE_UPPERCASE.
set (CMAKE_LINK_FLAGS_RELEASE " " CACHE INTERNAL "Link flags for release" FORCE)
set (CMAKE_LINK_FLAGS_RELWITHDEBINFO " " CACHE INTERNAL "Link flags for release with debug support" FORCE)
set (CMAKE_LINK_FLAGS_DEBUG " " CACHE INTERNAL "Link flags for debug" FORCE)
set (CMAKE_LINK_FLAGS_PROFILE " -pg" CACHE INTERNAL "Link flags for profile" FORCE)
set (CMAKE_LINK_FLAGS_COVERAGE " --coverage" CACHE INTERNAL "Link flags for static code coverage" FORCE)
set (CMAKE_C_FLAGS_RELEASE "")
if (NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" AND NOT MSVC)
# -s (strip symbols) doesn't work with clang or Visual Studio,
# see alternative in link below:
# http://stackoverflow.com/questions/6085491/gcc-vs-clang-symbol-stripping
set (CMAKE_C_FLAGS_RELEASE "-s")
endif()
if (NOT MSVC)
# C flags are defined first, then copied into the matching CXX variables
# so both languages build with identical options.
set (CMAKE_C_FLAGS_RELEASE " ${CMAKE_C_FLAGS_RELEASE} -O3 -DNDEBUG ${CMAKE_C_FLAGS_ALL}" CACHE INTERNAL "C Flags for release" FORCE)
set (CMAKE_CXX_FLAGS_RELEASE ${CMAKE_C_FLAGS_RELEASE})
set (CMAKE_C_FLAGS_RELWITHDEBINFO " -g -O2 ${CMAKE_C_FLAGS_ALL}" CACHE INTERNAL "C Flags for release with debug support" FORCE)
set (CMAKE_CXX_FLAGS_RELWITHDEBINFO ${CMAKE_C_FLAGS_RELWITHDEBINFO})
set (CMAKE_C_FLAGS_DEBUG " -ggdb3 ${CMAKE_C_FLAGS_ALL}" CACHE INTERNAL "C Flags for debug" FORCE)
set (CMAKE_CXX_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG})
set (CMAKE_C_FLAGS_PROFILE " -fno-omit-frame-pointer -g -pg ${CMAKE_C_FLAGS_ALL}" CACHE INTERNAL "C Flags for profile" FORCE)
set (CMAKE_CXX_FLAGS_PROFILE ${CMAKE_C_FLAGS_PROFILE})
set (CMAKE_C_FLAGS_COVERAGE " -g -O0 -Wformat=2 --coverage -fno-inline ${CMAKE_C_FLAGS_ALL}" CACHE INTERNAL "C Flags for static code coverage" FORCE)
set (CMAKE_CXX_FLAGS_COVERAGE "${CMAKE_C_FLAGS_COVERAGE}")
if (NOT "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
# -fno-default-inline -fno-implicit-inline-templates are unimplemented, cause errors in clang
# -fno-elide-constructors can cause seg-faults in clang 3.4 and earlier
# http://llvm.org/bugs/show_bug.cgi?id=12208
set (CMAKE_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_COVERAGE} -fno-default-inline -fno-implicit-inline-templates -fno-elide-constructors")
endif()
endif()
#####################################
# Set all the global build flags.
# NOTE(review): CMAKE_BUILD_TYPE_UPPERCASE is presumably computed by the
# including file; if it is empty these expand to empty flag sets — confirm.
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE_UPPERCASE}}")
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_LINK_FLAGS_${CMAKE_BUILD_TYPE_UPPERCASE}}")
set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_LINK_FLAGS_${CMAKE_BUILD_TYPE_UPPERCASE}}")
set (CMAKE_MODULE_LINKER_FLAGS "${CMAKE_LINK_FLAGS_${CMAKE_BUILD_TYPE_UPPERCASE}}")
# Compiler-specific C++11 activation.
# Verifies the compiler is recent enough for C++11 and aborts otherwise.
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "GNU")
  execute_process(
    COMMAND ${CMAKE_CXX_COMPILER} -dumpversion
    OUTPUT_VARIABLE GCC_VERSION
    # Fixed: -dumpversion output ends with a newline; strip it so the
    # VERSION_GREATER comparison sees a clean version string.
    OUTPUT_STRIP_TRAILING_WHITESPACE)
  if (NOT (GCC_VERSION VERSION_GREATER 4.7))
    message(FATAL_ERROR "${PROJECT_NAME} requires g++ 4.8 or greater.")
  endif ()
elseif ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
  execute_process(
    COMMAND ${CMAKE_CXX_COMPILER} -dumpversion
    OUTPUT_VARIABLE CLANG_VERSION
    OUTPUT_STRIP_TRAILING_WHITESPACE)
  if (NOT (CLANG_VERSION VERSION_GREATER 3.2))
    message(FATAL_ERROR "${PROJECT_NAME} requires clang 3.3 or greater.")
  endif ()
  if ("${CMAKE_SYSTEM_NAME}" MATCHES "Darwin")
    # Apple's clang needs libc++ for C++11 standard library support.
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
  endif ()
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
  if (NOT MSVC12)
    message(FATAL_ERROR "${PROJECT_NAME} requires VS 2013 or greater.")
  endif()
else ()
  message(FATAL_ERROR "Your C++ compiler does not support C++11.")
endif ()

View File

@ -0,0 +1,5 @@
# Split GAZEBO_VERSION_FULL ("major.minor.patch[suffix]") into its parts.
string(REGEX REPLACE "^([0-9]+).*" "\\1" GAZEBO_MAJOR_VERSION "${GAZEBO_VERSION_FULL}")
string(REGEX REPLACE "^[0-9]+\\.([0-9]+).*" "\\1" GAZEBO_MINOR_VERSION "${GAZEBO_VERSION_FULL}")
string(REGEX REPLACE "^[0-9]+\\.[0-9]+\\.([0-9]+)" "\\1" GAZEBO_REVISION_VERSION "${GAZEBO_VERSION_FULL}")
string(REGEX REPLACE "^[0-9]+\\.[0-9]+\\.[0-9]+(.*)" "\\1" GAZEBO_CANDIDATE_VERSION "${GAZEBO_VERSION_FULL}")

View File

@ -0,0 +1,52 @@
include (FindPkgConfig)
include (${gazebo_cmake_dir}/GazeboUtils.cmake)

########################################
# Find FreeImage: prefer pkg-config metadata, fall back to a manual
# header/library search plus a compiled version probe.
if (PKG_CONFIG_FOUND)
  pkg_check_modules(freeimage freeimage>=${MIN_FREEIMAGE_VERSION})
  if (NOT freeimage_FOUND)
    message (STATUS " freeimage.pc not found, trying freeimage_include_dir and freeimage_library_dir flags.")
  endif ()
endif ()

if (NOT freeimage_FOUND)
  find_path(freeimage_INCLUDE_DIRS FreeImage.h)
  if (NOT freeimage_INCLUDE_DIRS)
    message (STATUS " Looking for FreeImage.h - not found")
    BUILD_ERROR("Missing: Unable to find FreeImage.h")
  else ()
    message (STATUS " Found ${freeimage_INCLUDE_DIRS}/FreeImage.h")
    # Check the FreeImage header for the right version. The probe exits
    # with 1 when the header version is adequate and 0 otherwise (note the
    # inverted sense relative to normal exit codes; tested accordingly).
    set (testFreeImageSource ${CMAKE_CURRENT_BINARY_DIR}/CMakeTmp/test_freeimage.cc)
    # Fixed: compare (major, minor) lexicographically. The old test
    # "MAJOR >= X && MINOR >= Y" wrongly rejected e.g. version 4.0 when
    # 3.15 was the minimum required.
    file (WRITE ${testFreeImageSource}
      "#include <FreeImage.h>\nint main () { if (FREEIMAGE_MAJOR_VERSION > ${FREEIMAGE_MAJOR_VERSION} || (FREEIMAGE_MAJOR_VERSION == ${FREEIMAGE_MAJOR_VERSION} && FREEIMAGE_MINOR_VERSION >= ${FREEIMAGE_MINOR_VERSION})) return 1; else return 0;} \n")
    try_run(FREEIMAGE_RUNS FREEIMAGE_COMPILES ${CMAKE_CURRENT_BINARY_DIR}
      ${testFreeImageSource}
      COMPILE_DEFINITIONS "-I${freeimage_INCLUDE_DIRS}"
      COMPILE_OUTPUT_VARIABLE FREEIMAGE_COMPILE_OUTPUT)
    if (NOT FREEIMAGE_COMPILES)
      # Fixed: the compile result was previously ignored, so a broken
      # header was reported as a version mismatch.
      message (STATUS "${FREEIMAGE_COMPILE_OUTPUT}")
      BUILD_ERROR("Failed to compile the FreeImage version check")
    elseif (NOT FREEIMAGE_RUNS)
      message (STATUS "${FREEIMAGE_COMPILE_OUTPUT}")
      BUILD_ERROR("Invalid FreeImage Version. Requires ${MIN_FREEIMAGE_VERSION}")
    else ()
      message (STATUS " Looking for FreeImage.h - found")
    endif ()
  endif ()

  if (WIN32)
    # Windows debug builds link the debug-suffixed library.
    if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
      find_library(freeimage_LIBRARIES FreeImaged)
    else()
      find_library(freeimage_LIBRARIES FreeImage)
    endif()
  else()
    find_library(freeimage_LIBRARIES freeimage)
  endif()

  if (NOT freeimage_LIBRARIES)
    message (STATUS " Looking for libfreeimage - not found")
    BUILD_ERROR("Missing: Unable to find libfreeimage")
  else ()
    message (STATUS " Looking for libfreeimage - found")
    include_directories(${freeimage_INCLUDE_DIRS})
  endif ()
endif ()

View File

@ -0,0 +1,66 @@
# -*- cmake -*-
# - Find Google perftools
# Find the Google perftools includes and libraries
# This module defines
# GOOGLE_PERFTOOLS_INCLUDE_DIR, where to find heap-profiler.h, etc.
# GOOGLE_PERFTOOLS_FOUND, If false, do not try to use Google perftools.
# also defined for general use are
# TCMALLOC_LIBRARIES, where to find the tcmalloc library.
# STACKTRACE_LIBRARIES, where to find the stacktrace library.
# PROFILER_LIBRARIES, where to find the profiler library.

FIND_PATH(GOOGLE_PERFTOOLS_INCLUDE_DIR google/heap-profiler.h
  /usr/local/include
  /usr/include
)

SET(TCMALLOC_NAMES ${TCMALLOC_NAMES} tcmalloc)
FIND_LIBRARY(TCMALLOC_LIBRARY
  NAMES ${TCMALLOC_NAMES}
  PATHS /usr/lib /usr/local/lib
)

IF (TCMALLOC_LIBRARY AND GOOGLE_PERFTOOLS_INCLUDE_DIR)
  SET(TCMALLOC_LIBRARIES ${TCMALLOC_LIBRARY})
  SET(GOOGLE_PERFTOOLS_FOUND "YES")
ELSE ()
  SET(GOOGLE_PERFTOOLS_FOUND "NO")
ENDIF ()

SET(STACKTRACE_NAMES ${STACKTRACE_NAMES} stacktrace)
FIND_LIBRARY(STACKTRACE_LIBRARY
  # Fixed: previously searched NAMES ${STACKTRACE_LIBRARY} (the empty result
  # variable) instead of the name list, so the library was never found.
  NAMES ${STACKTRACE_NAMES}
  PATHS /usr/lib /usr/local/lib
)
IF (STACKTRACE_LIBRARY AND GOOGLE_PERFTOOLS_INCLUDE_DIR)
  SET(STACKTRACE_LIBRARIES ${STACKTRACE_LIBRARY})
ENDIF ()

SET(PROFILER_NAMES ${PROFILER_NAMES} profiler)
FIND_LIBRARY(PROFILER_LIBRARY
  # Fixed: same copy-paste error as above (was NAMES ${PROFILER_LIBRARY}).
  NAMES ${PROFILER_NAMES}
  PATHS /usr/lib /usr/local/lib
)
IF (PROFILER_LIBRARY AND GOOGLE_PERFTOOLS_INCLUDE_DIR)
  SET(PROFILER_LIBRARIES ${PROFILER_LIBRARY})
ENDIF ()

IF (GOOGLE_PERFTOOLS_FOUND)
  IF (NOT GOOGLE_PERFTOOLS_FIND_QUIETLY)
    # Fixed: report TCMALLOC_LIBRARIES; GOOGLE_PERFTOOLS_LIBRARIES was
    # never set anywhere, so the old message always printed empty.
    MESSAGE(STATUS "Found Google perftools: ${TCMALLOC_LIBRARIES}")
  ENDIF ()
ELSE ()
  IF (GOOGLE_PERFTOOLS_FIND_REQUIRED)
    MESSAGE(FATAL_ERROR "Could not find Google perftools library")
  ENDIF ()
ENDIF ()

MARK_AS_ADVANCED(
  TCMALLOC_LIBRARY
  STACKTRACE_LIBRARY
  PROFILER_LIBRARY
  GOOGLE_PERFTOOLS_INCLUDE_DIR
)

View File

@ -0,0 +1,101 @@
# - Try to find Graphviz cgraph library
# Once done this will define
#
#  GRAPHVIZ_FOUND - system has Graphviz installed
#  GRAPHVIZ_INCLUDE_DIR
#  GRAPHVIZ_GVC_LIBRARY
#  GRAPHVIZ_CGRAPH_LIBRARY
#  GRAPHVIZ_CDT_LIBRARY
#

# Library already cached from a previous configure run: stay quiet.
if(GRAPHVIZ_CGRAPH_LIBRARY)
  set(Graphviz_FIND_QUIETLY TRUE)
endif()

# Let pkg-config suggest search directories for the
# find_path() and find_library() calls below.
if(NOT WIN32)
  find_package(PkgConfig)
  pkg_check_modules(GRAPHVIZ_GVC_PKG libgvc)
  pkg_check_modules(GRAPHVIZ_CGRAPH_PKG libcgraph)
  pkg_check_modules(GRAPHVIZ_CDT_PKG libcdt)
endif()

find_library(GRAPHVIZ_GVC_LIBRARY
  NAMES gvc libgvc
  PATHS /usr/lib /usr/local/lib
  HINTS ${GRAPHVIZ_GVC_PKG_LIBRARY_DIRS} # Generated by pkg-config
)
if(GRAPHVIZ_GVC_LIBRARY)
  set(GRAPHVIZ_GVC_FOUND TRUE)
else()
  # message(STATUS "Could not find libgvc." )
  set(GRAPHVIZ_GVC_FOUND FALSE)
endif()

find_library(GRAPHVIZ_CGRAPH_LIBRARY
  NAMES cgraph libcgraph
  PATHS /usr/lib /usr/local/lib
  HINTS ${GRAPHVIZ_CGRAPH_PKG_LIBRARY_DIRS} # Generated by pkg-config
)
if(GRAPHVIZ_CGRAPH_LIBRARY)
  set(GRAPHVIZ_CGRAPH_FOUND TRUE)
else()
  # message(STATUS "Could not find libcgraph." )
  set(GRAPHVIZ_CGRAPH_FOUND FALSE)
endif()

find_library(GRAPHVIZ_CDT_LIBRARY
  NAMES cdt libcdt
  PATHS /usr/lib /usr/local/lib
  HINTS ${GRAPHVIZ_CDT_PKG_LIBRARY_DIRS} # Generated by pkg-config
)
if(GRAPHVIZ_CDT_LIBRARY)
  set(GRAPHVIZ_CDT_FOUND TRUE)
else()
  # message(STATUS "Could not find libcdt." )
  set(GRAPHVIZ_CDT_FOUND FALSE)
endif()

find_path(GRAPHVIZ_INCLUDE_DIR
  NAMES cgraph.h
  PATHS /usr/include/graphviz /usr/local/include
  HINTS ${GRAPHVIZ_GVC_PKG_INCLUDE_DIRS} # Generated by pkg-config
)
if(NOT GRAPHVIZ_INCLUDE_DIR)
  message(STATUS "Could not find graphviz headers." )
endif()

include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Graphviz_Gvc DEFAULT_MSG GRAPHVIZ_GVC_LIBRARY)
find_package_handle_standard_args(Graphviz_Cgraph DEFAULT_MSG GRAPHVIZ_CGRAPH_LIBRARY)
find_package_handle_standard_args(Graphviz_Cdt DEFAULT_MSG GRAPHVIZ_CDT_LIBRARY)
find_package_handle_standard_args("Graphviz Headers" DEFAULT_MSG GRAPHVIZ_INCLUDE_DIR)

# Only show these variables in the advanced view.
mark_as_advanced(GRAPHVIZ_INCLUDE_DIR GRAPHVIZ_GVC_LIBRARY GRAPHVIZ_CGRAPH_LIBRARY GRAPHVIZ_CDT_LIBRARY)

# Aggregate result: every component must have been located.
if(GRAPHVIZ_INCLUDE_DIR AND
   GRAPHVIZ_CDT_LIBRARY AND
   GRAPHVIZ_CGRAPH_LIBRARY AND
   GRAPHVIZ_GVC_LIBRARY)
  set(GRAPHVIZ_FOUND TRUE)
  set(GRAPHVIZ_LIBRARIES
    ${GRAPHVIZ_CDT_LIBRARY}
    ${GRAPHVIZ_CGRAPH_LIBRARY}
    ${GRAPHVIZ_GVC_LIBRARY})
else()
  set(GRAPHVIZ_FOUND FALSE)
endif()

View File

@ -0,0 +1,55 @@
# Check the OS type.
# Sets one PLAYER_OS_* flag per recognized platform, reports it, and then
# probes the source filesystem for case sensitivity.
# CMake does not distinguish Linux from other Unices.
string(REGEX MATCH "Linux" PLAYER_OS_LINUX ${CMAKE_SYSTEM_NAME})
# Nor *BSD
string(REGEX MATCH "BSD" PLAYER_OS_BSD ${CMAKE_SYSTEM_NAME})
# Or Solaris. I'm seeing a trend, here
string(REGEX MATCH "SunOS" PLAYER_OS_SOLARIS ${CMAKE_SYSTEM_NAME})

# Windows is easy (for once)
if(WIN32)
  set(PLAYER_OS_WIN TRUE BOOL INTERNAL)
endif()

# Apple: tell OS X apart from any other MacOS (pretty unlikely these days).
if(APPLE)
  string(REGEX MATCH "Darwin" PLAYER_OS_OSX ${CMAKE_SYSTEM_NAME})
  if(NOT PLAYER_OS_OSX)
    set(PLAYER_OS_MACOS TRUE BOOL INTERNAL)
  endif()
endif()

# QNX
if(QNXNTO)
  set(PLAYER_OS_QNX TRUE BOOL INTERNAL)
endif()

if(PLAYER_OS_LINUX)
  message(STATUS "Operating system is Linux")
elseif(PLAYER_OS_BSD)
  message(STATUS "Operating system is BSD")
elseif(PLAYER_OS_WIN)
  message(STATUS "Operating system is Windows")
elseif(PLAYER_OS_OSX)
  message(STATUS "Operating system is Apple MacOS X")
elseif(PLAYER_OS_MACOS)
  message(STATUS "Operating system is Apple MacOS (not OS X)")
elseif(PLAYER_OS_QNX)
  message(STATUS "Operating system is QNX")
elseif(PLAYER_OS_SOLARIS)
  message(STATUS "Operating system is Solaris")
else()
  message(STATUS "Operating system is generic Unix")
endif()

#################################################
# Check for non-case-sensitive filesystems
execute_process(COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/tools/case_sensitive_filesystem
                RESULT_VARIABLE FILESYSTEM_CASE_SENSITIVE_RETURN)
if(${FILESYSTEM_CASE_SENSITIVE_RETURN} EQUAL 0)
  set(FILESYSTEM_CASE_SENSITIVE TRUE)
else()
  set(FILESYSTEM_CASE_SENSITIVE FALSE)
endif()

View File

@ -0,0 +1,113 @@
# Check if SSE instructions are available on the machine where
# the project is compiled.
# Sets the cache booleans SSE2_FOUND, SSE3_FOUND, SSSE3_FOUND and
# SSE4_1_FOUND (plus SSE4_2_FOUND on Linux) by grepping the CPU feature
# list exposed by the operating system.
IF(CMAKE_SYSTEM_NAME MATCHES "Linux")
# Linux: parse the flags list in /proc/cpuinfo.
EXEC_PROGRAM(cat ARGS "/proc/cpuinfo" OUTPUT_VARIABLE CPUINFO)
STRING(REGEX REPLACE "^.*(sse2).*$" "\\1" SSE_THERE ${CPUINFO})
STRING(COMPARE EQUAL "sse2" "${SSE_THERE}" SSE2_TRUE)
IF (SSE2_TRUE)
set(SSE2_FOUND true CACHE BOOL "SSE2 available on host")
ELSE (SSE2_TRUE)
set(SSE2_FOUND false CACHE BOOL "SSE2 available on host")
ENDIF (SSE2_TRUE)
# /proc/cpuinfo apparently omits sse3 :(
# The [^s] guard keeps the pattern from matching the "sse3" inside "ssse3".
STRING(REGEX REPLACE "^.*[^s](sse3).*$" "\\1" SSE_THERE ${CPUINFO})
STRING(COMPARE EQUAL "sse3" "${SSE_THERE}" SSE3_TRUE)
IF (NOT SSE3_TRUE)
# Fallback: the Core Duo T2300 supports SSE3 but does not advertise it.
STRING(REGEX REPLACE "^.*(T2300).*$" "\\1" SSE_THERE ${CPUINFO})
STRING(COMPARE EQUAL "T2300" "${SSE_THERE}" SSE3_TRUE)
ENDIF (NOT SSE3_TRUE)
STRING(REGEX REPLACE "^.*(ssse3).*$" "\\1" SSE_THERE ${CPUINFO})
STRING(COMPARE EQUAL "ssse3" "${SSE_THERE}" SSSE3_TRUE)
# SSSE3 implies SSE3 support.
IF (SSE3_TRUE OR SSSE3_TRUE)
set(SSE3_FOUND true CACHE BOOL "SSE3 available on host")
ELSE (SSE3_TRUE OR SSSE3_TRUE)
set(SSE3_FOUND false CACHE BOOL "SSE3 available on host")
ENDIF (SSE3_TRUE OR SSSE3_TRUE)
IF (SSSE3_TRUE)
set(SSSE3_FOUND true CACHE BOOL "SSSE3 available on host")
ELSE (SSSE3_TRUE)
set(SSSE3_FOUND false CACHE BOOL "SSSE3 available on host")
ENDIF (SSSE3_TRUE)
STRING(REGEX REPLACE "^.*(sse4_1).*$" "\\1" SSE_THERE ${CPUINFO})
STRING(COMPARE EQUAL "sse4_1" "${SSE_THERE}" SSE41_TRUE)
IF (SSE41_TRUE)
set(SSE4_1_FOUND true CACHE BOOL "SSE4.1 available on host")
ELSE (SSE41_TRUE)
set(SSE4_1_FOUND false CACHE BOOL "SSE4.1 available on host")
ENDIF (SSE41_TRUE)
STRING(REGEX REPLACE "^.*(sse4_2).*$" "\\1" SSE_THERE ${CPUINFO})
STRING(COMPARE EQUAL "sse4_2" "${SSE_THERE}" SSE42_TRUE)
IF (SSE42_TRUE)
set(SSE4_2_FOUND true CACHE BOOL "SSE4.2 available on host")
ELSE (SSE42_TRUE)
set(SSE4_2_FOUND false CACHE BOOL "SSE4.2 available on host")
ENDIF (SSE42_TRUE)
ELSEIF(CMAKE_SYSTEM_NAME MATCHES "Darwin")
# macOS: query the CPU feature list via sysctl (uppercase feature names).
EXEC_PROGRAM("/usr/sbin/sysctl -n machdep.cpu.features" OUTPUT_VARIABLE
CPUINFO)
# The [^S] guard keeps "SSE2"/"SSE3" from matching inside "SSSE3".
STRING(REGEX REPLACE "^.*[^S](SSE2).*$" "\\1" SSE_THERE ${CPUINFO})
STRING(COMPARE EQUAL "SSE2" "${SSE_THERE}" SSE2_TRUE)
IF (SSE2_TRUE)
set(SSE2_FOUND true CACHE BOOL "SSE2 available on host")
ELSE (SSE2_TRUE)
set(SSE2_FOUND false CACHE BOOL "SSE2 available on host")
ENDIF (SSE2_TRUE)
STRING(REGEX REPLACE "^.*[^S](SSE3).*$" "\\1" SSE_THERE ${CPUINFO})
STRING(COMPARE EQUAL "SSE3" "${SSE_THERE}" SSE3_TRUE)
IF (SSE3_TRUE)
set(SSE3_FOUND true CACHE BOOL "SSE3 available on host")
ELSE (SSE3_TRUE)
set(SSE3_FOUND false CACHE BOOL "SSE3 available on host")
ENDIF (SSE3_TRUE)
STRING(REGEX REPLACE "^.*(SSSE3).*$" "\\1" SSE_THERE ${CPUINFO})
STRING(COMPARE EQUAL "SSSE3" "${SSE_THERE}" SSSE3_TRUE)
IF (SSSE3_TRUE)
set(SSSE3_FOUND true CACHE BOOL "SSSE3 available on host")
ELSE (SSSE3_TRUE)
set(SSSE3_FOUND false CACHE BOOL "SSSE3 available on host")
ENDIF (SSSE3_TRUE)
STRING(REGEX REPLACE "^.*(SSE4.1).*$" "\\1" SSE_THERE ${CPUINFO})
STRING(COMPARE EQUAL "SSE4.1" "${SSE_THERE}" SSE41_TRUE)
IF (SSE41_TRUE)
set(SSE4_1_FOUND true CACHE BOOL "SSE4.1 available on host")
ELSE (SSE41_TRUE)
set(SSE4_1_FOUND false CACHE BOOL "SSE4.1 available on host")
ENDIF (SSE41_TRUE)
ELSEIF(CMAKE_SYSTEM_NAME MATCHES "Windows")
# TODO
set(SSE2_FOUND false CACHE BOOL "SSE2 available on host")
set(SSE3_FOUND false CACHE BOOL "SSE3 available on host")
set(SSSE3_FOUND false CACHE BOOL "SSSE3 available on host")
set(SSE4_1_FOUND false CACHE BOOL "SSE4.1 available on host")
ELSE(CMAKE_SYSTEM_NAME MATCHES "Linux")
# Other systems: conservatively assume only SSE2 is available.
set(SSE2_FOUND true CACHE BOOL "SSE2 available on host")
set(SSE3_FOUND false CACHE BOOL "SSE3 available on host")
set(SSSE3_FOUND false CACHE BOOL "SSSE3 available on host")
set(SSE4_1_FOUND false CACHE BOOL "SSE4.1 available on host")
ENDIF(CMAKE_SYSTEM_NAME MATCHES "Linux")
if(NOT SSE2_FOUND)
MESSAGE(STATUS "Could not find hardware support for SSE2 on this machine.")
endif(NOT SSE2_FOUND)
if(NOT SSE3_FOUND)
MESSAGE(STATUS "Could not find hardware support for SSE3 on this machine.")
endif(NOT SSE3_FOUND)
if(NOT SSSE3_FOUND)
MESSAGE(STATUS "Could not find hardware support for SSSE3 on this machine.")
endif(NOT SSSE3_FOUND)
if(NOT SSE4_1_FOUND)
MESSAGE(STATUS "Could not find hardware support for SSE4.1 on this machine.")
endif(NOT SSE4_1_FOUND)
mark_as_advanced(SSE2_FOUND SSE3_FOUND SSSE3_FOUND SSE4_1_FOUND)

View File

@ -0,0 +1,339 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

View File

@ -0,0 +1,147 @@
#################################################
# VAR: GZ_BUILD_TESTS_EXTRA_EXE_SRCS
# Hack: extra sources to build binaries can be supplied to gz_build_tests in
# the variable GZ_BUILD_TESTS_EXTRA_EXE_SRCS. This variable will be clean up
# at the end of the function
#
# ARG: EXTRA_LIBS
# List extra libraries that the sources should be linked against after the
# EXTRA_LIBS tag. Example:
# gz_build_tests(${test_sources} EXTRA_LIBS ${test_libraries})
#
# gz_build_tests(<sources...> [EXTRA_LIBS <libs...>])
# Create one gtest executable, a ctest entry, a result-file sanity check and
# (optionally) a valgrind memcheck entry per .cc source.  Extra executable
# sources may be injected via GZ_BUILD_TESTS_EXTRA_EXE_SRCS, which is cleared
# on exit so it does not leak into the next invocation.
macro (gz_build_tests)
  # Split ARGN into the test source list and the optional EXTRA_LIBS section.
  set(_append_sources TRUE)
  set(_sources)
  set(_extra_libs)
  foreach(arg ${ARGN})
    if ("${arg}" STREQUAL "EXTRA_LIBS")
      set(_append_sources FALSE)
    else()
      if (_append_sources)
        list(APPEND _sources ${arg})
      else()
        list(APPEND _extra_libs ${arg})
      endif()
    endif()
  endforeach()

  # Build all the tests
  foreach(GTEST_SOURCE_file ${_sources})
    # Fix: escape the dot and anchor at end of string; the old pattern
    # "\.cc" stripped a ".cc" occurring anywhere in the file name.
    string(REGEX REPLACE "\\.cc$" "" BINARY_NAME ${GTEST_SOURCE_file})
    set(BINARY_NAME ${TEST_TYPE}_${BINARY_NAME})
    if(USE_LOW_MEMORY_TESTS)
      add_definitions(-DUSE_LOW_MEMORY_TESTS=1)
    endif(USE_LOW_MEMORY_TESTS)
    add_executable(${BINARY_NAME} ${GTEST_SOURCE_file}
      ${GZ_BUILD_TESTS_EXTRA_EXE_SRCS})

    link_directories(${PROJECT_BINARY_DIR}/test)

    target_link_libraries(${BINARY_NAME}
      gtest
      gtest_main
      ${_extra_libs}
    )

    if (UNIX)
      # gtest uses pthread on UNIX
      target_link_libraries(${BINARY_NAME} pthread)
    endif()

    # Register with ctest, emitting a JUnit-style XML report.
    add_test(${BINARY_NAME} ${CMAKE_CURRENT_BINARY_DIR}/${BINARY_NAME}
      --gtest_output=xml:${CMAKE_BINARY_DIR}/test_results/${BINARY_NAME}.xml)

    set(_env_vars)
    list(APPEND _env_vars "CMAKE_PREFIX_PATH=${CMAKE_BINARY_DIR}:${CMAKE_PREFIX_PATH}")
    list(APPEND _env_vars "GAZEBO_PLUGIN_PATH=${CMAKE_BINARY_DIR}/plugins:${CMAKE_BINARY_DIR}/plugins/events:${CMAKE_BINARY_DIR}/plugins/rest_web")
    list(APPEND _env_vars "GAZEBO_RESOURCE_PATH=${CMAKE_SOURCE_DIR}")
    list(APPEND _env_vars "PATH=${CMAKE_BINARY_DIR}/gazebo:${CMAKE_BINARY_DIR}/tools:$ENV{PATH}")
    # Fix: "$PKG_CONFIG_PATH" is not CMake syntax and was passed through as a
    # literal string; $ENV{PKG_CONFIG_PATH} appends the caller's environment.
    list(APPEND _env_vars "PKG_CONFIG_PATH=${CMAKE_BINARY_DIR}/cmake/pkgconfig:$ENV{PKG_CONFIG_PATH}")

    set_tests_properties(${BINARY_NAME} PROPERTIES
      TIMEOUT 240
      ENVIRONMENT "${_env_vars}")

    # Check that the test produced a result and create a failure if it didn't.
    # Guards against crashed and timed out tests.
    add_test(check_${BINARY_NAME} ${PROJECT_SOURCE_DIR}/tools/check_test_ran.py
      ${CMAKE_BINARY_DIR}/test_results/${BINARY_NAME}.xml)

    if(GAZEBO_RUN_VALGRIND_TESTS AND VALGRIND_PROGRAM)
      add_test(memcheck_${BINARY_NAME} ${VALGRIND_PROGRAM} --leak-check=full
        --error-exitcode=1 --show-leak-kinds=all ${CMAKE_CURRENT_BINARY_DIR}/${BINARY_NAME})
    endif()
  endforeach()

  # Consume the extra-sources hack variable (see header comment above).
  set(GZ_BUILD_TESTS_EXTRA_EXE_SRCS "")
endmacro()
if (VALID_DISPLAY)
  # A display is available: replace the empty gz_build_display_tests stub
  # with a plain forward to the generic test builder.
  macro (gz_build_display_tests)
    gz_build_tests(${ARGV})
  endmacro()

  # gz_build_qt_tests(<sources...>)
  # Build Qt-based GUI tests.  Each source <Name>.cc must have a matching
  # <Name>.hh header; both are moc-processed together with QTestFixture.
  macro (gz_build_qt_tests)
    # Build all the tests
    foreach(QTEST_SOURCE_file ${ARGN})
      # Fix: escape the dot and anchor at end of string; the old pattern
      # ".cc" matched ANY character followed by "cc", anywhere in the name.
      string(REGEX REPLACE "\\.cc$" "" BINARY_NAME ${QTEST_SOURCE_file})
      string(REGEX REPLACE "\\.cc$" ".hh" QTEST_HEADER_file ${QTEST_SOURCE_file})
      set(BINARY_NAME ${TEST_TYPE}_${BINARY_NAME})
      QT4_WRAP_CPP(${BINARY_NAME}_MOC ${QTEST_HEADER_file} ${CMAKE_SOURCE_DIR}/gazebo/gui/QTestFixture.hh)
      add_executable(${BINARY_NAME}
        ${${BINARY_NAME}_MOC} ${QTEST_SOURCE_file} ${CMAKE_SOURCE_DIR}/gazebo/gui/QTestFixture.cc)
      add_dependencies(${BINARY_NAME}
        gazebo_gui
        gazebo_common
        gazebo_math
        gazebo_physics
        gazebo_sensors
        gazebo_rendering
        gazebo_msgs
        gazebo_transport
      )
      target_link_libraries(${BINARY_NAME}
        # gazebo_gui and libgazebo pull in most other gazebo libraries as
        # transitive dependencies.
        libgazebo
        gazebo_gui
        ${QT_QTTEST_LIBRARY}
        ${QT_LIBRARIES}
      )
      # QTest needs an extra -o parameter to write logging information to a file
      add_test(${BINARY_NAME} ${CMAKE_CURRENT_BINARY_DIR}/${BINARY_NAME}
        -xml -o ${CMAKE_BINARY_DIR}/test_results/${BINARY_NAME}.xml)
      set(_env_vars)
      list(APPEND _env_vars "CMAKE_PREFIX_PATH=${CMAKE_BINARY_DIR}:${CMAKE_PREFIX_PATH}")
      list(APPEND _env_vars "GAZEBO_PLUGIN_PATH=${CMAKE_BINARY_DIR}/plugins:${CMAKE_BINARY_DIR}/plugins/events:${CMAKE_BINARY_DIR}/plugins/rest_web")
      list(APPEND _env_vars "GAZEBO_RESOURCE_PATH=${CMAKE_SOURCE_DIR}")
      list(APPEND _env_vars "PATH=${CMAKE_BINARY_DIR}/gazebo:${CMAKE_BINARY_DIR}/tools:$ENV{PATH}")
      # Fix: "$PKG_CONFIG_PATH" was a literal string; read the caller's
      # environment value with $ENV{PKG_CONFIG_PATH} instead.
      list(APPEND _env_vars "PKG_CONFIG_PATH=${CMAKE_BINARY_DIR}/cmake/pkgconfig:$ENV{PKG_CONFIG_PATH}")
      set_tests_properties(${BINARY_NAME} PROPERTIES
        TIMEOUT 240
        ENVIRONMENT "${_env_vars}")
      # Check that the test produced a result and create a failure if it didn't.
      # Guards against crashed and timed out tests.
      add_test(check_${BINARY_NAME} ${PROJECT_SOURCE_DIR}/tools/check_test_ran.py
        ${CMAKE_BINARY_DIR}/test_results/${BINARY_NAME}.xml)
      if(GAZEBO_RUN_VALGRIND_TESTS AND VALGRIND_PROGRAM)
        add_test(memcheck_${BINARY_NAME} ${VALGRIND_PROGRAM} --leak-check=full
          --error-exitcode=1 --show-leak-kinds=all ${CMAKE_CURRENT_BINARY_DIR}/${BINARY_NAME})
      endif()
    endforeach()
  endmacro()
endif()
# DRI rendering is available: make gz_build_dri_tests a thin alias that
# forwards all arguments to the generic gz_build_tests macro, replacing the
# empty stub defined earlier for systems without DRI support.
if (VALID_DRI_DISPLAY)
macro (gz_build_dri_tests)
gz_build_tests(${ARGV})
endmacro()
endif()

View File

@ -0,0 +1,238 @@
################################################################################
# APPEND_TO_CACHED_STRING(_string _cacheDesc [items...])
# Append every extra argument, space-separated, onto the INTERNAL cache
# string named by ${_string}.
macro (APPEND_TO_CACHED_STRING _string _cacheDesc)
  foreach (_piece ${ARGN})
    set (${_string} "${${_string}} ${_piece}" CACHE INTERNAL ${_cacheDesc} FORCE)
  endforeach ()
endmacro ()
################################################################################
# APPEND_TO_CACHED_LIST(_list _cacheDesc [items...])
# Append every extra argument to the INTERNAL cached list named by ${_list}.
macro (APPEND_TO_CACHED_LIST _list _cacheDesc)
  set (_work_list ${${_list}})
  list (APPEND _work_list ${ARGN})
  set (${_list} ${_work_list} CACHE INTERNAL ${_cacheDesc} FORCE)
endmacro ()
###############################################################################
# APPEND_TO_SERVER_SOURCES([sources...])
# Resolve each file name against the current source directory and record it
# in the cached gazeboserver_sources list.
macro (APPEND_TO_SERVER_SOURCES)
  foreach (_src_file ${ARGN})
    APPEND_TO_CACHED_LIST(gazeboserver_sources
                          ${gazeboserver_sources_desc}
                          ${CMAKE_CURRENT_SOURCE_DIR}/${_src_file})
  endforeach ()
endmacro ()
###############################################################################
# APPEND_TO_SERVER_HEADERS([headers...])
# Record each header twice: with its absolute path in gazeboserver_headers
# (for installation) and with the bare file name in
# gazeboserver_headers_nopath.
macro (APPEND_TO_SERVER_HEADERS)
  foreach (_hdr_file ${ARGN})
    APPEND_TO_CACHED_LIST(gazeboserver_headers
                          ${gazeboserver_headers_desc}
                          ${CMAKE_CURRENT_SOURCE_DIR}/${_hdr_file})
    APPEND_TO_CACHED_LIST(gazeboserver_headers_nopath
                          "gazeboserver_headers_nopath"
                          ${_hdr_file})
  endforeach ()
endmacro ()
###############################################################################
# APPEND_TO_SENSOR_SOURCES([sources...])
# Resolve each file name against the current source directory and record it
# in the cached gazebosensor_sources list.
macro (APPEND_TO_SENSOR_SOURCES)
  foreach (_src_file ${ARGN})
    APPEND_TO_CACHED_LIST(gazebosensor_sources
                          ${gazebosensor_sources_desc}
                          ${CMAKE_CURRENT_SOURCE_DIR}/${_src_file})
  endforeach ()
endmacro ()
###############################################################################
# APPEND_TO_CONTROLLER_SOURCES([sources...])
# Resolve each file name against the current source directory and record it
# in the cached gazebocontroller_sources list.
macro (APPEND_TO_CONTROLLER_SOURCES)
  foreach (_src_file ${ARGN})
    APPEND_TO_CACHED_LIST(gazebocontroller_sources
                          ${gazebocontroller_sources_desc}
                          ${CMAKE_CURRENT_SOURCE_DIR}/${_src_file})
  endforeach ()
endmacro ()
#################################################
# LIST_TO_STRING(_string _list)
# Concatenate the elements of ${_list} into ${_string}; every element is
# preceded by a single space, so the result keeps a leading space.
macro (LIST_TO_STRING _string _list)
  set (${_string})
  foreach (_element ${_list})
    set (${_string} "${${_string}} ${_element}")
  endforeach ()
endmacro ()
#################################################
# BUILD_ERROR([messages...])
# Print each message and accumulate it (tab-prefixed, in the cached
# "build_errors" list) for the end-of-configure summary.
macro (BUILD_ERROR)
  foreach (_err ${ARGN})
    set (msg "\t${_err}")
    message (STATUS ${msg})
    APPEND_TO_CACHED_LIST(build_errors "build errors" ${msg})
  endforeach ()
endmacro ()
#################################################
# BUILD_WARNING([messages...])
# Print each message and accumulate it (tab-prefixed, in the cached
# "build_warnings" list) for the end-of-configure summary.
macro (BUILD_WARNING)
  foreach (_warn ${ARGN})
    set (msg "\t${_warn}")
    message (STATUS ${msg})
    APPEND_TO_CACHED_LIST(build_warnings "build warning" ${msg})
  endforeach ()
endmacro ()
#################################################
# gz_add_library(<name> [sources...]): add a library target linked against
# the project-wide ${general_libraries} list.
macro (gz_add_library _name)
# Not defining STATIC or SHARED will use BUILD_SHARED_LIBS variable
add_library(${_name} ${ARGN})
target_link_libraries (${_name} ${general_libraries})
endmacro ()
#################################################
# gz_add_executable(<name> [sources...]): add an executable target linked
# against the project-wide ${general_libraries} list.
macro (gz_add_executable _name)
add_executable(${_name} ${ARGN})
target_link_libraries (${_name} ${general_libraries})
endmacro ()
#################################################
# gz_install_includes(<subdir> [headers...]): install the given headers under
# ${INCLUDE_INSTALL_DIR}/<subdir> as part of the "headers" component.
macro (gz_install_includes _subdir)
install(FILES ${ARGN} DESTINATION ${INCLUDE_INSTALL_DIR}/${_subdir} COMPONENT headers)
endmacro()
#################################################
# gz_install_library(<target>): stamp the library with the gazebo
# SOVERSION/VERSION and install it to ${LIB_INSTALL_DIR} ("shlib" component).
macro (gz_install_library _name)
set_target_properties(${_name} PROPERTIES SOVERSION ${GAZEBO_MAJOR_VERSION} VERSION ${GAZEBO_VERSION_FULL})
install (TARGETS ${_name} DESTINATION ${LIB_INSTALL_DIR} COMPONENT shlib)
endmacro ()
#################################################
# gz_install_executable(<target>): stamp the executable with the full gazebo
# version and install it to ${BIN_INSTALL_DIR}.
macro (gz_install_executable _name)
set_target_properties(${_name} PROPERTIES VERSION ${GAZEBO_VERSION_FULL})
install (TARGETS ${_name} DESTINATION ${BIN_INSTALL_DIR})
endmacro ()
#################################################
# gz_setup_unix: UNIX build defaults.
macro (gz_setup_unix)
# Using dynamic linking in UNIX by default
set(BUILD_SHARED_LIBS TRUE)
endmacro()
#################################################
# gz_setup_windows: Windows/MSVC build defaults (static gazebo libs, dynamic
# MSVC runtime and boost, common preprocessor defines).
macro (gz_setup_windows)
# Using static linking in Windows by default
set(BUILD_SHARED_LIBS FALSE)
add_definitions(-DBUILDING_STATIC_LIBS -DWIN32_LEAN_AND_MEAN)
# Needed for the M_PI constant
add_definitions(-D_USE_MATH_DEFINES)
# Don't pull in the Windows min/max macros
add_definitions(-DNOMINMAX)
# Use static libraries for FREEIMAGE
add_definitions(-DFREEIMAGE_LIB)
# Use dynamic linking for boost
add_definitions(-DBOOST_ALL_DYN_LINK)
# And force linking to MSVC dynamic runtime
# NOTE(review): /MD(d) is smuggled through add_definitions; on modern CMake
# the CMAKE_MSVC_RUNTIME_LIBRARY abstraction would be the proper mechanism.
if ("${CMAKE_BUILD_TYPE_UPPERCASE}" STREQUAL "DEBUG")
add_definitions("/MDd")
else()
add_definitions("/MD")
endif()
# And we want exceptions
add_definitions("/EHsc")
if (MSVC AND CMAKE_SIZEOF_VOID_P EQUAL 8)
# Not needed if a proper 64-bit cmake generator (-G "...Win64") is passed;
# enabled as a second measure to work around
# http://www.cmake.org/Bug/print_bug_page.php?bug_id=11240
set(CMAKE_SHARED_LINKER_FLAGS "/machine:x64")
endif()
endmacro()
#################################################
# gz_setup_apple: map the Darwin kernel version (CMAKE_SYSTEM_VERSION) to a
# MAC_OS_X_VERSION define, pick the matching C++ standard library for
# pkg-config consumers, and relax shared-library symbol resolution.
macro (gz_setup_apple)
# NOTE MacOSX provides different system versions than CMake is parsing.
# The following table lists the most recent OSX versions
# 9.x.x = Mac OSX Leopard (10.5)
# 10.x.x = Mac OSX Snow Leopard (10.6)
# 11.x.x = Mac OSX Lion (10.7)
# 12.x.x = Mac OSX Mountain Lion (10.8)
if (${CMAKE_SYSTEM_VERSION} LESS 10)
add_definitions(-DMAC_OS_X_VERSION=1050)
elseif (${CMAKE_SYSTEM_VERSION} GREATER 10 AND ${CMAKE_SYSTEM_VERSION} LESS 11)
add_definitions(-DMAC_OS_X_VERSION=1060)
elseif (${CMAKE_SYSTEM_VERSION} GREATER 11 AND ${CMAKE_SYSTEM_VERSION} LESS 12)
add_definitions(-DMAC_OS_X_VERSION=1070)
elseif (${CMAKE_SYSTEM_VERSION} GREATER 12 OR ${CMAKE_SYSTEM_VERSION} EQUAL 12)
add_definitions(-DMAC_OS_X_VERSION=1080)
else ()
# NOTE(review): a version comparing exactly equal to 10 or 11 falls through
# to this 0 branch because GREATER/LESS are strict -- confirm intended.
add_definitions(-DMAC_OS_X_VERSION=0)
endif ()
# libstdc++ used on 10.8 and earlier
# libc++ after that
if (${CMAKE_SYSTEM_VERSION} LESS 13)
set (APPLE_PKGCONFIG_LIBS "${APPLE_PKGCONFIG_LIBS} -lstdc++")
else()
set (APPLE_PKGCONFIG_LIBS "${APPLE_PKGCONFIG_LIBS} -lc++")
endif()
# Let undefined symbols in shared libraries be resolved at load time.
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-undefined -Wl,dynamic_lookup")
endmacro()
# This should be migrated to a finer-grained solution based on set_property
# APPEND of directory properties; that is present in cmake 2.8.8 while the
# targeted (precise) version is 2.8.7.
link_directories(${PROJECT_BINARY_DIR}/test)
include_directories("${PROJECT_SOURCE_DIR}/test/gtest/include")
#################################################
# Enable tests compilation by default
if (NOT DEFINED ENABLE_TESTS_COMPILATION)
set (ENABLE_TESTS_COMPILATION True)
endif()
# Define the testing macros as no-ops here; GazeboTestUtils.cmake (included
# below when ENABLE_TESTS_COMPILATION is true) redefines the real versions.
macro (gz_build_tests)
endmacro()
macro (gz_build_qt_tests)
endmacro()
macro (gz_build_display_tests)
endmacro()
macro (gz_build_dri_tests)
endmacro()
if (ENABLE_TESTS_COMPILATION)
include (${gazebo_cmake_dir}/GazeboTestUtils.cmake)
endif()
#################################################
# filter_valid_compiler_flags([flags...])
# Probe each candidate flag with CHECK_CXX_COMPILER_FLAG (result cached as
# R<flag>) and append the accepted ones to the VALID_CXX_FLAGS string.
# Based on work of Florent Lamiraux, Thomas Moulard, JRL, CNRS/AIST.
include(CheckCXXCompilerFlag)
macro(filter_valid_compiler_flags)
  foreach(_candidate ${ARGN})
    CHECK_CXX_COMPILER_FLAG(${_candidate} R${_candidate})
    if(${R${_candidate}})
      set(VALID_CXX_FLAGS "${VALID_CXX_FLAGS} ${_candidate}")
    endif()
  endforeach()
endmacro()

View File

@ -0,0 +1,28 @@
# Translate the capability flags detected by FindSSE.cmake into compiler
# options, accumulated in CMAKE_C_FLAGS_ALL (a project-level flag string;
# presumably merged into the real compiler flags elsewhere -- confirm).
include (${gazebo_cmake_dir}/FindSSE.cmake)
if (SSE2_FOUND)
set (CMAKE_C_FLAGS_ALL "-msse -msse2 ${CMAKE_C_FLAGS_ALL}")
if (NOT APPLE)
# -mfpmath=sse is skipped on Apple; the reason is not recorded here.
set (CMAKE_C_FLAGS_ALL "-mfpmath=sse ${CMAKE_C_FLAGS_ALL}")
endif()
endif()
if (SSE3_FOUND)
set (CMAKE_C_FLAGS_ALL "-msse3 ${CMAKE_C_FLAGS_ALL}")
endif()
if (SSSE3_FOUND)
set (CMAKE_C_FLAGS_ALL "-mssse3 ${CMAKE_C_FLAGS_ALL}")
endif()
# SSE4 is opt-in via the ENABLE_SSE4 option and still gated on detection.
if (ENABLE_SSE4)
message(STATUS "\nSSE4 will be enabled if system supports it.\n")
if (SSE4_1_FOUND)
set (CMAKE_C_FLAGS_ALL "-msse4.1 ${CMAKE_C_FLAGS_ALL}")
endif()
if (SSE4_2_FOUND)
set (CMAKE_C_FLAGS_ALL "-msse4.2 ${CMAKE_C_FLAGS_ALL}")
endif()
else()
message(STATUS "\nSSE4 disabled.\n")
endif()

View File

@ -0,0 +1,34 @@
# ADD_MANPAGE_TARGET: create the aggregate "man" custom target and arrange
# for man pages to be built during installation.
macro(ADD_MANPAGE_TARGET)
# It is not possible to add a dependency to target 'install';
# run hard-coded 'make man' when 'make install' is invoked
# NOTE(review): this assumes a Makefile generator; "make" is hard-coded.
install(CODE "EXECUTE_PROCESS(COMMAND make man)")
add_custom_target(man)
endmacro(ADD_MANPAGE_TARGET)
# roffman(<name> <section>): gzip <srcdir>/<name>.<section>.roff, hook the
# result to the "man" target and install it into share/man/man<section>.
# Defined as a no-op when gzip is unavailable so callers need no guards.
find_program(GZIP gzip)
if (NOT GZIP)
BUILD_WARNING ("gzip not found, manpages won't be generated")
macro(roffman MANFILE)
endmacro(roffman)
else (NOT GZIP)
message (STATUS "Looking for gzip to generate manpages - found")
macro(roffman _source _section)
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${_source}.${_section}.gz
COMMAND ${GZIP} -c ${CMAKE_CURRENT_SOURCE_DIR}/${_source}.${_section}.roff
> ${CMAKE_CURRENT_BINARY_DIR}/${_source}.${_section}.gz
)
set(MANPAGE_TARGET "man-${_source}")
add_custom_target(${MANPAGE_TARGET}
DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${_source}.${_section}.gz)
# NOTE(review): requires the "man" target (ADD_MANPAGE_TARGET) to exist
# before roffman is called.
add_dependencies(man ${MANPAGE_TARGET})
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/${_source}.${_section}.gz
DESTINATION share/man/man${_section}
)
endmacro(roffman _source _section)
endif(NOT GZIP)

View File

@ -0,0 +1,52 @@
#
# Based on work of Emmanuel Roullit <emmanuel@netsniff-ng.org>
# Copyright 2009, 2012 Emmanuel Roullit.
# Subject to the GPL, version 2.
#
# Manpage generation from ronn markdown sources.  Requires both the 'ronn'
# converter and 'gzip'; when either is missing, manpage() degrades to a
# no-op and MANPAGES_SUPPORT is set to FALSE.
FIND_PROGRAM(RONN ronn)
FIND_PROGRAM(GZIP gzip)
IF (NOT RONN OR NOT GZIP)
  IF (NOT RONN)
    BUILD_WARNING ("ronn not found, manpages won't be generated")
  ENDIF(NOT RONN)
  IF (NOT GZIP)
    BUILD_WARNING ("gzip not found, manpages won't be generated")
  ENDIF(NOT GZIP)
  # empty macro
  MACRO(manpage MANFILE)
  ENDMACRO(manpage)
  SET (MANPAGES_SUPPORT FALSE)
ELSE (NOT RONN OR NOT GZIP)
  MESSAGE (STATUS "Looking for ronn to generate manpages - found")
  SET (MANPAGES_SUPPORT TRUE)
  # manpage(<basename> <section>): run ronn on
  # <srcdir>/<basename>.<section>.ronn, gzip the result in the binary tree,
  # hook it into the aggregate 'man' target (created by ADD_MANPAGE_TARGET)
  # and install it under share/man/man<section>.
  MACRO(manpage RONNFILE SECTION)
    SET(RONNFILE_FULL_PATH ${CMAKE_CURRENT_SOURCE_DIR}/${RONNFILE})
    ADD_CUSTOM_COMMAND(
      OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${RONNFILE}.${SECTION}
      # Depend on the actual .ronn input read by the COMMAND below.  The
      # previous "DEPENDS ${RONNFILE}" named a file that never exists, so
      # edits to the .ronn source did not retrigger the conversion.
      DEPENDS ${RONNFILE_FULL_PATH}.${SECTION}.ronn
      COMMAND ${RONN}
      ARGS -r --pipe ${RONNFILE_FULL_PATH}.${SECTION}.ronn
      > ${CMAKE_CURRENT_BINARY_DIR}/${RONNFILE}.${SECTION}
    )
    ADD_CUSTOM_COMMAND(
      OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${RONNFILE}.${SECTION}.gz
      DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${RONNFILE}.${SECTION}
      COMMAND ${GZIP} -c ${CMAKE_CURRENT_BINARY_DIR}/${RONNFILE}.${SECTION}
      > ${CMAKE_CURRENT_BINARY_DIR}/${RONNFILE}.${SECTION}.gz
    )
    SET(MANPAGE_TARGET "man-${RONNFILE}")
    ADD_CUSTOM_TARGET(${MANPAGE_TARGET} DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${RONNFILE}.${SECTION}.gz)
    ADD_DEPENDENCIES(man ${MANPAGE_TARGET})
    INSTALL(
      FILES ${CMAKE_CURRENT_BINARY_DIR}/${RONNFILE}.${SECTION}.gz
      DESTINATION share/man/man${SECTION}
    )
  ENDMACRO(manpage RONNFILE SECTION)
ENDIF(NOT RONN OR NOT GZIP)

View File

@ -0,0 +1,731 @@
include (${gazebo_cmake_dir}/GazeboUtils.cmake)
include (CheckCXXSourceCompiles)

include (${gazebo_cmake_dir}/FindOS.cmake)
include (FindPkgConfig)
include (${gazebo_cmake_dir}/FindFreeimage.cmake)

# Query protobuf's version up front; PROTOBUF_VERSION is compared below.
# OUTPUT_STRIP_TRAILING_WHITESPACE removes pkg-config's trailing newline,
# which would otherwise corrupt the version comparison.
execute_process(COMMAND pkg-config --modversion protobuf
  OUTPUT_VARIABLE PROTOBUF_VERSION
  RESULT_VARIABLE protobuf_modversion_failed
  OUTPUT_STRIP_TRAILING_WHITESPACE)
########################################
# 1. can not use BUILD_TYPE_PROFILE is defined after include this module
# 2. TODO: TOUPPER is a hack until we fix the build system to support standard build names
# Profile builds require google-perftools (heap profiler); abort otherwise.
if (CMAKE_BUILD_TYPE)
string(TOUPPER ${CMAKE_BUILD_TYPE} TMP_CMAKE_BUILD_TYPE)
if ("${TMP_CMAKE_BUILD_TYPE}" STREQUAL "PROFILE")
include (${gazebo_cmake_dir}/FindGooglePerfTools.cmake)
if (GOOGLE_PERFTOOLS_FOUND)
message(STATUS "Include google-perftools")
else()
BUILD_ERROR("Need google/heap-profiler.h (libgoogle-perftools-dev) tools to compile in Profile mode")
endif()
endif()
endif()
########################################
# Use VERSION_LESS for dotted version strings: the plain LESS operator does
# an integer comparison and mis-handles values such as "2.10.0".
if (PROTOBUF_VERSION VERSION_LESS 2.3.0)
  BUILD_ERROR("Incorrect version: Gazebo requires protobuf version 2.3.0 or greater")
endif()
########################################
# The Google Protobuf library for message generation + serialization
find_package(Protobuf REQUIRED)
# The checks below are defensive: REQUIRED already aborts when the package
# is missing, but the protoc executable/library may still be absent.
if (NOT PROTOBUF_FOUND)
BUILD_ERROR ("Missing: Google Protobuf (libprotobuf-dev)")
endif()
if (NOT PROTOBUF_PROTOC_EXECUTABLE)
BUILD_ERROR ("Missing: Google Protobuf Compiler (protobuf-compiler)")
endif()
if (NOT PROTOBUF_PROTOC_LIBRARY)
BUILD_ERROR ("Missing: Google Protobuf Compiler Library (libprotoc-dev)")
endif()
# Select the debug or release protobuf libraries to match the build type.
if ("${CMAKE_BUILD_TYPE}" STREQUAL "Debug")
set (GZ_PROTOBUF_LIBRARY ${PROTOBUF_LIBRARY_DEBUG})
set (GZ_PROTOBUF_PROTOC_LIBRARY ${PROTOBUF_PROTOC_LIBRARY_DEBUG})
else()
set (GZ_PROTOBUF_LIBRARY ${PROTOBUF_LIBRARY})
set (GZ_PROTOBUF_PROTOC_LIBRARY ${PROTOBUF_PROTOC_LIBRARY})
endif()
########################################
# OpenGL is mandatory for rendering; record its include dir and libraries
# on the cached gazeboserver_* lists consumed by the rest of the build.
include (FindOpenGL)
if (NOT OPENGL_FOUND)
BUILD_ERROR ("Missing: OpenGL")
set (HAVE_OPENGL FALSE)
else ()
if (OPENGL_INCLUDE_DIR)
APPEND_TO_CACHED_LIST(gazeboserver_include_dirs
${gazeboserver_include_dirs_desc}
${OPENGL_INCLUDE_DIR})
set (HAVE_OPENGL TRUE)
add_definitions(-DHAVE_OPENGL)
endif()
if (OPENGL_LIBRARIES)
APPEND_TO_CACHED_LIST(gazeboserver_link_libs
${gazeboserver_link_libs_desc}
${OPENGL_LIBRARIES})
endif()
endif ()
########################################
# OpenAL is an optional dependency; record its availability in the
# HAVE_OPENAL cache flag and warn when audio support must be disabled.
include (FindOpenAL)
if (OPENAL_FOUND)
  set (HAVE_OPENAL ON CACHE BOOL "HAVE OpenAL" FORCE)
else ()
  set (HAVE_OPENAL OFF CACHE BOOL "HAVE OpenAL" FORCE)
  BUILD_WARNING ("OpenAL not found, audio support will be disabled.")
endif ()
########################################
# HDF5 (C and C++ bindings) is an optional dependency; only report the
# probe's outcome here.
find_package(HDF5 COMPONENTS C CXX)
if (HDF5_FOUND)
  message(STATUS "HDF5 Found")
else ()
  BUILD_WARNING("HDF5 not found")
endif ()
########################################
# Find packages
# In Visual Studio we use configure.bat to trick all path cmake
# variables so let's consider that as a replacement for pkgconfig
# Pretend pkg-config succeeded so the big PKG_CONFIG_FOUND branch below
# still runs on MSVC.
if (MSVC)
set (PKG_CONFIG_FOUND TRUE)
endif()
if (PKG_CONFIG_FOUND)
# libcurl is mandatory (used for model database downloads).
pkg_check_modules(CURL libcurl)
if (NOT CURL_FOUND)
BUILD_ERROR ("Missing: libcurl. Required for connection to model database.")
endif()
# Optional gperftools CPU profiler: prefer pkg-config, fall back to a raw
# library search; when present, it is linked into Profile builds via
# CMAKE_LINK_FLAGS_PROFILE.
pkg_check_modules(PROFILER libprofiler)
if (PROFILER_FOUND)
set (CMAKE_LINK_FLAGS_PROFILE "-Wl,--no-as-needed -lprofiler -Wl,--as-needed ${CMAKE_LINK_FLAGS_PROFILE}" CACHE INTERNAL "Link flags for profile")
else ()
find_library(PROFILER profiler)
if (PROFILER)
message (STATUS "Looking for libprofiler - found")
set (CMAKE_LINK_FLAGS_PROFILE "-Wl,--no-as-needed -lprofiler -Wl,--as-needed ${CMAKE_LINK_FLAGS_PROFILE}" CACHE INTERNAL "Link flags for profile")
else()
message (STATUS "Looking for libprofiler - not found")
endif()
endif()
# Optional tcmalloc, same pkg-config-then-manual-search strategy.
pkg_check_modules(TCMALLOC libtcmalloc)
if (TCMALLOC_FOUND)
set (CMAKE_LINK_FLAGS_PROFILE "${CMAKE_LINK_FLAGS_PROFILE} -Wl,--no-as-needed -ltcmalloc -Wl,--no-as-needed"
CACHE INTERNAL "Link flags for profile" FORCE)
else ()
find_library(TCMALLOC tcmalloc)
if (TCMALLOC)
message (STATUS "Looking for libtcmalloc - found")
set (CMAKE_LINK_FLAGS_PROFILE "${CMAKE_LINK_FLAGS_PROFILE} -ltcmalloc"
CACHE INTERNAL "Link flags for profile" FORCE)
else ()
message (STATUS "Looking for libtcmalloc - not found")
endif()
endif ()
#################################################
# Find Simbody
# NOTE(review): SimTK_INSTALL_PREFIX is presumably supplied on the command
# line or by the environment when Simbody lives in a non-standard prefix;
# confirm where it is set.
set(SimTK_INSTALL_DIR ${SimTK_INSTALL_PREFIX})
#list(APPEND CMAKE_MODULE_PATH ${SimTK_INSTALL_PREFIX}/share/cmake)
find_package(Simbody)
if (Simbody_FOUND)
set (HAVE_SIMBODY TRUE)
else()
BUILD_WARNING ("Simbody not found, for simbody physics engine option, please install libsimbody-dev.")
set (HAVE_SIMBODY FALSE)
endif()
#################################################
# Find DART (optional physics engine backend).
find_package(DARTCore 4.3.3 QUIET)
if (DARTCore_FOUND)
message (STATUS "Looking for DARTCore - found")
set (HAVE_DART TRUE)
else()
message (STATUS "Looking for DARTCore - not found")
BUILD_WARNING ("DART not found, for dart physics engine option, please install libdart-core4-dev.")
set (HAVE_DART FALSE)
endif()
#################################################
# Find tinyxml. Only debian distributions package tinyxml with a pkg-config
# Use pkg_check_modules and fallback to manual detection
# (needed, at least, for MacOS)
# Use system installation on UNIX and Apple, and internal copy on Windows
if (UNIX OR APPLE)
message (STATUS "Using system tinyxml.")
set (USE_EXTERNAL_TINYXML True)
elseif(WIN32)
message (STATUS "Using internal tinyxml.")
set (USE_EXTERNAL_TINYXML False)
add_definitions(-DTIXML_USE_STL)
else()
message (STATUS "Unknown platform, unable to configure tinyxml.")
BUILD_ERROR("Unknown platform")
endif()
if (USE_EXTERNAL_TINYXML)
pkg_check_modules(tinyxml tinyxml)
if (NOT tinyxml_FOUND)
# Manual fallback: look for the header and library directly.
find_path (tinyxml_INCLUDE_DIRS tinyxml.h ${tinyxml_INCLUDE_DIRS} ENV CPATH)
find_library(tinyxml_LIBRARIES NAMES tinyxml)
set (tinyxml_FAIL False)
if (NOT tinyxml_INCLUDE_DIRS)
message (STATUS "Looking for tinyxml headers - not found")
set (tinyxml_FAIL True)
endif()
if (NOT tinyxml_LIBRARIES)
message (STATUS "Looking for tinyxml library - not found")
set (tinyxml_FAIL True)
endif()
endif()
if (tinyxml_FAIL)
message (STATUS "Looking for tinyxml.h - not found")
BUILD_ERROR("Missing: tinyxml")
endif()
# NOTE(review): unlike the tinyxml2 branch below, the discovered
# include/library dirs are not registered here - presumably consumed
# elsewhere via the tinyxml_* variables; verify.
else()
# Needed in WIN32 since in UNIX the flag is added in the code installed
message (STATUS "Skipping search for tinyxml")
set (tinyxml_INCLUDE_DIRS "")
set (tinyxml_LIBRARIES "")
set (tinyxml_LIBRARY_DIRS "")
endif()
#################################################
# Find tinyxml2. Only debian distributions package tinyxml with a pkg-config
# Use pkg_check_modules and fallback to manual detection
# (needed, at least, for MacOS)
# Use system installation on UNIX and Apple, and internal copy on Windows
if (UNIX OR APPLE)
message (STATUS "Using system tinyxml2.")
set (USE_EXTERNAL_TINYXML2 True)
elseif(WIN32)
message (STATUS "Using internal tinyxml2.")
set (USE_EXTERNAL_TINYXML2 False)
else()
message (STATUS "Unknown platform, unable to configure tinyxml2.")
BUILD_ERROR("Unknown platform")
endif()
if (USE_EXTERNAL_TINYXML2)
pkg_check_modules(tinyxml2 tinyxml2)
if (NOT tinyxml2_FOUND)
# Manual fallback: look for the header and library directly.
find_path (tinyxml2_INCLUDE_DIRS tinyxml2.h ${tinyxml2_INCLUDE_DIRS} ENV CPATH)
find_library(tinyxml2_LIBRARIES NAMES tinyxml2)
set (tinyxml2_FAIL False)
if (NOT tinyxml2_INCLUDE_DIRS)
message (STATUS "Looking for tinyxml2 headers - not found")
set (tinyxml2_FAIL True)
endif()
if (NOT tinyxml2_LIBRARIES)
message (STATUS "Looking for tinyxml2 library - not found")
set (tinyxml2_FAIL True)
endif()
if (NOT tinyxml2_LIBRARY_DIRS)
message (STATUS "Looking for tinyxml2 library dirs - not found")
set (tinyxml2_FAIL True)
endif()
endif()
if (tinyxml2_FAIL)
message (STATUS "Looking for tinyxml2.h - not found")
BUILD_ERROR("Missing: tinyxml2")
else()
include_directories(${tinyxml2_INCLUDE_DIRS})
link_directories(${tinyxml2_LIBRARY_DIRS})
endif()
else()
# Needed in WIN32 since in UNIX the flag is added in the code installed
message (STATUS "Skipping search for tinyxml2")
set (tinyxml2_INCLUDE_DIRS "")
set (tinyxml2_LIBRARIES "")
set (tinyxml2_LIBRARY_DIRS "")
endif()
# libtar is mandatory on non-Windows platforms; both header and library
# must be present.
if (NOT WIN32)
#################################################
# Find libtar.
find_path (libtar_INCLUDE_DIRS libtar.h)
find_library(libtar_LIBRARIES tar)
set (LIBTAR_FOUND True)
if (NOT libtar_INCLUDE_DIRS)
message (STATUS "Looking for libtar.h - not found")
set (LIBTAR_FOUND False)
else ()
message (STATUS "Looking for libtar.h - found")
include_directories(${libtar_INCLUDE_DIRS})
endif ()
if (NOT libtar_LIBRARIES)
message (STATUS "Looking for libtar.so - not found")
set (LIBTAR_FOUND False)
else ()
message (STATUS "Looking for libtar.so - found")
endif ()
if (NOT LIBTAR_FOUND)
BUILD_ERROR("Missing: libtar")
endif()
else()
set(libtar_LIBRARIES "")
endif()
#################################################
# Find TBB (mandatory).  TBB_PKG_CONFIG records whether the dependency can
# be expressed via pkg-config in the generated .pc files; it is cleared
# when TBB was located by a manual library search instead.
pkg_check_modules(TBB tbb)
set (TBB_PKG_CONFIG "tbb")
if (NOT TBB_FOUND)
message(STATUS "TBB not found, attempting to detect manually")
set (TBB_PKG_CONFIG "")
find_library(tbb_library tbb ENV LD_LIBRARY_PATH)
if (tbb_library)
set(TBB_FOUND true)
set(TBB_LIBRARIES ${tbb_library})
else (tbb_library)
BUILD_ERROR ("Missing: TBB - Threading Building Blocks")
endif(tbb_library)
endif (NOT TBB_FOUND)
#################################################
# Find OGRE
# On Windows, we assume that all the OGRE* defines are passed in manually
# to CMake.
if (NOT WIN32)
execute_process(COMMAND pkg-config --modversion OGRE
OUTPUT_VARIABLE OGRE_VERSION)
# pkg-config output ends with a newline; strip it before parsing.
string(REPLACE "\n" "" OGRE_VERSION ${OGRE_VERSION})
# Split "major.minor.patch" into its components, then rebuild a clean
# OGRE_VERSION from them.
string (REGEX REPLACE "^([0-9]+).*" "\\1"
OGRE_MAJOR_VERSION "${OGRE_VERSION}")
string (REGEX REPLACE "^[0-9]+\\.([0-9]+).*" "\\1"
OGRE_MINOR_VERSION "${OGRE_VERSION}")
string (REGEX REPLACE "^[0-9]+\\.[0-9]+\\.([0-9]+).*" "\\1"
OGRE_PATCH_VERSION ${OGRE_VERSION})
set(OGRE_VERSION
${OGRE_MAJOR_VERSION}.${OGRE_MINOR_VERSION}.${OGRE_PATCH_VERSION})
endif()
# Optional OGRE RT shader system component; when present it seeds the
# ogre_* accumulator variables extended by the checks below and enables
# GPU shaders via INCLUDE_RTSHADER.
pkg_check_modules(OGRE-RTShaderSystem
OGRE-RTShaderSystem>=${MIN_OGRE_VERSION})
if (OGRE-RTShaderSystem_FOUND)
set(ogre_ldflags ${OGRE-RTShaderSystem_LDFLAGS})
set(ogre_include_dirs ${OGRE-RTShaderSystem_INCLUDE_DIRS})
set(ogre_libraries ${OGRE-RTShaderSystem_LIBRARIES})
set(ogre_library_dirs ${OGRE-RTShaderSystem_LIBRARY_DIRS})
set(ogre_cflags ${OGRE-RTShaderSystem_CFLAGS})
set (INCLUDE_RTSHADER ON CACHE BOOL "Enable GPU shaders")
else ()
set (INCLUDE_RTSHADER OFF CACHE BOOL "Enable GPU shaders")
endif ()
# The core OGRE package is mandatory; accumulate its flags onto the ogre_*
# variables shared with the optional components.
pkg_check_modules(OGRE OGRE>=${MIN_OGRE_VERSION})
# There are some runtime problems to solve with ogre-1.9.
# Please read gazebo issues: 994, 995
if (NOT OGRE_FOUND)
  # Fixed project URL in the error message (was misspelled "orge3d.org").
  BUILD_ERROR("Missing: Ogre3d version >=${MIN_OGRE_VERSION}(http://www.ogre3d.org)")
else ()
  set(ogre_ldflags ${ogre_ldflags} ${OGRE_LDFLAGS})
  set(ogre_include_dirs ${ogre_include_dirs} ${OGRE_INCLUDE_DIRS})
  set(ogre_libraries ${ogre_libraries};${OGRE_LIBRARIES})
  set(ogre_library_dirs ${ogre_library_dirs} ${OGRE_LIBRARY_DIRS})
  set(ogre_cflags ${ogre_cflags} ${OGRE_CFLAGS})
endif ()
# Optional OGRE Terrain and Overlay components: append their flags to the
# same ogre_* accumulators when available.
pkg_check_modules(OGRE-Terrain OGRE-Terrain)
if (OGRE-Terrain_FOUND)
set(ogre_ldflags ${ogre_ldflags} ${OGRE-Terrain_LDFLAGS})
set(ogre_include_dirs ${ogre_include_dirs} ${OGRE-Terrain_INCLUDE_DIRS})
set(ogre_libraries ${ogre_libraries};${OGRE-Terrain_LIBRARIES})
set(ogre_library_dirs ${ogre_library_dirs} ${OGRE-Terrain_LIBRARY_DIRS})
set(ogre_cflags ${ogre_cflags} ${OGRE-Terrain_CFLAGS})
endif()
pkg_check_modules(OGRE-Overlay OGRE-Overlay)
if (OGRE-Overlay_FOUND)
set(ogre_ldflags ${ogre_ldflags} ${OGRE-Overlay_LDFLAGS})
set(ogre_include_dirs ${ogre_include_dirs} ${OGRE-Overlay_INCLUDE_DIRS})
set(ogre_libraries ${ogre_libraries};${OGRE-Overlay_LIBRARIES})
set(ogre_library_dirs ${ogre_library_dirs} ${OGRE-Overlay_LIBRARY_DIRS})
set(ogre_cflags ${ogre_cflags} ${OGRE-Overlay_CFLAGS})
endif()
# Publish the accumulated include dirs in the cache for other modules.
set (OGRE_INCLUDE_DIRS ${ogre_include_dirs}
CACHE INTERNAL "Ogre include path")
# Also find OGRE's plugin directory, which is provided in its .pc file as the
# `plugindir` variable. We have to call pkg-config manually to get it.
# On Windows, we assume that all the OGRE* defines are passed in manually
# to CMake.
if (NOT WIN32)
  # OUTPUT_STRIP_TRAILING_WHITESPACE removes pkg-config's trailing newline,
  # which would otherwise be embedded in OGRE_PLUGINDIR (and in the
  # setup.sh generated from it).
  execute_process(COMMAND pkg-config --variable=plugindir OGRE
                  OUTPUT_VARIABLE _pkgconfig_invoke_result
                  RESULT_VARIABLE _pkgconfig_failed
                  OUTPUT_STRIP_TRAILING_WHITESPACE)
  if(_pkgconfig_failed)
    BUILD_WARNING ("Failed to find OGRE's plugin directory.  The build will succeed, but gazebo will likely fail to run.")
  else()
    # This variable will be substituted into cmake/setup.sh.in
    set (OGRE_PLUGINDIR ${_pkgconfig_invoke_result})
  endif()
endif()
########################################
# Check and find libccd (if needed)
pkg_check_modules(CCD ccd>=1.4)
if (NOT CCD_FOUND)
# Fall back to the vendored copy built in deps/libccd.
message(STATUS "Using internal copy of libccd")
set(CCD_INCLUDE_DIRS "${CMAKE_SOURCE_DIR}/deps/libccd/include")
set(CCD_LIBRARY_DIRS "${CMAKE_BINARY_DIR}/deps/libccd")
set(CCD_LIBRARIES gazebo_ccd)
endif()
########################################
# Find OpenAL
# pkg_check_modules(OAL openal)
# if (NOT OAL_FOUND)
# BUILD_WARNING ("Openal not found. Audio capabilities will be disabled.")
# set (HAVE_OPENAL FALSE)
# else (NOT OAL_FOUND)
# set (HAVE_OPENAL TRUE)
# endif ()
########################################
# Find libswscale format
# The four ffmpeg libraries below are all optional; HAVE_FFMPEG is only set
# when every one of them is present.
pkg_check_modules(libswscale libswscale)
if (NOT libswscale_FOUND)
BUILD_WARNING ("libswscale not found. Audio-video capabilities will be disabled.")
else()
include_directories(${libswscale_INCLUDE_DIRS})
link_directories(${libswscale_LIBRARY_DIRS})
endif ()
########################################
# Find AV format
pkg_check_modules(libavformat libavformat)
if (NOT libavformat_FOUND)
BUILD_WARNING ("libavformat not found. Audio-video capabilities will be disabled.")
else()
include_directories(${libavformat_INCLUDE_DIRS})
link_directories(${libavformat_LIBRARY_DIRS})
endif ()
########################################
# Find avcodec
pkg_check_modules(libavcodec libavcodec)
if (NOT libavcodec_FOUND)
BUILD_WARNING ("libavcodec not found. Audio-video capabilities will be disabled.")
else()
include_directories(${libavcodec_INCLUDE_DIRS})
link_directories(${libavcodec_LIBRARY_DIRS})
endif ()
########################################
# Find avutil
pkg_check_modules(libavutil libavutil)
if (NOT libavutil_FOUND)
BUILD_WARNING ("libavutil not found. Audio-video capabilities will be disabled.")
endif ()
# ffmpeg support requires the complete set of libraries.
if (libavutil_FOUND AND libavformat_FOUND AND libavcodec_FOUND AND libswscale_FOUND)
set (HAVE_FFMPEG TRUE)
else ()
set (HAVE_FFMPEG FALSE)
endif ()
########################################
# Find Player (optional robot-server integration; controls whether the
# gazebo player plugin is built).
pkg_check_modules(PLAYER playercore>=3.0 playerc++ playerwkb)
if (NOT PLAYER_FOUND)
set (INCLUDE_PLAYER OFF CACHE BOOL "Build gazebo plugin for player")
BUILD_WARNING ("Player not found, gazebo plugin for player will not be built.")
else (NOT PLAYER_FOUND)
set (INCLUDE_PLAYER ON CACHE BOOL "Build gazebo plugin for player")
set (PLAYER_INCLUDE_DIRS ${PLAYER_INCLUDE_DIRS} CACHE INTERNAL
"Player include directory")
set (PLAYER_LINK_DIRS ${PLAYER_LINK_DIRS} CACHE INTERNAL
"Player link directory")
set (PLAYER_LINK_LIBS ${PLAYER_LIBRARIES} CACHE INTERNAL
"Player libraries")
endif ()
########################################
# Find GNU Triangulation Surface Library (optional; enables CSG support).
pkg_check_modules(gts gts)
if (gts_FOUND)
message (STATUS "Looking for GTS - found")
set (HAVE_GTS TRUE)
else ()
set (HAVE_GTS FALSE)
BUILD_WARNING ("GNU Triangulation Surface library not found - Gazebo will not have CSG support.")
endif ()
#################################################
# Find bullet
# First and preferred option is to look for bullet standard pkgconfig,
# so check it first. if it is not present, check for the OSRF
# custom bullet2.82.pc file
pkg_check_modules(BULLET bullet>=2.82)
if (NOT BULLET_FOUND)
pkg_check_modules(BULLET bullet2.82>=2.82)
endif()
if (BULLET_FOUND)
set (HAVE_BULLET TRUE)
add_definitions( -DLIBBULLET_VERSION=${BULLET_VERSION} )
else()
set (HAVE_BULLET FALSE)
add_definitions( -DLIBBULLET_VERSION=0.0 )
BUILD_WARNING ("Bullet > 2.82 not found, for bullet physics engine option, please install libbullet2.82-dev.")
endif()
# When bullet is missing, BULLET_VERSION is empty and this comparison is
# simply false.
if (BULLET_VERSION VERSION_GREATER 2.82)
add_definitions( -DLIBBULLET_VERSION_GT_282 )
endif()
########################################
# Find libusb (optional; gates USB peripheral support and, below, Oculus).
pkg_check_modules(libusb-1.0 libusb-1.0)
if (NOT libusb-1.0_FOUND)
BUILD_WARNING ("libusb-1.0 not found. USB peripherals support will be disabled.")
set (HAVE_USB OFF CACHE BOOL "HAVE USB" FORCE)
else()
message (STATUS "Looking for libusb-1.0 - found. USB peripherals support enabled.")
set (HAVE_USB ON CACHE BOOL "HAVE USB" FORCE)
include_directories(${libusb-1.0_INCLUDE_DIRS})
link_directories(${libusb-1.0_LIBRARY_DIRS})
endif ()
#################################################
# Find Oculus SDK.  Requires USB support in addition to the SDK itself.
pkg_check_modules(OculusVR OculusVR)
if (HAVE_USB AND OculusVR_FOUND)
message (STATUS "Oculus Rift support enabled.")
set (HAVE_OCULUS ON CACHE BOOL "HAVE OCULUS" FORCE)
include_directories(SYSTEM ${OculusVR_INCLUDE_DIRS})
link_directories(${OculusVR_LIBRARY_DIRS})
else ()
BUILD_WARNING ("Oculus Rift support will be disabled.")
set (HAVE_OCULUS OFF CACHE BOOL "HAVE OCULUS" FORCE)
endif()
else (PKG_CONFIG_FOUND)
set (BUILD_GAZEBO OFF CACHE INTERNAL "Build Gazebo" FORCE)
BUILD_ERROR ("Error: pkg-config not found")
endif ()
########################################
# Find SDFormat (mandatory; SDF file reading/writing).
set (SDFormat_MIN_VERSION 4.1.0)
find_package(SDFormat ${SDFormat_MIN_VERSION})
if (NOT SDFormat_FOUND)
message (STATUS "Looking for SDFormat - not found")
BUILD_ERROR ("Missing: SDF version >=${SDFormat_MIN_VERSION}. Required for reading and writing SDF files.")
else()
message (STATUS "Looking for SDFormat - found")
endif()
########################################
# Find QT
find_package(Qt4 COMPONENTS QtCore QtGui QtXml QtXmlPatterns REQUIRED)
# Defensive: REQUIRED already makes CMake abort when Qt4 is missing.
if (NOT QT4_FOUND)
BUILD_ERROR("Missing: Qt4")
endif()
########################################
# Find Boost, if not specified manually
include(FindBoost)
find_package(Boost ${MIN_BOOST_VERSION} REQUIRED thread signals system filesystem program_options regex iostreams date_time)
# Defensive: REQUIRED already makes CMake abort when Boost is missing.
if (NOT Boost_FOUND)
set (BUILD_GAZEBO OFF CACHE INTERNAL "Build Gazebo" FORCE)
BUILD_ERROR ("Boost not found. Please install thread signals system filesystem program_options regex date_time boost version ${MIN_BOOST_VERSION} or higher.")
endif()
########################################
# Find libdl (required for runtime plugin loading).
find_path(libdl_include_dir dlfcn.h /usr/include /usr/local/include)
if (NOT libdl_include_dir)
message (STATUS "Looking for dlfcn.h - not found")
BUILD_ERROR ("Missing libdl: Required for plugins.")
# Fallback value so later code keeps configuring; BUILD_ERROR presumably
# records the failure rather than aborting immediately - verify.
set (libdl_include_dir /usr/include)
else (NOT libdl_include_dir)
message (STATUS "Looking for dlfcn.h - found")
endif ()
find_library(libdl_library dl /usr/lib /usr/local/lib)
if (NOT libdl_library)
message (STATUS "Looking for libdl - not found")
BUILD_ERROR ("Missing libdl: Required for plugins.")
set(libdl_library "")
else (NOT libdl_library)
message (STATUS "Looking for libdl - found")
endif ()
########################################
# Find gdal (optional; enables digital elevation terrain support).
include (FindGDAL)
if (NOT GDAL_FOUND)
message (STATUS "Looking for libgdal - not found")
BUILD_WARNING ("GDAL not found, Digital elevation terrains support will be disabled.")
set (HAVE_GDAL OFF CACHE BOOL "HAVE GDAL" FORCE)
else ()
message (STATUS "Looking for libgdal - found")
set (HAVE_GDAL ON CACHE BOOL "HAVE GDAL" FORCE)
endif ()
########################################
# Include man pages stuff
# Ronn2Man defines manpage(), Man defines roffman(); add_manpage_target()
# creates the aggregate 'man' target both macros hook into.
include (${gazebo_cmake_dir}/Ronn2Man.cmake)
include (${gazebo_cmake_dir}/Man.cmake)
add_manpage_target()
########################################
# Space Navigator (3Dconnexion) support: both the spnav library and its
# header must be present for HAVE_SPNAV to be set.
find_file(SPNAV_HEADER NAMES spnav.h)
find_library(SPNAV_LIBRARY NAMES spnav)
if (NOT SPNAV_LIBRARY OR NOT SPNAV_HEADER)
  message(STATUS "Looking for libspnav and spnav.h - not found")
  set(HAVE_SPNAV FALSE)
else()
  message(STATUS "Looking for libspnav and spnav.h - found")
  set(HAVE_SPNAV TRUE)
endif()
########################################
# Find xsltproc, which is used by tools/check_test_ran.py
find_program(XSLTPROC xsltproc)
# When not found, XSLTPROC holds "XSLTPROC-NOTFOUND", which fails the
# EXISTS test below and triggers the warning.
if (NOT EXISTS ${XSLTPROC})
BUILD_WARNING("xsltproc not found. The check_test_ran.py script will cause tests to fail.")
endif()
########################################
# Find uuid-dev Library
#pkg_check_modules(uuid uuid)
#if (uuid_FOUND)
# message (STATUS "Looking for uuid - found")
# set (HAVE_UUID TRUE)
#else ()
# set (HAVE_UUID FALSE)
# BUILD_WARNING ("uuid-dev library not found - Gazebo will not have uuid support.")
#endif ()
########################################
# Find uuid
# - In UNIX we use uuid library.
# - In Windows the native RPC call, no dependency needed.
if (UNIX)
pkg_check_modules(uuid uuid)
if (uuid_FOUND)
message (STATUS "Looking for uuid - found")
set (HAVE_UUID TRUE)
else ()
set (HAVE_UUID FALSE)
BUILD_WARNING ("uuid-dev library not found - Gazebo will not have uuid support.")
endif ()
else()
# Windows: UuidCreate comes from the native RPC runtime, so support is
# unconditionally available.
message (STATUS "Using Windows RPC UuidCreate function")
set (HAVE_UUID TRUE)
endif()
########################################
# Find graphviz (optional; powers the model editor's schematic view).
include (${gazebo_cmake_dir}/FindGraphviz.cmake)
if (NOT GRAPHVIZ_FOUND)
message (STATUS "Looking for libgraphviz-dev - not found")
BUILD_WARNING ("Graphviz not found, Model editor's schematic view will be disabled.")
set (HAVE_GRAPHVIZ OFF CACHE BOOL "HAVE GRAPHVIZ" FORCE)
else ()
message (STATUS "Looking for libgraphviz-dev - found")
set (HAVE_GRAPHVIZ ON CACHE BOOL "HAVE GRAPHVIZ" FORCE)
endif ()
########################################
# Find ignition math in unix platforms
# In Windows we expect a call from configure.bat script with the paths
if (NOT WIN32)
find_package(ignition-math2 2.4 QUIET)
if (NOT ignition-math2_FOUND)
message(STATUS "Looking for ignition-math2-config.cmake - not found")
BUILD_ERROR ("Missing: Ignition math2 library.")
else()
message(STATUS "Looking for ignition-math2-config.cmake - found")
endif()
endif()
########################################
# Find the Ignition_Transport library: prefer version 2, fall back to
# version 1; warn (build continues) when neither is available.
# In Windows we expect a call from configure.bat script with the paths
if (NOT WIN32)
  find_package(ignition-transport2 QUIET)
  if (NOT ignition-transport2_FOUND)
    find_package(ignition-transport1 QUIET)
    if (NOT ignition-transport1_FOUND)
      # Balanced the parenthesis in the package-hint message.
      BUILD_WARNING ("Missing: Ignition Transport (libignition-transport-dev or libignition-transport2-dev)")
    endif()
  endif()
  if (ignition-transport2_FOUND OR ignition-transport1_FOUND)
    set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${IGNITION-TRANSPORT_CXX_FLAGS}")
    include_directories(${IGNITION-TRANSPORT_INCLUDE_DIRS})
    link_directories(${IGNITION-TRANSPORT_LIBRARY_DIRS})
  endif()
endif()
################################################
# Find Valgrind for checking memory leaks in the
# tests
# Use the PATHS keyword (plural): the bare "PATH" is not a find_program
# keyword, so the VALGRIND_ROOT hint directory was never actually searched.
find_program(VALGRIND_PROGRAM NAMES valgrind PATHS ${VALGRIND_ROOT}/bin)
option(GAZEBO_RUN_VALGRIND_TESTS "Run gazebo tests with Valgrind" FALSE)
mark_as_advanced(GAZEBO_RUN_VALGRIND_TESTS)
if (GAZEBO_RUN_VALGRIND_TESTS AND NOT VALGRIND_PROGRAM)
  BUILD_WARNING("valgrind not found. Memory check tests will be skipped.")
endif()
########################################
# Find QWT (QT graphing library)
#find_path(QWT_INCLUDE_DIR NAMES qwt.h PATHS
# /usr/include
# /usr/local/include
# "$ENV{LIB_DIR}/include"
# "$ENV{INCLUDE}"
# PATH_SUFFIXES qwt-qt4 qwt qwt5
# )
#
#find_library(QWT_LIBRARY NAMES qwt qwt6 qwt5 PATHS
# /usr/lib
# /usr/local/lib
# "$ENV{LIB_DIR}/lib"
# "$ENV{LIB}/lib"
# )
#
#if (QWT_INCLUDE_DIR AND QWT_LIBRARY)
# set(HAVE_QWT TRUE)
#endif (QWT_INCLUDE_DIR AND QWT_LIBRARY)
#
#if (HAVE_QWT)
# if (NOT QWT_FIND_QUIETLY)
# message(STATUS "Found Qwt: ${QWT_LIBRARY}")
# endif (NOT QWT_FIND_QUIETLY)
#else ()
# if (QWT_FIND_REQUIRED)
# BUILD_WARNING ("Could not find libqwt-dev. Plotting features will be disabled.")
# endif (QWT_FIND_REQUIRED)
#endif ()

View File

@ -0,0 +1,21 @@
# Uninstall script template (configured by CMake, @VAR@ substitutions):
# removes every file recorded in install_manifest.txt, honoring DESTDIR.
if(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt")
  message(FATAL_ERROR "Cannot find install manifest: @CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt")
endif()

# The manifest lists one installed path per line; convert it to a CMake list.
file(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files)
string(REGEX REPLACE "\n" ";" files "${files}")
foreach(file ${files})
  message(STATUS "Uninstalling $ENV{DESTDIR}${file}")
  # IS_SYMLINK is checked separately because EXISTS follows symlinks and
  # is false for dangling links.
  if(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}")
    # execute_process replaces the long-deprecated exec_program().
    execute_process(
      COMMAND "@CMAKE_COMMAND@" -E remove "$ENV{DESTDIR}${file}"
      RESULT_VARIABLE rm_retval
      )
    if(NOT "${rm_retval}" STREQUAL 0)
      message(FATAL_ERROR "Problem when removing $ENV{DESTDIR}${file}")
    endif()
  else()
    message(STATUS "File $ENV{DESTDIR}${file} does not exist.")
  endif()
endforeach()

View File

@ -0,0 +1,24 @@
# CPack configuration template (configured with @ONLY substitutions from
# the top-level CMakeLists): package identity, maintainer contact, and the
# Debian/RPM generator settings.
set(CPACK_PACKAGE_NAME "@PROJECT_NAME@")
set(CPACK_PACKAGE_VENDOR "gazebosim.org")
set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "A 3D dynamics simulator for robotics")
set(CPACK_PACKAGE_INSTALL_DIRECTORY "@PROJECT_NAME_LOWER@")
set(CPACK_RESOURCE_FILE_LICENSE "@CMAKE_CURRENT_SOURCE_DIR@/LICENSE")
set(CPACK_RESOURCE_FILE_README "@CMAKE_CURRENT_SOURCE_DIR@/README")
set(CPACK_PACKAGE_DESCRIPTION_FILE "@CMAKE_CURRENT_SOURCE_DIR@/README")
set(CPACK_PACKAGE_MAINTAINER "Nate Koenig <gazebo@gazebosim.org>")
set(CPACK_PACKAGE_CONTACT "Nate Koenig <gazebo@gazebosim.org>")
# Debian-specific settings.
set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE "@DPKG_ARCH@")
set(CPACK_DEBIAN_PACKAGE_DEPENDS "@DEBIAN_PACKAGE_DEPENDS@")
set(CPACK_DEBIAN_PACKAGE_SECTION "devel")
set(CPACK_DEBIAN_PACKAGE_PRIORITY "optional")
set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON)
set(CPACK_DEBIAN_PACKAGE_DESCRIPTION "A 3D dynamics simulator for robotics")
# RPM-specific settings (reuse the Debian architecture/dependency values).
set(CPACK_RPM_PACKAGE_ARCHITECTURE "@DPKG_ARCH@")
set(CPACK_RPM_PACKAGE_REQUIRES "@DEBIAN_PACKAGE_DEPENDS@")
set(CPACK_RPM_PACKAGE_DESCRIPTION "A 3D dynamics simulator for robotics")
set (CPACK_PACKAGE_FILE_NAME "@PROJECT_NAME_LOWER@-@GAZEBO_VERSION_FULL@")
set (CPACK_SOURCE_PACKAGE_FILE_NAME "@PROJECT_NAME_LOWER@-@GAZEBO_VERSION_FULL@")

View File

@ -0,0 +1,138 @@
# Include guard: this package config file may be pulled in more than once.
if (@PKG_NAME@_CONFIG_INCLUDED)
return()
endif()
set(@PKG_NAME@_CONFIG_INCLUDED TRUE)
# Version and install-layout facts exported to consumers.
set(@PKG_NAME@_VERSION @GAZEBO_VERSION@)
set(@PKG_NAME@_MAJOR_VERSION @GAZEBO_MAJOR_VERSION@)
set(@PKG_NAME@_PLUGIN_PATH "@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_LIBDIR@/gazebo-@GAZEBO_MAJOR_VERSION@/plugins")
# The media path contains the location on disk where images,
# materials scripts, shaders, and other related resources are stored.
set(@PKG_NAME@_MEDIA_PATH "@CMAKE_INSTALL_PREFIX@/share/gazebo-@GAZEBO_MAJOR_VERSION@/media")
#################################################
# @PKG_NAME@_PROTO_PATH, @PKG_NAME@_PROTO_INCLUDE_DIRS, and
# @PKG_NAME@_PROTO_LIBRARIES
#
# These three variables allow Gazebo messages to be used in other projects.
#
# The following examples are for demonstration purposes and are
# incomplete. The first example shows how to use a Gazebo message in a
# custom proto file. The second example shows how to run 'protoc' against
# custom proto files that make use Gazebo message definitions. The third
# example shows how to include the correct directory when compiling a library
# or executable that uses your custom messages.
#
# 1. Use a Gazebo message in a custom proto file:
#
# package my.msgs;
# import "vector3d.proto";
#
# message MyMessage
# {
# required gazebo.msgs.Vector3d p = 1;
# }
#
# 2. Run protoc from a CMakeLists.txt to generate your message's
# header and source files:
#
# add_custom_command(
# OUTPUT
# "${proto_filename}.pb.cc"
# "${proto_filename}.pb.h"
# COMMAND protoc
# ARGS --proto_path ${GAZEBO_PROTO_PATH} ${proto_file_out}
# COMMENT "Running C++ protocol buffer compiler on ${proto_filename}"
# VERBATIM)
#
# 3. When compiling your library or executable, make sure to use the following
# in the CMakeLists.txt file:
#
# include_directories(@PKG_NAME@_PROTO_INCLUDE_DIRS)
# target_link_libraries(your_package @PKG_NAME@_PROTO_LIBRARIES)
#
# Installed location of the .proto sources, the compiled message library,
# and the generated headers (see the usage examples in the comment block
# above this section).
set(@PKG_NAME@_PROTO_PATH
"@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_INCLUDEDIR@/gazebo-@GAZEBO_MAJOR_VERSION@/gazebo/msgs/proto")
find_library(gazebo_proto_msgs_lib gazebo_msgs
PATHS "@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_LIBDIR@" NO_DEFAULT_PATH)
list(APPEND @PKG_NAME@_PROTO_LIBRARIES ${gazebo_proto_msgs_lib})
list(APPEND @PKG_NAME@_PROTO_INCLUDE_DIRS
"@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_INCLUDEDIR@/gazebo-@GAZEBO_MAJOR_VERSION@/gazebo/msgs")
# End @PKG_NAME@_PROTO_PATH, @PKG_NAME@_PROTO_INCLUDE_DIRS, and
# @PKG_NAME@_PROTO_LIBRARIES
# Public include/library search paths and raw compiler flags for consumers
# that use the flag variables instead of the dir lists.
list(APPEND @PKG_NAME@_INCLUDE_DIRS @CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_INCLUDEDIR@)
list(APPEND @PKG_NAME@_INCLUDE_DIRS @CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_INCLUDEDIR@/gazebo-@GAZEBO_MAJOR_VERSION@)
list(APPEND @PKG_NAME@_LIBRARY_DIRS @CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_LIBDIR@)
list(APPEND @PKG_NAME@_LIBRARY_DIRS @CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_LIBDIR@/gazebo-@GAZEBO_MAJOR_VERSION@/plugins)
list(APPEND @PKG_NAME@_CFLAGS -I@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_INCLUDEDIR@)
list(APPEND @PKG_NAME@_CFLAGS -I@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_INCLUDEDIR@/gazebo-@GAZEBO_MAJOR_VERSION@)
# Visual Studio enables c++11 support by default
if (NOT MSVC)
list(APPEND @PKG_NAME@_CXX_FLAGS -std=c++11)
endif()
# Clang on macOS needs libc++ explicitly.
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" AND
"${CMAKE_SYSTEM_NAME}" MATCHES "Darwin")
set(@PKG_NAME@_CXX_FLAGS "${@PKG_NAME@_CXX_FLAGS} -stdlib=libc++")
endif ()
# Resolve each exported library name against the install prefix; fail fast
# if the installation is incomplete.
foreach(lib @PKG_LIBRARIES@)
set(onelib "${lib}-NOTFOUND")
find_library(onelib ${lib}
PATHS "@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_LIBDIR@"
NO_DEFAULT_PATH
)
if(NOT onelib)
message(FATAL_ERROR "Library '${lib}' in package @PKG_NAME@ is not installed properly")
endif()
list(APPEND @PKG_NAME@_LIBRARIES ${onelib})
endforeach()
# Get the install prefix for OGRE
execute_process(COMMAND pkg-config --variable=prefix OGRE
OUTPUT_VARIABLE OGRE_INSTALL_PREFIX OUTPUT_STRIP_TRAILING_WHITESPACE)
# Add the OGRE cmake path to CMAKE_MODULE_PATH
set(CMAKE_MODULE_PATH
"${OGRE_INSTALL_PREFIX}/share/OGRE/cmake/modules;${OGRE_INSTALL_PREFIX}/lib/OGRE/cmake;${OGRE_INSTALL_PREFIX}/CMake;${CMAKE_MODULE_PATH}")
# Pull in every transitive dependency recorded at install time and append
# its include dirs and libraries to the exported variables.
foreach(dep @PKG_DEPENDS@)
if(NOT ${dep}_FOUND)
if (${dep} MATCHES "Boost")
find_package(Boost @MIN_BOOST_VERSION@ REQUIRED thread signals system filesystem program_options regex iostreams date_time)
else()
find_package(${dep} REQUIRED)
endif()
endif()
list(APPEND @PKG_NAME@_INCLUDE_DIRS ${${dep}_INCLUDE_DIRS})
# Protobuf needs to be capitalized to match PROTOBUF_LIBRARIES
if (${dep} STREQUAL "Protobuf")
string (TOUPPER ${dep} dep_lib)
else()
set (dep_lib ${dep})
endif()
list(APPEND @PKG_NAME@_LIBRARIES ${${dep_lib}_LIBRARIES})
# When including OGRE, also include the Terrain and Paging components
if (${dep} STREQUAL "OGRE")
list(APPEND GAZEBO_INCLUDE_DIRS ${OGRE_Terrain_INCLUDE_DIRS}
${OGRE_Paging_INCLUDE_DIRS})
list(APPEND GAZEBO_LIBRARIES ${OGRE_Terrain_LIBRARIES}
${OGRE_Paging_LIBRARIES})
endif()
endforeach()
# ignition-math2 is a hard dependency of the installed headers; the
# IGNITION-MATH_* variables are presumably provided by its package config.
find_package(ignition-math2 REQUIRED)
list(APPEND @PKG_NAME@_INCLUDE_DIRS ${IGNITION-MATH_INCLUDE_DIRS})
list(APPEND @PKG_NAME@_LIBRARY_DIRS ${IGNITION-MATH_LIBRARY_DIRS})
list(APPEND @PKG_NAME@_LIBRARIES ${IGNITION-MATH_LIBRARIES})
# Embed an rpath to the plugin directory plus the linker search paths.
list(APPEND @PKG_NAME@_LDFLAGS -Wl,-rpath,@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_LIBDIR@/gazebo-@GAZEBO_MAJOR_VERSION@/plugins)
list(APPEND @PKG_NAME@_LDFLAGS -L@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_LIBDIR@)
list(APPEND @PKG_NAME@_LDFLAGS -L@CMAKE_INSTALL_PREFIX@/@CMAKE_INSTALL_LIBDIR@/gazebo-@GAZEBO_MAJOR_VERSION@/plugins)

View File

@ -0,0 +1,42 @@
/* config.h. Generated by CMake for @PROJECT_NAME@. */
#ifndef _GAZEBO_CONFIG_H_
#define _GAZEBO_CONFIG_H_
/* Version number */
#define GAZEBO_MAJOR_VERSION ${GAZEBO_MAJOR_VERSION}
#define GAZEBO_MINOR_VERSION ${GAZEBO_MINOR_VERSION}
#define GAZEBO_PATCH_VERSION ${GAZEBO_PATCH_VERSION}
#define GAZEBO_VERSION "${GAZEBO_VERSION}"
#define GAZEBO_VERSION_FULL "${GAZEBO_VERSION_FULL}"
#define GAZEBO_VERSION_HEADER "Gazebo multi-robot simulator, version ${GAZEBO_VERSION_FULL}\nCopyright (C) 2012 Open Source Robotics Foundation.\nReleased under the Apache 2 License.\nhttp://gazebosim.org\n\n"
/* Build type selected at configure time (at most one is defined). */
#cmakedefine BUILD_TYPE_PROFILE 1
#cmakedefine BUILD_TYPE_DEBUG 1
#cmakedefine BUILD_TYPE_RELEASE 1
#cmakedefine BUILD_TYPE_COVERAGE 1
/* Optional dependencies and features detected/enabled at configure time. */
#cmakedefine HAVE_OPENAL 1
#cmakedefine HAVE_FFMPEG 1
#cmakedefine ENABLE_SHADOWS 1
#cmakedefine HAVE_BULLET 1
#cmakedefine HAVE_SIMBODY 1
#cmakedefine HAVE_DART 1
#cmakedefine INCLUDE_RTSHADER 1
#cmakedefine HAVE_GTS 1
#cmakedefine HAVE_QWT 1
#cmakedefine ENABLE_DIAGNOSTICS 1
#cmakedefine HAVE_GDAL 1
#cmakedefine HAVE_USB 1
#cmakedefine HAVE_OCULUS 1
#cmakedefine HAVE_SPNAV 1
#cmakedefine HDF5_INSTRUMENT 1
#cmakedefine HAVE_GRAPHVIZ 1
#cmakedefine HAVE_UUID 1
/* Profile builds pull in the gperftools heap checker. */
#ifdef BUILD_TYPE_PROFILE
#include <google/heap-checker.h>
#endif
// ifndef _GAZEBO_CONFIG_H_
#endif

View File

@ -0,0 +1,25 @@
################################################################################
# Find available package generators.
# Included from the top-level CMakeLists.txt before include(CPack).

# DEB: only offered on Linux hosts that have dpkg installed.
if ("${CMAKE_SYSTEM}" MATCHES "Linux")
  find_program(DPKG_PROGRAM dpkg)
  # find_program() sets <VAR>-NOTFOUND on failure, so test the variable's
  # truthiness rather than EXISTS on a possibly-NOTFOUND value.
  if (DPKG_PROGRAM)
    list (APPEND CPACK_GENERATOR "DEB")
  endif()
  # NOTE(review): rpmbuild is detected but its result is never used -- no
  # "RPM" generator is appended. Confirm whether RPM packaging was intended.
  find_program(RPMBUILD_PROGRAM rpmbuild)
endif()

# Source package generators and files excluded from source tarballs.
list (APPEND CPACK_SOURCE_GENERATOR "TBZ2")
list (APPEND CPACK_SOURCE_GENERATOR "ZIP")
list (APPEND CPACK_SOURCE_IGNORE_FILES ";Ogre.log;TODO;/.hg/;.swp$;/build/;.hgtags")

include (InstallRequiredSystemLibraries)

# Runtime dependencies declared for the generated binary packages.
set (DEBIAN_PACKAGE_DEPENDS "libogre-dev, libfreeimage-dev, libqt4-dev, libprotobuf-dev, libprotoc-dev, libtbb2, libboost-all-dev")
set (RPM_PACKAGE_DEPENDS "libogre-dev, libfreeimage-dev, libqt4-dev, libprotobuf-dev, libprotoc-dev, libtbb2, libboost-all-dev")

# Per-generator options are configured into this file from
# cpack_options.cmake.in (see the top-level CMakeLists.txt).
set (GAZEBO_CPACK_CFG_FILE "${PROJECT_BINARY_DIR}/cpack_options.cmake")

View File

@ -0,0 +1,5 @@
<?xml version="1.0"?>
<gazeborc>
<gazeboPath>@CMAKE_INSTALL_PREFIX@/share/gazebo</gazeboPath>
<ogrePath>@OGRE_LIBRARY_PATH@/OGRE</ogrePath>
</gazeborc>

View File

@ -0,0 +1,10 @@
prefix=@CMAKE_INSTALL_PREFIX@
libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@
includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@
Name: gazebo
Description: Gazebo Libraries
Version: @GAZEBO_VERSION_FULL@
Requires: sdformat protobuf @TBB_PKG_CONFIG@ ignition-math2
Libs: -Wl,-rpath,${prefix}/@CMAKE_INSTALL_LIBDIR@/gazebo-@GAZEBO_MAJOR_VERSION@/plugins -L${libdir} -L${prefix}/@CMAKE_INSTALL_LIBDIR@/gazebo-@GAZEBO_MAJOR_VERSION@/plugins -lgazebo_transport -lgazebo_physics -lgazebo_sensors -lgazebo_rendering -lgazebo_gui -lgazebo_client -lgazebo_msgs -lgazebo_math -lgazebo_common -lgazebo @Boost_PKGCONFIG_LIBS@ @APPLE_PKGCONFIG_LIBS@
CFlags: -I${includedir}/gazebo-@GAZEBO_MAJOR_VERSION@ @Boost_PKGCONFIG_CFLAGS@ -std=c++11

View File

@ -0,0 +1,10 @@
prefix=@CMAKE_INSTALL_PREFIX@
libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@
includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@
Name: gazebo
Description: Gazebo Exported ODE Libraries
Version: @GAZEBO_VERSION_FULL@
Requires:
Libs: -Wl,-rpath,${prefix}/@CMAKE_INSTALL_LIBDIR@ -L${prefix}/@CMAKE_INSTALL_LIBDIR@ -lgazebo_ode
CFlags: -I${includedir}/gazebo-@GAZEBO_MAJOR_VERSION@

View File

@ -0,0 +1,10 @@
prefix=@CMAKE_INSTALL_PREFIX@
libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@
includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@
Name: gazebo_transport
Description: Gazebo Transport Library
Version: @GAZEBO_VERSION_FULL@
Requires:
Libs: -L${libdir} -lgazebo_transport -lgazebo_msgs
CFlags: -I${includedir}/gazebo-@GAZEBO_MAJOR_VERSION@

View File

@ -0,0 +1,7 @@
# Environment required to run Gazebo from its install prefix.
# The @...@ tokens are substituted by CMake at configure time; user-provided
# values already in the environment are kept by appending/prepending.
export GAZEBO_MASTER_URI=http://@GAZEBO_DEFAULT_MASTER_HOST@:@GAZEBO_DEFAULT_MASTER_PORT@
export GAZEBO_MODEL_DATABASE_URI=@GAZEBO_MODEL_DATABASE_URI@
export GAZEBO_RESOURCE_PATH=@GAZEBO_RESOURCE_PATH@:${GAZEBO_RESOURCE_PATH}
export GAZEBO_PLUGIN_PATH=@GAZEBO_PLUGIN_PATH@:${GAZEBO_PLUGIN_PATH}
export GAZEBO_MODEL_PATH=@GAZEBO_MODEL_PATH@:${GAZEBO_MODEL_PATH}
# Let the dynamic linker locate the installed Gazebo plugin libraries.
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:@GAZEBO_PLUGIN_PATH@
export OGRE_RESOURCE_PATH=@OGRE_RESOURCE_PATH@

View File

@ -0,0 +1,36 @@
#!/bin/sh
# Upload the generated Doxygen documentation (code API and msgs API) to S3.
# Each upload is previewed with --dry-run and then confirmed interactively.

# Check if the node was configured to use s3cmd
# This is done by running s3cmd --configure
if [ ! -f "${HOME}/.s3cfg" ]; then
  echo "No $HOME/.s3cfg file found. Please configure the software first in your system"
  exit 1
fi

# Make documentation if not built yet
make doc
if [ ! -f "@CMAKE_BINARY_DIR@/doxygen/html/index.html" ]; then
  # Single quotes here are deliberate: backticks inside a double-quoted
  # string would be executed as a command substitution (re-running make doc
  # just to print the message).
  echo 'Documentation not present. Install doxygen, and run "make doc" in the build directory'
  exit 1
fi

# The code API
s3cmd sync @CMAKE_BINARY_DIR@/doxygen/html/* s3://osrf-distributions/gazebo/api/@GAZEBO_VERSION_FULL@/ --dry-run -v
# printf instead of "echo -n": -n is not portable under /bin/sh.
printf "Upload code API(Y/n)? "
read ans
if [ "$ans" = "y" ] || [ "$ans" = "Y" ]; then
  s3cmd sync @CMAKE_BINARY_DIR@/doxygen/html/* s3://osrf-distributions/gazebo/api/@GAZEBO_VERSION_FULL@/ -v
fi

# The msgs API
# Fix: the dry-run used to preview gazebo/api/ while the real upload targets
# gazebo/msg-api/; both now use the msg-api destination.
s3cmd sync @CMAKE_BINARY_DIR@/doxygen_msgs/html/* s3://osrf-distributions/gazebo/msg-api/@GAZEBO_VERSION_FULL@/ --dry-run -v
printf "Upload msg API (Y/n)? "
read ans
if [ "$ans" = "y" ] || [ "$ans" = "Y" ]; then
  s3cmd sync @CMAKE_BINARY_DIR@/doxygen_msgs/html/* s3://osrf-distributions/gazebo/msg-api/@GAZEBO_VERSION_FULL@/ -v
fi

View File

@ -0,0 +1,100 @@
:: This file is a helper for gazebo configuration (cmake) on Windows
::
:: It is designed to work with a workspace detailed in the INSTALL_WIN32.md
:: file. Please follow the instructions there. The workspace layout should be:
::
:: gz-ws/
:: sdformat/
:: ... # all dependencies detailed in the INSTALL file
:: #dep1/
:: #dep2/
:: ...
:: gazebo/
::
:: Usage: cd gz-ws/gazebo/build && ../configure <Release|Debug>

:: Build type defaults to Release; first argument overrides it.
@set build_type=Release
@if not "%1"=="" set build_type=%1
@echo Configuring for build type %build_type%

:: Paths to the prebuilt dependencies relative to the workspace root.
@set BOOST_PATH=%cd%\..\..\boost_1_56_0
@set BOOST_LIBRARY_DIR=%BOOST_PATH%\lib64-msvc-12.0
@set PROTOBUF_PATH=%cd%\..\..\protobuf-2.6.0-win64-vc12
@set CURL_PATH=%cd%\..\..\libcurl-vc12-x64-release-debug-static-ipv6-sspi-winssl\%build_type%
@set CURL_INCLUDE_DIR=%CURL_PATH%\include
@set CURL_LIBRARY_DIR=%CURL_PATH%\lib
@set CURL_LIBRARY_NAME=libcurl_a
@if "%build_type%"=="Debug" set CURL_LIBRARY_NAME=libcurl_a_debug
@set FREEIMAGE_PATH=%cd%\..\..\FreeImage-vc12-x64-release-debug
@set FREEIMAGE_LIBRARY_DIR=%FREEIMAGE_PATH%\x64\%build_type%\DLL
@set FREEIMAGE_INCLUDE_DIR=%FREEIMAGE_PATH%\Source
@set SDFORMAT_PATH=%cd%\..\..\sdformat\build\install\%build_type%
@set IGNITION-MATH_PATH=%cd%\..\..\ign-math\build\install\%build_type%
@set IGNITION-TRANSPORT_PATH=%cd%\..\..\ign-transport\build\install\%build_type%
@set IGNITION-TRANSPORT_CMAKE_PREFIX_PATH=%IGNITION-TRANSPORT_PATH%\lib\cmake\ignition-transport0
@set TBB_PATH=%cd%\..\..\tbb43_20141023oss
@set TBB_LIBRARY_DIR=%TBB_PATH%\lib\intel64\vc12
@set TBB_INCLUDEDIR=%TBB_PATH%\include
@set OGRE_VERSION=1.8.1
@set OGRE_PATH=%cd%\..\..\ogre_src_v1-8-1-vc12-x64-release-debug\build\install\%build_type%
@set OGRE_INCLUDE_DIR=%OGRE_PATH%\include;%OGRE_PATH%\include\OGRE;%OGRE_PATH%\include\OGRE\RTShaderSystem;%OGRE_PATH%\include\OGRE\Terrain;%OGRE_PATH%\include\OGRE\Paging
@set OGRE_LIBRARY_DIR=%OGRE_PATH%\lib\%build_type%
@set OGRE_PLUGIN_DIR=%OGRE_LIBRARY_DIR%\opt
@set OGRE_LIB_SUFFIX=.lib
@if "%build_type%"=="Debug" set OGRE_LIB_SUFFIX=_d.lib
:: NOTE(review): an earlier assignment also listed OgreOverlay but was
:: immediately overwritten by this one; the dead assignment has been removed.
:: Confirm OgreOverlay is really not needed by the build.
@set OGRE_LIBS=%OGRE_LIBRARY_DIR%\OgreMain%OGRE_LIB_SUFFIX%;%OGRE_LIBRARY_DIR%\OgreRTShaderSystem%OGRE_LIB_SUFFIX%;%OGRE_LIBRARY_DIR%\OgreTerrain%OGRE_LIB_SUFFIX%;%OGRE_LIBRARY_DIR%\OgrePaging%OGRE_LIB_SUFFIX%
@set DLFCN_WIN32_PATH=%cd%\..\..\dlfcn-win32-vc12-x64-release-debug\build\install\%build_type%
@set DLFCN_WIN32_LIBRARY_DIR=%DLFCN_WIN32_PATH%\lib
@set DLFCN_WIN32_INCLUDE_DIR=%DLFCN_WIN32_PATH%\include
@set QT4_PATH=C:\Qt\4.8.6\x64\msvc2013
@set QT4_BIN_DIR=%QT4_PATH%\bin
@set IGN_MATH_PATH=%cd%\..\..\ign-math\build\install\%build_type%
@set ZEROMQ_PATH=%cd%\..\..\ZeroMQ-3.2.4
@set CPPZMQ_PATH=%cd%\..\..\cppzmq

:: Make headers/libraries visible to the compiler and the configure checks.
@set INCLUDE=%FREEIMAGE_INCLUDE_DIR%;%TBB_INCLUDEDIR%;%DLFCN_WIN32_INCLUDE_DIR%;%INCLUDE%
@set LIB=%FREEIMAGE_LIBRARY_DIR%;%BOOST_LIBRARY_DIR%;%TBB_LIBRARY_DIR%;%DLFCN_WIN32_LIBRARY_DIR%;%LIB%
@set PATH=%QT4_BIN_DIR%;%PATH%

:: Fix: ZeroMQ/CPPZMQ values below used CMake template syntax (@VAR@) even
:: though this is a plain batch script; they now use %VAR% like every other
:: variable reference in this file.
cmake -G "NMake Makefiles"^
 -DCMAKE_PREFIX_PATH="%SDFORMAT_PATH%;%IGNITION-MATH_PATH%;%IGNITION-TRANSPORT_CMAKE_PREFIX_PATH%"^
 -DUSE_EXTERNAL_TINYXML:BOOL=False^
 -DUSE_EXTERNAL_TINYXML2:BOOL=False^
 -DFREEIMAGE_RUNS=1^
 -DPROTOBUF_SRC_ROOT_FOLDER="%PROTOBUF_PATH%"^
 -DBOOST_ROOT:STRING="%BOOST_PATH%"^
 -DBOOST_LIBRARYDIR:STRING="%BOOST_LIBRARY_DIR%"^
 -DOGRE_FOUND=1^
 -DOGRE-RTShaderSystem_FOUND=1^
 -DOGRE-Terrain_FOUND=1^
 -DOGRE_VERSION=%OGRE_VERSION%^
 -DOGRE_PLUGINDIR="%OGRE_PLUGIN_DIR%"^
 -DOGRE_INCLUDE_DIRS="%OGRE_INCLUDE_DIR%"^
 -DOGRE_LIBRARIES="%OGRE_LIBS%"^
 -DCURL_FOUND=1^
 -DCURL_INCLUDEDIR="%CURL_INCLUDE_DIR%"^
 -DCURL_LIBDIR="%CURL_LIBRARY_DIR%"^
 -DCURL_LIBRARIES="%CURL_LIBRARY_NAME%"^
 -DTBB_FOUND=1^
 -DTBB_INCLUDEDIR="%TBB_INCLUDEDIR%"^
 -DTBB_LIBRARY_DIR="%TBB_LIBRARY_DIR%"^
 -DIGNITION-MATH_INCLUDE_DIRS:STRING="%IGN_MATH_PATH%\include\ignition\math2"^
 -DIGNITION-MATH_LIBRARY_DIRS:STRING="%IGN_MATH_PATH%\lib"^
 -DIGNITION-MATH_LIBRARIES="ignition-math2"^
 -DZeroMQ_ROOT_DIR="%ZEROMQ_PATH%"^
 -DCPPZMQ_HEADER_PATH="%CPPZMQ_PATH%"^
 -DCMAKE_INSTALL_PREFIX="install\%build_type%"^
 -DCMAKE_BUILD_TYPE="%build_type%"^
 -DENABLE_TESTS_COMPILATION:BOOL=False^
 ..

View File

@ -0,0 +1,7 @@
# Third-party dependencies vendored with Gazebo.
add_subdirectory(opende)
# Build the bundled libccd only when no system copy was detected.
if (NOT CCD_FOUND)
add_subdirectory(libccd)
endif()
# ann and fcl are currently not built:
# add_subdirectory(ann)
# add_subdirectory(fcl)

View File

@ -0,0 +1,33 @@
# Build the bundled ANN (Approximate Nearest Neighbor) library as gazebo_ann.
# ANN's optional PQP and SVMLIGHT integrations are disabled.
# NOTE(review): directory-scoped add_definitions/include_directories are kept
# because the project's declared minimum (CMake 2.8.6) predates the target_*
# equivalents (2.8.11+); revisit if the minimum version is raised.
add_definitions(-DUSE_PQP=0)
add_definitions(-DUSE_SVMLIGHT=0)
# Sources; headers are listed too so IDE generators display them.
set (sources
src/ANN.cpp
src/bd_fix_rad_search.cpp
src/bd_pr_search.cpp
src/bd_search.cpp
src/bd_tree.cpp
src/bd_tree.h
src/brute.cpp
src/kd_dump.cpp
src/kd_fix_rad_search.cpp
src/kd_fix_rad_search.h
src/kd_pr_search.cpp
src/kd_pr_search.h
src/kd_search.cpp
src/kd_search.h
src/kd_split.cpp
src/kd_split.h
src/kd_tree.cpp
src/kd_tree.h
src/kd_util.cpp
src/kd_util.h
src/perf.cpp
src/pr_queue.h
src/pr_queue_k.h
)
# SYSTEM suppresses compiler warnings originating in the third-party headers.
include_directories(SYSTEM ${CMAKE_SOURCE_DIR}/deps/ann/include )
gz_add_library(gazebo_ann ${sources})
gz_install_library(gazebo_ann)

View File

@ -0,0 +1,830 @@
//----------------------------------------------------------------------
// File: ANN.h
// Programmer: Sunil Arya and David Mount
// Description: Basic include file for approximate nearest
// neighbor searching.
// Last modified: 01/27/10 (Version 1.1.2)
//----------------------------------------------------------------------
// Copyright (c) 1997-2015 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
// Revision 1.0 04/01/05
// Added copyright and revision information
// Added ANNcoordPrec for coordinate precision.
// Added methods theDim, nPoints, maxPoints, thePoints to ANNpointSet.
// Cleaned up C++ structure for modern compilers
// Revision 1.1 05/03/05
// Added fixed-radius k-NN searching
// Revision 1.1.2 01/27/10
// Fixed minor compilation bugs for new versions of gcc
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// ANN - approximate nearest neighbor searching
// ANN is a library for approximate nearest neighbor searching,
// based on the use of standard and priority search in kd-trees
// and balanced box-decomposition (bbd) trees. Here are some
// references to the main algorithmic techniques used here:
//
// kd-trees:
// Friedman, Bentley, and Finkel, ``An algorithm for finding
// best matches in logarithmic expected time,'' ACM
// Transactions on Mathematical Software, 3(3):209-226, 1977.
//
// Priority search in kd-trees:
// Arya and Mount, ``Algorithms for fast vector quantization,''
// Proc. of DCC '93: Data Compression Conference, eds. J. A.
// Storer and M. Cohn, IEEE Press, 1993, 381-390.
//
// Approximate nearest neighbor search and bbd-trees:
// Arya, Mount, Netanyahu, Silverman, and Wu, ``An optimal
// algorithm for approximate nearest neighbor searching,''
// 5th Ann. ACM-SIAM Symposium on Discrete Algorithms,
// 1994, 573-582.
//----------------------------------------------------------------------
#ifndef ANN_H
#define ANN_H
#ifdef WIN32
//----------------------------------------------------------------------
// For Microsoft Visual C++, externally accessible symbols must be
// explicitly indicated with DLL_API, which is somewhat like "extern."
//
// The following ifdef block is the standard way of creating macros
// which make exporting from a DLL simpler. All files within this DLL
// are compiled with the DLL_EXPORTS preprocessor symbol defined on the
// command line. In contrast, projects that use (or import) the DLL
// objects do not define the DLL_EXPORTS symbol. This way any other
// project whose source files include this file see DLL_API functions as
// being imported from a DLL, whereas this DLL sees symbols defined with
// this macro as being exported.
//----------------------------------------------------------------------
#ifdef DLL_EXPORTS
#define DLL_API __declspec(dllexport)
#else
#define DLL_API __declspec(dllimport)
#endif
//----------------------------------------------------------------------
// DLL_API is ignored for all other systems
//----------------------------------------------------------------------
#else
#define DLL_API
#endif
//----------------------------------------------------------------------
// basic includes
//----------------------------------------------------------------------
#include <cstdlib> // standard lib includes
#include <cmath> // math includes
#include <iostream> // I/O streams
#include <cstring> // C-style strings
//----------------------------------------------------------------------
// Limits
// There are a number of places where we use the maximum double value as
// default initializers (and others may be used, depending on the
// data/distance representation). These can usually be found in limits.h
// (as LONG_MAX, INT_MAX) or in float.h (as DBL_MAX, FLT_MAX).
//
// Not all systems have these files. If you are using such a system,
// you should set the preprocessor symbol ANN_NO_LIMITS_H when
// compiling, and modify the statements below to generate the
// appropriate value. For practical purposes, this does not need to be
// the maximum double value. It is sufficient that it be at least as
// large as the maximum squared distance between any two
// points.
//----------------------------------------------------------------------
#ifdef ANN_NO_LIMITS_H // limits.h unavailable
#include <cvalues> // replacement for limits.h
const double ANN_DBL_MAX = MAXDOUBLE; // insert maximum double
#else
#include <climits>
#include <cfloat>
const double ANN_DBL_MAX = DBL_MAX;
#endif
#define ANNversion "1.1.2" // ANN version and information
#define ANNversionCmt ""
#define ANNcopyright "David M. Mount and Sunil Arya"
#define ANNlatestRev "Jan 27, 2010"
//----------------------------------------------------------------------
// ANNbool
// This is a simple boolean type. Although ANSI C++ is supposed
// to support the type bool, some compilers do not have it.
//----------------------------------------------------------------------
enum ANNbool {ANNfalse = 0, ANNtrue = 1}; // ANN boolean type (non ANSI C++)
//----------------------------------------------------------------------
// ANNcoord, ANNdist
// ANNcoord and ANNdist are the types used for representing
// point coordinates and distances. They can be modified by the
// user, with some care. It is assumed that they are both numeric
// types, and that ANNdist is generally of an equal or higher type
// from ANNcoord. A variable of type ANNdist should be large
// enough to store the sum of squared components of a variable
// of type ANNcoord for the number of dimensions needed in the
// application. For example, the following combinations are
// legal:
//
// ANNcoord ANNdist
// --------- -------------------------------
// short short, int, long, float, double
// int int, long, float, double
// long long, float, double
// float float, double
// double double
//
// It is the user's responsibility to make sure that overflow does
// not occur in distance calculation.
//----------------------------------------------------------------------
typedef double ANNcoord; // coordinate data type
typedef double ANNdist; // distance data type
//----------------------------------------------------------------------
// ANNidx
// ANNidx is a point index. When the data structure is built, the
// points are given as an array. Nearest neighbor results are
// returned as an integer index into this array. To make it
// clearer when this is happening, we define the integer type
// ANNidx. Indexing starts from 0.
//
// For fixed-radius near neighbor searching, it is possible that
// there are not k nearest neighbors within the search radius. To
// indicate this, the algorithm returns ANN_NULL_IDX as its result.
// It should be distinguishable from any valid array index.
//----------------------------------------------------------------------
typedef int ANNidx; // point index
const ANNidx ANN_NULL_IDX = -1; // a NULL point index
//----------------------------------------------------------------------
// Infinite distance:
// The code assumes that there is an "infinite distance" which it
// uses to initialize distances before performing nearest neighbor
// searches. It should be as large or larger than any legitimate
// nearest neighbor distance.
//
// On most systems, these should be found in the standard include
// file <limits.h> or possibly <float.h>. If you do not have these
// file, some suggested values are listed below, assuming 64-bit
// long, 32-bit int and 16-bit short.
//
// ANNdist ANN_DIST_INF Values (see <limits.h> or <float.h>)
// ------- ------------ ------------------------------------
// double DBL_MAX 1.79769313486231570e+308
// float FLT_MAX 3.40282346638528860e+38
// long LONG_MAX 0x7fffffffffffffff
// int INT_MAX 0x7fffffff
// short SHRT_MAX 0x7fff
//----------------------------------------------------------------------
const ANNdist ANN_DIST_INF = ANN_DBL_MAX;
//----------------------------------------------------------------------
// Significant digits for tree dumps:
// When floating point coordinates are used, the routine that dumps
// a tree needs to know roughly how many significant digits there
// are in a ANNcoord, so it can output points to full precision.
// This is defined to be ANNcoordPrec. On most systems these
// values can be found in the standard include files <limits.h> or
// <float.h>. For integer types, the value is essentially ignored.
//
// ANNcoord ANNcoordPrec Values (see <limits.h> or <float.h>)
// -------- ------------ ------------------------------------
// double DBL_DIG 15
// float FLT_DIG 6
// long doesn't matter 19
// int doesn't matter 10
// short doesn't matter 5
//----------------------------------------------------------------------
#ifdef DBL_DIG // number of sig. bits in ANNcoord
const int ANNcoordPrec = DBL_DIG;
#else
const int ANNcoordPrec = 15; // default precision
#endif
//----------------------------------------------------------------------
// Self match?
// In some applications, the nearest neighbor of a point is not
// allowed to be the point itself. This occurs, for example, when
// computing all nearest neighbors in a set. By setting the
// parameter ANN_ALLOW_SELF_MATCH to ANNfalse, the nearest neighbor
// is the closest point whose distance from the query point is
// strictly positive.
//----------------------------------------------------------------------
const ANNbool ANN_ALLOW_SELF_MATCH = ANNtrue;
//----------------------------------------------------------------------
// Norms and metrics:
// ANN supports any Minkowski norm for defining distance. In
// particular, for any p >= 1, the L_p Minkowski norm defines the
// length of a d-vector (v0, v1, ..., v(d-1)) to be
//
// (|v0|^p + |v1|^p + ... + |v(d-1)|^p)^(1/p),
//
// (where ^ denotes exponentiation, and |.| denotes absolute
// value). The distance between two points is defined to be the
// norm of the vector joining them. Some common distance metrics
// include
//
// Euclidean metric p = 2
// Manhattan metric p = 1
// Max metric p = infinity
//
// In the case of the max metric, the norm is computed by taking
// the maxima of the absolute values of the components. ANN is
// highly "coordinate-based" and does not support general distances
// functions (e.g. those obeying just the triangle inequality). It
// also does not support distance functions based on
// inner-products.
//
// For the purpose of computing nearest neighbors, it is not
// necessary to compute the final power (1/p). Thus the only
// component that is used by the program is |v(i)|^p.
//
// ANN parameterizes the distance computation through the following
// macros. (Macros are used rather than procedures for
// efficiency.) Recall that the distance between two points is
// given by the length of the vector joining them, and the length
// or norm of a vector v is given by formula:
//
// |v| = ROOT(POW(v0) # POW(v1) # ... # POW(v(d-1)))
//
// where ROOT, POW are unary functions and # is an associative and
// commutative binary operator mapping the following types:
//
// ** POW: ANNcoord --> ANNdist
// ** #: ANNdist x ANNdist --> ANNdist
// ** ROOT: ANNdist (>0) --> double
//
// For early termination in distance calculation (partial distance
// calculation) we assume that POW and # together are monotonically
// increasing on sequences of arguments, meaning that for all
// v0..vk and y:
//
// POW(v0) #...# POW(vk) <= (POW(v0) #...# POW(vk)) # POW(y).
//
// Incremental Distance Calculation:
// The program uses an optimized method of computing distances for
// kd-trees and bd-trees, called incremental distance calculation.
// It is used when distances are to be updated when only a single
// coordinate of a point has been changed. In order to use this,
// we assume that there is an incremental update function DIFF(x,y)
// for #, such that if:
//
// s = x0 # ... # xi # ... # xk
//
// then if s' is equal to s but with xi replaced by y, that is,
//
// s' = x0 # ... # y # ... # xk
//
// then the length of s' can be computed by:
//
// |s'| = |s| # DIFF(xi,y).
//
// Thus, if # is + then DIFF(xi,y) is (yi-x). For the L_infinity
// norm we make use of the fact that in the program this function
// is only invoked when y > xi, and hence DIFF(xi,y)=y.
//
// Finally, for approximate nearest neighbor queries we assume
// that POW and ROOT are related such that
//
// v*ROOT(x) = ROOT(POW(v)*x)
//
// Here are the values for the various Minkowski norms:
//
// L_p: p even: p odd:
// ------------------------- ------------------------
// POW(v) = v^p POW(v) = |v|^p
// ROOT(x) = x^(1/p) ROOT(x) = x^(1/p)
// # = + # = +
// DIFF(x,y) = y - x DIFF(x,y) = y - x
//
// L_inf:
// POW(v) = |v|
// ROOT(x) = x
// # = max
// DIFF(x,y) = y
//
// By default the Euclidean norm is assumed. To change the norm,
// uncomment the appropriate set of macros below.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// Use the following for the Euclidean norm
//----------------------------------------------------------------------
#define ANN_POW(v) ((v)*(v))
#define ANN_ROOT(x) sqrt(x)
#define ANN_SUM(x,y) ((x) + (y))
#define ANN_DIFF(x,y) ((y) - (x))
//----------------------------------------------------------------------
// Use the following for the L_1 (Manhattan) norm
//----------------------------------------------------------------------
// #define ANN_POW(v) fabs(v)
// #define ANN_ROOT(x) (x)
// #define ANN_SUM(x,y) ((x) + (y))
// #define ANN_DIFF(x,y) ((y) - (x))
//----------------------------------------------------------------------
// Use the following for a general L_p norm
//----------------------------------------------------------------------
// #define ANN_POW(v) pow(fabs(v),p)
// #define ANN_ROOT(x) pow(fabs(x),1/p)
// #define ANN_SUM(x,y) ((x) + (y))
// #define ANN_DIFF(x,y) ((y) - (x))
//----------------------------------------------------------------------
// Use the following for the L_infinity (Max) norm
//----------------------------------------------------------------------
// #define ANN_POW(v) fabs(v)
// #define ANN_ROOT(x) (x)
// #define ANN_SUM(x,y) ((x) > (y) ? (x) : (y))
// #define ANN_DIFF(x,y) (y)
//----------------------------------------------------------------------
// Array types
// The following array types are of basic interest. A point is
// just a dimensionless array of coordinates, a point array is a
// dimensionless array of points. A distance array is a
// dimensionless array of distances and an index array is a
// dimensionless array of point indices. The latter two are used
// when returning the results of k-nearest neighbor queries.
//----------------------------------------------------------------------
typedef ANNcoord* ANNpoint; // a point
typedef ANNpoint* ANNpointArray; // an array of points
typedef ANNdist* ANNdistArray; // an array of distances
typedef ANNidx* ANNidxArray; // an array of point indices
//----------------------------------------------------------------------
// Basic point and array utilities:
// The following procedures are useful supplements to ANN's nearest
// neighbor capabilities.
//
// annDist():
// Computes the (squared) distance between a pair of points.
// Note that this routine is not used internally by ANN for
// computing distance calculations. For reasons of efficiency
// this is done using incremental distance calculation. Thus,
// this routine cannot be modified as a method of changing the
// metric.
//
// Because points (somewhat like strings in C) are stored as
// pointers. Consequently, creating and destroying copies of
// points may require storage allocation. These procedures do
// this.
//
// annAllocPt() and annDeallocPt():
// Allocate a deallocate storage for a single point, and
// return a pointer to it. The argument to AllocPt() is
// used to initialize all components.
//
// annAllocPts() and annDeallocPts():
// Allocate and deallocate an array of points as well a
// place to store their coordinates, and initializes the
// points to point to their respective coordinates. It
// allocates point storage in a contiguous block large
// enough to store all the points. It performs no
// initialization.
//
// annCopyPt():
// Creates a copy of a given point, allocating space for
// the new point. It returns a pointer to the newly
// allocated copy.
//----------------------------------------------------------------------
// Point/array utility entry points (contracts described in the block
// comment above; definitions live in the ANN sources).
DLL_API ANNdist annDist(
int dim, // dimension of space
ANNpoint p, // points
ANNpoint q); // returns squared distance between p and q
DLL_API ANNpoint annAllocPt(
int dim, // dimension
ANNcoord c = 0); // coordinate value (all equal)
DLL_API ANNpointArray annAllocPts(
int n, // number of points
int dim); // dimension
DLL_API void annDeallocPt(
ANNpoint &p); // deallocate 1 point
DLL_API void annDeallocPts(
ANNpointArray &pa); // point array
DLL_API ANNpoint annCopyPt(
int dim, // dimension
ANNpoint source); // point to copy
//----------------------------------------------------------------------
//Overall structure: ANN supports a number of different data structures
//for approximate and exact nearest neighbor searching. These are:
//
// ANNbruteForce A simple brute-force search structure.
// ANNkd_tree A kd-tree tree search structure. ANNbd_tree
// A bd-tree tree search structure (a kd-tree with shrink
// capabilities).
//
// At a minimum, each of these data structures support k-nearest
// neighbor queries. The nearest neighbor query, annkSearch,
// returns an integer identifier and the distance to the nearest
// neighbor(s) and annRangeSearch returns the nearest points that
// lie within a given query ball.
//
// Each structure is built by invoking the appropriate constructor
// and passing it (at a minimum) the array of points, the total
// number of points and the dimension of the space. Each structure
// is also assumed to support a destructor and member functions
// that return basic information about the point set.
//
// Note that the array of points is not copied by the data
// structure (for reasons of space efficiency), and it is assumed
// to be constant throughout the lifetime of the search structure.
//
// The search algorithm, annkSearch, is given the query point (q),
// and the desired number of nearest neighbors to report (k), and
// the error bound (eps) (whose default value is 0, implying exact
// nearest neighbors). It returns two arrays which are assumed to
// contain at least k elements: one (nn_idx) contains the indices
// (within the point array) of the nearest neighbors and the other
// (dd) contains the squared distances to these nearest neighbors.
//
// The search algorithm, annkFRSearch, is a fixed-radius kNN
// search. In addition to a query point, it is given a (squared)
// radius bound. (This is done for consistency, because the search
// returns distances as squared quantities.) It does two things.
// First, it computes the k nearest neighbors within the radius
// bound, and second, it returns the total number of points lying
// within the radius bound. It is permitted to set k = 0, in which
// case it effectively answers a range counting query. If the
// error bound epsilon is positive, then the search is approximate
// in the sense that it is free to ignore any point that lies
// outside a ball of radius r/(1+epsilon), where r is the given
// (unsquared) radius bound.
//
// The generic object from which all the search structures are
// derived is given below. It is a virtual object, and is useless
// by itself.
//----------------------------------------------------------------------
// Abstract base class of all ANN search structures (brute-force, kd-tree,
// bd-tree). Pure virtual: instantiate one of the concrete subclasses.
class DLL_API ANNpointSet {
public:
virtual ~ANNpointSet() {} // virtual destructor
virtual void annkSearch( // approx k near neighbor search
ANNpoint q, // query point
int k, // number of near neighbors to return
ANNidxArray nn_idx, // nearest neighbor array (modified)
ANNdistArray dd, // dist to near neighbors (modified)
double eps=0.0 // error bound
) = 0; // pure virtual (defined elsewhere)
virtual int annkFRSearch( // approx fixed-radius kNN search
ANNpoint q, // query point
ANNdist sqRad, // squared radius
int k = 0, // number of near neighbors to return
ANNidxArray nn_idx = NULL, // nearest neighbor array (modified)
ANNdistArray dd = NULL, // dist to near neighbors (modified)
double eps=0.0 // error bound
) = 0; // pure virtual (defined elsewhere)
virtual int theDim() = 0; // return dimension of space
virtual int nPoints() = 0; // return number of points
// return pointer to points
virtual ANNpointArray thePoints() = 0;
};
//----------------------------------------------------------------------
// Brute-force nearest neighbor search:
// The brute-force search structure is very simple but inefficient.
// It has been provided primarily for the sake of comparison with
// and validation of the more complex search structures.
//
// Query processing is the same as described above, but the value
// of epsilon is ignored, since all distance calculations are
// performed exactly.
//
// WARNING: This data structure is very slow, and should not be
// used unless the number of points is very small.
//
// Internal information:
// ---------------------
// This data structure basically consists of the array of points
// (each a pointer to an array of coordinates). The search is
// performed by a simple linear scan of all the points.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// ANNbruteForce
// Linear-scan implementation of ANNpointSet. Per the comments
// above, the eps argument is accepted for interface compatibility
// but all distances are computed exactly.
//----------------------------------------------------------------------
class DLL_API ANNbruteForce: public ANNpointSet {
int dim; // dimension
int n_pts; // number of points
ANNpointArray pts; // point array
public:
ANNbruteForce( // constructor from point array
ANNpointArray pa, // point array
int n, // number of points
int dd); // dimension
~ANNbruteForce(); // destructor
void annkSearch( // approx k near neighbor search
ANNpoint q, // query point
int k, // number of near neighbors to return
ANNidxArray nn_idx, // nearest neighbor array (modified)
ANNdistArray dd, // dist to near neighbors (modified)
double eps=0.0); // error bound (ignored; search is exact)
int annkFRSearch( // approx fixed-radius kNN search
ANNpoint q, // query point
ANNdist sqRad, // squared radius
int k = 0, // number of near neighbors to return
ANNidxArray nn_idx = NULL, // nearest neighbor array (modified)
ANNdistArray dd = NULL, // dist to near neighbors (modified)
double eps=0.0); // error bound (ignored; search is exact)
int theDim() // return dimension of space
{ return dim; }
int nPoints() // return number of points
{ return n_pts; }
ANNpointArray thePoints() // return pointer to points
{ return pts; }
};
//----------------------------------------------------------------------
// kd- and bd-tree splitting and shrinking rules
// kd-trees supports a collection of different splitting rules.
// In addition to the standard kd-tree splitting rule proposed
// by Friedman, Bentley, and Finkel, we have introduced a
// number of other splitting rules, which seem to perform
// as well or better (for the distributions we have tested).
//
// The splitting methods given below allow the user to tailor
// the data structure to the particular data set. They are
// described in greater detail in the kd_split.cc source
// file. The method ANN_KD_SUGGEST is the method chosen (rather
// subjectively) by the implementors as the one giving the
// fastest performance, and is the default splitting method.
//
// As with splitting rules, there are a number of different
// shrinking rules. The shrinking rule ANN_BD_NONE does no
// shrinking (and hence produces a kd-tree). The rule
// ANN_BD_SUGGEST uses the implementors' favorite rule.
//----------------------------------------------------------------------
// Splitting rule used when building a kd- or bd-tree; see the
// comment block above and the kd_split.cc source for details.
enum ANNsplitRule {
ANN_KD_STD = 0, // the optimized kd-splitting rule
ANN_KD_MIDPT = 1, // midpoint split
ANN_KD_FAIR = 2, // fair split
ANN_KD_SL_MIDPT = 3, // sliding midpoint splitting method
ANN_KD_SL_FAIR = 4, // sliding fair split method
ANN_KD_SUGGEST = 5}; // the authors' suggestion for best
const int ANN_N_SPLIT_RULES = 6; // number of split rules
// Shrinking rule used when building a bd-tree; ANN_BD_NONE does
// no shrinking and therefore yields a plain kd-tree.
enum ANNshrinkRule {
ANN_BD_NONE = 0, // no shrinking at all (just kd-tree)
ANN_BD_SIMPLE = 1, // simple splitting
ANN_BD_CENTROID = 2, // centroid splitting
ANN_BD_SUGGEST = 3}; // the authors' suggested choice
const int ANN_N_SHRINK_RULES = 4; // number of shrink rules
//----------------------------------------------------------------------
// kd-tree:
// The main search data structure supported by ANN is a kd-tree.
// The main constructor is given a set of points and a choice of
// splitting method to use in building the tree.
//
// Construction:
// -------------
// The constructor is given the point array, number of points,
// dimension, bucket size (default = 1), and the splitting rule
// (default = ANN_KD_SUGGEST). The point array is not copied, and
// is assumed to be kept constant throughout the lifetime of the
// search structure. There is also a "load" constructor that
// builds a tree from a file description that was created by the
// Dump operation.
//
// Search:
// -------
// There are two search methods:
//
// Standard search (annkSearch()):
// Searches nodes in tree-traversal order, always visiting
// the closer child first.
// Priority search (annkPriSearch()):
// Searches nodes in order of increasing distance of the
// associated cell from the query point. For many
// distributions the standard search seems to work just
// fine, but priority search is safer for worst-case
// performance.
//
// Printing:
// ---------
// There are two methods provided for printing the tree. Print()
// is used to produce a "human-readable" display of the tree, with
// indentation, which is handy for debugging. Dump() produces a
// format that is suitable for reading by another program. There is a
// "load" constructor, which constructs a tree which is assumed to
// have been saved by the Dump() procedure.
//
// Performance and Structure Statistics:
// -------------------------------------
// The procedure getStats() collects statistics information on the
// tree (its size, height, etc.) See ANNperf.h for information on
// the stats structure it returns.
//
// Internal information:
// ---------------------
// The data structure consists of three major chunks of storage.
// The first (implicit) storage are the points themselves (pts),
// which have been provided by the users as an argument to the
// constructor (or are allocated dynamically if the tree is built
// using the load constructor). These should not be changed during
// the lifetime of the search structure. It is the user's
// responsibility to delete these after the tree is destroyed.
//
// The second is the tree itself (which is dynamically allocated in
// the constructor) and is given as a pointer to its root node
// (root). These nodes are automatically deallocated when the tree
// is deleted. See the file src/kd_tree.h for further information
// on the structure of the tree nodes.
//
// Each leaf of the tree does not contain a pointer directly to a
// point, but rather contains a pointer to a "bucket", which is an
// array consisting of point indices. The third major chunk of
// storage is an array (pidx), which is a large array in which all
// these bucket subarrays reside. (The reason for storing them
// separately is the buckets are typically small, but of varying
// sizes. This was done to avoid fragmentation.) This array is
// also deallocated when the tree is deleted.
//
// In addition to this, the tree consists of a number of other
// pieces of information which are used in searching and for
// subsequent tree operations. These consist of the following:
//
// dim Dimension of space
// n_pts Number of points currently in the tree
// n_max Maximum number of points that are allowed
// in the tree
// bkt_size Maximum bucket size (no. of points per leaf)
// bnd_box_lo Bounding box low point
// bnd_box_hi Bounding box high point
// splitRule Splitting method used
//
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// Some types and objects used by kd-tree functions
// See src/kd_tree.h and src/kd_tree.cpp for definitions
//----------------------------------------------------------------------
class ANNkdStats; // stats on kd-tree
class ANNkd_node; // generic node in a kd-tree
typedef ANNkd_node *ANNkd_ptr; // pointer to a kd-tree node
//----------------------------------------------------------------------
// ANNkd_tree
// The main ANN search structure. See the long "kd-tree" comment
// block above for notes on construction, the two search methods,
// printing/dumping, and statistics.
//----------------------------------------------------------------------
class DLL_API ANNkd_tree: public ANNpointSet {
protected: // internal state (see "Internal information" above)
int dim; // dimension of space
int n_pts; // number of points in tree
int bkt_size; // bucket size
ANNpointArray pts; // the points (not copied; owned by caller)
ANNidxArray pidx; // point indices (to pts array)
ANNkd_ptr root; // root of kd-tree
ANNpoint bnd_box_lo; // bounding box low point
ANNpoint bnd_box_hi; // bounding box high point
void SkeletonTree( // construct skeleton tree
int n, // number of points
int dd, // dimension
int bs, // bucket size
ANNpointArray pa = NULL, // point array (optional)
ANNidxArray pi = NULL); // point indices (optional)
public:
ANNkd_tree( // build skeleton tree
int n = 0, // number of points
int dd = 0, // dimension
int bs = 1); // bucket size
ANNkd_tree( // build from point array
ANNpointArray pa, // point array
int n, // number of points
int dd, // dimension
int bs = 1, // bucket size
ANNsplitRule split = ANN_KD_SUGGEST); // splitting method
ANNkd_tree( // build from dump file
std::istream& in); // input stream for dump file
~ANNkd_tree(); // tree destructor
void annkSearch( // approx k near neighbor search
ANNpoint q, // query point
int k, // number of near neighbors to return
ANNidxArray nn_idx, // nearest neighbor array (modified)
ANNdistArray dd, // dist to near neighbors (modified)
double eps=0.0); // error bound
void annkPriSearch( // priority k near neighbor search
ANNpoint q, // query point
int k, // number of near neighbors to return
ANNidxArray nn_idx, // nearest neighbor array (modified)
ANNdistArray dd, // dist to near neighbors (modified)
double eps=0.0); // error bound
int annkFRSearch( // approx fixed-radius kNN search
ANNpoint q, // the query point
ANNdist sqRad, // squared radius of query ball
int k, // number of neighbors to return
ANNidxArray nn_idx = NULL, // nearest neighbor array (modified)
ANNdistArray dd = NULL, // dist to near neighbors (modified)
double eps=0.0); // error bound
int theDim() // return dimension of space
{ return dim; }
int nPoints() // return number of points
{ return n_pts; }
ANNpointArray thePoints() // return pointer to points
{ return pts; }
virtual void Print( // print the tree (for debugging)
ANNbool with_pts, // print points as well?
std::ostream& out); // output stream
virtual void Dump( // dump entire tree
ANNbool with_pts, // print points as well?
std::ostream& out); // output stream
virtual void getStats( // compute tree statistics
ANNkdStats& st); // the statistics (modified)
};
//----------------------------------------------------------------------
// Box decomposition tree (bd-tree)
// The bd-tree is inherited from a kd-tree. The main difference
// in the bd-tree and the kd-tree is a new type of internal node
// called a shrinking node (in the kd-tree there is only one type
// of internal node, a splitting node). The shrinking node
// makes it possible to generate balanced trees in which the
// cells have bounded aspect ratio, by allowing the decomposition
// to zoom in on regions of dense point concentration. Although
// this is a nice idea in theory, few point distributions are so
// densely clustered that this is really needed.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// ANNbd_tree
// Box-decomposition tree; inherits all search operations from
// ANNkd_tree and only adds the shrinking rule at construction.
//----------------------------------------------------------------------
class DLL_API ANNbd_tree: public ANNkd_tree {
public:
ANNbd_tree( // build skeleton tree
int n, // number of points
int dd, // dimension
int bs = 1) // bucket size
: ANNkd_tree(n, dd, bs) {} // build base kd-tree
ANNbd_tree( // build from point array
ANNpointArray pa, // point array
int n, // number of points
int dd, // dimension
int bs = 1, // bucket size
ANNsplitRule split = ANN_KD_SUGGEST, // splitting rule
ANNshrinkRule shrink = ANN_BD_SUGGEST); // shrinking rule
ANNbd_tree( // build from dump file
std::istream& in); // input stream for dump file
};
//----------------------------------------------------------------------
// Other functions
// annMaxPtsVisit Sets a limit on the maximum number of points
// to visit in the search.
// annClose Can be called when all use of ANN is finished.
// It clears up a minor memory leak.
//----------------------------------------------------------------------
DLL_API void annMaxPtsVisit( // max. pts to visit in search
int maxPts); // the limit
DLL_API void annClose(); // called to end use of ANN
#endif

View File

@ -0,0 +1,223 @@
//----------------------------------------------------------------------
// File: ANNperf.h
// Programmer: Sunil Arya and David Mount
// Last modified: 03/04/98 (Release 0.1)
// Description: Include file for ANN performance stats
//
// Some of the code for statistics gathering has been adapted
// from the SmplStat.h package in the g++ library.
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
// Revision 1.0 04/01/05
// Added ANN_ prefix to avoid name conflicts.
//----------------------------------------------------------------------
#ifndef ANNperf_H
#define ANNperf_H
//----------------------------------------------------------------------
// basic includes
//----------------------------------------------------------------------
#include <ann/ANN.h> // basic ANN includes
//----------------------------------------------------------------------
// kd-tree stats object
// This object is used for collecting information about a kd-tree
// or bd-tree.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// ANNkdStats
// Plain aggregate of structural statistics collected from a
// kd-tree or bd-tree (filled in by getStats()).
//----------------------------------------------------------------------
class ANNkdStats { // stats on kd-tree
public:
int dim; // dimension of the point space
int n_pts; // number of points stored in the tree
int bkt_size; // maximum bucket size
int n_lf; // total leaf count (trivial leaves included)
int n_tl; // count of trivial leaves (holding no points)
int n_spl; // count of splitting nodes
int n_shr; // count of shrinking nodes (bd-trees only)
int depth; // depth of the tree
float sum_ar; // accumulated leaf aspect ratios
float avg_ar; // mean leaf aspect ratio
//
// Clear every counter, optionally seeding the dimension,
// point count, and bucket size.
void reset(int d=0, int n=0, int bs=0)
{
dim = d;
n_pts = n;
bkt_size = bs;
n_lf = 0;
n_tl = 0;
n_spl = 0;
n_shr = 0;
depth = 0;
sum_ar = 0.0;
avg_ar = 0.0;
}
ANNkdStats() // default-construct as empty stats
{ reset(); }
void merge(const ANNkdStats &st); // fold in stats from a child subtree
};
//----------------------------------------------------------------------
// ANNsampStat
// A sample stat collects numeric (double) samples and returns some
// simple statistics. Its main functions are:
//
// reset() Reset to no samples.
// += x Include sample x.
// samples() Return number of samples.
// mean() Return mean of samples.
// stdDev() Return standard deviation
// min() Return minimum of samples.
// max() Return maximum of samples.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// ANNsampStat
// Accumulates numeric (double) samples and reports count, mean,
// standard deviation, min, and max. See the summary above.
//----------------------------------------------------------------------
class DLL_API ANNsampStat {
int n; // number of samples
double sum; // sum
double sum2; // sum of squares
double minVal, maxVal; // min and max
public :
void reset() // reset everything
{
n = 0;
sum = sum2 = 0;
minVal = ANN_DBL_MAX; // so any sample becomes the new min
maxVal = -ANN_DBL_MAX; // so any sample becomes the new max
}
ANNsampStat() { reset(); } // constructor
void operator+=(double x) // add sample
{
n++; sum += x; sum2 += x*x;
if (x < minVal) minVal = x;
if (x > maxVal) maxVal = x;
}
int samples() { return n; } // number of samples
// mean; NOTE: divides by n, so undefined when no samples exist
double mean() { return sum/n; } // mean
// sample standard deviation; NOTE: divides by (n-1), so it
// requires at least 2 samples
double stdDev() { return sqrt((sum2 - (sum*sum)/n)/(n-1));}
double min() { return minVal; } // minimum
double max() { return maxVal; } // maximum
};
//----------------------------------------------------------------------
// Operation count updates
// When compiled with -DANN_PERF each macro increments the
// corresponding global counter; otherwise every macro expands to
// nothing, so the search code carries no counting overhead.
//----------------------------------------------------------------------
#ifdef ANN_PERF
#define ANN_FLOP(n) {ann_Nfloat_ops += (n);}
#define ANN_LEAF(n) {ann_Nvisit_lfs += (n);}
#define ANN_SPL(n) {ann_Nvisit_spl += (n);}
#define ANN_SHR(n) {ann_Nvisit_shr += (n);}
#define ANN_PTS(n) {ann_Nvisit_pts += (n);}
#define ANN_COORD(n) {ann_Ncoord_hts += (n);}
#else
#define ANN_FLOP(n)
#define ANN_LEAF(n)
#define ANN_SPL(n)
#define ANN_SHR(n)
#define ANN_PTS(n)
#define ANN_COORD(n)
#endif
//----------------------------------------------------------------------
// Performance statistics
// The following data and routines are used for computing performance
// statistics for nearest neighbor searching. Because these routines
// can slow the code down, they can be activated and deactivated by
// defining the ANN_PERF variable, by compiling with the option:
// -DANN_PERF
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// Global counters for performance measurement
//
// visit_lfs The number of leaf nodes visited in the
// tree.
//
// visit_spl The number of splitting nodes visited in the
// tree.
//
// visit_shr The number of shrinking nodes visited in the
// tree.
//
// visit_pts The number of points visited in all the
// leaf nodes visited. Equivalently, this
// is the number of points for which distance
// calculations are performed.
//
// coord_hts The number of times a coordinate of a
// data point is accessed. This is generally
// less than visit_pts*d if partial distance
// calculation is used. This count is low
// in the sense that if a coordinate is hit
// many times in the same routine we may
// count it only once.
//
// float_ops The number of floating point operations.
// This includes all operations in the heap
// as well as distance calculations to boxes.
//
// average_err The average error of each query (the
// error of the reported point to the true
// nearest neighbor). For k nearest neighbors
// the error is computed k times.
//
// rank_err The rank error of each query (the difference
// in the rank of the reported point and its
// true rank).
//
// data_pts The number of data points. This is not
// a counter, but used in stats computation.
//----------------------------------------------------------------------
extern int ann_Ndata_pts; // number of data points
extern int ann_Nvisit_lfs; // number of leaf nodes visited
extern int ann_Nvisit_spl; // number of splitting nodes visited
extern int ann_Nvisit_shr; // number of shrinking nodes visited
extern int ann_Nvisit_pts; // visited points for one query
extern int ann_Ncoord_hts; // coordinate hits for one query
extern int ann_Nfloat_ops; // floating ops for one query
extern ANNsampStat ann_visit_lfs; // stats on leaf nodes visits
extern ANNsampStat ann_visit_spl; // stats on splitting nodes visits
extern ANNsampStat ann_visit_shr; // stats on shrinking nodes visits
extern ANNsampStat ann_visit_nds; // stats on total nodes visits
extern ANNsampStat ann_visit_pts; // stats on points visited
extern ANNsampStat ann_coord_hts; // stats on coordinate hits
extern ANNsampStat ann_float_ops; // stats on floating ops
//----------------------------------------------------------------------
// The following need to be part of the public interface, because
// they are accessed outside the DLL in ann_test.cpp.
//----------------------------------------------------------------------
DLL_API extern ANNsampStat ann_average_err; // average error
DLL_API extern ANNsampStat ann_rank_err; // rank error
//----------------------------------------------------------------------
// Declaration of externally accessible routines for statistics
//----------------------------------------------------------------------
DLL_API void annResetStats(int data_size); // reset stats for a set of queries
DLL_API void annResetCounts(); // reset counts for one query
DLL_API void annUpdateStats(); // update stats with current counts
DLL_API void annPrintStats(ANNbool validate); // print statistics for a run
#endif

View File

@ -0,0 +1,169 @@
//----------------------------------------------------------------------
// File: ANNx.h
// Programmer: Sunil Arya and David Mount
// Description: Internal include file for ANN
// Last modified: 01/27/10 (Version 1.1.2)
//
// These declarations are of use in manipulating some of
// the internal data objects appearing in ANN, but are not
// needed for applications just using the nearest neighbor
// search.
//
// Typical users of ANN should not need to access this file.
//----------------------------------------------------------------------
// Copyright (c) 1997-2015 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
// Revision 1.0 04/01/05
// Changed LO, HI, IN, OUT to ANN_LO, ANN_HI, etc.
// Revision 1.1.2 01/27/10
// Fixed minor compilation bugs for new versions of gcc
//----------------------------------------------------------------------
#ifndef ANNx_H
#define ANNx_H
#include <iomanip> // I/O manipulators
#include <ann/ANN.h> // ANN includes
//----------------------------------------------------------------------
// Global constants and types
//----------------------------------------------------------------------
enum {ANN_LO=0, ANN_HI=1}; // splitting indices
enum {ANN_IN=0, ANN_OUT=1}; // shrinking indices
// what to do in case of error
enum ANNerr {ANNwarn = 0, ANNabort = 1};
//----------------------------------------------------------------------
// Maximum number of points to visit
// We have an option for terminating the search early if the
// number of points visited exceeds some threshold. If the
// threshold is 0 (its default) this means there is no limit
// and the algorithm applies its normal termination condition.
//----------------------------------------------------------------------
extern int ANNmaxPtsVisited; // maximum number of pts visited
extern int ANNptsVisited; // number of pts visited in search
//----------------------------------------------------------------------
// Global function declarations
//----------------------------------------------------------------------
void annError( // ANN error routine
const char* msg, // error message
ANNerr level); // level of error
void annPrintPt( // print a point
ANNpoint pt, // the point
int dim, // the dimension
std::ostream &out); // output stream
//----------------------------------------------------------------------
// Orthogonal (axis aligned) rectangle
// Orthogonal rectangles are represented by two points, one
// for the lower left corner (min coordinates) and the other
// for the upper right corner (max coordinates).
//
// The constructor initializes from either a pair of coordinates,
// pair of points, or another rectangle. Note that all constructors
// allocate new point storage. The destructor deallocates this
// storage.
//
// BEWARE: Orthogonal rectangles should be passed ONLY BY REFERENCE.
// (C++'s default copy constructor will not allocate new point
// storage, then on return the destructor frees storage, and then
// you get into big trouble in the calling procedure.)
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// ANNorthRect
// Axis-aligned rectangle given by its low (lo) and high (hi)
// corner points. Every constructor allocates fresh point storage
// and the destructor deallocates it; since no user-defined copy
// constructor exists, pass objects only by reference (see the
// BEWARE note above) to avoid double deallocation.
//----------------------------------------------------------------------
class ANNorthRect {
public:
ANNpoint lo; // rectangle lower bounds
ANNpoint hi; // rectangle upper bounds
//
ANNorthRect( // basic constructor
int dd, // dimension of space
ANNcoord l=0, // default is empty
ANNcoord h=0)
{ lo = annAllocPt(dd, l); hi = annAllocPt(dd, h); }
ANNorthRect( // (almost a) copy constructor
int dd, // dimension
const ANNorthRect &r) // rectangle to copy
{ lo = annCopyPt(dd, r.lo); hi = annCopyPt(dd, r.hi); }
ANNorthRect( // construct from points
int dd, // dimension
ANNpoint l, // low point
ANNpoint h) // high point
{ lo = annCopyPt(dd, l); hi = annCopyPt(dd, h); }
~ANNorthRect() // destructor (frees both corner points)
{ annDeallocPt(lo); annDeallocPt(hi); }
ANNbool inside(int dim, ANNpoint p);// is point p inside rectangle?
};
void annAssignRect( // assign one rect to another
int dim, // dimension (both must be same)
ANNorthRect &dest, // destination (modified)
const ANNorthRect &source); // source
//----------------------------------------------------------------------
// Orthogonal (axis aligned) halfspace
// An orthogonal halfspace is represented by an integer cutting
// dimension cd, coordinate cutting value, cv, and side, sd, which is
// either +1 or -1. Our convention is that point q lies in the (closed)
// halfspace if (q[cd] - cv)*sd >= 0.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// ANNorthHalfSpace
// An axis-orthogonal halfspace described by a cutting dimension
// cd, a cutting value cv, and a side indicator sd (+1 or -1).
// A point q lies in the closed halfspace exactly when
// (q[cd] - cv)*sd >= 0.
//----------------------------------------------------------------------
class ANNorthHalfSpace {
public:
int cd; // cutting dimension
ANNcoord cv; // cutting value
int sd; // side indicator (+1 or -1)
//
// Default constructor: fully zeroed (note sd == 0, not +/-1).
ANNorthHalfSpace()
{ cd = 0; cv = 0; sd = 0; }
// Construct from cutting dimension, cutting value, and side.
ANNorthHalfSpace(
int cdd, // dimension of space
ANNcoord cvv, // cutting value
int sdd) // side
{ cd = cdd; cv = cvv; sd = sdd; }
// Membership: q is inside iff its signed offset is nonnegative.
ANNbool in(ANNpoint q) const
{ return (ANNbool) ((q[cd] - cv)*sd >= 0); }
// Complement of in(): q is outside iff the offset is negative.
ANNbool out(ANNpoint q) const
{ return (ANNbool) ((q[cd] - cv)*sd < 0); }
// Squared distance from q to the bounding hyperplane.
ANNdist dist(ANNpoint q) const
{ return (ANNdist) ANN_POW(q[cd] - cv); }
// Reposition as the lower bound through p along dimension d.
void setLowerBound(int d, ANNpoint p)
{ cd = d; cv = p[d]; sd = +1; }
// Reposition as the upper bound through p along dimension d.
void setUpperBound(int d, ANNpoint p)
{ cd = d; cv = p[d]; sd = -1; }
// Clamp q onto the halfspace boundary if it lies outside.
void project(ANNpoint &q)
{ if (out(q)) q[cd] = cv; }
};
// array of halfspaces
typedef ANNorthHalfSpace *ANNorthHSArray;
#endif

View File

@ -0,0 +1,165 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

View File

@ -0,0 +1,201 @@
//----------------------------------------------------------------------
// File: ANN.cpp
// Programmer: Sunil Arya and David Mount
// Description: Methods for ANN.h and ANNx.h
// Last modified: 01/27/10 (Version 1.1.2)
//----------------------------------------------------------------------
// Copyright (c) 1997-2010 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
// Revision 1.0 04/01/05
// Added performance counting to annDist()
// Revision 1.1.2 01/27/10
// Fixed minor compilation bugs for new versions of gcc
//----------------------------------------------------------------------
#include <cstdlib> // C standard lib defs
#include <ann/ANNx.h> // all ANN includes
#include <ann/ANNperf.h> // ANN performance
using namespace std; // make std:: accessible
//----------------------------------------------------------------------
// Point methods
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// Distance utility.
// (Note: In the nearest neighbor search, most distances are
// computed using partial distance calculations, not this
// procedure.)
//----------------------------------------------------------------------
ANNdist annDist( // interpoint squared distance
 int dim, // dimension of the points
 ANNpoint p, // first point
 ANNpoint q) // second point
{
 // Accumulates the distance using the ANN_SUM/ANN_POW macros, so the
 // actual metric depends on how those are configured at compile time.
 // The obsolete 'register' storage class has been dropped: it was
 // deprecated in C++11 and removed in C++17, and modern compilers
 // ignore it anyway.
 int d;
 ANNcoord diff;
 ANNcoord dist;
 dist = 0;
 for (d = 0; d < dim; d++) {
 diff = p[d] - q[d]; // per-coordinate difference
 dist = ANN_SUM(dist, ANN_POW(diff)); // fold into running total
 }
 ANN_FLOP(3*dim) // performance counts
 ANN_PTS(1)
 ANN_COORD(dim)
 return dist;
}
//----------------------------------------------------------------------
// annPrintPoint() prints a point to a given output stream.
//----------------------------------------------------------------------
void annPrintPt( // print a point
 ANNpoint pt, // the point
 int dim, // the dimension
 std::ostream &out) // output stream
{
 // Write the coordinates separated by single spaces, with no
 // leading or trailing separator and no newline.
 for (int i = 0; i < dim; ++i) {
 if (i > 0) out << " "; // separator before every coord but the first
 out << pt[i];
 }
}
//----------------------------------------------------------------------
// Point allocation/deallocation:
//
// Because points (somewhat like strings in C) are stored
// as pointers. Consequently, creating and destroying
// copies of points may require storage allocation. These
// procedures do this.
//
// annAllocPt() and annDeallocPt() allocate and deallocate
// storage for a single point, and return a pointer to it.
//
// annAllocPts() allocates an array of points as well as a place
// to store their coordinates, and initializes the points to
// point to their respective coordinates. It allocates point
// storage in a contiguous block large enough to store all the
// points. It performs no initialization.
//
// annDeallocPts() should only be used on point arrays allocated
// by annAllocPts since it assumes that points are allocated in
// a block.
//
// annCopyPt() copies a point taking care to allocate storage
// for the new point.
//
// annAssignRect() assigns the coordinates of one rectangle to
// another. The two rectangles must have the same dimension
// (and it is not possible to test this here).
//----------------------------------------------------------------------
ANNpoint annAllocPt(int dim, ANNcoord c) // allocate 1 point
{
 // Allocate a coordinate array of the given dimension and fill
 // every slot with the value c.
 ANNpoint pt = new ANNcoord[dim];
 int i = 0;
 while (i < dim) {
 pt[i] = c;
 ++i;
 }
 return pt;
}
ANNpointArray annAllocPts(int n, int dim) // allocate n pts in dim
{
 ANNpointArray pa = new ANNpoint[n]; // allocate points
 ANNpoint p = new ANNcoord[n*dim]; // allocate space for coords
 // Point each entry at its slice of the single contiguous coordinate
 // block; annDeallocPts() relies on pa[0] addressing the start of it.
 // Coordinates are left uninitialized.
 for (int i = 0; i < n; i++) {
 pa[i] = &(p[i*dim]);
 }
 return pa;
}
void annDeallocPt(ANNpoint &p) // deallocate 1 point
{
 // Frees storage created by annAllocPt()/annCopyPt() and nulls the
 // caller's pointer to prevent accidental reuse.
 delete [] p;
 p = NULL;
}
void annDeallocPts(ANNpointArray &pa) // deallocate points
{
 // Only valid for arrays created by annAllocPts(): pa[0] must point
 // at the start of the single contiguous coordinate block.
 delete [] pa[0]; // dealloc coordinate storage
 delete [] pa; // dealloc points
 pa = NULL;
}
ANNpoint annCopyPt(int dim, ANNpoint source) // copy point
{
 // Deep-copy: allocate fresh coordinate storage and duplicate each
 // coordinate of the source point into it.
 ANNpoint dup = new ANNcoord[dim];
 for (int j = 0; j < dim; ++j) {
 dup[j] = source[j];
 }
 return dup;
}
// assign one rect to another
void annAssignRect(int dim, ANNorthRect &dest, const ANNorthRect &source)
{
 // Copy both corners coordinate by coordinate; both rectangles must
 // have the same dimension (not checkable here).
 for (int d = 0; d < dim; ++d) {
 dest.lo[d] = source.lo[d];
 dest.hi[d] = source.hi[d];
 }
}
// is point inside rectangle?
ANNbool ANNorthRect::inside(int dim, ANNpoint p)
{
 // The point is inside iff every coordinate lies within [lo, hi]
 // (boundaries count as inside).
 int d = 0;
 while (d < dim) {
 if (p[d] < lo[d] || p[d] > hi[d]) return ANNfalse;
 ++d;
 }
 return ANNtrue;
}
//----------------------------------------------------------------------
// Error handler
//----------------------------------------------------------------------
void annError(const char* msg, ANNerr level)
{
 // Warnings are reported and execution continues; an ANNabort-level
 // error reports and terminates the program.
 if (level != ANNabort) {
 cerr << "ANN: WARNING----->" << msg << "<-------------WARNING\n";
 return;
 }
 cerr << "ANN: ERROR------->" << msg << "<-------------ERROR\n";
 exit(1);
}
//----------------------------------------------------------------------
// Limit on number of points visited
// We have an option for terminating the search early if the
// number of points visited exceeds some threshold. If the
// threshold is 0 (its default) this means there is no limit
// and the algorithm applies its normal termination condition.
// This is for applications where there are real time constraints
// on the running time of the algorithm.
//----------------------------------------------------------------------
int ANNmaxPtsVisited = 0; // maximum number of pts visited
int ANNptsVisited; // number of pts visited in search
//----------------------------------------------------------------------
// Global function declarations
//----------------------------------------------------------------------
void annMaxPtsVisit( // set limit on max. pts to visit in search
 int maxPts) // the limit
{
 // A value of 0 (the default) means no limit; see the comment block
 // above on early search termination.
 ANNmaxPtsVisited = maxPts;
}

View File

@ -0,0 +1,61 @@
//----------------------------------------------------------------------
// File: bd_fix_rad_search.cpp
// Programmer: David Mount
// Description: Standard bd-tree search
// Last modified: 05/03/05 (Version 1.1)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 1.1 05/03/05
// Initial release
//----------------------------------------------------------------------
#include "bd_tree.h" // bd-tree declarations
#include "kd_fix_rad_search.h" // kd-tree FR search declarations
//----------------------------------------------------------------------
// Approximate searching for bd-trees.
// See the file kd_FR_search.cpp for general information on the
// approximate nearest neighbor search algorithm. Here we
// include the extensions for shrinking nodes.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// bd_shrink::ann_FR_search - search a shrinking node
//----------------------------------------------------------------------
void ANNbd_shrink::ann_FR_search(ANNdist box_dist)
{
 // Fixed-radius search of a shrinking node: compute the distance from
 // the query point (ANNkdFRQ) to the inner box by summing the
 // contributions of the violated bounding sides, then recurse on the
 // closer child first.
 // check dist calc term cond.
 if (ANNmaxPtsVisited != 0 && ANNptsVisited > ANNmaxPtsVisited) return;
 ANNdist inner_dist = 0; // distance to inner box
 for (int i = 0; i < n_bnds; i++) { // is query point in the box?
 if (bnds[i].out(ANNkdFRQ)) { // outside this bounding side?
 // add to inner distance
 inner_dist = (ANNdist) ANN_SUM(inner_dist, bnds[i].dist(ANNkdFRQ));
 }
 }
 if (inner_dist <= box_dist) { // if inner box is closer
 child[ANN_IN]->ann_FR_search(inner_dist);// search inner child first
 child[ANN_OUT]->ann_FR_search(box_dist);// ...then outer child
 }
 else { // if outer box is closer
 child[ANN_OUT]->ann_FR_search(box_dist);// search outer child first
 child[ANN_IN]->ann_FR_search(inner_dist);// ...then inner child
 }
 ANN_FLOP(3*n_bnds) // increment floating ops
 ANN_SHR(1) // one more shrinking node
}

View File

@ -0,0 +1,62 @@
//----------------------------------------------------------------------
// File: bd_pr_search.cpp
// Programmer: David Mount
// Description: Priority search for bd-trees
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
//History:
// Revision 0.1 03/04/98
// Initial release
//----------------------------------------------------------------------
#include "bd_tree.h" // bd-tree declarations
#include "kd_pr_search.h" // kd priority search declarations
//----------------------------------------------------------------------
// Approximate priority searching for bd-trees.
// See the file kd_pr_search.cc for general information on the
// approximate nearest neighbor priority search algorithm. Here
// we include the extensions for shrinking nodes.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// bd_shrink::ann_search - search a shrinking node
//----------------------------------------------------------------------
void ANNbd_shrink::ann_pri_search(ANNdist box_dist)
{
 // Priority search of a shrinking node: recurse into the closer child
 // immediately and enqueue the farther child (keyed by its box
 // distance) on the global box priority queue ANNprBoxPQ, skipping
 // the shared trivial (empty) leaf KD_TRIVIAL.  ANNprQ is the query
 // point of the current priority search.
 ANNdist inner_dist = 0; // distance to inner box
 for (int i = 0; i < n_bnds; i++) { // is query point in the box?
 if (bnds[i].out(ANNprQ)) { // outside this bounding side?
 // add to inner distance
 inner_dist = (ANNdist) ANN_SUM(inner_dist, bnds[i].dist(ANNprQ));
 }
 }
 if (inner_dist <= box_dist) { // if inner box is closer
 if (child[ANN_OUT] != KD_TRIVIAL) // enqueue outer if not trivial
 ANNprBoxPQ->insert(box_dist,child[ANN_OUT]);
 // continue with inner child
 child[ANN_IN]->ann_pri_search(inner_dist);
 }
 else { // if outer box is closer
 if (child[ANN_IN] != KD_TRIVIAL) // enqueue inner if not trivial
 ANNprBoxPQ->insert(inner_dist,child[ANN_IN]);
 // continue with outer child
 child[ANN_OUT]->ann_pri_search(box_dist);
 }
 ANN_FLOP(3*n_bnds) // increment floating ops
 ANN_SHR(1) // one more shrinking node
}

View File

@ -0,0 +1,61 @@
//----------------------------------------------------------------------
// File: bd_search.cpp
// Programmer: David Mount
// Description: Standard bd-tree search
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
//----------------------------------------------------------------------
#include "bd_tree.h" // bd-tree declarations
#include "kd_search.h" // kd-tree search declarations
//----------------------------------------------------------------------
// Approximate searching for bd-trees.
// See the file kd_search.cpp for general information on the
// approximate nearest neighbor search algorithm. Here we
// include the extensions for shrinking nodes.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// bd_shrink::ann_search - search a shrinking node
//----------------------------------------------------------------------
void ANNbd_shrink::ann_search(ANNdist box_dist)
{
 // Standard search of a shrinking node: compute the distance from the
 // query point (ANNkdQ) to the inner box by summing the contributions
 // of the violated bounding sides, then recurse on the closer child
 // first.
 // check dist calc term cond.
 if (ANNmaxPtsVisited != 0 && ANNptsVisited > ANNmaxPtsVisited) return;
 ANNdist inner_dist = 0; // distance to inner box
 for (int i = 0; i < n_bnds; i++) { // is query point in the box?
 if (bnds[i].out(ANNkdQ)) { // outside this bounding side?
 // add to inner distance
 inner_dist = (ANNdist) ANN_SUM(inner_dist, bnds[i].dist(ANNkdQ));
 }
 }
 if (inner_dist <= box_dist) { // if inner box is closer
 child[ANN_IN]->ann_search(inner_dist); // search inner child first
 child[ANN_OUT]->ann_search(box_dist); // ...then outer child
 }
 else { // if outer box is closer
 child[ANN_OUT]->ann_search(box_dist); // search outer child first
 child[ANN_IN]->ann_search(inner_dist); // ...then inner child
 }
 ANN_FLOP(3*n_bnds) // increment floating ops
 ANN_SHR(1) // one more shrinking node
}

View File

@ -0,0 +1,417 @@
//----------------------------------------------------------------------
// File: bd_tree.cpp
// Programmer: David Mount
// Description: Basic methods for bd-trees.
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
// Revision 1.0 04/01/05
// Fixed centroid shrink threshold condition to depend on the
// dimension.
// Moved dump routine to kd_dump.cpp.
//----------------------------------------------------------------------
#include "bd_tree.h" // bd-tree declarations
#include "kd_util.h" // kd-tree utilities
#include "kd_split.h" // kd-tree splitting rules
#include <ann/ANNperf.h> // performance evaluation
//----------------------------------------------------------------------
// Printing a bd-tree
// These routines print a bd-tree. See the analogous procedure
// in kd_tree.cpp for more information.
//----------------------------------------------------------------------
void ANNbd_shrink::print( // print shrinking node
 int level, // depth of node in tree
 ostream &out) // output stream
{
 // Prints the subtree with the out-child first, then this node's
 // bounding halfspaces (two per line), then the in-child, following
 // the printing convention of kd_tree.cpp.
 child[ANN_OUT]->print(level+1, out); // print out-child
 out << " ";
 for (int i = 0; i < level; i++) // print indentation
 out << "..";
 out << "Shrink";
 for (int j = 0; j < n_bnds; j++) { // print sides, 2 per line
 if (j % 2 == 0) {
 out << "\n"; // newline and indentation
 for (int i = 0; i < level+2; i++) out << " ";
 }
 // each side shows its coordinate index, sense, and cut value
 out << " ([" << bnds[j].cd << "]"
 << (bnds[j].sd > 0 ? ">=" : "< ")
 << bnds[j].cv << ")";
 }
 out << "\n";
 child[ANN_IN]->print(level+1, out); // print in-child
}
//----------------------------------------------------------------------
// kd_tree statistics utility (for performance evaluation)
// This routine computes various statistics information for
// shrinking nodes. See file kd_tree.cpp for more information.
//----------------------------------------------------------------------
void ANNbd_shrink::getStats( // get subtree statistics
 int dim, // dimension of space
 ANNkdStats &st, // stats (modified)
 ANNorthRect &bnd_box) // bounding box
{
 // Recovers the inner box from the bounding halfspaces, merges the
 // statistics of both children into st, and accounts for this
 // shrinking node itself (depth and shrink count).
 ANNkdStats ch_stats; // stats for children
 ANNorthRect inner_box(dim); // inner box of shrink
 annBnds2Box(bnd_box, // enclosing box
 dim, // dimension
 n_bnds, // number of bounds
 bnds, // bounds array
 inner_box); // inner box (modified)
 // get stats for inner child
 ch_stats.reset(); // reset
 child[ANN_IN]->getStats(dim, ch_stats, inner_box);
 st.merge(ch_stats); // merge them
 // get stats for outer child
 ch_stats.reset(); // reset
 child[ANN_OUT]->getStats(dim, ch_stats, bnd_box);
 st.merge(ch_stats); // merge them
 st.depth++; // increment depth
 st.n_shr++; // increment number of shrinks
}
//----------------------------------------------------------------------
// bd-tree constructor
// This is the main constructor for bd-trees given a set of points.
// It first builds a skeleton kd-tree as a basis, then computes the
// bounding box of the data points, and then invokes rbd_tree() to
// actually build the tree, passing it the appropriate splitting
// and shrinking information.
//----------------------------------------------------------------------
ANNkd_ptr rbd_tree( // recursive construction of bd-tree
ANNpointArray pa, // point array
ANNidxArray pidx, // point indices to store in subtree
int n, // number of points
int dim, // dimension of space
int bsp, // bucket space
ANNorthRect &bnd_box, // bounding box for current node
ANNkd_splitter splitter, // splitting routine
ANNshrinkRule shrink); // shrinking rule
ANNbd_tree::ANNbd_tree( // construct from point array
 ANNpointArray pa, // point array (with at least n pts)
 int n, // number of points
 int dd, // dimension
 int bs, // bucket size
 ANNsplitRule split, // splitting rule
 ANNshrinkRule shrink) // shrinking rule
 : ANNkd_tree(n, dd, bs) // build skeleton base tree
{
 // Records the points, computes their bounding box, and recursively
 // decomposes with rbd_tree() using the requested splitting and
 // shrinking rules.
 // NOTE(review): pts/pidx/root/bnd_box_lo/bnd_box_hi are presumably
 // members of the ANNkd_tree base, with pidx initialized by the
 // skeleton constructor -- confirm in kd_tree.cpp/kd_tree.h.
 pts = pa; // where the points are
 if (n == 0) return; // no points--no sweat
 ANNorthRect bnd_box(dd); // bounding box for points
 // construct bounding rectangle
 annEnclRect(pa, pidx, n, dd, bnd_box);
 // copy to tree structure
 bnd_box_lo = annCopyPt(dd, bnd_box.lo);
 bnd_box_hi = annCopyPt(dd, bnd_box.hi);
 switch (split) { // build by rule
 case ANN_KD_STD: // standard kd-splitting rule
 root = rbd_tree(pa, pidx, n, dd, bs, bnd_box, kd_split, shrink);
 break;
 case ANN_KD_MIDPT: // midpoint split
 root = rbd_tree(pa, pidx, n, dd, bs, bnd_box, midpt_split, shrink);
 break;
 case ANN_KD_SUGGEST: // best (in our opinion)
 case ANN_KD_SL_MIDPT: // sliding midpoint split
 root = rbd_tree(pa, pidx, n, dd, bs, bnd_box, sl_midpt_split, shrink);
 break;
 case ANN_KD_FAIR: // fair split
 root = rbd_tree(pa, pidx, n, dd, bs, bnd_box, fair_split, shrink);
 break;
 case ANN_KD_SL_FAIR: // sliding fair split
 root = rbd_tree(pa, pidx, n, dd, bs,
 bnd_box, sl_fair_split, shrink);
 break;
 default:
 annError("Illegal splitting method", ANNabort);
 }
}
//----------------------------------------------------------------------
// Shrinking rules
//----------------------------------------------------------------------
enum ANNdecomp {SPLIT, SHRINK}; // decomposition methods
//----------------------------------------------------------------------
// trySimpleShrink - Attempt a simple shrink
//
// We compute the tight bounding box of the points, and compute
// the 2*dim ``gaps'' between the sides of the tight box and the
// bounding box. If any of the gaps is large enough relative to
// the longest side of the tight bounding box, then we shrink
// all sides whose gaps are large enough. (The reason for
// comparing against the tight bounding box, is that after
// shrinking the longest box size will decrease, and if we use
// the standard bounding box, we may decide to shrink twice in
// a row. Since the tight box is fixed, we cannot shrink twice
// consecutively.)
//----------------------------------------------------------------------
const float BD_GAP_THRESH = 0.5; // gap threshold (must be < 1)
const int BD_CT_THRESH = 2; // min number of shrink sides
ANNdecomp trySimpleShrink( // try a simple shrink
 ANNpointArray pa, // point array
 ANNidxArray pidx, // point indices to store in subtree
 int n, // number of points
 int dim, // dimension of space
 const ANNorthRect &bnd_box, // current bounding box
 ANNorthRect &inner_box) // inner box if shrinking (returned)
{
 // Decide between splitting and shrinking: compute the tight box of
 // the points, then keep (shrink) only those sides whose gap to the
 // enclosing box is at least BD_GAP_THRESH times the longest
 // tight-box side; the others are expanded back to the enclosing box.
 int i;
 // compute tight bounding box
 annEnclRect(pa, pidx, n, dim, inner_box);
 ANNcoord max_length = 0; // find longest box side
 for (i = 0; i < dim; i++) {
 ANNcoord length = inner_box.hi[i] - inner_box.lo[i];
 if (length > max_length) {
 max_length = length;
 }
 }
 int shrink_ct = 0; // number of sides we shrunk
 for (i = 0; i < dim; i++) { // select which sides to shrink
 // high-side gap between boxes
 ANNcoord gap_hi = bnd_box.hi[i] - inner_box.hi[i];
 // big enough gap to shrink?
 if (gap_hi < max_length*BD_GAP_THRESH)
 inner_box.hi[i] = bnd_box.hi[i]; // no - expand back out
 else shrink_ct++; // yes - shrink this side
 // repeat for the low side
 ANNcoord gap_lo = inner_box.lo[i] - bnd_box.lo[i];
 if (gap_lo < max_length*BD_GAP_THRESH)
 inner_box.lo[i] = bnd_box.lo[i]; // no - expand back out
 else shrink_ct++; // yes - shrink this side
 }
 if (shrink_ct >= BD_CT_THRESH) // did we shrink enough sides?
 return SHRINK;
 else return SPLIT;
}
//----------------------------------------------------------------------
// tryCentroidShrink - Attempt a centroid shrink
//
// We repeatedly apply the splitting rule, always to the larger subset
// of points, until the number of points decreases by the constant
// fraction BD_FRACTION. If this takes more than dim*BD_MAX_SPLIT_FAC
// splits for this to happen, then we shrink to the final inner box
// Otherwise we split.
//----------------------------------------------------------------------
const float BD_MAX_SPLIT_FAC = 0.5; // maximum number of splits allowed
const float BD_FRACTION = 0.5; // ...to reduce points by this fraction
// ...This must be < 1.
ANNdecomp tryCentroidShrink( // try a centroid shrink
 ANNpointArray pa, // point array
 ANNidxArray pidx, // point indices to store in subtree
 int n, // number of points
 int dim, // dimension of space
 const ANNorthRect &bnd_box, // current bounding box
 ANNkd_splitter splitter, // splitting procedure
 ANNorthRect &inner_box) // inner box if shrinking (returned)
{
 // Repeatedly split the larger half of the point set until it has
 // shrunk by the fraction BD_FRACTION.  If that takes more than
 // dim*BD_MAX_SPLIT_FAC splits, shrinking to the resulting inner box
 // is judged worthwhile; otherwise a plain split is preferred.
 int n_sub = n; // number of points in subset
 int n_goal = (int) (n*BD_FRACTION); // number of points in goal
 int n_splits = 0; // number of splits needed
 // initialize inner box to bounding box
 annAssignRect(dim, inner_box, bnd_box);
 while (n_sub > n_goal) { // keep splitting until goal reached
 int cd; // cut dim (returned by splitter)
 ANNcoord cv; // cut value (returned by splitter)
 int n_lo; // number of points on low side
 // invoke splitting procedure
 (*splitter)(pa, pidx, inner_box, n_sub, dim, cd, cv, n_lo);
 n_splits++; // increment split count
 if (n_lo >= n_sub/2) { // most points on low side
 inner_box.hi[cd] = cv; // collapse high side
 n_sub = n_lo; // recurse on lower points
 }
 else { // most points on high side
 inner_box.lo[cd] = cv; // collapse low side
 pidx += n_lo; // recurse on higher points
 n_sub -= n_lo;
 }
 }
 if (n_splits > dim*BD_MAX_SPLIT_FAC)// took too many splits
 return SHRINK; // shrink to final subset
 else
 return SPLIT;
}
//----------------------------------------------------------------------
// selectDecomp - select which decomposition to use
//----------------------------------------------------------------------
ANNdecomp selectDecomp( // select decomposition method
 ANNpointArray pa, // point array
 ANNidxArray pidx, // point indices to store in subtree
 int n, // number of points
 int dim, // dimension of space
 const ANNorthRect &bnd_box, // current bounding box
 ANNkd_splitter splitter, // splitting procedure
 ANNshrinkRule shrink, // shrinking rule
 ANNorthRect &inner_box) // inner box if shrinking (returned)
{
 // Dispatches on the shrinking rule to decide whether this node
 // becomes a split or a shrink; when SHRINK is returned, inner_box
 // holds the box to shrink to.  ANN_BD_SUGGEST currently maps to the
 // simple-shrink heuristic.
 ANNdecomp decomp = SPLIT; // decomposition
 switch (shrink) { // check shrinking rule
 case ANN_BD_NONE: // no shrinking allowed
 decomp = SPLIT;
 break;
 case ANN_BD_SUGGEST: // author's suggestion
 case ANN_BD_SIMPLE: // simple shrink
 decomp = trySimpleShrink(
 pa, pidx, // points and indices
 n, dim, // number of points and dimension
 bnd_box, // current bounding box
 inner_box); // inner box if shrinking (returned)
 break;
 case ANN_BD_CENTROID: // centroid shrink
 decomp = tryCentroidShrink(
 pa, pidx, // points and indices
 n, dim, // number of points and dimension
 bnd_box, // current bounding box
 splitter, // splitting procedure
 inner_box); // inner box if shrinking (returned)
 break;
 default:
 annError("Illegal shrinking rule", ANNabort);
 }
 return decomp;
}
//----------------------------------------------------------------------
// rbd_tree - recursive procedure to build a bd-tree
//
// This is analogous to rkd_tree, but for bd-trees. See the
// procedure rkd_tree() in kd_split.cpp for more information.
//
// If the number of points falls below the bucket size, then a
// leaf node is created for the points. Otherwise we invoke the
// procedure selectDecomp() which determines whether we are to
// split or shrink. If splitting is chosen, then we essentially
// do exactly as rkd_tree() would, and invoke the specified
// splitting procedure to the points. Otherwise, the selection
// procedure returns a bounding box, from which we extract the
// appropriate shrinking bounds, and create a shrinking node.
// Finally the points are subdivided, and the procedure is
// invoked recursively on the two subsets to form the children.
//----------------------------------------------------------------------
ANNkd_ptr rbd_tree( // recursive construction of bd-tree
 ANNpointArray pa, // point array
 ANNidxArray pidx, // point indices to store in subtree
 int n, // number of points
 int dim, // dimension of space
 int bsp, // bucket space
 ANNorthRect &bnd_box, // bounding box for current node
 ANNkd_splitter splitter, // splitting routine
 ANNshrinkRule shrink) // shrinking rule
{
 // See the block comment above for the overall algorithm.  Note that
 // bnd_box is temporarily modified for the recursive calls and fully
 // restored before returning.
 ANNdecomp decomp; // decomposition method
 ANNorthRect inner_box(dim); // inner box (if shrinking)
 if (n <= bsp) { // n small, make a leaf node
 if (n == 0) // empty leaf node
 return KD_TRIVIAL; // return (canonical) empty leaf
 else // construct the node and return
 return new ANNkd_leaf(n, pidx);
 }
 decomp = selectDecomp( // select decomposition method
 pa, pidx, // points and indices
 n, dim, // number of points and dimension
 bnd_box, // current bounding box
 splitter, shrink, // splitting/shrinking methods
 inner_box); // inner box if shrinking (returned)
 if (decomp == SPLIT) { // split selected
 int cd; // cutting dimension
 ANNcoord cv; // cutting value
 int n_lo; // number on low side of cut
 // invoke splitting procedure
 (*splitter)(pa, pidx, bnd_box, n, dim, cd, cv, n_lo);
 ANNcoord lv = bnd_box.lo[cd]; // save bounds for cutting dimension
 ANNcoord hv = bnd_box.hi[cd];
 bnd_box.hi[cd] = cv; // modify bounds for left subtree
 ANNkd_ptr lo = rbd_tree( // build left subtree
 pa, pidx, n_lo, // ...from pidx[0..n_lo-1]
 dim, bsp, bnd_box, splitter, shrink);
 bnd_box.hi[cd] = hv; // restore bounds
 bnd_box.lo[cd] = cv; // modify bounds for right subtree
 ANNkd_ptr hi = rbd_tree( // build right subtree
 pa, pidx + n_lo, n-n_lo,// ...from pidx[n_lo..n-1]
 dim, bsp, bnd_box, splitter, shrink);
 bnd_box.lo[cd] = lv; // restore bounds
 // create the splitting node
 return new ANNkd_split(cd, cv, lv, hv, lo, hi);
 }
 else { // shrink selected
 int n_in; // number of points in box
 int n_bnds; // number of bounding sides
 annBoxSplit( // split points around inner box
 pa, // points to split
 pidx, // point indices
 n, // number of points
 dim, // dimension
 inner_box, // inner box
 n_in); // number of points inside (returned)
 ANNkd_ptr in = rbd_tree( // build inner subtree pidx[0..n_in-1]
 pa, pidx, n_in, dim, bsp, inner_box, splitter, shrink);
 ANNkd_ptr out = rbd_tree( // build outer subtree pidx[n_in..n-1]
 pa, pidx+n_in, n - n_in, dim, bsp, bnd_box, splitter, shrink);
 ANNorthHSArray bnds = NULL; // bounds (alloc in Box2Bnds and
 // ...freed in bd_shrink destroyer)
 annBox2Bnds( // convert inner box to bounds
 inner_box, // inner box
 bnd_box, // enclosing box
 dim, // dimension
 n_bnds, // number of bounds (returned)
 bnds); // bounds array (modified)
 // return shrinking node
 return new ANNbd_shrink(n_bnds, bnds, in, out);
 }
}

View File

@ -0,0 +1,100 @@
//----------------------------------------------------------------------
// File: bd_tree.h
// Programmer: David Mount
// Description: Declarations for standard bd-tree routines
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
// Revision 1.0 04/01/05
// Changed IN, OUT to ANN_IN, ANN_OUT
//----------------------------------------------------------------------
#ifndef ANN_bd_tree_H
#define ANN_bd_tree_H
#include <ann/ANNx.h> // all ANN includes
#include "kd_tree.h" // kd-tree includes
//----------------------------------------------------------------------
// bd-tree shrinking node.
// The main addition in the bd-tree is the shrinking node, which
// is declared here.
//
// Shrinking nodes are defined by list of orthogonal halfspaces.
// These halfspaces define a (possibly unbounded) orthogonal
// rectangle. There are two children, in and out. Points that
// lie within this rectangle are stored in the in-child, and the
// other points are stored in the out-child.
//
// We use a list of orthogonal halfspaces rather than an
// orthogonal rectangle object because typically the number of
// sides of the shrinking box will be much smaller than the
// worst case bound of 2*dim.
//
// BEWARE: Note that constructor just copies the pointer to the
// bounding array, but the destructor deallocates it. This is
// rather poor practice, but happens to be convenient. The list
// is allocated in the bd-tree building procedure rbd_tree() just
// prior to construction, and is used for no other purposes.
//
// WARNING: In the near neighbor searching code it is assumed that
// the list of bounding halfspaces is irredundant, meaning that there
// are no two distinct halfspaces in the list with the same outward
// pointing normals.
//----------------------------------------------------------------------
class ANNbd_shrink : public ANNkd_node // shrinking node of a bd-tree
{
 int n_bnds; // number of bounding halfspaces
 ANNorthHSArray bnds; // list of bounding halfspaces
 ANNkd_ptr child[2]; // in and out children
public:
 ANNbd_shrink( // constructor
 int nb, // number of bounding halfspaces
 ANNorthHSArray bds, // list of bounding halfspaces
 ANNkd_ptr ic=NULL, ANNkd_ptr oc=NULL) // children
 {
 n_bnds = nb; // number of bounding halfspaces
 bnds = bds; // assign bounds (pointer only; see the
 // BEWARE note above -- freed in dtor)
 child[ANN_IN] = ic; // set children
 child[ANN_OUT] = oc;
 }
 ~ANNbd_shrink() // destructor
 {
 // children are destroyed recursively; KD_TRIVIAL is the shared
 // canonical empty leaf and must never be deleted
 if (child[ANN_IN]!= NULL && child[ANN_IN]!= KD_TRIVIAL)
 delete child[ANN_IN];
 if (child[ANN_OUT]!= NULL&& child[ANN_OUT]!= KD_TRIVIAL)
 delete child[ANN_OUT];
 if (bnds != NULL)
 delete [] bnds; // delete bounds
 }
 virtual void getStats( // get tree statistics
 int dim, // dimension of space
 ANNkdStats &st, // statistics
 ANNorthRect &bnd_box); // bounding box
 virtual void print(int level, ostream &out);// print node
 virtual void dump(ostream &out); // dump node
 virtual void ann_search(ANNdist); // standard search
 virtual void ann_pri_search(ANNdist); // priority search
 virtual void ann_FR_search(ANNdist); // fixed-radius search
};
#endif

View File

@ -0,0 +1,109 @@
//----------------------------------------------------------------------
// File: brute.cpp
// Programmer: Sunil Arya and David Mount
// Description: Brute-force nearest neighbors
// Last modified: 05/03/05 (Version 1.1)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
// Revision 1.1 05/03/05
// Added fixed-radius kNN search
//----------------------------------------------------------------------
#include <ann/ANNx.h> // all ANN includes
#include "pr_queue_k.h" // k element priority queue
//----------------------------------------------------------------------
// Brute-force search simply stores a pointer to the list of
// data points and searches linearly for the nearest neighbor.
// The k nearest neighbors are stored in a k-element priority
// queue (which is implemented in a pretty dumb way as well).
//
// If ANN_ALLOW_SELF_MATCH is ANNfalse then data points at distance
// zero are not considered.
//
// Note that the error bound eps is passed in, but it is ignored.
// These routines compute exact nearest neighbors (which is needed
// for validation purposes in ann_test.cpp).
//----------------------------------------------------------------------
ANNbruteForce::ANNbruteForce( // construct from an existing point array
	ANNpointArray pa, // point array (not copied; caller retains ownership)
	int n, // number of points
	int dd) // dimension of the space
{
	// simply record the search parameters; no allocation is done
	dim = dd;
	n_pts = n;
	pts = pa;
}
ANNbruteForce::~ANNbruteForce() { } // destructor (empty: the constructor allocates nothing)
void ANNbruteForce::annkSearch( // approx k near neighbor search
ANNpoint q, // query point
int k, // number of near neighbors to return
ANNidxArray nn_idx, // nearest neighbor indices (returned)
ANNdistArray dd, // dist to near neighbors (returned)
double /*eps*/) // error bound (ignored)
{
ANNmin_k mk(k); // construct a k-limited priority queue
int i;
if (k > n_pts) { // too many near neighbors?
annError("Requesting more near neighbors than data points", ANNabort);
}
// run every point through queue
for (i = 0; i < n_pts; i++) {
// compute distance to point
ANNdist sqDist = annDist(dim, pts[i], q);
if (ANN_ALLOW_SELF_MATCH || sqDist != 0)
mk.insert(sqDist, i);
}
for (i = 0; i < k; i++) { // extract the k closest points
dd[i] = mk.ith_smallest_key(i);
nn_idx[i] = mk.ith_smallest_info(i);
}
}
int ANNbruteForce::annkFRSearch( // approx fixed-radius kNN search
ANNpoint q, // query point
ANNdist sqRad, // squared radius
int k, // number of near neighbors to return
ANNidxArray nn_idx, // nearest neighbor array (returned)
ANNdistArray dd, // dist to near neighbors (returned)
double /*eps*/) // error bound
{
ANNmin_k mk(k); // construct a k-limited priority queue
int i;
int pts_in_range = 0; // number of points in query range
// run every point through queue
for (i = 0; i < n_pts; i++) {
// compute distance to point
ANNdist sqDist = annDist(dim, pts[i], q);
if (sqDist <= sqRad && // within radius bound
(ANN_ALLOW_SELF_MATCH || sqDist != 0)) { // ...and no self match
mk.insert(sqDist, i);
pts_in_range++;
}
}
for (i = 0; i < k; i++) { // extract the k closest points
if (dd != NULL)
dd[i] = mk.ith_smallest_key(i);
if (nn_idx != NULL)
nn_idx[i] = mk.ith_smallest_info(i);
}
return pts_in_range;
}

View File

@ -0,0 +1,444 @@
//----------------------------------------------------------------------
// File: kd_dump.cc
// Programmer: David Mount
// Description: Dump and Load for kd- and bd-trees
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
// Revision 1.0 04/01/05
// Moved dump out of kd_tree.cc into this file.
// Added kd-tree load constructor.
//----------------------------------------------------------------------
// This file contains routines for dumping kd-trees and bd-trees and
// reloading them. (It is an abuse of policy to include both kd- and
// bd-tree routines in the same file, sorry. There should be no problem
// in deleting the bd- versions of the routines if they are not
// desired.)
//----------------------------------------------------------------------
#include "kd_tree.h" // kd-tree declarations
#include "bd_tree.h" // bd-tree declarations
using namespace std; // make std:: available
//----------------------------------------------------------------------
// Constants
//----------------------------------------------------------------------
const int STRING_LEN = 500; // maximum length of an input token/line
const double EPSILON = 1E-5; // small number for float comparison
enum ANNtreeType {KD_TREE, BD_TREE}; // tree types (used in loading)
//----------------------------------------------------------------------
// Procedure declarations (definitions appear later in this file)
//----------------------------------------------------------------------
static ANNkd_ptr annReadDump( // read dump file
istream &in, // input stream
ANNtreeType tree_type, // type of tree expected
ANNpointArray &the_pts, // new points (if applic)
ANNidxArray &the_pidx, // point indices (returned)
int &the_dim, // dimension (returned)
int &the_n_pts, // number of points (returned)
int &the_bkt_size, // bucket size (returned)
ANNpoint &the_bnd_box_lo, // low bounding point
ANNpoint &the_bnd_box_hi); // high bounding point
static ANNkd_ptr annReadTree( // read tree-part of dump file
istream &in, // input stream
ANNtreeType tree_type, // type of tree expected
ANNidxArray the_pidx, // point indices (modified)
int &next_idx); // next index (modified)
//----------------------------------------------------------------------
// ANN kd- and bd-tree Dump Format
// The dump file begins with a header containing the version of
// ANN, an optional section containing the points, followed by
// a description of the tree. The tree is printed in preorder.
//
// Format:
// #ANN <version number> <comments> [END_OF_LINE]
// points <dim> <n_pts> (point coordinates: this is optional)
// 0 <xxx> <xxx> ... <xxx> (point indices and coordinates)
// 1 <xxx> <xxx> ... <xxx>
// ...
// tree <dim> <n_pts> <bkt_size>
// <xxx> <xxx> ... <xxx> (lower end of bounding box)
// <xxx> <xxx> ... <xxx> (upper end of bounding box)
// If the tree is null, then a single line "null" is
// output. Otherwise the nodes of the tree are printed
// one per line in preorder. Leaves and splitting nodes
// have the following formats:
// Leaf node:
// leaf <n_pts> <bkt[0]> <bkt[1]> ... <bkt[n-1]>
// Splitting nodes:
// split <cut_dim> <cut_val> <lo_bound> <hi_bound>
//
// For bd-trees:
//
// Shrinking nodes:
// shrink <n_bnds>
// <cut_dim> <cut_val> <side>
// <cut_dim> <cut_val> <side>
// ... (repeated n_bnds times)
//----------------------------------------------------------------------
void ANNkd_tree::Dump( // dump entire tree to a stream
	ANNbool with_pts, // print point coordinates as well?
	ostream &out) // output stream
{
	out << "#ANN " << ANNversion << "\n";
	// Use full coordinate precision while dumping, remembering the
	// stream's previous precision so it can be restored afterwards.
	// (The original code reset precision to 0 at the end, which
	// clobbers the caller's setting rather than restoring it.)
	streamsize old_prec = out.precision(ANNcoordPrec);
	if (with_pts) { // optional points section
		out << "points " << dim << " " << n_pts << "\n";
		for (int i = 0; i < n_pts; i++) { // one point per line: idx coords...
			out << i << " ";
			annPrintPt(pts[i], dim, out);
			out << "\n";
		}
	}
	out << "tree " // tree header: dim, n_pts, bucket size
		<< dim << " "
		<< n_pts << " "
		<< bkt_size << "\n";
	annPrintPt(bnd_box_lo, dim, out); // lower end of bounding box
	out << "\n";
	annPrintPt(bnd_box_hi, dim, out); // upper end of bounding box
	out << "\n";
	if (root == NULL) // empty tree?
		out << "null\n";
	else {
		root->dump(out); // print nodes in preorder from the root
	}
	out.precision(old_prec); // restore the caller's precision
}
void ANNkd_split::dump( // dump a splitting node in preorder
	ostream &out) // output stream
{
	// format: split <cut_dim> <cut_val> <lo_bound> <hi_bound>
	out << "split " << cut_dim << " " << cut_val << " "
		<< cd_bnds[ANN_LO] << " " << cd_bnds[ANN_HI] << "\n";
	child[ANN_LO]->dump(out); // low child first,
	child[ANN_HI]->dump(out); // then high child
}
void ANNkd_leaf::dump( // dump a leaf node
	ostream &out) // output stream
{
	if (this == KD_TRIVIAL) { // the shared empty-leaf sentinel
		out << "leaf 0\n"; // dumped as a leaf with no points
		return;
	}
	// format: leaf <n_pts> <bkt[0]> <bkt[1]> ... <bkt[n-1]>
	out << "leaf " << n_pts;
	for (int i = 0; i < n_pts; i++) {
		out << " " << bkt[i];
	}
	out << "\n";
}
void ANNbd_shrink::dump( // dump a shrinking node
	ostream &out) // output stream
{
	// format: shrink <n_bnds>, then one bounding halfspace per line
	out << "shrink " << n_bnds << "\n";
	for (int i = 0; i < n_bnds; i++) {
		out << bnds[i].cd << " " << bnds[i].cv << " "
			<< bnds[i].sd << "\n";
	}
	child[ANN_IN]->dump(out); // in-child first,
	child[ANN_OUT]->dump(out); // then out-child
}
//----------------------------------------------------------------------
// Load kd-tree from dump file
// This rebuilds a kd-tree which was dumped to a file. The dump
// file contains all the basic tree information according to a
// preorder traversal. We assume that the dump file also contains
// point data. (This is to guarantee the consistency of the tree.)
// If not, then an error is generated.
//
// Indirectly, this procedure allocates space for points, point
// indices, all nodes in the tree, and the bounding box for the
// tree. When the tree is destroyed, all but the points are
// deallocated.
//
// This routine calls annReadDump to do all the work.
//----------------------------------------------------------------------
ANNkd_tree::ANNkd_tree( // rebuild a kd-tree from a dump file
	istream &in) // input stream for dump file
{
	int load_dim; // dimension read from dump
	int load_n_pts; // number of points read from dump
	int load_bkt_size; // bucket size read from dump
	ANNpoint load_box_lo; // low bounding point
	ANNpoint load_box_hi; // high bounding point
	ANNpointArray load_pts; // point storage
	ANNidxArray load_pidx; // point index storage
	// Parse the dump file: returns the reconstructed root and fills
	// in the tree parameters, points, indices, and bounding box.
	ANNkd_ptr load_root = annReadDump(
		in, // input stream
		KD_TREE, // expecting a kd-tree
		load_pts, // point array (returned)
		load_pidx, // point indices (returned)
		load_dim, load_n_pts, load_bkt_size, // basic tree info (returned)
		load_box_lo, load_box_hi); // bounding box info (returned)
	// build the skeletal tree, then install bounds and root
	SkeletonTree(load_n_pts, load_dim, load_bkt_size, load_pts, load_pidx);
	bnd_box_lo = load_box_lo;
	bnd_box_hi = load_box_hi;
	root = load_root;
}
ANNbd_tree::ANNbd_tree( // rebuild a bd-tree from a dump file
	istream &in) : ANNkd_tree() // input stream for dump file
{
	int load_dim; // dimension read from dump
	int load_n_pts; // number of points read from dump
	int load_bkt_size; // bucket size read from dump
	ANNpoint load_box_lo; // low bounding point
	ANNpoint load_box_hi; // high bounding point
	ANNpointArray load_pts; // point storage
	ANNidxArray load_pidx; // point index storage
	// Parse the dump file: returns the reconstructed root and fills
	// in the tree parameters, points, indices, and bounding box.
	ANNkd_ptr load_root = annReadDump(
		in, // input stream
		BD_TREE, // expecting a bd-tree
		load_pts, // point array (returned)
		load_pidx, // point indices (returned)
		load_dim, load_n_pts, load_bkt_size, // basic tree info (returned)
		load_box_lo, load_box_hi); // bounding box info (returned)
	// build the skeletal tree, then install bounds and root
	SkeletonTree(load_n_pts, load_dim, load_bkt_size, load_pts, load_pidx);
	bnd_box_lo = load_box_lo;
	bnd_box_hi = load_box_hi;
	root = load_root;
}
//----------------------------------------------------------------------
// annReadDump - read a dump file
//
// This procedure reads a dump file, constructs a kd-tree
// and returns all the essential information needed to actually
// construct the tree. Because this procedure is used for
// constructing both kd-trees and bd-trees, the second argument
// is used to indicate which type of tree we are expecting.
//----------------------------------------------------------------------
static ANNkd_ptr annReadDump(
istream &in, // input stream
ANNtreeType tree_type, // type of tree expected
ANNpointArray &the_pts, // new points (returned)
ANNidxArray &the_pidx, // point indices (returned)
int &the_dim, // dimension (returned)
int &the_n_pts, // number of points (returned)
int &the_bkt_size, // bucket size (returned)
ANNpoint &the_bnd_box_lo, // low bounding point (ret'd)
ANNpoint &the_bnd_box_hi) // high bounding point (ret'd)
{
int j;
char str[STRING_LEN]; // storage for input tokens
char version[STRING_LEN]; // ANN version number
ANNkd_ptr the_root = NULL;
//------------------------------------------------------------------
// Input file header: must begin with the token "#ANN"
//------------------------------------------------------------------
in >> str; // input header
if (strcmp(str, "#ANN") != 0) { // incorrect header
annError("Incorrect header for dump file", ANNabort);
}
in.getline(version, STRING_LEN); // get rest of version line (ignored)
//------------------------------------------------------------------
// Input the points
// An array the_pts is allocated and points are read from
// the dump file. The points section is mandatory here (the
// tree cannot be rebuilt consistently without the points).
//------------------------------------------------------------------
in >> str; // get major heading
if (strcmp(str, "points") == 0) { // points section
in >> the_dim; // input dimension
in >> the_n_pts; // number of points
// allocate point storage
the_pts = annAllocPts(the_n_pts, the_dim);
for (int i = 0; i < the_n_pts; i++) { // input point coordinates
ANNidx idx; // point index
in >> idx; // input point index
if (idx < 0 || idx >= the_n_pts) {
annError("Point index is out of range", ANNabort);
}
for (j = 0; j < the_dim; j++) {
in >> the_pts[idx][j]; // read point coordinates
}
}
in >> str; // get next major heading
}
else { // no points were input
annError("Points must be supplied in the dump file", ANNabort);
}
//------------------------------------------------------------------
// Input the tree
// After the basic header information, we invoke annReadTree
// to do all the heavy work. We create our own array of
// point indices (so we can pass them to annReadTree())
// but we do not deallocate them. They will be deallocated
// when the tree is destroyed.
//------------------------------------------------------------------
if (strcmp(str, "tree") == 0) { // tree section
in >> the_dim; // read dimension
in >> the_n_pts; // number of points
in >> the_bkt_size; // bucket size
the_bnd_box_lo = annAllocPt(the_dim); // allocate bounding box pts
the_bnd_box_hi = annAllocPt(the_dim);
for (j = 0; j < the_dim; j++) { // read bounding box low
in >> the_bnd_box_lo[j];
}
for (j = 0; j < the_dim; j++) { // read bounding box high
in >> the_bnd_box_hi[j];
}
the_pidx = new ANNidx[the_n_pts]; // allocate point index array
int next_idx = 0; // number of indices filled so far
// read the tree and indices
the_root = annReadTree(in, tree_type, the_pidx, next_idx);
if (next_idx != the_n_pts) { // didn't see all the points?
annError("Didn't see as many points as expected", ANNwarn);
}
}
else {
annError("Illegal dump format. Expecting section heading", ANNabort);
}
return the_root;
}
//----------------------------------------------------------------------
// annReadTree - input tree and return pointer
//
// annReadTree reads in a node of the tree, makes any recursive
// calls as needed to input the children of this node (if internal).
// It returns a pointer to the node that was created. An array
// of point indices is given along with a pointer to the next
// available location in the array. As leaves are read, their
// point indices are stored here, and the point buckets point
// to the first entry in the array.
//
// Recall that these are the formats. The tree is given in
// preorder.
//
// Leaf node:
// leaf <n_pts> <bkt[0]> <bkt[1]> ... <bkt[n-1]>
// Splitting nodes:
// split <cut_dim> <cut_val> <lo_bound> <hi_bound>
//
// For bd-trees:
//
// Shrinking nodes:
// shrink <n_bnds>
// <cut_dim> <cut_val> <side>
// <cut_dim> <cut_val> <side>
// ... (repeated n_bnds times)
//----------------------------------------------------------------------
static ANNkd_ptr annReadTree(
istream &in, // input stream
ANNtreeType tree_type, // type of tree expected
ANNidxArray the_pidx, // point indices (modified)
int &next_idx) // next free slot in the_pidx (modified)
{
char tag[STRING_LEN]; // tag (null, leaf, split, shrink)
int n_pts; // number of points in leaf
int cd; // cut dimension
ANNcoord cv; // cut value
ANNcoord lb; // low bound
ANNcoord hb; // high bound
int n_bnds; // number of bounding sides
int sd; // which side
in >> tag; // input node tag
if (strcmp(tag, "null") == 0) { // null tree
return NULL;
}
//------------------------------------------------------------------
// Read a leaf: its point indices are appended to the_pidx, and
// the leaf's bucket points at the first slot they occupy.
//------------------------------------------------------------------
if (strcmp(tag, "leaf") == 0) { // leaf node
in >> n_pts; // input number of points
int old_idx = next_idx; // save start of this leaf's indices
if (n_pts == 0) { // trivial leaf
// return the shared empty-leaf sentinel rather than allocating
return KD_TRIVIAL;
}
else {
for (int i = 0; i < n_pts; i++) { // input point indices
in >> the_pidx[next_idx++]; // store in array of indices
}
}
return new ANNkd_leaf(n_pts, &the_pidx[old_idx]);
}
//------------------------------------------------------------------
// Read a splitting node, then recursively read its two subtrees
// (low child first, matching the preorder used by dump).
//------------------------------------------------------------------
else if (strcmp(tag, "split") == 0) { // splitting node
in >> cd >> cv >> lb >> hb;
// read low and high subtrees
ANNkd_ptr lc = annReadTree(in, tree_type, the_pidx, next_idx);
ANNkd_ptr hc = annReadTree(in, tree_type, the_pidx, next_idx);
// create new node and return
return new ANNkd_split(cd, cv, lb, hb, lc, hc);
}
//------------------------------------------------------------------
// Read a shrinking node (bd-tree only): n_bnds bounding
// halfspaces followed by the inner and outer subtrees.
//------------------------------------------------------------------
else if (strcmp(tag, "shrink") == 0) { // shrinking node
if (tree_type != BD_TREE) {
annError("Shrinking node not allowed in kd-tree", ANNabort);
}
in >> n_bnds; // number of bounding sides
// allocate bounds array (ownership passes to the ANNbd_shrink)
ANNorthHSArray bds = new ANNorthHalfSpace[n_bnds];
for (int i = 0; i < n_bnds; i++) {
in >> cd >> cv >> sd; // input bounding halfspace
// copy to array
bds[i] = ANNorthHalfSpace(cd, cv, sd);
}
// read inner and outer subtrees
ANNkd_ptr ic = annReadTree(in, tree_type, the_pidx, next_idx);
ANNkd_ptr oc = annReadTree(in, tree_type, the_pidx, next_idx);
// create new node and return
return new ANNbd_shrink(n_bnds, bds, ic, oc);
}
else {
annError("Illegal node type in dump file", ANNabort);
exit(0); // not reached if annError aborts; keeps the compiler happy
}
}

View File

@ -0,0 +1,183 @@
//----------------------------------------------------------------------
// File: kd_fix_rad_search.cpp
// Programmer: Sunil Arya and David Mount
// Description: Standard kd-tree fixed-radius kNN search
// Last modified: 05/03/05 (Version 1.1)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 1.1 05/03/05
// Initial release
//----------------------------------------------------------------------
#include "kd_fix_rad_search.h" // kd fixed-radius search decls
//----------------------------------------------------------------------
// Approximate fixed-radius k nearest neighbor search
// The squared radius is provided, and this procedure finds the
// k nearest neighbors within the radius, and returns the total
// number of points lying within the radius.
//
// The method used for searching the kd-tree is a variation of the
// nearest neighbor search used in kd_search.cpp, except that the
// radius of the search ball is known. We refer the reader to that
// file for the explanation of the recursive search procedure.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// To keep argument lists short, a number of global variables
// are maintained which are common to all the recursive calls.
// These are given below.
//----------------------------------------------------------------------
int ANNkdFRDim; // dimension of space
ANNpoint ANNkdFRQ; // query point
ANNdist ANNkdFRSqRad; // squared radius search bound
double ANNkdFRMaxErr; // max tolerable squared error factor
ANNpointArray ANNkdFRPts; // the data points
ANNmin_k* ANNkdFRPointMK; // set of k closest points found so far
int ANNkdFRPtsVisited; // total points visited (for early termination)
int ANNkdFRPtsInRange; // number of points found inside the radius
//----------------------------------------------------------------------
// annkFRSearch - fixed radius search for k nearest neighbors
//----------------------------------------------------------------------
int ANNkd_tree::annkFRSearch(
ANNpoint q, // the query point
ANNdist sqRad, // squared radius search bound
int k, // number of near neighbors to return
ANNidxArray nn_idx, // nearest neighbor indices (returned; may be NULL)
ANNdistArray dd, // dists to near neighbors (returned; may be NULL)
double eps) // the error bound
{
ANNkdFRDim = dim; // copy arguments to global equivalents,
ANNkdFRQ = q; // shared by the recursive search routines
ANNkdFRSqRad = sqRad;
ANNkdFRPts = pts;
ANNkdFRPtsVisited = 0; // initialize count of points visited
ANNkdFRPtsInRange = 0; // ...and points in the range
ANNkdFRMaxErr = ANN_POW(1.0 + eps);
ANN_FLOP(2) // increment floating op count
ANNkdFRPointMK = new ANNmin_k(k); // create set for closest k points
// search starting at the root, seeded with distance to the root box
root->ann_FR_search(annBoxDistance(q, bnd_box_lo, bnd_box_hi, dim));
for (int i = 0; i < k; i++) { // extract the k closest points
if (dd != NULL)
dd[i] = ANNkdFRPointMK->ith_smallest_key(i);
if (nn_idx != NULL)
nn_idx[i] = ANNkdFRPointMK->ith_smallest_info(i);
}
delete ANNkdFRPointMK; // deallocate closest point set
return ANNkdFRPtsInRange; // return count of points within the radius
}
//----------------------------------------------------------------------
// kd_split::ann_FR_search - search a splitting node
// Note: This routine is similar in structure to the standard kNN
// search. It visits the subtree that is closer to the query point
// first. For fixed-radius search, there is no benefit in visiting
// one subtree before the other, but we maintain the same basic
// code structure for the sake of uniformity.
//----------------------------------------------------------------------
void ANNkd_split::ann_FR_search(ANNdist box_dist)
{
// terminate early if the visit budget has been exceeded
if (ANNmaxPtsVisited != 0 && ANNkdFRPtsVisited > ANNmaxPtsVisited) return;
// signed distance from query to the cutting plane
ANNcoord cut_diff = ANNkdFRQ[cut_dim] - cut_val;
if (cut_diff < 0) { // left of cutting plane
child[ANN_LO]->ann_FR_search(box_dist);// visit closer child first
ANNcoord box_diff = cd_bnds[ANN_LO] - ANNkdFRQ[cut_dim];
if (box_diff < 0) // within bounds - ignore
box_diff = 0;
// incrementally update squared distance to the further box:
// replace the old contribution along cut_dim with the new one
box_dist = (ANNdist) ANN_SUM(box_dist,
ANN_DIFF(ANN_POW(box_diff), ANN_POW(cut_diff)));
// visit further child only if its box intersects the search ball
if (box_dist * ANNkdFRMaxErr <= ANNkdFRSqRad)
child[ANN_HI]->ann_FR_search(box_dist);
}
else { // right of cutting plane
child[ANN_HI]->ann_FR_search(box_dist);// visit closer child first
ANNcoord box_diff = ANNkdFRQ[cut_dim] - cd_bnds[ANN_HI];
if (box_diff < 0) // within bounds - ignore
box_diff = 0;
// incrementally update squared distance to the further box
box_dist = (ANNdist) ANN_SUM(box_dist,
ANN_DIFF(ANN_POW(box_diff), ANN_POW(cut_diff)));
// visit further child only if its box intersects the search ball
if (box_dist * ANNkdFRMaxErr <= ANNkdFRSqRad)
child[ANN_LO]->ann_FR_search(box_dist);
}
ANN_FLOP(13) // increment floating ops
ANN_SPL(1) // one more splitting node visited
}
//----------------------------------------------------------------------
// kd_leaf::ann_FR_search - search points in a leaf node
// Note: The unreadability of this code is the result of
// some fine tuning to replace indexing by pointer operations.
//----------------------------------------------------------------------
void ANNkd_leaf::ann_FR_search(ANNdist /*box_dist*/)
{
	// Linearly scan the points of this bucket.  The "register"
	// storage class used by the original has been dropped: it is
	// deprecated since C++11 and removed in C++17, and compilers
	// ignore it; the pointer-walking fine tuning is kept intact.
	ANNdist dist; // running squared distance to data point
	ANNcoord* pp; // data coordinate pointer
	ANNcoord* qq; // query coordinate pointer
	ANNcoord t; // coordinate difference
	int d; // coordinate index
	for (int i = 0; i < n_pts; i++) { // check points in bucket
		pp = ANNkdFRPts[bkt[i]]; // first coord of next data point
		qq = ANNkdFRQ; // first coord of query point
		dist = 0;
		for (d = 0; d < ANNkdFRDim; d++) {
			ANN_COORD(1) // one more coordinate hit
			ANN_FLOP(5) // increment floating ops
			t = *(qq++) - *(pp++); // coordinate difference; advance pointers
			// bail out early once the partial sum exceeds the radius
			if ((dist = ANN_SUM(dist, ANN_POW(t))) > ANNkdFRSqRad) {
				break;
			}
		}
		if (d >= ANNkdFRDim && // full distance computed => in range
			(ANN_ALLOW_SELF_MATCH || dist != 0)) { // and no self-match problem
			// record the point among the k closest
			ANNkdFRPointMK->insert(dist, bkt[i]);
			ANNkdFRPtsInRange++; // one more point inside the radius
		}
	}
	ANN_LEAF(1) // one more leaf node visited
	ANN_PTS(n_pts) // increment points visited
	ANNkdFRPtsVisited += n_pts; // count toward the visit budget
}

View File

@ -0,0 +1,44 @@
//----------------------------------------------------------------------
// File: kd_fix_rad_search.h
// Programmer: Sunil Arya and David Mount
// Description: Standard kd-tree fixed-radius kNN search
// Last modified: 05/03/05 (Version 1.1)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 1.1 05/03/05
// Initial release
//----------------------------------------------------------------------
#ifndef ANN_kd_fix_rad_search_H
#define ANN_kd_fix_rad_search_H
#include "kd_tree.h" // kd-tree declarations
#include "kd_util.h" // kd-tree utilities
#include "pr_queue_k.h" // k-element priority queue
#include <ann/ANNperf.h> // performance evaluation
//----------------------------------------------------------------------
// Global variables
// These are active for the life of each call to
// annRangeSearch(). They are set to save the number of
// variables that need to be passed among the various search
// procedures.
//----------------------------------------------------------------------
extern ANNpoint ANNkdFRQ; // query point (static copy)
#endif

View File

@ -0,0 +1,221 @@
//----------------------------------------------------------------------
// File: kd_pr_search.cpp
// Programmer: Sunil Arya and David Mount
// Description: Priority search for kd-trees
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
//----------------------------------------------------------------------
#include "kd_pr_search.h" // kd priority search declarations
//----------------------------------------------------------------------
// Approximate nearest neighbor searching by priority search.
// The kd-tree is searched for an approximate nearest neighbor.
// The point is returned through one of the arguments, and the
// distance returned is the SQUARED distance to this point.
//
// The method used for searching the kd-tree is called priority
// search. (It is described in Arya and Mount, ``Algorithms for
// fast vector quantization,'' Proc. of DCC '93: Data Compression
// Conference}, eds. J. A. Storer and M. Cohn, IEEE Press, 1993,
// 381--390.)
//
// The cell of the kd-tree containing the query point is located,
// and cells are visited in increasing order of distance from the
// query point. This is done by placing each subtree which has
// NOT been visited in a priority queue, according to the closest
// distance of the corresponding enclosing rectangle from the
// query point. The search stops when the distance to the nearest
// remaining rectangle exceeds the distance to the nearest point
// seen by a factor of more than 1/(1+eps). (Implying that any
// point found subsequently in the search cannot be closer by more
// than this factor.)
//
// The main entry point is annkPriSearch() which sets things up and
// then call the recursive routine ann_pri_search(). This is a
// recursive routine which performs the processing for one node in
// the kd-tree. There are two versions of this virtual procedure,
// one for splitting nodes and one for leaves. When a splitting node
// is visited, we determine which child to continue the search on
// (the closer one), and insert the other child into the priority
// queue. When a leaf is visited, we compute the distances to the
// points in the buckets, and update information on the closest
// points.
//
// Some trickery is used to incrementally update the distance from
// a kd-tree rectangle to the query point. This comes about from
// the fact that which each successive split, only one component
// (along the dimension that is split) of the squared distance to
// the child rectangle is different from the squared distance to
// the parent rectangle.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// To keep argument lists short, a number of global variables
// are maintained which are common to all the recursive calls.
// These are given below.
//----------------------------------------------------------------------
double			ANNprEps;				// the error bound
										// NOTE(review): never assigned or read
										// in the visible search code; the
										// bound is carried by ANNprMaxErr --
										// confirm before relying on it
int				ANNprDim;				// dimension of space
ANNpoint		ANNprQ;					// query point
double			ANNprMaxErr;			// max tolerable squared error
										// ((1+eps)^2, set per query)
ANNpointArray	ANNprPts;				// the points
ANNpr_queue		*ANNprBoxPQ;			// priority queue for boxes
ANNmin_k		*ANNprPointMK;			// set of k closest points
//----------------------------------------------------------------------
// annkPriSearch - priority search for k nearest neighbors
//----------------------------------------------------------------------
void ANNkd_tree::annkPriSearch(
ANNpoint q, // query point
int k, // number of near neighbors to return
ANNidxArray nn_idx, // nearest neighbor indices (returned)
ANNdistArray dd, // dist to near neighbors (returned)
double eps) // error bound (ignored)
{
// max tolerable squared error
ANNprMaxErr = ANN_POW(1.0 + eps);
ANN_FLOP(2) // increment floating ops
ANNprDim = dim; // copy arguments to static equivs
ANNprQ = q;
ANNprPts = pts;
ANNptsVisited = 0; // initialize count of points visited
ANNprPointMK = new ANNmin_k(k); // create set for closest k points
// distance to root box
ANNdist box_dist = annBoxDistance(q,
bnd_box_lo, bnd_box_hi, dim);
ANNprBoxPQ = new ANNpr_queue(n_pts);// create priority queue for boxes
ANNprBoxPQ->insert(box_dist, root); // insert root in priority queue
while (ANNprBoxPQ->non_empty() &&
(!(ANNmaxPtsVisited != 0 && ANNptsVisited > ANNmaxPtsVisited))) {
ANNkd_ptr np; // next box from prior queue
PQinfo pq;
// extract closest box from queue
ANNprBoxPQ->extr_min(box_dist, pq);
np = reinterpret_cast<ANNkd_node*>(pq);
ANN_FLOP(2) // increment floating ops
if (box_dist*ANNprMaxErr >= ANNprPointMK->max_key())
break;
np->ann_pri_search(box_dist); // search this subtree.
}
for (int i = 0; i < k; i++) { // extract the k-th closest points
dd[i] = ANNprPointMK->ith_smallest_key(i);
nn_idx[i] = ANNprPointMK->ith_smallest_info(i);
}
delete ANNprPointMK; // deallocate closest point set
delete ANNprBoxPQ; // deallocate priority queue
}
//----------------------------------------------------------------------
// kd_split::ann_pri_search - search a splitting node
//----------------------------------------------------------------------
// kd_split::ann_pri_search - search a splitting node.
//		The child on the query point's side of the cutting plane is
//		searched immediately; the other child (if nontrivial) is placed
//		on the global box priority queue, keyed by its incrementally
//		updated squared distance from the query point.
void ANNkd_split::ann_pri_search(ANNdist box_dist)
{
										// signed offset from cutting plane
	ANNcoord cut_diff = ANNprQ[cut_dim] - cut_val;
										// which child contains the query?
	const int near_side = (cut_diff < 0) ? ANN_LO : ANN_HI;
	const int far_side  = (cut_diff < 0) ? ANN_HI : ANN_LO;
										// offset from the far child's box
										// along the cutting dimension
	ANNcoord box_diff = (near_side == ANN_LO) ?
			cd_bnds[ANN_LO] - ANNprQ[cut_dim] :
			ANNprQ[cut_dim] - cd_bnds[ANN_HI];
	if (box_diff < 0)					// query within slab - no offset
		box_diff = 0;
										// incremental distance to far box:
										// replace the old component of the
										// squared distance with the new one
	ANNdist far_dist = (ANNdist) ANN_SUM(box_dist,
			ANN_DIFF(ANN_POW(box_diff), ANN_POW(cut_diff)));
	if (child[far_side] != KD_TRIVIAL)	// enqueue far child if not trivial
		ANNprBoxPQ->insert(far_dist, child[far_side]);
										// continue with the closer child
	child[near_side]->ann_pri_search(box_dist);
	ANN_SPL(1)							// one more splitting node visited
	ANN_FLOP(8)							// increment floating ops
}
//----------------------------------------------------------------------
// kd_leaf::ann_pri_search - search points in a leaf node
//
// This is virtually identical to the ann_search for standard search.
//----------------------------------------------------------------------
// Scans every point in this leaf's bucket, maintaining the k closest
// points seen so far in the global ANNprPointMK set.  The inner loop is
// deliberately written with raw pointer increments (see the note on the
// standard-search version) and bails out of a distance computation as
// soon as the partial sum exceeds the current k-th best distance.
void ANNkd_leaf::ann_pri_search(ANNdist /*box_dist*/)
{
	register ANNdist dist;				// distance to data point
	register ANNcoord* pp;				// data coordinate pointer
	register ANNcoord* qq;				// query coordinate pointer
	register ANNdist min_dist;			// distance to k-th closest point
	register ANNcoord t;				// one coordinate difference
	register int d;						// coords processed for current point
	min_dist = ANNprPointMK->max_key();	// k-th smallest distance so far
	for (int i = 0; i < n_pts; i++) {	// check points in bucket
		pp = ANNprPts[bkt[i]];			// first coord of next data point
		qq = ANNprQ;					// first coord of query point
		dist = 0;
		for(d = 0; d < ANNprDim; d++) {
			ANN_COORD(1)				// one more coordinate hit
			ANN_FLOP(4)					// increment floating ops
			t = *(qq++) - *(pp++);		// compute length and adv coordinate
										// exceeds dist to k-th smallest?
										// (partial sums only grow, so the
										// point can be rejected early)
			if( (dist = ANN_SUM(dist, ANN_POW(t))) > min_dist) {
				break;
			}
		}
										// d == ANNprDim iff the loop above
										// ran to completion, i.e. dist is
										// the full squared distance
		if (d >= ANNprDim &&					// among the k best?
			(ANN_ALLOW_SELF_MATCH || dist!=0)) { // and no self-match problem
										// add it to the list
			ANNprPointMK->insert(dist, bkt[i]);
			min_dist = ANNprPointMK->max_key();	// new k-th best distance
		}
	}
	ANN_LEAF(1)							// one more leaf node visited
	ANN_PTS(n_pts)						// increment points visited
	ANNptsVisited += n_pts;				// increment number of points visited
}

View File

@ -0,0 +1,49 @@
//----------------------------------------------------------------------
// File: kd_pr_search.h
// Programmer: Sunil Arya and David Mount
// Description: Priority kd-tree search
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
//----------------------------------------------------------------------
#ifndef ANN_kd_pr_search_H
#define ANN_kd_pr_search_H
#include "kd_tree.h" // kd-tree declarations
#include "kd_util.h" // kd-tree utilities
#include "pr_queue.h" // priority queue declarations
#include "pr_queue_k.h" // k-element priority queue
#include <ann/ANNperf.h> // performance evaluation
//----------------------------------------------------------------------
// Global variables
// Active for the life of each call to Appx_Near_Neigh() or
// Appx_k_Near_Neigh().
//----------------------------------------------------------------------
extern double			ANNprEps;		// the error bound
										// NOTE(review): declared here but the
										// corresponding .cpp never assigns or
										// reads it; ANNprMaxErr carries the
										// bound -- confirm before using
extern int				ANNprDim;		// dimension of space
extern ANNpoint			ANNprQ;			// query point
extern double			ANNprMaxErr;	// max tolerable squared error
extern ANNpointArray	ANNprPts;		// the points
extern ANNpr_queue		*ANNprBoxPQ;	// priority queue for boxes
extern ANNmin_k			*ANNprPointMK;	// set of k closest points
#endif

View File

@ -0,0 +1,210 @@
//----------------------------------------------------------------------
// File: kd_search.cpp
// Programmer: Sunil Arya and David Mount
// Description: Standard kd-tree search
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
// Revision 1.0 04/01/05
// Changed names LO, HI to ANN_LO, ANN_HI
//----------------------------------------------------------------------
#include "kd_search.h" // kd-search declarations
//----------------------------------------------------------------------
// Approximate nearest neighbor searching by kd-tree search
// The kd-tree is searched for an approximate nearest neighbor.
// The point is returned through one of the arguments, and the
// distance returned is the squared distance to this point.
//
// The method used for searching the kd-tree is an approximate
// adaptation of the search algorithm described by Friedman,
// Bentley, and Finkel, ``An algorithm for finding best matches
// in logarithmic expected time,'' ACM Transactions on Mathematical
// Software, 3(3):209-226, 1977).
//
// The algorithm operates recursively. When first encountering a
// node of the kd-tree we first visit the child which is closest to
// the query point. On return, we decide whether we want to visit
// the other child. If the box containing the other child exceeds
// 1/(1+eps) times the current best distance, then we skip it (since
// any point found in this child cannot be closer to the query point
// by more than this factor.) Otherwise, we visit it recursively.
// The distance between a box and the query point is computed exactly
// (not approximated as is often done in kd-tree), using incremental
// distance updates, as described by Arya and Mount in ``Algorithms
// for fast vector quantization,'' Proc. of DCC '93: Data Compression
// Conference, eds. J. A. Storer and M. Cohn, IEEE Press, 1993,
// 381-390.
//
// The main entry points is annkSearch() which sets things up and
// then call the recursive routine ann_search(). This is a recursive
// routine which performs the processing for one node in the kd-tree.
// There are two versions of this virtual procedure, one for splitting
// nodes and one for leaves. When a splitting node is visited, we
// determine which child to visit first (the closer one), and visit
// the other child on return. When a leaf is visited, we compute
// the distances to the points in the buckets, and update information
// on the closest points.
//
// Some trickery is used to incrementally update the distance from
// a kd-tree rectangle to the query point. This comes about from
// the fact that which each successive split, only one component
// (along the dimension that is split) of the squared distance to
// the child rectangle is different from the squared distance to
// the parent rectangle.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// To keep argument lists short, a number of global variables
// are maintained which are common to all the recursive calls.
// These are given below.
//----------------------------------------------------------------------
// Shared mutable state for a single annkSearch() invocation; because the
// recursion communicates through these globals, concurrent queries on the
// same process are not safe.
int				ANNkdDim;				// dimension of space
ANNpoint		ANNkdQ;					// query point
double			ANNkdMaxErr;			// max tolerable squared error
										// ((1+eps)^2, set per query)
ANNpointArray	ANNkdPts;				// the points
ANNmin_k		*ANNkdPointMK;			// set of k closest points
//----------------------------------------------------------------------
// annkSearch - search for the k nearest neighbors
//----------------------------------------------------------------------
// annkSearch - search for the k nearest neighbors of q, writing the
//		(squared) distances into dd[0..k-1] and the matching point
//		indices into nn_idx[0..k-1], both in increasing distance order.
void ANNkd_tree::annkSearch(
	ANNpoint			q,				// the query point
	int					k,				// number of near neighbors to return
	ANNidxArray			nn_idx,			// nearest neighbor indices (returned)
	ANNdistArray		dd,				// dist to near neighbors (returned)
	double				eps)			// the error bound
{
	if (k > n_pts) {					// cannot return more neighbors
										// than there are data points
		annError("Requesting more near neighbors than data points", ANNabort);
	}
	ANNkdDim = dim;						// stash arguments in the globals
	ANNkdQ = q;							// used by the recursive search
	ANNkdPts = pts;
	ANNkdMaxErr = ANN_POW(1.0 + eps);	// max tolerable squared error
	ANN_FLOP(2)							// increment floating op count
	ANNptsVisited = 0;					// no points visited yet
	ANNkdPointMK = new ANNmin_k(k);		// holds the k best points found
										// recurse from the root, seeded with
										// the distance to the root box
	root->ann_search(annBoxDistance(q, bnd_box_lo, bnd_box_hi, dim));
	for (int j = 0; j < k; j++) {		// unload the k best, in order
		dd[j] = ANNkdPointMK->ith_smallest_key(j);
		nn_idx[j] = ANNkdPointMK->ith_smallest_info(j);
	}
	delete ANNkdPointMK;				// deallocate closest point set
}
//----------------------------------------------------------------------
// kd_split::ann_search - search a splitting node
//----------------------------------------------------------------------
// kd_split::ann_search - search a splitting node.
//		Recurses first into the child on the query's side of the cutting
//		plane, then visits the other child only if its box is close
//		enough (within the eps-scaled k-th best distance) to matter.
void ANNkd_split::ann_search(ANNdist box_dist)
{
										// bail out once the optional visit
										// budget has been exhausted
	if (ANNmaxPtsVisited != 0 && ANNptsVisited > ANNmaxPtsVisited) return;
										// signed offset from cutting plane
	ANNcoord cut_diff = ANNkdQ[cut_dim] - cut_val;
										// which child contains the query?
	const int near_side = (cut_diff < 0) ? ANN_LO : ANN_HI;
	const int far_side  = (cut_diff < 0) ? ANN_HI : ANN_LO;
	child[near_side]->ann_search(box_dist);	// visit closer child first
										// offset from the far child's box
										// along the cutting dimension
	ANNcoord box_diff = (near_side == ANN_LO) ?
			cd_bnds[ANN_LO] - ANNkdQ[cut_dim] :
			ANNkdQ[cut_dim] - cd_bnds[ANN_HI];
	if (box_diff < 0)					// query within slab - no offset
		box_diff = 0;
										// incremental distance to far box:
										// replace the old component of the
										// squared distance with the new one
	box_dist = (ANNdist) ANN_SUM(box_dist,
			ANN_DIFF(ANN_POW(box_diff), ANN_POW(cut_diff)));
										// visit far child only if its box
										// is close enough to matter
	if (box_dist * ANNkdMaxErr < ANNkdPointMK->max_key())
		child[far_side]->ann_search(box_dist);
	ANN_FLOP(10)						// increment floating ops
	ANN_SPL(1)							// one more splitting node visited
}
//----------------------------------------------------------------------
// kd_leaf::ann_search - search points in a leaf node
// Note: The unreadability of this code is the result of
// some fine tuning to replace indexing by pointer operations.
//----------------------------------------------------------------------
// Scans every point in this leaf's bucket, maintaining the k closest
// points seen so far in the global ANNkdPointMK set.  Per the note
// above, the pointer-increment style is deliberate tuning; the distance
// loop breaks out early once the partial sum exceeds the current k-th
// best distance.
void ANNkd_leaf::ann_search(ANNdist /*box_dist*/)
{
	register ANNdist dist;				// distance to data point
	register ANNcoord* pp;				// data coordinate pointer
	register ANNcoord* qq;				// query coordinate pointer
	register ANNdist min_dist;			// distance to k-th closest point
	register ANNcoord t;				// one coordinate difference
	register int d;						// coords processed for current point
	min_dist = ANNkdPointMK->max_key();	// k-th smallest distance so far
	for (int i = 0; i < n_pts; i++) {	// check points in bucket
		pp = ANNkdPts[bkt[i]];			// first coord of next data point
		qq = ANNkdQ;					// first coord of query point
		dist = 0;
		for(d = 0; d < ANNkdDim; d++) {
			ANN_COORD(1)				// one more coordinate hit
			ANN_FLOP(4)					// increment floating ops
			t = *(qq++) - *(pp++);		// compute length and adv coordinate
										// exceeds dist to k-th smallest?
										// (partial sums only grow, so the
										// point can be rejected early)
			if( (dist = ANN_SUM(dist, ANN_POW(t))) > min_dist) {
				break;
			}
		}
										// d == ANNkdDim iff the loop above
										// ran to completion, i.e. dist is
										// the full squared distance
		if (d >= ANNkdDim &&					// among the k best?
			(ANN_ALLOW_SELF_MATCH || dist!=0)) { // and no self-match problem
										// add it to the list
			ANNkdPointMK->insert(dist, bkt[i]);
			min_dist = ANNkdPointMK->max_key();	// new k-th best distance
		}
	}
	ANN_LEAF(1)							// one more leaf node visited
	ANN_PTS(n_pts)						// increment points visited
	ANNptsVisited += n_pts;				// increment number of points visited
}

View File

@ -0,0 +1,48 @@
//----------------------------------------------------------------------
// File: kd_search.h
// Programmer: Sunil Arya and David Mount
// Description: Standard kd-tree search
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
//----------------------------------------------------------------------
#ifndef ANN_kd_search_H
#define ANN_kd_search_H
#include "kd_tree.h" // kd-tree declarations
#include "kd_util.h" // kd-tree utilities
#include "pr_queue_k.h" // k-element priority queue
#include <ann/ANNperf.h> // performance evaluation
//----------------------------------------------------------------------
// More global variables
// These are active for the life of each call to annkSearch(). They
// are set to save the number of variables that need to be passed
// among the various search procedures.
//----------------------------------------------------------------------
extern int				ANNkdDim;		// dimension of space (static copy)
extern ANNpoint			ANNkdQ;			// query point (static copy)
extern double			ANNkdMaxErr;	// max tolerable squared error
extern ANNpointArray	ANNkdPts;		// the points (static copy)
extern ANNmin_k			*ANNkdPointMK;	// set of k closest points
extern int				ANNptsVisited;	// number of points visited (also
										// updated by the priority search)
#endif

View File

@ -0,0 +1,428 @@
//----------------------------------------------------------------------
// File: kd_split.cpp
// Programmer: Sunil Arya and David Mount
// Description: Methods for splitting kd-trees
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
// Revision 1.0 04/01/05
//----------------------------------------------------------------------
#include "kd_tree.h" // kd-tree definitions
#include "kd_util.h" // kd-tree utilities
#include "kd_split.h" // splitting functions
//----------------------------------------------------------------------
// Constants
//----------------------------------------------------------------------
const double ERR				= 0.001;	// relative tolerance used when
											// deciding whether a box side
											// counts as "longest"
const double FS_ASPECT_RATIO	= 3.0;		// maximum allowed aspect ratio
											// in fair split. Must be >= 2.
//----------------------------------------------------------------------
// kd_split - Bentley's standard splitting routine for kd-trees
// Find the dimension of the greatest spread, and split
// just before the median point along this dimension.
//----------------------------------------------------------------------
// kd_split - Bentley's standard kd-tree splitting rule.
//		Chooses the dimension in which the points are most spread out,
//		and splits the point set at the median along that dimension.
void kd_split(
	ANNpointArray		pa,				// point array (permuted on return)
	ANNidxArray			pidx,			// point indices
	const ANNorthRect	&/*bnds*/,		// bounding rectangle (unused here)
	int					n,				// number of points
	int					dim,			// dimension of space
	int					&cut_dim,		// cutting dimension (returned)
	ANNcoord			&cut_val,		// cutting value (returned)
	int					&n_lo)			// num of points on low side (returned)
{
	n_lo = n/2;							// request an even (median) split
										// cut along the dimension of
										// maximum point spread
	cut_dim = annMaxSpread(pa, pidx, n, dim);
										// partition the points about the
										// median along cut_dim
	annMedianSplit(pa, pidx, n, cut_dim, cut_val, n_lo);
}
//----------------------------------------------------------------------
// midpt_split - midpoint splitting rule for box-decomposition trees
//
// This is the simplest splitting rule that guarantees boxes
// of bounded aspect ratio. It simply cuts the box with the
// longest side through its midpoint. If there are ties, it
// selects the dimension with the maximum point spread.
//
// WARNING: This routine (while simple) doesn't seem to work
// well in practice in high dimensions, because it tends to
// generate a large number of trivial and/or unbalanced splits.
// Either kd_split(), sl_midpt_split(), or fair_split() are
// recommended, instead.
//----------------------------------------------------------------------
// midpt_split - midpoint splitting rule.
//		Bisects the longest box side (ties broken by maximum point
//		spread), then places n_lo as close to n/2 as the resulting
//		partition allows.
void midpt_split(
	ANNpointArray		pa,				// point array
	ANNidxArray			pidx,			// point indices (permuted on return)
	const ANNorthRect	&bnds,			// bounding rectangle for cell
	int					n,				// number of points
	int					dim,			// dimension of space
	int					&cut_dim,		// cutting dimension (returned)
	ANNcoord			&cut_val,		// cutting value (returned)
	int					&n_lo)			// num of points on low side (returned)
{
	int i;
										// length of the longest box side
	ANNcoord max_length = bnds.hi[0] - bnds.lo[0];
	for (i = 1; i < dim; i++) {
		ANNcoord side = bnds.hi[i] - bnds.lo[i];
		if (side > max_length) max_length = side;
	}
										// among (near-)longest sides, pick
										// the one with the largest spread
	ANNcoord best_spread = -1;
	for (i = 0; i < dim; i++) {
		if (double(bnds.hi[i] - bnds.lo[i]) >= (1-ERR)*max_length) {
			ANNcoord spr = annSpread(pa, pidx, n, i);
			if (spr > best_spread) {
				best_spread = spr;
				cut_dim = i;
			}
		}
	}
										// cut at the midpoint of that side
	cut_val = (bnds.lo[cut_dim] + bnds.hi[cut_dim]) / 2;
	int br1, br2;						// partition points about cut_val:
										//   pa[0..br1-1]   <  cut_val
										//   pa[br1..br2-1] == cut_val
										//   pa[br2..n-1]   >  cut_val
	annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
										// any n_lo in [br1..br2] is legal;
										// choose the value closest to n/2
										// so the split is most balanced
	if (br1 > n/2)			n_lo = br1;
	else if (br2 < n/2)		n_lo = br2;
	else					n_lo = n/2;
}
//----------------------------------------------------------------------
// sl_midpt_split - sliding midpoint splitting rule
//
// This is a modification of midpt_split, which has the nonsensical
// name "sliding midpoint". The idea is that we try to use the
// midpoint rule, by bisecting the longest side. If there are
// ties, the dimension with the maximum spread is selected. If,
// however, the midpoint split produces a trivial split (no points
// on one side of the splitting plane) then we slide the splitting
// (maintaining its orientation) until it produces a nontrivial
// split. For example, if the splitting plane is along the x-axis,
// and all the data points have x-coordinate less than the x-bisector,
// then the split is taken along the maximum x-coordinate of the
// data points.
//
// Intuitively, this rule cannot generate trivial splits, and
// hence avoids midpt_split's tendency to produce trees with
// a very large number of nodes.
//
//----------------------------------------------------------------------
// sl_midpt_split - sliding midpoint splitting rule.
//		Like midpt_split, but if the midpoint cut would leave all points
//		on one side, the cut slides to the extreme coordinate of the
//		point set so that the split is never trivial.
void sl_midpt_split(
	ANNpointArray		pa,				// point array
	ANNidxArray			pidx,			// point indices (permuted on return)
	const ANNorthRect	&bnds,			// bounding rectangle for cell
	int					n,				// number of points
	int					dim,			// dimension of space
	int					&cut_dim,		// cutting dimension (returned)
	ANNcoord			&cut_val,		// cutting value (returned)
	int					&n_lo)			// num of points on low side (returned)
{
	int i;
										// length of the longest box side
	ANNcoord max_length = bnds.hi[0] - bnds.lo[0];
	for (i = 1; i < dim; i++) {
		ANNcoord side = bnds.hi[i] - bnds.lo[i];
		if (side > max_length) max_length = side;
	}
										// among (near-)longest sides, pick
										// the one with the largest spread
	ANNcoord best_spread = -1;
	for (i = 0; i < dim; i++) {
		if ((bnds.hi[i] - bnds.lo[i]) >= (1-ERR)*max_length) {
			ANNcoord spr = annSpread(pa, pidx, n, i);
			if (spr > best_spread) {
				best_spread = spr;
				cut_dim = i;
			}
		}
	}
										// ideal cut is the side's midpoint;
										// slide it to the nearest data
										// coordinate if it falls outside
										// the point set's range
	ANNcoord ideal_cut_val = (bnds.lo[cut_dim] + bnds.hi[cut_dim])/2;
	ANNcoord min, max;					// extreme coords along cut_dim
	annMinMax(pa, pidx, n, cut_dim, min, max);
	if (ideal_cut_val < min)		cut_val = min;
	else if (ideal_cut_val > max)	cut_val = max;
	else							cut_val = ideal_cut_val;
	int br1, br2;						// partition points about cut_val:
										//   pa[0..br1-1]   <  cut_val
										//   pa[br1..br2-1] == cut_val
										//   pa[br2..n-1]   >  cut_val
	annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
										// if the cut slid down to min
										// (resp. up to max), leave exactly
										// one point on the left (right);
										// otherwise take the legal split
										// closest to n/2
	if (ideal_cut_val < min)		n_lo = 1;
	else if (ideal_cut_val > max)	n_lo = n-1;
	else if (br1 > n/2)				n_lo = br1;
	else if (br2 < n/2)				n_lo = br2;
	else							n_lo = n/2;
}
//----------------------------------------------------------------------
// fair_split - fair-split splitting rule
//
// This is a compromise between the kd-tree splitting rule (which
// always splits data points at their median) and the midpoint
// splitting rule (which always splits a box through its center.
// The goal of this procedure is to achieve both nicely balanced
// splits, and boxes of bounded aspect ratio.
//
// A constant FS_ASPECT_RATIO is defined. Given a box, those sides
// which can be split so that the ratio of the longest to shortest
// side does not exceed ASPECT_RATIO are identified. Among these
// sides, we select the one in which the points have the largest
// spread. We then split the points in a manner which most evenly
// distributes the points on either side of the splitting plane,
// subject to maintaining the bound on the ratio of long to short
// sides. To determine that the aspect ratio will be preserved,
// we determine the longest side (other than this side), and
// determine how narrowly we can cut this side, without causing the
// aspect ratio bound to be exceeded (small_piece).
//
// This procedure is more robust than either kd_split or midpt_split,
// but is more complicated as well. When point distribution is
// extremely skewed, this degenerates to midpt_split (actually
// 1/3 point split), and when the points are most evenly distributed,
// this degenerates to kd-split.
//----------------------------------------------------------------------
// fair_split - fair-split splitting rule.
//		Picks, among the sides that can be midpoint-split without
//		exceeding FS_ASPECT_RATIO, the one with the largest point
//		spread; then cuts at the median if that keeps the aspect ratio
//		legal, otherwise at the nearest legal extreme cut.
void fair_split(
	ANNpointArray		pa,				// point array
	ANNidxArray			pidx,			// point indices (permuted on return)
	const ANNorthRect	&bnds,			// bounding rectangle for cell
	int					n,				// number of points
	int					dim,			// dimension of space
	int					&cut_dim,		// cutting dimension (returned)
	ANNcoord			&cut_val,		// cutting value (returned)
	int					&n_lo)			// num of points on low side (returned)
{
	int d;
										// find length of longest box side
										// (a dead cut_dim assignment that
										// used to accompany this loop was
										// removed; cut_dim is re-initialized
										// before its first use below)
	ANNcoord max_length = bnds.hi[0] - bnds.lo[0];
	for (d = 1; d < dim; d++) {
		ANNcoord length = bnds.hi[d] - bnds.lo[d];
		if (length > max_length) {
			max_length = length;
		}
	}
	ANNcoord max_spread = 0;			// find legal cut with max spread
	cut_dim = 0;						// default if no side has spread
	for (d = 0; d < dim; d++) {
		ANNcoord length = bnds.hi[d] - bnds.lo[d];
										// is this side midpoint splitable
										// without violating aspect ratio?
		if (((double) max_length)*2.0/((double) length) <= FS_ASPECT_RATIO) {
										// compute spread along this dim
			ANNcoord spr = annSpread(pa, pidx, n, d);
			if (spr > max_spread) {		// best spread so far
				max_spread = spr;
				cut_dim = d;			// this is dimension to cut
			}
		}
	}
	max_length = 0;						// find longest side other than cut_dim
	for (d = 0; d < dim; d++) {
		ANNcoord length = bnds.hi[d] - bnds.lo[d];
		if (d != cut_dim && length > max_length)
			max_length = length;
	}
										// most extreme legal cuts: leave at
										// least small_piece on either side
	ANNcoord small_piece = max_length / FS_ASPECT_RATIO;
	ANNcoord lo_cut = bnds.lo[cut_dim] + small_piece;// lowest legal cut
	ANNcoord hi_cut = bnds.hi[cut_dim] - small_piece;// highest legal cut
	int br1, br2;
										// is median below lo_cut?
	if (annSplitBalance(pa, pidx, n, cut_dim, lo_cut) >= 0) {
		cut_val = lo_cut;				// cut at lo_cut
		annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
		n_lo = br1;
	}
										// is median above hi_cut?
	else if (annSplitBalance(pa, pidx, n, cut_dim, hi_cut) <= 0) {
		cut_val = hi_cut;				// cut at hi_cut
		annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
		n_lo = br2;
	}
	else {								// median cut preserves asp ratio
		n_lo = n/2;						// split about median
		annMedianSplit(pa, pidx, n, cut_dim, cut_val, n_lo);
	}
}
//----------------------------------------------------------------------
// sl_fair_split - sliding fair split splitting rule
//
// Sliding fair split is a splitting rule that combines the
// strengths of both fair split with sliding midpoint split.
// Fair split tends to produce balanced splits when the points
// are roughly uniformly distributed, but it can produce many
// trivial splits when points are highly clustered. Sliding
// midpoint never produces trivial splits, and shrinks boxes
// nicely if points are highly clustered, but it may produce
// rather unbalanced splits when points are unclustered but not
// quite uniform.
//
// Sliding fair split is based on the theory that there are two
// types of splits that are "good": balanced splits that produce
// fat boxes, and unbalanced splits provided the cell with fewer
// points is fat.
//
// This splitting rule operates by first computing the longest
// side of the current bounding box. Then it asks which sides
// could be split (at the midpoint) and still satisfy the aspect
// ratio bound with respect to this side. Among these, it selects
// the side with the largest spread (as fair split would). It
// then considers the most extreme cuts that would be allowed by
// the aspect ratio bound. This is done by dividing the longest
// side of the box by the aspect ratio bound. If the median cut
// lies between these extreme cuts, then we use the median cut.
// If not, then consider the extreme cut that is closer to the
// median. If all the points lie to one side of this cut, then
// we slide the cut until it hits the first point. This may
// violate the aspect ratio bound, but will never generate empty
// cells. However the sibling of every such skinny cell is fat,
// and hence packing arguments still apply.
//
//----------------------------------------------------------------------
// sl_fair_split - sliding fair-split splitting rule.
//		Like fair_split, but when an extreme cut would leave all points
//		on one side, it slides the cut to the first point so the split
//		is never trivial (the resulting skinny cell's sibling is fat).
void sl_fair_split(
	ANNpointArray		pa,				// point array
	ANNidxArray			pidx,			// point indices (permuted on return)
	const ANNorthRect	&bnds,			// bounding rectangle for cell
	int					n,				// number of points
	int					dim,			// dimension of space
	int					&cut_dim,		// cutting dimension (returned)
	ANNcoord			&cut_val,		// cutting value (returned)
	int					&n_lo)			// num of points on low side (returned)
{
	int d;
	ANNcoord min, max;					// min/max coordinates
	int br1, br2;						// split break points
										// find length of longest box side
										// (a dead cut_dim assignment that
										// used to accompany this loop was
										// removed; cut_dim is re-initialized
										// before its first use below)
	ANNcoord max_length = bnds.hi[0] - bnds.lo[0];
	for (d = 1; d < dim; d++) {
		ANNcoord length = bnds.hi[d] - bnds.lo[d];
		if (length > max_length) {
			max_length = length;
		}
	}
	ANNcoord max_spread = 0;			// find legal cut with max spread
	cut_dim = 0;						// default if no side has spread
	for (d = 0; d < dim; d++) {
		ANNcoord length = bnds.hi[d] - bnds.lo[d];
										// is this side midpoint splitable
										// without violating aspect ratio?
		if (((double) max_length)*2.0/((double) length) <= FS_ASPECT_RATIO) {
										// compute spread along this dim
			ANNcoord spr = annSpread(pa, pidx, n, d);
			if (spr > max_spread) {		// best spread so far
				max_spread = spr;
				cut_dim = d;			// this is dimension to cut
			}
		}
	}
	max_length = 0;						// find longest side other than cut_dim
	for (d = 0; d < dim; d++) {
		ANNcoord length = bnds.hi[d] - bnds.lo[d];
		if (d != cut_dim && length > max_length)
			max_length = length;
	}
										// most extreme legal cuts: leave at
										// least small_piece on either side
	ANNcoord small_piece = max_length / FS_ASPECT_RATIO;
	ANNcoord lo_cut = bnds.lo[cut_dim] + small_piece;// lowest legal cut
	ANNcoord hi_cut = bnds.hi[cut_dim] - small_piece;// highest legal cut
										// find min and max along cut_dim
	annMinMax(pa, pidx, n, cut_dim, min, max);
										// is median below lo_cut?
	if (annSplitBalance(pa, pidx, n, cut_dim, lo_cut) >= 0) {
		if (max > lo_cut) {				// are any points above lo_cut?
			cut_val = lo_cut;			// cut at lo_cut
			annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
			n_lo = br1;					// balance if there are ties
		}
		else {							// all points below lo_cut:
			cut_val = max;				// slide cut to max value
			annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
			n_lo = n-1;
		}
	}
										// is median above hi_cut?
	else if (annSplitBalance(pa, pidx, n, cut_dim, hi_cut) <= 0) {
		if (min < hi_cut) {				// are any points below hi_cut?
			cut_val = hi_cut;			// cut at hi_cut
			annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
			n_lo = br2;					// balance if there are ties
		}
		else {							// all points above hi_cut:
			cut_val = min;				// slide cut to min value
			annPlaneSplit(pa, pidx, n, cut_dim, cut_val, br1, br2);
			n_lo = 1;
		}
	}
	else {								// median cut is good enough
		n_lo = n/2;						// split about median
		annMedianSplit(pa, pidx, n, cut_dim, cut_val, n_lo);
	}
}

View File

@ -0,0 +1,85 @@
//----------------------------------------------------------------------
// File: kd_split.h
// Programmer: Sunil Arya and David Mount
// Description: Methods for splitting kd-trees
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
//----------------------------------------------------------------------
#ifndef ANN_KD_SPLIT_H
#define ANN_KD_SPLIT_H
#include "kd_tree.h"                     // kd-tree definitions
//----------------------------------------------------------------------
// External entry points
//   These are all splitting procedures for kd-trees.  Each matches the
//   ANNkd_splitter signature declared in kd_tree.h: given a point set
//   and its cell's bounding rectangle, each chooses a cutting dimension
//   and value, permutes pidx about the cut, and reports the number of
//   points on the low side.
//----------------------------------------------------------------------
// Standard kd-tree rule: cut at the median along the dimension of
// maximum spread.
void kd_split(                           // standard (optimized) kd-splitter
    ANNpointArray pa,                    // point array (unaltered)
    ANNidxArray pidx,                    // point indices (permuted on return)
    const ANNorthRect &bnds,             // bounding rectangle for cell
    int n,                               // number of points
    int dim,                             // dimension of space
    int &cut_dim,                        // cutting dimension (returned)
    ANNcoord &cut_val,                   // cutting value (returned)
    int &n_lo);                          // num of points on low side (returned)
// Midpoint rule: bisect the longest box side (may produce empty cells).
void midpt_split(                        // midpoint kd-splitter
    ANNpointArray pa,                    // point array (unaltered)
    ANNidxArray pidx,                    // point indices (permuted on return)
    const ANNorthRect &bnds,             // bounding rectangle for cell
    int n,                               // number of points
    int dim,                             // dimension of space
    int &cut_dim,                        // cutting dimension (returned)
    ANNcoord &cut_val,                   // cutting value (returned)
    int &n_lo);                          // num of points on low side (returned)
// Sliding-midpoint rule: like midpt_split, but slides the cut to the
// nearest point when one side would otherwise be empty.
void sl_midpt_split(                     // sliding midpoint kd-splitter
    ANNpointArray pa,                    // point array (unaltered)
    ANNidxArray pidx,                    // point indices (permuted on return)
    const ANNorthRect &bnds,             // bounding rectangle for cell
    int n,                               // number of points
    int dim,                             // dimension of space
    int &cut_dim,                        // cutting dimension (returned)
    ANNcoord &cut_val,                   // cutting value (returned)
    int &n_lo);                          // num of points on low side (returned)
// Fair-split rule: balances point counts subject to an aspect-ratio
// bound on the resulting cells.
void fair_split(                         // fair-split kd-splitter
    ANNpointArray pa,                    // point array (unaltered)
    ANNidxArray pidx,                    // point indices (permuted on return)
    const ANNorthRect &bnds,             // bounding rectangle for cell
    int n,                               // number of points
    int dim,                             // dimension of space
    int &cut_dim,                        // cutting dimension (returned)
    ANNcoord &cut_val,                   // cutting value (returned)
    int &n_lo);                          // num of points on low side (returned)
// Sliding fair-split rule: fair split that slides extreme cuts so no
// cell is ever empty.
void sl_fair_split(                      // sliding fair-split kd-splitter
    ANNpointArray pa,                    // point array (unaltered)
    ANNidxArray pidx,                    // point indices (permuted on return)
    const ANNorthRect &bnds,             // bounding rectangle for cell
    int n,                               // number of points
    int dim,                             // dimension of space
    int &cut_dim,                        // cutting dimension (returned)
    ANNcoord &cut_val,                   // cutting value (returned)
    int &n_lo);                          // num of points on low side (returned)
#endif

View File

@ -0,0 +1,405 @@
//----------------------------------------------------------------------
// File: kd_tree.cpp
// Programmer: Sunil Arya and David Mount
// Description: Basic methods for kd-trees.
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
// Revision 1.0 04/01/05
// Increased aspect ratio bound (ANN_AR_TOOBIG) from 100 to 1000.
// Fixed leaf counts to count trivial leaves.
// Added optional pa, pi arguments to Skeleton kd_tree constructor
// for use in load constructor.
// Added annClose() to eliminate KD_TRIVIAL memory leak.
//----------------------------------------------------------------------
#include "kd_tree.h" // kd-tree declarations
#include "kd_split.h" // kd-tree splitting rules
#include "kd_util.h" // kd-tree utilities
#include <ann/ANNperf.h> // performance evaluation
//----------------------------------------------------------------------
// Global data
//
// For some splitting rules, especially with small bucket sizes,
// it is possible to generate a large number of empty leaf nodes.
// To save storage we allocate a single trivial leaf node which
// contains no points. For messy coding reasons it is convenient
// to have it reference a trivial point index.
//
// KD_TRIVIAL is allocated when the first kd-tree is created. It
// must *never* be deallocated (since it may be shared by more than
// one tree).
//----------------------------------------------------------------------
static int IDX_TRIVIAL[] = {0}; // trivial point index
ANNkd_leaf *KD_TRIVIAL = NULL; // trivial leaf node
//----------------------------------------------------------------------
// Printing the kd-tree
// These routines print a kd-tree in reverse inorder (high then
// root then low). (This is so that if you look at the output
// from the right side it appears from left to right in standard
// inorder.) When outputting leaves we output only the point
// indices rather than the point coordinates. There is an option
// to print the point coordinates separately.
//
// The tree printing routine calls the printing routines on the
// individual nodes of the tree, passing in the level or depth
// in the tree. The level in the tree is used to print indentation
// for readability.
//----------------------------------------------------------------------
void ANNkd_split::print(                 // print splitting node
    int level,                           // depth of node in tree
    ostream &out)                        // output stream
{
    // Reverse inorder: high subtree first, so the printed tree reads
    // left-to-right when viewed from the right-hand side of the output.
    child[ANN_HI]->print(level+1, out);

    out << "    ";
    int depth = level;
    while (depth-- > 0)                  // ".." per level of depth
        out << "..";
    out << "Split cd=" << cut_dim << " cv=" << cut_val
        << " lbnd=" << cd_bnds[ANN_LO]
        << " hbnd=" << cd_bnds[ANN_HI]
        << "\n";

    child[ANN_LO]->print(level+1, out);
}
void ANNkd_leaf::print(                  // print leaf node
    int level,                           // depth of node in tree
    ostream &out)                        // output stream
{
    out << "    ";
    int depth = level;
    while (depth-- > 0)                  // ".." per level of depth
        out << "..";

    if (this == KD_TRIVIAL) {            // shared canonical empty leaf
        out << "Leaf (trivial)\n";
        return;
    }

    // Ordinary leaf: print the point-index bucket, comma separated.
    out << "Leaf n=" << n_pts << " <";
    for (int j = 0; j < n_pts; j++) {
        if (j > 0) out << ",";
        out << bkt[j];
    }
    out << ">\n";
}
void ANNkd_tree::Print(                  // print entire tree
    ANNbool with_pts,                    // print points as well?
    ostream &out)                        // output stream
{
    out << "ANN Version " << ANNversion << "\n";

    if (with_pts) {                      // optionally dump coordinates
        out << "    Points:\n";
        for (int i = 0; i < n_pts; i++) {
            out << "\t" << i << ": ";
            annPrintPt(pts[i], dim, out);
            out << "\n";
        }
    }

    if (root == NULL) {                  // nothing built yet
        out << "    Null tree.\n";
        return;
    }
    root->print(0, out);                 // recurse from the root
}
//----------------------------------------------------------------------
// kd_tree statistics (for performance evaluation)
// This routine compute various statistics information for
// a kd-tree. It is used by the implementors for performance
// evaluation of the data structure.
//----------------------------------------------------------------------
#define MAX(a,b) ((a) > (b) ? (a) : (b))
void ANNkdStats::merge(const ANNkdStats &st)    // merge stats from child
{
    // Counts and the aspect-ratio sum accumulate; depth takes the
    // deeper of the two subtrees.
    n_lf   += st.n_lf;
    n_tl   += st.n_tl;
    n_spl  += st.n_spl;
    n_shr  += st.n_shr;
    if (st.depth > depth)
        depth = st.depth;
    sum_ar += st.sum_ar;
}
//----------------------------------------------------------------------
// Update statistics for nodes
//----------------------------------------------------------------------
const double ANN_AR_TOOBIG = 1000; // too big an aspect ratio
void ANNkd_leaf::getStats(               // get subtree statistics
    int dim,                             // dimension of space
    ANNkdStats &st,                      // stats (modified)
    ANNorthRect &bnd_box)                // bounding box of this cell
{
    st.reset();
    st.n_lf = 1;                         // one leaf: this one
    if (this == KD_TRIVIAL)              // shared empty leaf?
        st.n_tl = 1;

    double ar = annAspectRatio(dim, bnd_box);    // cell aspect ratio
    if (ar >= ANN_AR_TOOBIG)             // clamp outliers so a few
        ar = ANN_AR_TOOBIG;              // skinny cells don't dominate
    st.sum_ar += float(ar);
}
void ANNkd_split::getStats(              // get subtree statistics
    int dim,                             // dimension of space
    ANNkdStats &st,                      // stats (modified)
    ANNorthRect &bnd_box)                // bounding box (shared scratch;
                                         // restored before returning)
{
    ANNkdStats ch_stats;                 // stats for children
                                         // get stats for low child
    ANNcoord hv = bnd_box.hi[cut_dim];   // save box bounds: bnd_box is
                                         // mutated in place and must be
                                         // restored after each recursion
    bnd_box.hi[cut_dim] = cut_val;       // upper bound for low child
    ch_stats.reset();                    // reset
    child[ANN_LO]->getStats(dim, ch_stats, bnd_box);
    st.merge(ch_stats);                  // merge them
    bnd_box.hi[cut_dim] = hv;            // restore bound
                                         // get stats for high child
    ANNcoord lv = bnd_box.lo[cut_dim];   // save box bounds
    bnd_box.lo[cut_dim] = cut_val;       // lower bound for high child
    ch_stats.reset();                    // reset
    child[ANN_HI]->getStats(dim, ch_stats, bnd_box);
    st.merge(ch_stats);                  // merge them
    bnd_box.lo[cut_dim] = lv;            // restore bound
    st.depth++;                          // this node adds one level
    st.n_spl++;                          // and one splitting node
}
//----------------------------------------------------------------------
// getStats
// Collects a number of statistics related to kd_tree or
// bd_tree.
//----------------------------------------------------------------------
void ANNkd_tree::getStats(               // get tree statistics
    ANNkdStats &st)                      // stats (modified)
{
    st.reset(dim, n_pts, bkt_size);      // start from a clean slate
                                         // box enclosing all the data
    ANNorthRect bnd_box(dim, bnd_box_lo, bnd_box_hi);

    if (root == NULL) return;            // empty tree: nothing to collect

    root->getStats(dim, st, bnd_box);    // accumulate over all nodes
    st.avg_ar = st.sum_ar / st.n_lf;     // mean leaf aspect ratio
}
//----------------------------------------------------------------------
// kd_tree destructor
// The destructor just frees the various elements that were
// allocated in the construction process.
//----------------------------------------------------------------------
ANNkd_tree::~ANNkd_tree()                // tree destructor
{
    // Free everything the constructor allocated; node destructors
    // release the subtrees recursively.  delete on NULL is a no-op.
    delete root;
    delete [] pidx;
    if (bnd_box_lo != NULL) annDeallocPt(bnd_box_lo);
    if (bnd_box_hi != NULL) annDeallocPt(bnd_box_hi);
}
//----------------------------------------------------------------------
// This is called when all use of ANN is finished. It eliminates the
// minor memory leak caused by the allocation of KD_TRIVIAL.
//----------------------------------------------------------------------
void annClose()                          // close use of ANN
{
    // Release the shared trivial leaf so its one-time allocation does
    // not show up as a leak; a later tree build will reallocate it.
    if (KD_TRIVIAL == NULL) return;
    delete KD_TRIVIAL;
    KD_TRIVIAL = NULL;
}
//----------------------------------------------------------------------
// kd_tree constructors
// There is a skeleton kd-tree constructor which sets up a
// trivial empty tree. The last optional argument allows
// the routine to be passed a point index array which is
// assumed to be of the proper size (n). Otherwise, one is
// allocated and initialized to the identity. Warning: In
// either case the destructor will deallocate this array.
//
// As a kludge, we need to allocate KD_TRIVIAL if one has not
// already been allocated. (This is because I'm too dumb to
// figure out how to cause a pointer to be allocated at load
// time.)
//----------------------------------------------------------------------
void ANNkd_tree::SkeletonTree(           // construct skeleton tree
    int n,                               // number of points
    int dd,                              // dimension
    int bs,                              // bucket size
    ANNpointArray pa,                    // point array
    ANNidxArray pi)                      // point indices (may be NULL)
{
    dim      = dd;                       // record basic parameters
    n_pts    = n;
    bkt_size = bs;
    pts      = pa;                       // points live with the caller
    root     = NULL;                     // no tree structure yet

    if (pi != NULL) {                    // caller supplied an index array;
        pidx = pi;                       // we take ownership of it
    }
    else {                               // otherwise build the identity
        pidx = new ANNidx[n];            // permutation ourselves
        for (int i = 0; i < n; i++)
            pidx[i] = i;
    }

    bnd_box_lo = bnd_box_hi = NULL;      // bounding box not computed yet

    if (KD_TRIVIAL == NULL)              // lazily allocate the shared
        KD_TRIVIAL = new ANNkd_leaf(0, IDX_TRIVIAL);  // empty leaf node
}
ANNkd_tree::ANNkd_tree(                  // basic constructor
    int n,                               // number of points
    int dd,                              // dimension
    int bs)                              // bucket size
{
    // Delegate all setup to the skeleton builder (no points yet).
    SkeletonTree(n, dd, bs);
}
//----------------------------------------------------------------------
// rkd_tree - recursive procedure to build a kd-tree
//
// Builds a kd-tree for points in pa as indexed through the
// array pidx[0..n-1] (typically a subarray of the array used in
// the top-level call). This routine permutes the array pidx,
// but does not alter pa[].
//
// The construction is based on a standard algorithm for constructing
// the kd-tree (see Friedman, Bentley, and Finkel, ``An algorithm for
// finding best matches in logarithmic expected time,'' ACM Transactions
// on Mathematical Software, 3(3):209-226, 1977). The procedure
// operates by a simple divide-and-conquer strategy, which determines
// an appropriate orthogonal cutting plane (see below), and splits
// the points. When the number of points falls below the bucket size,
// we simply store the points in a leaf node's bucket.
//
// One of the arguments is a pointer to a splitting routine,
// whose prototype is:
//
// void split(
// ANNpointArray pa, // complete point array
// ANNidxArray pidx, // point array (permuted on return)
// ANNorthRect &bnds, // bounds of current cell
// int n, // number of points
// int dim, // dimension of space
// int &cut_dim, // cutting dimension
// ANNcoord &cut_val, // cutting value
// int &n_lo) // no. of points on low side of cut
//
// This procedure selects a cutting dimension and cutting value,
// partitions pa about these values, and returns the number of
// points on the low side of the cut.
//----------------------------------------------------------------------
ANNkd_ptr rkd_tree(                      // recursive construction of kd-tree
    ANNpointArray pa,                    // point array
    ANNidxArray pidx,                    // point indices to store in subtree
    int n,                               // number of points
    int dim,                             // dimension of space
    int bsp,                             // bucket space (max leaf size)
    ANNorthRect &bnd_box,                // bounding box for current node
                                         // (mutated in place, restored
                                         // before returning)
    ANNkd_splitter splitter)             // splitting routine
{
    if (n <= bsp) {                      // n small, make a leaf node
        if (n == 0)                      // empty leaf node
            return KD_TRIVIAL;           // return (canonical) empty leaf
        else                             // construct the node and return
            return new ANNkd_leaf(n, pidx);
    }
    else {                               // n large, make a splitting node
        int cd;                          // cutting dimension
        ANNcoord cv;                     // cutting value
        int n_lo;                        // number on low side of cut
        ANNkd_node *lo, *hi;             // low and high children
                                         // invoke splitting procedure;
                                         // permutes pidx about the cut
        (*splitter)(pa, pidx, bnd_box, n, dim, cd, cv, n_lo);

        ANNcoord lv = bnd_box.lo[cd];    // save bounds for cutting dimension
        ANNcoord hv = bnd_box.hi[cd];

        bnd_box.hi[cd] = cv;             // shrink box for left subtree
        lo = rkd_tree(                   // build left subtree
                pa, pidx, n_lo,          // ...from pidx[0..n_lo-1]
                dim, bsp, bnd_box, splitter);
        bnd_box.hi[cd] = hv;             // restore bounds

        bnd_box.lo[cd] = cv;             // shrink box for right subtree
        hi = rkd_tree(                   // build right subtree
                pa, pidx + n_lo, n-n_lo, // ...from pidx[n_lo..n-1]
                dim, bsp, bnd_box, splitter);
        bnd_box.lo[cd] = lv;             // restore bounds
                                         // create the splitting node
        ANNkd_split *ptr = new ANNkd_split(cd, cv, lv, hv, lo, hi);

        return ptr;                      // return pointer to this node
    }
}
//----------------------------------------------------------------------
// kd-tree constructor
// This is the main constructor for kd-trees given a set of points.
// It first builds a skeleton tree, then computes the bounding box
// of the data points, and then invokes rkd_tree() to actually
// build the tree, passing it the appropriate splitting routine.
//----------------------------------------------------------------------
ANNkd_tree::ANNkd_tree(                  // construct from point array
    ANNpointArray pa,                    // point array (with at least n pts)
    int n,                               // number of points
    int dd,                              // dimension
    int bs,                              // bucket size
    ANNsplitRule split)                  // splitting method
{
    SkeletonTree(n, dd, bs);             // set up the basic stuff
    pts = pa;                            // where the points are
    if (n == 0) return;                  // no points -- leave root NULL

    ANNorthRect bnd_box(dd);             // bounding box for points
    annEnclRect(pa, pidx, n, dd, bnd_box);// construct bounding rectangle
                                         // copy to tree structure (freed
                                         // by the destructor)
    bnd_box_lo = annCopyPt(dd, bnd_box.lo);
    bnd_box_hi = annCopyPt(dd, bnd_box.hi);

    switch (split) {                     // build by rule
    case ANN_KD_STD:                     // standard kd-splitting rule
        root = rkd_tree(pa, pidx, n, dd, bs, bnd_box, kd_split);
        break;
    case ANN_KD_MIDPT:                   // midpoint split
        root = rkd_tree(pa, pidx, n, dd, bs, bnd_box, midpt_split);
        break;
    case ANN_KD_FAIR:                    // fair split
        root = rkd_tree(pa, pidx, n, dd, bs, bnd_box, fair_split);
        break;
    case ANN_KD_SUGGEST:                 // best (in our opinion): falls
    case ANN_KD_SL_MIDPT:                // through to sliding midpoint split
        root = rkd_tree(pa, pidx, n, dd, bs, bnd_box, sl_midpt_split);
        break;
    case ANN_KD_SL_FAIR:                 // sliding fair split
        root = rkd_tree(pa, pidx, n, dd, bs, bnd_box, sl_fair_split);
        break;
    default:
        annError("Illegal splitting method", ANNabort);
    }
}

View File

@ -0,0 +1,197 @@
//----------------------------------------------------------------------
// File: kd_tree.h
// Programmer: Sunil Arya and David Mount
// Description: Declarations for standard kd-tree routines
// Last modified: 05/03/05 (Version 1.1)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
// Revision 1.1 05/03/05
// Added fixed radius kNN search
//----------------------------------------------------------------------
#ifndef ANN_kd_tree_H
#define ANN_kd_tree_H
#include <ann/ANNx.h> // all ANN includes
using namespace std; // make std:: available
//----------------------------------------------------------------------
// Generic kd-tree node
//
// Nodes in kd-trees are of two types, splitting nodes which contain
// splitting information (a splitting hyperplane orthogonal to one
// of the coordinate axes) and leaf nodes which contain point
// information (an array of points stored in a bucket). This is
// handled by making a generic class kd_node, which is essentially an
// empty shell, and then deriving the leaf and splitting nodes from
// this.
//----------------------------------------------------------------------
// Abstract base for all kd-tree nodes.  Concrete subclasses are
// ANNkd_leaf (a bucket of point indices) and ANNkd_split (a cutting
// hyperplane with two children); all traversal is via these virtuals.
class ANNkd_node{                        // generic kd-tree node (empty shell)
public:
    virtual ~ANNkd_node() {}                    // virtual destructor
    virtual void ann_search(ANNdist) = 0;       // tree search
    virtual void ann_pri_search(ANNdist) = 0;   // priority search
    virtual void ann_FR_search(ANNdist) = 0;    // fixed-radius search
    virtual void getStats(                      // get tree statistics
            int dim,                            // dimension of space
            ANNkdStats &st,                     // statistics
            ANNorthRect &bnd_box) = 0;          // bounding box
                                                // print node
    virtual void print(int level, ostream &out) = 0;
    virtual void dump(ostream &out) = 0;        // dump node
    friend class ANNkd_tree;                    // allow kd-tree to access us
};
//----------------------------------------------------------------------
// kd-splitting function:
// kd_splitter is a pointer to a splitting routine for preprocessing.
// Different splitting procedures result in different strategies
// for building the tree.
//----------------------------------------------------------------------
typedef void (*ANNkd_splitter)( // splitting routine for kd-trees
ANNpointArray pa, // point array (unaltered)
ANNidxArray pidx, // point indices (permuted on return)
const ANNorthRect &bnds, // bounding rectangle for cell
int n, // number of points
int dim, // dimension of space
int &cut_dim, // cutting dimension (returned)
ANNcoord &cut_val, // cutting value (returned)
int &n_lo); // num of points on low side (returned)
//----------------------------------------------------------------------
// Leaf kd-tree node
// Leaf nodes of the kd-tree store the set of points associated
// with this bucket, stored as an array of point indices. These
// are indices in the array points, which resides with the
// root of the kd-tree. We also store the number of points
// that reside in this bucket.
//----------------------------------------------------------------------
class ANNkd_leaf: public ANNkd_node      // leaf node for kd-tree
{
    int           n_pts;                 // no. points in bucket
    ANNidxArray   bkt;                   // bucket of point indices (indexes
                                         // into the tree's point array; the
                                         // array itself is owned elsewhere)
public:
    ANNkd_leaf(                          // constructor
        int       n,                     // number of points
        ANNidxArray b)                   // bucket
        {
            n_pts = n;                   // number of points in bucket
            bkt   = b;                   // the bucket (not copied)
        }
    ~ANNkd_leaf() { }                    // destructor (bucket not owned,
                                         // so nothing to free)
    virtual void getStats(               // get tree statistics
            int dim,                     // dimension of space
            ANNkdStats &st,              // statistics
            ANNorthRect &bnd_box);       // bounding box
    virtual void print(int level, ostream &out);// print node
    virtual void dump(ostream &out);     // dump node
    virtual void ann_search(ANNdist);    // standard search
    virtual void ann_pri_search(ANNdist);// priority search
    virtual void ann_FR_search(ANNdist); // fixed-radius search
};
//----------------------------------------------------------------------
// KD_TRIVIAL is a special pointer to an empty leaf node. Since
// some splitting rules generate many (more than 50%) trivial
// leaves, we use this one shared node to save space.
//
// The pointer is initialized to NULL, but whenever a kd-tree is
// created, we allocate this node, if it has not already been
// allocated. This node is *never* deallocated, so it produces
// a small memory leak.
//----------------------------------------------------------------------
extern ANNkd_leaf *KD_TRIVIAL; // trivial (empty) leaf node
//----------------------------------------------------------------------
// kd-tree splitting node.
// Splitting nodes contain a cutting dimension and a cutting value.
// These indicate the axis-parallel plane which subdivides the
// box for this node. The extent of the bounding box along the
// cutting dimension is maintained (this is used to speed up point
// to box distance calculations) [we do not store the entire bounding
// box since this may be wasteful of space in high dimensions].
// We also store pointers to the 2 children.
//----------------------------------------------------------------------
class ANNkd_split : public ANNkd_node    // splitting node of a kd-tree
{
    int           cut_dim;               // dim orthogonal to cutting plane
    ANNcoord      cut_val;               // location of cutting plane
    ANNcoord      cd_bnds[2];            // lower and upper bounds of
                                         // rectangle along cut_dim (used to
                                         // speed point-to-box distances)
    ANNkd_ptr     child[2];              // left (ANN_LO) and right (ANN_HI)
                                         // children
public:
    ANNkd_split(                         // constructor
        int cd,                          // cutting dimension
        ANNcoord cv,                     // cutting value
        ANNcoord lv, ANNcoord hv,        // low and high values
        ANNkd_ptr lc=NULL, ANNkd_ptr hc=NULL)   // children
        {
            cut_dim        = cd;         // cutting dimension
            cut_val        = cv;         // cutting value
            cd_bnds[ANN_LO] = lv;        // lower bound for rectangle
            cd_bnds[ANN_HI] = hv;        // upper bound for rectangle
            child[ANN_LO]  = lc;         // left child
            child[ANN_HI]  = hc;         // right child
        }
    ~ANNkd_split()                       // destructor: free children, but
        {                                // never the shared KD_TRIVIAL leaf
            if (child[ANN_LO]!= NULL && child[ANN_LO]!= KD_TRIVIAL)
                delete child[ANN_LO];
            if (child[ANN_HI]!= NULL && child[ANN_HI]!= KD_TRIVIAL)
                delete child[ANN_HI];
        }
    virtual void getStats(               // get tree statistics
            int dim,                     // dimension of space
            ANNkdStats &st,              // statistics
            ANNorthRect &bnd_box);       // bounding box
    virtual void print(int level, ostream &out);// print node
    virtual void dump(ostream &out);     // dump node
    virtual void ann_search(ANNdist);    // standard search
    virtual void ann_pri_search(ANNdist);// priority search
    virtual void ann_FR_search(ANNdist); // fixed-radius search
};
//----------------------------------------------------------------------
// External entry points
//----------------------------------------------------------------------
ANNkd_ptr rkd_tree( // recursive construction of kd-tree
ANNpointArray pa, // point array (unaltered)
ANNidxArray pidx, // point indices to store in subtree
int n, // number of points
int dim, // dimension of space
int bsp, // bucket space
ANNorthRect &bnd_box, // bounding box for current node
ANNkd_splitter splitter); // splitting routine
#endif

View File

@ -0,0 +1,439 @@
//----------------------------------------------------------------------
// File: kd_util.cpp
// Programmer: Sunil Arya and David Mount
// Description: Common utilities for kd-trees
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
//----------------------------------------------------------------------
#include "kd_util.h" // kd-utility declarations
#include <ann/ANNperf.h> // performance evaluation
//----------------------------------------------------------------------
// The following routines are utility functions for manipulating
// points sets, used in determining splitting planes for kd-tree
// construction.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// NOTE: Virtually all point indexing is done through an index (i.e.
// permutation) array pidx. Consequently, a reference to the d-th
// coordinate of the i-th point is pa[pidx[i]][d]. The macro PA(i,d)
// is a shorthand for this.
//----------------------------------------------------------------------
// standard 2-d indirect indexing
#define PA(i,d) (pa[pidx[(i)]][(d)])
// accessing a single point
#define PP(i) (pa[pidx[(i)]])
//----------------------------------------------------------------------
// annAspectRatio
// Compute the aspect ratio (ratio of longest to shortest side)
// of a rectangle.
//----------------------------------------------------------------------
double annAspectRatio(
int dim, // dimension
const ANNorthRect &bnd_box) // bounding cube
{
ANNcoord length = bnd_box.hi[0] - bnd_box.lo[0];
ANNcoord min_length = length; // min side length
ANNcoord max_length = length; // max side length
for (int d = 0; d < dim; d++) {
length = bnd_box.hi[d] - bnd_box.lo[d];
if (length < min_length) min_length = length;
if (length > max_length) max_length = length;
}
return max_length/min_length;
}
//----------------------------------------------------------------------
// annEnclRect, annEnclCube
// These utilities compute the smallest rectangle and cube enclosing
// a set of points, respectively.
//----------------------------------------------------------------------
void annEnclRect(
    ANNpointArray pa,                    // point array
    ANNidxArray   pidx,                  // point indices
    int           n,                     // number of points
    int           dim,                   // dimension
    ANNorthRect   &bnds)                 // bounding rectangle (returned)
{
    // One pass per coordinate: track the extreme values over all points.
    for (int d = 0; d < dim; d++) {
        ANNcoord lo_bnd = PA(0,d);       // seed bounds from point 0
        ANNcoord hi_bnd = lo_bnd;
        for (int i = 1; i < n; i++) {
            ANNcoord c = PA(i,d);
            if (c < lo_bnd)      lo_bnd = c;
            else if (c > hi_bnd) hi_bnd = c;
        }
        bnds.lo[d] = lo_bnd;
        bnds.hi[d] = hi_bnd;
    }
}
void annEnclCube(                        // compute smallest enclosing cube
    ANNpointArray pa,                    // point array
    ANNidxArray   pidx,                  // point indices
    int           n,                     // number of points
    int           dim,                   // dimension
    ANNorthRect   &bnds)                 // bounding cube (returned)
{
    int d;

    annEnclRect(pa, pidx, n, dim, bnds); // start from the tight rectangle

    ANNcoord max_len = 0;                // length of the longest side
    for (d = 0; d < dim; d++) {
        ANNcoord len = bnds.hi[d] - bnds.lo[d];
        if (len > max_len)
            max_len = len;
    }

    for (d = 0; d < dim; d++) {          // pad each side symmetrically
        ANNcoord len = bnds.hi[d] - bnds.lo[d];     // up to the longest
        ANNcoord half_diff = (max_len - len) / 2;
        bnds.lo[d] -= half_diff;
        bnds.hi[d] += half_diff;
    }
}
//----------------------------------------------------------------------
// annBoxDistance - utility routine which computes distance from point to
// box (Note: most distances to boxes are computed using incremental
// distance updates, not this function.)
//----------------------------------------------------------------------
ANNdist annBoxDistance(                  // compute distance from point to box
    const ANNpoint    q,                 // the point
    const ANNpoint    lo,                // low point of box
    const ANNpoint    hi,                // high point of box
    int               dim)               // dimension of space
{
    // Sum of squared per-coordinate distances from q to the box.
    // Coordinates where q lies inside the box's slab contribute zero.
    // (The `register` storage class was removed: deprecated in C++11
    // and ill-formed in C++17; it had no effect on modern compilers.)
    ANNdist dist = 0.0;                  // sum of squared distances
    ANNdist t;
    for (int d = 0; d < dim; d++) {
        if (q[d] < lo[d]) {              // q is left of box
            t = ANNdist(lo[d]) - ANNdist(q[d]);
            dist = ANN_SUM(dist, ANN_POW(t));
        }
        else if (q[d] > hi[d]) {         // q is right of box
            t = ANNdist(q[d]) - ANNdist(hi[d]);
            dist = ANN_SUM(dist, ANN_POW(t));
        }
    }
    ANN_FLOP(4*dim)                      // increment floating op count
    return dist;
}
//----------------------------------------------------------------------
// annSpread - find spread along given dimension
// annMinMax - find min and max coordinates along given dimension
// annMaxSpread - find dimension of max spread
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// annSpread - spread (max - min) of coordinate d over the point set.
//   Indexing is indirect through pidx (via the PA macro).
//----------------------------------------------------------------------
ANNcoord annSpread(			// compute point spread along dimension
	ANNpointArray		pa,	// point array
	ANNidxArray		pidx,	// point indices
	int			n,	// number of points
	int			d)	// dimension to check
{
	ANNcoord lo = PA(0,d);		// running minimum of coordinate d
	ANNcoord hi = lo;		// running maximum of coordinate d
	for (int j = 1; j < n; j++) {
		ANNcoord v = PA(j,d);
		if (v < lo)		lo = v;
		else if (v > hi)	hi = v;
	}
	return hi - lo;			// total spread is the difference
}
//----------------------------------------------------------------------
// annMinMax - minimum and maximum of coordinate d over the point set.
//   Results are returned through the min/max reference parameters.
//----------------------------------------------------------------------
void annMinMax(				// compute min and max coordinates along dim
	ANNpointArray		pa,	// point array
	ANNidxArray		pidx,	// point indices
	int			n,	// number of points
	int			d,	// dimension to check
	ANNcoord		&min,	// minimum value (returned)
	ANNcoord		&max)	// maximum value (returned)
{
	min = PA(0,d);			// seed both with the first point
	max = PA(0,d);
	for (int j = 1; j < n; j++) {
		ANNcoord v = PA(j,d);
		if (v < min)		min = v;
		else if (v > max)	max = v;
	}
}
//----------------------------------------------------------------------
// annMaxSpread - the dimension along which the points are most spread.
//   Returns 0 when n == 0 (or when every spread is zero).
//----------------------------------------------------------------------
int annMaxSpread(			// compute dimension of max spread
	ANNpointArray		pa,	// point array
	ANNidxArray		pidx,	// point indices
	int			n,	// number of points
	int			dim)	// dimension of space
{
	int best_dim = 0;		// dimension of widest spread so far
	ANNcoord best_spr = 0;		// widest spread so far
	if (n == 0) return best_dim;	// empty set: any answer will do
	for (int d = 0; d < dim; d++) {	// scan every dimension
		ANNcoord spr = annSpread(pa, pidx, n, d);
		if (spr > best_spr) {
			best_spr = spr;
			best_dim = d;
		}
	}
	return best_dim;
}
//----------------------------------------------------------------------
// annMedianSplit - split point array about its median
// Splits a subarray of points pa[0..n] about an element of given
// rank (median: n_lo = n/2) with respect to dimension d. It places
// the element of rank n_lo-1 correctly (because our splitting rule
// takes the mean of these two). On exit, the array is permuted so
// that:
//
// pa[0..n_lo-2][d] <= pa[n_lo-1][d] <= pa[n_lo][d] <= pa[n_lo+1..n-1][d].
//
// The mean of pa[n_lo-1][d] and pa[n_lo][d] is returned as the
// splitting value.
//
// All indexing is done indirectly through the index array pidx.
//
// This function uses the well known selection algorithm due to
// C.A.R. Hoare.
//----------------------------------------------------------------------
// swap two points in pa array
#define PASWAP(a,b) { int tmp = pidx[a]; pidx[a] = pidx[b]; pidx[b] = tmp; }
//----------------------------------------------------------------------
// annMedianSplit - permute pidx so that ranks [0..n_lo-1] of dimension
//   d precede ranks [n_lo..n-1], using Hoare's selection algorithm,
//   and return the cutting value cv = midpoint of the two coordinates
//   straddling the split (ranks n_lo-1 and n_lo).
//   All point access is indirect through pidx (PA/PASWAP macros), so
//   only the index array is permuted, never the points themselves.
//----------------------------------------------------------------------
void annMedianSplit(
	ANNpointArray		pa,	// points to split
	ANNidxArray		pidx,	// point indices
	int			n,	// number of points
	int			d,	// dimension along which to split
	ANNcoord		&cv,	// cutting value
	int			n_lo)	// split into n_lo and n-n_lo
{
	int l = 0;			// left end of current subarray
	int r = n-1;			// right end of current subarray
	while (l < r) {			// Hoare selection: narrow in on rank n_lo
	register int i = (r+l)/2;	// select middle as pivot
	register int k;
	if (PA(i,d) > PA(r,d))		// make sure last > pivot
	PASWAP(i,r)
	PASWAP(l,i);			// move pivot to first position
	ANNcoord c = PA(l,d);		// pivot value
	i = l;				// i/k scan inward from the two ends
	k = r;
	for(;;) {			// pivot about c
	// sentinel-style scans: PA(l,d)==c stops ++i, last>pivot stops --k
	while (PA(++i,d) < c) ;
	while (PA(--k,d) > c) ;
	if (i < k) PASWAP(i,k) else break;
	}
	PASWAP(l,k);			// pivot winds up in location k
	if (k > n_lo) r = k-1;		// rank n_lo is in the left part
	else if (k < n_lo) l = k+1;	// rank n_lo is in the right part
	else break;			// got the median exactly
	}
	if (n_lo > 0) {			// place rank n_lo-1 as well:
	ANNcoord c = PA(0,d);		// find max among pa[0..n_lo-1] ...
	int k = 0;			// ... which is exactly rank n_lo-1
	for (int i = 1; i < n_lo; i++) {
	if (PA(i,d) > c) {
	c = PA(i,d);
	k = i;
	}
	}
	PASWAP(n_lo-1, k);		// max among pa[0..n_lo-1] to pa[n_lo-1]
	}
	// cut value is midpoint of the two coordinates straddling the split
	cv = (PA(n_lo-1,d) + PA(n_lo,d))/2.0;
}
//----------------------------------------------------------------------
// annPlaneSplit - split point array about a cutting plane
// Split the points in an array about a given plane along a
// given cutting dimension. On exit, br1 and br2 are set so
// that:
//
// pa[ 0 ..br1-1] < cv
// pa[br1..br2-1] == cv
// pa[br2.. n -1] > cv
//
// All indexing is done indirectly through the index array pidx.
//
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// annPlaneSplit - three-way partition of pidx about cutting value cv
//   along dimension d. On exit:
//      pa[ 0 ..br1-1]  < cv
//      pa[br1..br2-1] == cv
//      pa[br2.. n -1]  > cv
//   Done as two sweeps: first separate (< cv) from (>= cv), then within
//   the right part separate (== cv) from (> cv).
//----------------------------------------------------------------------
void annPlaneSplit(			// split points by a plane
	ANNpointArray		pa,	// points to split
	ANNidxArray		pidx,	// point indices
	int			n,	// number of points
	int			d,	// dimension along which to split
	ANNcoord		cv,	// cutting value
	int			&br1,	// first break (values < cv)
	int			&br2)	// second break (values == cv)
{
	int l = 0;
	int r = n-1;
	for(;;) {			// partition pa[0..n-1] about cv
	// advance l past values < cv, retreat r past values >= cv
	while (l < n && PA(l,d) < cv) l++;
	while (r >= 0 && PA(r,d) >= cv) r--;
	if (l > r) break;		// pointers crossed: pass done
	PASWAP(l,r);
	l++; r--;
	}
	br1 = l;			// now: pa[0..br1-1] < cv <= pa[br1..n-1]
	r = n-1;			// second pass works on pa[br1..n-1] (l == br1)
	for(;;) {			// partition pa[br1..n-1] about cv
	while (l < n && PA(l,d) <= cv) l++;
	while (r >= br1 && PA(r,d) > cv) r--;
	if (l > r) break;
	PASWAP(l,r);
	l++; r--;
	}
	br2 = l;			// now: pa[br1..br2-1] == cv < pa[br2..n-1]
}
//----------------------------------------------------------------------
// annBoxSplit - split point array about a orthogonal rectangle
// Split the points in an array about a given orthogonal
// rectangle. On exit, n_in is set to the number of points
// that are inside (or on the boundary of) the rectangle.
//
// All indexing is done indirectly through the index array pidx.
//
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// annBoxSplit - partition pidx so that points inside (or on) the box
//   come first; n_in receives the count of inside points.
//   Uses a single inward two-pointer sweep with indirect swaps.
//----------------------------------------------------------------------
void annBoxSplit(			// split points by a box
	ANNpointArray		pa,	// points to split
	ANNidxArray		pidx,	// point indices
	int			n,	// number of points
	int			dim,	// dimension of space
	ANNorthRect		&box,	// the box
	int			&n_in)	// number of points inside (returned)
{
	int l = 0;
	int r = n-1;
	for(;;) {			// partition pa[0..n-1] about box
	// advance l past inside points, retreat r past outside points
	while (l < n && box.inside(dim, PP(l))) l++;
	while (r >= 0 && !box.inside(dim, PP(r))) r--;
	if (l > r) break;		// pointers crossed: partition done
	PASWAP(l,r);
	l++; r--;
	}
	n_in = l;			// now: pa[0..n_in-1] inside and rest outside
}
//----------------------------------------------------------------------
// annSplitBalance - compute balance factor for a given plane split
// Balance factor is defined as the number of points lying
// below the splitting value minus n/2 (median). Thus, a
// median split has balance 0, left of this is negative and
// right of this is positive. (The points are unchanged.)
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// annSplitBalance - balance factor of a hypothetical split at cv:
//   (number of points with coordinate d strictly below cv) - n/2.
//   Zero means a perfect median split; the points are not modified.
//----------------------------------------------------------------------
int annSplitBalance(			// determine balance factor of a split
	ANNpointArray		pa,	// points to split
	ANNidxArray		pidx,	// point indices
	int			n,	// number of points
	int			d,	// dimension along which to split
	ANNcoord		cv)	// cutting value
{
	int below = 0;			// points strictly below the cut
	for (int i = 0; i < n; i++)
		if (PA(i,d) < cv)
			below++;
	return below - n/2;		// deviation from the median count
}
//----------------------------------------------------------------------
// annBox2Bnds - convert bounding box to list of bounds
// Given two boxes, an inner box enclosed within a bounding
// box, this routine determines all the sides for which the
// inner box is strictly contained with the bounding box,
// and adds an appropriate entry to a list of bounds. Then
// we allocate storage for the final list of bounds, and return
// the resulting list and its size.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// annBox2Bnds - convert an inner box (enclosed in bnd_box) into a list
//   of halfspace bounds, one for each side where the inner box is
//   strictly inside the enclosing box. Two passes: count, then fill.
//   The bounds array is allocated here; the caller owns it.
//----------------------------------------------------------------------
void annBox2Bnds(			// convert inner box to bounds
	const ANNorthRect	&inner_box,	// inner box
	const ANNorthRect	&bnd_box,	// enclosing box
	int			dim,		// dimension of space
	int			&n_bnds,	// number of bounds (returned)
	ANNorthHSArray		&bnds)		// bounds array (returned)
{
	n_bnds = 0;
	for (int d = 0; d < dim; d++) {		// pass 1: count needed bounds
		if (inner_box.lo[d] > bnd_box.lo[d]) n_bnds++;	// low side strictly inside
		if (inner_box.hi[d] < bnd_box.hi[d]) n_bnds++;	// high side strictly inside
	}
	bnds = new ANNorthHalfSpace[n_bnds];	// allocate exactly that many
	int out = 0;				// next free slot in bnds
	for (int d = 0; d < dim; d++) {		// pass 2: emit halfspaces
		if (inner_box.lo[d] > bnd_box.lo[d]) {
			bnds[out].cd = d;	// cutting dimension
			bnds[out].cv = inner_box.lo[d];	// cutting value
			bnds[out].sd = +1;	// box lies on the high side
			out++;
		}
		if (inner_box.hi[d] < bnd_box.hi[d]) {
			bnds[out].cd = d;
			bnds[out].cv = inner_box.hi[d];
			bnds[out].sd = -1;	// box lies on the low side
			out++;
		}
	}
}
//----------------------------------------------------------------------
// annBnds2Box - convert list of bounds to bounding box
// Given an enclosing box and a list of bounds, this routine
// computes the corresponding inner box. It is assumed that
// the box points have been allocated already.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// annBnds2Box - reconstruct the inner box from an enclosing box and a
//   list of halfspace bounds: copy the enclosing box, then project both
//   corners onto every bounding halfspace. inner_box must already be
//   allocated by the caller.
//----------------------------------------------------------------------
void annBnds2Box(
	const ANNorthRect	&bnd_box,	// enclosing box
	int			dim,		// dimension of space
	int			n_bnds,		// number of bounds
	ANNorthHSArray		bnds,		// bounds array
	ANNorthRect		&inner_box)	// inner box (returned)
{
	annAssignRect(dim, inner_box, bnd_box);	// start from the enclosing box
	for (int b = 0; b < n_bnds; b++) {	// clip both corners by each bound
		bnds[b].project(inner_box.lo);
		bnds[b].project(inner_box.hi);
	}
}

View File

@ -0,0 +1,124 @@
//----------------------------------------------------------------------
// File: kd_util.h
// Programmer: Sunil Arya and David Mount
// Description: Common utilities for kd-trees
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
//----------------------------------------------------------------------
#ifndef ANN_kd_util_H
#define ANN_kd_util_H
#include "kd_tree.h" // kd-tree declarations
//----------------------------------------------------------------------
// externally accessible functions
//----------------------------------------------------------------------
double annAspectRatio( // compute aspect ratio of box
int dim, // dimension
const ANNorthRect &bnd_box); // bounding cube
void annEnclRect( // compute smallest enclosing rectangle
ANNpointArray pa, // point array
ANNidxArray pidx, // point indices
int n, // number of points
int dim, // dimension
ANNorthRect &bnds); // bounding cube (returned)
void annEnclCube( // compute smallest enclosing cube
ANNpointArray pa, // point array
ANNidxArray pidx, // point indices
int n, // number of points
int dim, // dimension
ANNorthRect &bnds); // bounding cube (returned)
ANNdist annBoxDistance( // compute distance from point to box
const ANNpoint q, // the point
const ANNpoint lo, // low point of box
const ANNpoint hi, // high point of box
int dim); // dimension of space
ANNcoord annSpread( // compute point spread along dimension
ANNpointArray pa, // point array
ANNidxArray pidx, // point indices
int n, // number of points
int d); // dimension to check
void annMinMax( // compute min and max coordinates along dim
ANNpointArray pa, // point array
ANNidxArray pidx, // point indices
int n, // number of points
int d, // dimension to check
ANNcoord& min, // minimum value (returned)
ANNcoord& max); // maximum value (returned)
int annMaxSpread( // compute dimension of max spread
ANNpointArray pa, // point array
ANNidxArray pidx, // point indices
int n, // number of points
int dim); // dimension of space
void annMedianSplit( // split points along median value
ANNpointArray pa, // points to split
ANNidxArray pidx, // point indices
int n, // number of points
int d, // dimension along which to split
ANNcoord &cv, // cutting value
int n_lo); // split into n_lo and n-n_lo
void annPlaneSplit( // split points by a plane
ANNpointArray pa, // points to split
ANNidxArray pidx, // point indices
int n, // number of points
int d, // dimension along which to split
ANNcoord cv, // cutting value
int &br1, // first break (values < cv)
int &br2); // second break (values == cv)
void annBoxSplit( // split points by a box
ANNpointArray pa, // points to split
ANNidxArray pidx, // point indices
int n, // number of points
int dim, // dimension of space
ANNorthRect &box, // the box
int &n_in); // number of points inside (returned)
int annSplitBalance( // determine balance factor of a split
ANNpointArray pa, // points to split
ANNidxArray pidx, // point indices
int n, // number of points
int d, // dimension along which to split
ANNcoord cv); // cutting value
void annBox2Bnds( // convert inner box to bounds
const ANNorthRect &inner_box, // inner box
const ANNorthRect &bnd_box, // enclosing box
int dim, // dimension of space
int &n_bnds, // number of bounds (returned)
ANNorthHSArray &bnds); // bounds array (returned)
void annBnds2Box( // convert bounds to inner box
const ANNorthRect &bnd_box, // enclosing box
int dim, // dimension of space
int n_bnds, // number of bounds
ANNorthHSArray bnds, // bounds array
ANNorthRect &inner_box); // inner box (returned)
#endif

View File

@ -0,0 +1,136 @@
//----------------------------------------------------------------------
// File: perf.cpp
// Programmer: Sunil Arya and David Mount
// Description: Methods for performance stats
// Last modified: 01/27/10 (Version 1.1.2)
//----------------------------------------------------------------------
// Copyright (c) 1997-2010 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
// Revision 1.0 04/01/05
// Changed names to avoid namespace conflicts.
// Added flush after printing performance stats to fix bug
// in Microsoft Windows version.
// Revision 1.1.2 01/27/10
// Fixed minor compilation bugs for new versions of gcc
//----------------------------------------------------------------------
#include <ann/ANN.h> // basic ANN includes
#include <ann/ANNperf.h> // performance includes
using namespace std; // make std:: available
//----------------------------------------------------------------------
// Performance statistics
// The following data and routines are used for computing
// performance statistics for nearest neighbor searching.
// Because these routines can slow the code down, they can be
// activated and deactivated by defining the PERF variable,
// by compiling with the option: -DPERF
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// Global counters for performance measurement
//----------------------------------------------------------------------
int ann_Ndata_pts = 0; // number of data points
int ann_Nvisit_lfs = 0; // number of leaf nodes visited
int ann_Nvisit_spl = 0; // number of splitting nodes visited
int ann_Nvisit_shr = 0; // number of shrinking nodes visited
int ann_Nvisit_pts = 0; // visited points for one query
int ann_Ncoord_hts = 0; // coordinate hits for one query
int ann_Nfloat_ops = 0; // floating ops for one query
ANNsampStat ann_visit_lfs; // stats on leaf nodes visits
ANNsampStat ann_visit_spl; // stats on splitting nodes visits
ANNsampStat ann_visit_shr; // stats on shrinking nodes visits
ANNsampStat ann_visit_nds; // stats on total nodes visits
ANNsampStat ann_visit_pts; // stats on points visited
ANNsampStat ann_coord_hts; // stats on coordinate hits
ANNsampStat ann_float_ops; // stats on floating ops
//
ANNsampStat ann_average_err; // average error
ANNsampStat ann_rank_err; // rank error
//----------------------------------------------------------------------
// Routines for statistics.
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// annResetStats - reset all sample statistics before a set of queries.
//   data_size is recorded in ann_Ndata_pts so per-point averages
//   (e.g. coord_hits/pt) can be normalized when stats are printed.
//----------------------------------------------------------------------
DLL_API void annResetStats(int data_size) // reset stats for a set of queries
{
	ann_Ndata_pts = data_size;
	ann_visit_lfs.reset();
	ann_visit_spl.reset();
	ann_visit_shr.reset();
	ann_visit_nds.reset();
	ann_visit_pts.reset();
	ann_coord_hts.reset();
	ann_float_ops.reset();
	ann_average_err.reset();
	ann_rank_err.reset();
}
//----------------------------------------------------------------------
// annResetCounts - zero the per-query counters. Call before each query;
//   annUpdateStats() then folds the counts into the sample statistics.
//----------------------------------------------------------------------
DLL_API void annResetCounts()		// reset counts for one query
{
	ann_Nvisit_lfs = 0;
	ann_Nvisit_spl = 0;
	ann_Nvisit_shr = 0;
	ann_Nvisit_pts = 0;
	ann_Ncoord_hts = 0;
	ann_Nfloat_ops = 0;
}
//----------------------------------------------------------------------
// annUpdateStats - fold the current per-query counters into the sample
//   statistics. Total nodes is derived as splitting + leaf visits
//   (shrinking nodes are tracked separately and not added here).
//----------------------------------------------------------------------
DLL_API void annUpdateStats()		// update stats with current counts
{
	ann_visit_lfs += ann_Nvisit_lfs;
	ann_visit_nds += ann_Nvisit_spl + ann_Nvisit_lfs;
	ann_visit_spl += ann_Nvisit_spl;
	ann_visit_shr += ann_Nvisit_shr;
	ann_visit_pts += ann_Nvisit_pts;
	ann_coord_hts += ann_Ncoord_hts;
	ann_float_ops += ann_Nfloat_ops;
}
// print a single statistic as: title= [ mean : stddev ]< min , max >
// Each value is divided by div (e.g. 1000 to report in thousands, or
// the data-set size for per-point rates). s is passed by value, so the
// caller's accumulator is not modified. cout.width only affects the
// next insertion, hence it is reissued before every value.
void print_one_stat(const char* title, ANNsampStat s, double div)
{
	cout << title << "= [ ";
	cout.width(9); cout << s.mean()/div << " : ";
	cout.width(9); cout << s.stdDev()/div << " ]<";
	cout.width(9); cout << s.min()/div << " , ";
	cout.width(9); cout << s.max()/div << " >\n";
}
//----------------------------------------------------------------------
// annPrintStats - print all accumulated statistics to cout.
//   The error lines are printed only when validate is true (they are
//   only meaningful if the caller computed true nearest neighbors).
//   Output is flushed at the end (works around buffering on Windows,
//   per the revision history above).
//----------------------------------------------------------------------
DLL_API void annPrintStats(		// print statistics for a run
	ANNbool validate)		// true if average errors desired
{
	cout.precision(4);		// set floating precision
	cout << "  (Performance stats: "
	<< " [      mean :    stddev ]<      min ,      max >\n";
	print_one_stat("    leaf_nodes       ", ann_visit_lfs, 1);
	print_one_stat("    splitting_nodes  ", ann_visit_spl, 1);
	print_one_stat("    shrinking_nodes  ", ann_visit_shr, 1);
	print_one_stat("    total_nodes      ", ann_visit_nds, 1);
	print_one_stat("    points_visited   ", ann_visit_pts, 1);
	// normalize coordinate hits by data-set size (set in annResetStats)
	print_one_stat("    coord_hits/pt    ", ann_coord_hts, ann_Ndata_pts);
	print_one_stat("    floating_ops_(K) ", ann_float_ops, 1000);
	if (validate) {			// only valid if true NNs were computed
	print_one_stat("    average_error    ", ann_average_err, 1);
	print_one_stat("    rank_error       ", ann_rank_err, 1);
	}
	cout.precision(0);		// restore the default
	cout << "  )\n";
	cout.flush();			// flush (Windows buffering workaround)
}

View File

@ -0,0 +1,125 @@
//----------------------------------------------------------------------
// File: pr_queue.h
// Programmer: Sunil Arya and David Mount
// Description: Include file for priority queue and related
// structures.
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
//----------------------------------------------------------------------
#ifndef PR_QUEUE_H
#define PR_QUEUE_H
#include <ann/ANNx.h> // all ANN includes
#include <ann/ANNperf.h> // performance evaluation
//----------------------------------------------------------------------
// Basic types.
//----------------------------------------------------------------------
typedef void *PQinfo; // info field is generic pointer
typedef ANNdist PQkey; // key field is distance
//----------------------------------------------------------------------
// Priority queue
// A priority queue is a list of items, along with associated
// priorities. The basic operations are insert and extract_minimum.
//
// The priority queue is maintained using a standard binary heap.
// (Implementation note: Indexing is performed from [1..max] rather
// than the C standard of [0..max-1]. This simplifies parent/child
// computations.) User information consists of a void pointer,
// and the user is responsible for casting this quantity into whatever
// useful form is desired.
//
// Because the priority queue is so central to the efficiency of
// query processing, all the code is inline.
//----------------------------------------------------------------------
class ANNpr_queue {

	// Min-priority queue implemented as a binary heap stored in an
	// array indexed [1..max_size]; pq[1] is always the minimum.
	struct pq_node {		// node in priority queue
	PQkey		key;		// key value (a distance)
	PQinfo		info;		// info field (opaque user pointer)
	};
	int		n;		// number of items in queue
	int		max_size;	// maximum queue size
	pq_node		*pq;		// the priority queue (array of nodes)

public:
	ANNpr_queue(int max)		// constructor (given max size)
	{
	n = 0;				// initially empty
	max_size = max;			// maximum number of items
	pq = new pq_node[max+1];	// queue is array [1..max] of nodes
	}

	~ANNpr_queue()			// destructor
	{ delete [] pq; }

	ANNbool empty()			// is queue empty?
	{ if (n==0) return ANNtrue; else return ANNfalse; }

	ANNbool non_empty()		// is queue nonempty?
	{ if (n==0) return ANNfalse; else return ANNtrue; }

	void reset()			// make existing queue empty
	{ n = 0; }			// (storage is reused, not freed)

	inline void insert(		// insert item (inlined for speed)
	PQkey kv,			// key value
	PQinfo inf)			// item info
	{
	// aborts via annError if the queue is already full
	if (++n > max_size) annError("Priority queue overflow.", ANNabort);
	register int r = n;		// start at the new last slot
	while (r > 1) {			// sift up new item
	register int p = r/2;		// parent of r
	ANN_FLOP(1)			// increment floating ops
	if (pq[p].key <= kv)		// in proper order
	break;
	pq[r] = pq[p];			// else pull parent down
	r = p;
	}
	pq[r].key = kv;			// insert new item at final location
	pq[r].info = inf;
	}

	inline void extr_min(		// extract minimum (inlined for speed)
	PQkey &kv,			// key (returned)
	PQinfo &inf)			// item info (returned)
	{
	// precondition: queue is nonempty (pq[1] is the minimum)
	kv = pq[1].key;			// key of min item
	inf = pq[1].info;		// information of min item
	register PQkey kn = pq[n--].key;// last item's key; shrink queue
	register int p = 1;		// p points to item out of position
	register int r = p<<1;		// left child of p
	while (r <= n) {		// sift the hole down the heap
	ANN_FLOP(2)			// increment floating ops
	// set r to smaller child of p
	if (r < n && pq[r].key > pq[r+1].key) r++;
	if (kn <= pq[r].key)		// in proper order
	break;
	pq[p] = pq[r];			// else pull child up
	p = r;				// advance pointers
	r = p<<1;
	}
	pq[p] = pq[n+1];		// move former last item into the hole
	}
};
#endif

View File

@ -0,0 +1,118 @@
//----------------------------------------------------------------------
// File: pr_queue_k.h
// Programmer: Sunil Arya and David Mount
// Description: Include file for priority queue with k items.
// Last modified: 01/04/05 (Version 1.0)
//----------------------------------------------------------------------
// Copyright (c) 1997-2005 University of Maryland and Sunil Arya and
// David Mount. All Rights Reserved.
//
// This software and related documentation is part of the Approximate
// Nearest Neighbor Library (ANN). This software is provided under
// the provisions of the Lesser GNU Public License (LGPL). See the
// file ../ReadMe.txt for further information.
//
// The University of Maryland (U.M.) and the authors make no
// representations about the suitability or fitness of this software for
// any purpose. It is provided "as is" without express or implied
// warranty.
//----------------------------------------------------------------------
// History:
// Revision 0.1 03/04/98
// Initial release
//----------------------------------------------------------------------
#ifndef PR_QUEUE_K_H
#define PR_QUEUE_K_H
#include <ann/ANNx.h> // all ANN includes
#include <ann/ANNperf.h> // performance evaluation
//----------------------------------------------------------------------
// Basic types
//----------------------------------------------------------------------
typedef ANNdist PQKkey; // key field is distance
typedef int PQKinfo; // info field is int
//----------------------------------------------------------------------
// Constants
// The NULL key value is used to initialize the priority queue, and
// so it should be larger than any valid distance, so that it will
// be replaced as legal distance values are inserted. The NULL
// info value must be a nonvalid array index, we use ANN_NULL_IDX,
// which is guaranteed to be negative.
//----------------------------------------------------------------------
const PQKkey PQ_NULL_KEY = ANN_DIST_INF; // nonexistent key value
const PQKinfo PQ_NULL_INFO = ANN_NULL_IDX; // nonexistent info value
//----------------------------------------------------------------------
// ANNmin_k
// An ANNmin_k structure is one which maintains the smallest
// k values (of type PQKkey) and associated information (of type
// PQKinfo). The special info and key values PQ_NULL_INFO and
// PQ_NULL_KEY means that this entry is empty.
//
// It is currently implemented using an array with k items.
// Items are stored in increasing sorted order, and insertions
// are made through standard insertion sort. (This is quite
// inefficient, but current applications call for small values
// of k and relatively few insertions.)
//
// Note that the list contains k+1 entries, but the last entry
// is used as a simple placeholder and is otherwise ignored.
//----------------------------------------------------------------------
class ANNmin_k {

	// Fixed-capacity buffer of the k smallest (key, info) pairs seen so
	// far, kept in increasing key order by insertion sort. The array has
	// one extra slot (index k) that absorbs overflowing inserts and is
	// otherwise ignored.
	struct mk_node {		// node in min_k structure
	PQKkey		key;		// key value
	PQKinfo		info;		// info field (user defined)
	};
	int		k;		// max number of keys to store
	int		n;		// number of keys currently active
	mk_node		*mk;		// the list itself

public:
	ANNmin_k(int max)		// constructor (given max size)
	{
	n = 0;				// initially no items
	k = max;			// maximum number of items
	mk = new mk_node[max+1];	// sorted array plus one scratch slot
	}

	~ANNmin_k()			// destructor
	{ delete [] mk; }

	PQKkey ANNmin_key()		// return minimum key
	{ return (n > 0 ? mk[0].key : PQ_NULL_KEY); }

	PQKkey max_key()		// return maximum key (only when full)
	{ return (n == k ? mk[k-1].key : PQ_NULL_KEY); }

	PQKkey ith_smallest_key(int i)	// ith smallest key (i in [0..n-1])
	{ return (i < n ? mk[i].key : PQ_NULL_KEY); }

	PQKinfo ith_smallest_info(int i) // info for ith smallest (i in [0..n-1])
	{ return (i < n ? mk[i].info : PQ_NULL_INFO); }

	inline void insert(		// insert item (inlined for speed)
	PQKkey kv,			// key value
	PQKinfo inf)			// item info
	{
	int pos = n;			// slide strictly-larger entries up
	while (pos > 0 && mk[pos-1].key > kv) {
	mk[pos] = mk[pos-1];
	pos--;
	}
	mk[pos].key = kv;		// place the new entry here
	mk[pos].info = inf;		// (may land in the scratch slot k)
	if (n < k) n++;			// grow until capacity is reached
	ANN_FLOP(k-pos+1)		// increment floating ops
	}
};
#endif

View File

@ -0,0 +1,39 @@
# Build the bundled FCL (Flexible Collision Library) as gazebo_fcl.
#
# NOTE(review): directory-scoped add_definitions/include_directories are
# retained because the project's cmake_minimum_required is 2.8.6, which
# predates the target-scoped equivalents (target_compile_definitions and
# target_include_directories were introduced in CMake 2.8.11).
add_definitions(-DUSE_PQP=0)       # disable the PQP narrowphase backend
add_definitions(-DUSE_SVMLIGHT=0)  # disable SVMLight support

set (sources
  src/AABB.cpp
  src/OBB.cpp
  src/RSS.cpp
  src/vec_3f.cpp
  src/traversal_node_base.cpp
  src/traversal_node_bvhs.cpp
  src/intersect.cpp
  src/motion.cpp
  src/BV_fitter.cpp
  src/BV_splitter.cpp
  src/BVH_model.cpp
  src/BVH_utility.cpp
  src/transform.cpp
  src/simple_setup.cpp
  src/geometric_shapes.cpp
  src/geometric_shapes_utility.cpp
  src/geometric_shapes_intersect.cpp
  src/collision_node.cpp
  src/traversal_recurse.cpp
  src/broad_phase_collision.cpp
  src/collision.cpp
  src/collision_func_matrix.cpp
  src/interval_tree.cpp
  src/conservative_advancement.cpp
)

# Use PROJECT_SOURCE_DIR (the Gazebo project root) rather than
# CMAKE_SOURCE_DIR so these paths remain correct if Gazebo is built as a
# subproject of a larger source tree; in a standalone build the two are
# identical. SYSTEM suppresses warnings from these bundled headers.
include_directories(SYSTEM
  ${PROJECT_SOURCE_DIR}/deps/fcl/include
  ${PROJECT_SOURCE_DIR}/deps/ann/include
  ${PROJECT_SOURCE_DIR}/deps/libccd/include
)

gz_add_library(gazebo_fcl ${sources})
# FCL depends on the bundled libccd and ANN libraries built elsewhere
# in deps/.
target_link_libraries(gazebo_fcl gazebo_ccd gazebo_ann)
gz_install_library(gazebo_fcl)

View File

@ -0,0 +1,185 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_AABB_H
#define FCL_AABB_H
#include "fcl/BVH_internal.h"
#include "fcl/vec_3f.h"
/** \brief Main namespace */
namespace fcl
{
/** \brief A class describing the AABB collision structure, which is a box in 3D space determined by two diagonal points */
class AABB
{
public:
/** \brief The min point in the AABB */
Vec3f min_;
/** \brief The max point in the AABB */
Vec3f max_;
/** \brief Constructor creating an AABB with infinite size */
AABB();
/** \brief Constructor creating an AABB at position v with zero size */
AABB(const Vec3f& v) : min_(v), max_(v)
{
}
/** \brief Constructor creating an AABB with two endpoints a and b */
AABB(const Vec3f& a, const Vec3f&b)
{
min_ = min(a, b);
max_ = max(a, b);
}
/** \brief Check whether two AABB are overlap */
inline bool overlap(const AABB& other) const
{
if(min_[0] > other.max_[0]) return false;
if(min_[1] > other.max_[1]) return false;
if(min_[2] > other.max_[2]) return false;
if(max_[0] < other.min_[0]) return false;
if(max_[1] < other.min_[1]) return false;
if(max_[2] < other.min_[2]) return false;
return true;
}
/** \brief Check whether two AABB are overlapped along specific axis */
inline bool axisOverlap(const AABB& other, int axis_id) const
{
if(min_[axis_id] > other.max_[axis_id]) return false;
if(max_[axis_id] < other.min_[axis_id]) return false;
return true;
}
/** \brief Check whether two AABB are overlap and return the overlap part */
inline bool overlap(const AABB& other, AABB& overlap_part) const
{
if(!overlap(other))
return false;
overlap_part.min_ = max(min_, other.min_);
overlap_part.max_ = min(max_, other.max_);
return true;
}
/** \brief Check whether the AABB contains a point */
inline bool contain(const Vec3f& p) const
{
if(p[0] < min_[0] || p[0] > max_[0]) return false;
if(p[1] < min_[1] || p[1] > max_[1]) return false;
if(p[2] < min_[2] || p[2] > max_[2]) return false;
return true;
}
/** \brief Merge the AABB and a point */
inline AABB& operator += (const Vec3f& p)
{
min_ = min(min_, p);
max_ = max(max_, p);
return *this;
}
/** \brief Merge the AABB and another AABB */
inline AABB& operator += (const AABB& other)
{
min_ = min(min_, other.min_);
max_ = max(max_, other.max_);
return *this;
}
/** \brief Return the merged AABB of current AABB and the other one */
inline AABB operator + (const AABB& other) const
{
AABB res(*this);
return res += other;
}
/** \brief Width of the AABB */
inline BVH_REAL width() const
{
return max_[0] - min_[0];
}
/** \brief Height of the AABB */
inline BVH_REAL height() const
{
return max_[1] - min_[1];
}
/** \brief Depth of the AABB */
inline BVH_REAL depth() const
{
return max_[2] - min_[2];
}
/** \brief Volume of the AABB */
inline BVH_REAL volume() const
{
return width() * height() * depth();
}
/** \brief Size of the AABB, for split order */
inline BVH_REAL size() const
{
return (max_ - min_).sqrLength();
}
/** \brief Center of the AABB */
inline Vec3f center() const
{
return (min_ + max_) * 0.5;
}
/** \brief The distance between two AABB
* Not implemented.
*/
BVH_REAL distance(const AABB& other, Vec3f* P = NULL, Vec3f* Q = NULL) const;
};
}
#endif

View File

@ -0,0 +1,52 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_BV_H
#define FCL_BV_H
#include "fcl/kDOP.h"
#include "fcl/AABB.h"
#include "fcl/OBB.h"
#include "fcl/RSS.h"
/** \brief Main namespace */
namespace fcl
{
}
#endif

View File

@ -0,0 +1,67 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_BVH_FRONT_H
#define FCL_BVH_FRONT_H
#include <list>
/** \brief Main namespace */
namespace fcl
{
/** \brief A node of the BVH traversal front: a pair of BV indices, one per model.
 * The node is invalidated when a collision is detected for this pair. */
struct BVHFrontNode
{
bool valid; // cleared when a collision is detected on this front node
int left, right; // BV indices in the two models
BVHFrontNode(int left_, int right_) : valid(true), left(left_), right(right_)
{
}
};
/** \brief The BVH front list: the collection of BVHFrontNode pairs still to be examined */
typedef std::list<BVHFrontNode> BVHFrontList;
}
#endif

View File

@ -0,0 +1,87 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_BVH_INTERNAL_H
#define FCL_BVH_INTERNAL_H
namespace fcl
{
/** \brief Scalar type used throughout the BVH code */
typedef double BVH_REAL;
/** \brief States of the BVH construction state machine:
 * empty -> begun -> processed -> replace_begun -> processed -> ......
 *                      |
 *                      |-> update_begun -> updated -> .....
 * */
enum BVHBuildState
{
BVH_BUILD_STATE_EMPTY, // empty state, immediately after constructor
BVH_BUILD_STATE_BEGUN, // after beginModel(), state for adding geometry primitives
BVH_BUILD_STATE_PROCESSED, // after the tree has been built, ready for collision-detection use
BVH_BUILD_STATE_UPDATE_BEGUN, // after beginUpdateModel(), state for updating geometry primitives
BVH_BUILD_STATE_UPDATED, // after the tree has been built for updated geometry, ready for continuous-collision use
BVH_BUILD_STATE_REPLACE_BEGUN // after beginReplaceModel(), state for replacing geometry primitives
};
/** \brief Return codes for BVH build/update operations: BVH_OK on success, negative on error */
enum BVHReturnCode
{
BVH_OK = 0, // operation completed successfully
BVH_ERR_MODEL_OUT_OF_MEMORY = -1, // could not allocate memory for the model data
BVH_ERR_OUT_OF_MEMORY = -2, // could not allocate other working memory
BVH_ERR_UNPROCESSED_MODEL = -3, // operation requires a model in the processed state
BVH_ERR_BUILD_OUT_OF_SEQUENCE = -4, // build calls made in an order the state machine forbids
BVH_ERR_BUILD_EMPTY_MODEL = -5, // attempted to finish building a model with no geometry
BVH_ERR_BUILD_EMPTY_PREVIOUS_FRAME = -6, // update requested but no previous-frame data exists
BVH_ERR_UNSUPPORTED_FUNCTION = -7, // operation not supported for this model type
BVH_ERR_UNUPDATED_MODEL = -8, // operation requires a model in the updated state
BVH_ERR_INCORRECT_DATA = -9, // supplied geometry data is inconsistent
BVH_ERR_UNKNOWN = -10 // unspecified failure
};
/** \brief Kind of geometry stored in a BVH model */
enum BVHModelType
{
BVH_MODEL_UNKNOWN, // no geometry added yet
BVH_MODEL_TRIANGLES, // triangle mesh: vertices plus triangle indices
BVH_MODEL_POINTCLOUD // points only: vertices without triangle indices
};
}
#endif

View File

@ -0,0 +1,276 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_BVH_MODEL_H
#define FCL_BVH_MODEL_H
#include "fcl/collision_object.h"
#include "fcl/BVH_internal.h"
#include "fcl/BV.h"
#include "fcl/BV_node.h"
#include "fcl/primitive.h"
#include "fcl/vec_3f.h"
#include "fcl/BV_splitter.h"
#include "fcl/BV_fitter.h"
#include <vector>
#include <boost/shared_ptr.hpp>
/** \brief Main namespace */
namespace fcl
{
/** \brief A class describing the bounding hierarchy of a mesh model.
 * Owns its geometry arrays (vertices, tri_indices, prev_vertices), the BV node
 * array (bvs) and the primitive index map; all are released in the destructor. */
template<typename BV>
class BVHModel : public CollisionObject
{
private:
// Allocated capacity of tri_indices (may exceed num_tris while building incrementally)
int num_tris_allocated;
// Allocated capacity of vertices (may exceed num_vertices while building incrementally)
int num_vertices_allocated;
// Allocated capacity of bvs
int num_bvs_allocated;
// NOTE(review): not initialized in the constructor — presumably set in
// beginUpdateModel(); confirm before relying on it.
int num_vertex_updated; // for ccd vertex update
// Maps each BV-leaf slot to the index of its primitive in the original data; owned by this model
unsigned int* primitive_indices;
public:
/** \brief The state of BVH building process */
BVHBuildState build_state;
/** \brief Split rule to split one BV node into two children */
boost::shared_ptr<BVSplitterBase<BV> > bv_splitter;
/** \brief Fitting rule to fit a BV node to a set of geometry primitives */
boost::shared_ptr<BVFitterBase<BV> > bv_fitter;
/** \brief Model type: triangles when both vertices and triangle indices exist,
 * point cloud when only vertices exist, unknown otherwise */
BVHModelType getModelType() const
{
if(num_tris && num_vertices)
return BVH_MODEL_TRIANGLES;
else if(num_vertices)
return BVH_MODEL_POINTCLOUD;
else
return BVH_MODEL_UNKNOWN;
}
/** \brief Constructing an empty BVH: all buffers NULL, all counts zero,
 * with a mean-split rule and the default fitter installed */
BVHModel()
{
vertices = NULL;
tri_indices = NULL;
bvs = NULL;
num_tris = 0;
num_tris_allocated = 0;
num_vertices = 0;
num_vertices_allocated = 0;
num_bvs = 0;
num_bvs_allocated = 0;
prev_vertices = NULL;
primitive_indices = NULL;
build_state = BVH_BUILD_STATE_EMPTY;
bv_splitter.reset(new BVSplitter<BV>(SPLIT_METHOD_MEAN));
bv_fitter.reset(new BVFitter<BV>());
}
// Copy constructor, defined out of line (deep-copies the owned arrays — confirm in the .cpp)
BVHModel(const BVHModel& other);
/** \brief Release all owned buffers (delete[] on a NULL pointer is a no-op) */
~BVHModel()
{
delete [] vertices;
delete [] tri_indices;
delete [] bvs;
delete [] prev_vertices;
delete [] primitive_indices;
}
/** \brief We provide getBV() and getNumBVs() because BVH may be compressed (in future), so
we must provide some flexibility here */
/** \brief Get the BV of index id */
const BVNode<BV>& getBV(int id) const
{
return bvs[id];
}
/** \brief Get the BV of index id */
BVNode<BV>& getBV(int id)
{
return bvs[id];
}
/** \brief Get the number of BVs */
int getNumBVs() const
{
return num_bvs;
}
/** \brief Get the object type: it is a BVH */
OBJECT_TYPE getObjectType() const { return OT_BVH; }
/** \brief Get the BV type (specialized per BV below; generic case is unknown) */
NODE_TYPE getNodeType() const { return BV_UNKNOWN; }
/** \brief Compute the AABB for the BVH, used for broad-phase collision */
void computeLocalAABB();
/** \brief Geometry point data */
Vec3f* vertices;
/** \brief Geometry triangle index data, will be NULL for point clouds */
Triangle* tri_indices;
/** \brief Geometry point data in previous frame */
Vec3f* prev_vertices;
/** \brief Number of triangles */
int num_tris;
/** \brief Number of points */
int num_vertices;
// The following build/replace/update functions return int status codes —
// presumably BVHReturnCode values (BVH_OK on success); confirm in the implementation.
/** \brief Begin a new BVH model */
int beginModel(int num_tris = 0, int num_vertices = 0);
/** \brief Add one point in the new BVH model */
int addVertex(const Vec3f& p);
/** \brief Add one triangle in the new BVH model */
int addTriangle(const Vec3f& p1, const Vec3f& p2, const Vec3f& p3);
/** \brief Add a set of triangles in the new BVH model */
int addSubModel(const std::vector<Vec3f>& ps, const std::vector<Triangle>& ts);
/** \brief Add a set of points in the new BVH model */
int addSubModel(const std::vector<Vec3f>& ps);
/** \brief End BVH model construction, will build the bounding volume hierarchy */
int endModel();
/** \brief Replace the geometry information of current frame (i.e. should have the same mesh topology with the previous frame) */
int beginReplaceModel();
/** \brief Replace one point in the old BVH model */
int replaceVertex(const Vec3f& p);
/** \brief Replace one triangle in the old BVH model */
int replaceTriangle(const Vec3f& p1, const Vec3f& p2, const Vec3f& p3);
/** \brief Replace a set of points in the old BVH model */
int replaceSubModel(const std::vector<Vec3f>& ps);
/** \brief End BVH model replacement, will also refit or rebuild the bounding volume hierarchy */
int endReplaceModel(bool refit = true, bool bottomup = true);
/** \brief Replace the geometry information of current frame (i.e. should have the same mesh topology with the previous frame).
 * The current frame will be saved as the previous frame in prev_vertices.
 * */
int beginUpdateModel();
/** \brief Update one point in the old BVH model */
int updateVertex(const Vec3f& p);
/** \brief Update one triangle in the old BVH model */
int updateTriangle(const Vec3f& p1, const Vec3f& p2, const Vec3f& p3);
/** \brief Update a set of points in the old BVH model */
int updateSubModel(const std::vector<Vec3f>& ps);
/** \brief End BVH model update, will also refit or rebuild the bounding volume hierarchy */
int endUpdateModel(bool refit = true, bool bottomup = true);
/** \brief Check the number of memory used */
int memUsage(int msg) const;
private:
/** \brief Bounding volume hierarchy */
BVNode<BV>* bvs;
/** \brief Number of BV nodes in bounding volume hierarchy */
int num_bvs;
/** \brief Build the bounding volume hierarchy */
int buildTree();
/** \brief Refit the bounding volume hierarchy */
int refitTree(bool bottomup);
/** \brief Refit the bounding volume hierarchy in a top-down way (slow but more compact)*/
int refitTree_topdown();
/** \brief Refit the bounding volume hierarchy in a bottom-up way (fast but less compact) */
int refitTree_bottomup();
/** \brief Recursive kernel for hierarchy construction */
int recursiveBuildTree(int bv_id, int first_primitive, int num_primitives);
/** \brief Recursive kernel for bottomup refitting */
int recursiveRefitTree_bottomup(int bv_id);
};
/** Specialization of getNodeType() for BVHModel with different BV types */
template<>
NODE_TYPE BVHModel<AABB>::getNodeType() const;
template<>
NODE_TYPE BVHModel<OBB>::getNodeType() const;
template<>
NODE_TYPE BVHModel<RSS>::getNodeType() const;
template<>
NODE_TYPE BVHModel<KDOP<16> >::getNodeType() const;
template<>
NODE_TYPE BVHModel<KDOP<18> >::getNodeType() const;
template<>
NODE_TYPE BVHModel<KDOP<24> >::getNodeType() const;
}
#endif

View File

@ -0,0 +1,116 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_BVH_UTILITY_H
#define FCL_BVH_UTILITY_H
#include "fcl/primitive.h"
#include "fcl/vec_3f.h"
#include "fcl/BVH_model.h"
namespace fcl
{
/** \brief Expand each BV in the model so it also covers its vertices displaced by
 * r times their sampling-uncertainty sigma along each uncertainty axis */
template<typename BV>
void BVHExpand(BVHModel<BV>& model, const Uncertainty* ucs, BVH_REAL r)
{
for(int i = 0; i < model.num_bvs; ++i)
{
BVNode<BV>& bvnode = model.getBV(i);
BV expanded;
for(int j = 0; j < bvnode.num_primitives; ++j)
{
int v_id = bvnode.first_primitive + j;
const Uncertainty& uc = ucs[v_id];
Vec3f& v = model.vertices[v_id];
for(int k = 0; k < 3; ++k)
{
Vec3f delta = uc.axis[k] * (r * uc.sigma[k]);
expanded += (v + delta);
expanded += (v - delta);
}
}
bvnode.bv = expanded;
}
}
/** \brief Expand the BVH bounding boxes according to uncertainty, for OBB */
void BVHExpand(BVHModel<OBB>& model, const Uncertainty* ucs, BVH_REAL r);
/** \brief Expand the BVH bounding boxes according to uncertainty, for RSS */
void BVHExpand(BVHModel<RSS>& model, const Uncertainty* ucs, BVH_REAL r);
/** \brief Estimate the uncertainty of point clouds due to sampling procedure */
void estimateSamplingUncertainty(Vec3f* vertices, int num_vertices, Uncertainty* ucs);
/** \brief Compute the covariance matrix for a set or subset of points. */
void getCovariance(Vec3f* ps, Vec3f* ps2, unsigned int* indices, int n, Vec3f M[3]);
/** \brief Compute the covariance matrix for a set or subset of triangles. */
void getCovariance(Vec3f* ps, Vec3f* ps2, Triangle* ts, unsigned int* indices, int n, Vec3f M[3]);
/** \brief Compute the RSS bounding volume parameters: radius, rectangle size and the origin.
* The bounding volume axes are known.
*/
void getRadiusAndOriginAndRectangleSize(Vec3f* ps, Vec3f* ps2, unsigned int* indices, int n, Vec3f axis[3], Vec3f& origin, BVH_REAL l[2], BVH_REAL& r);
/** \brief Compute the RSS bounding volume parameters: radius, rectangle size and the origin.
* The bounding volume axes are known.
*/
void getRadiusAndOriginAndRectangleSize(Vec3f* ps, Vec3f* ps2, Triangle* ts, unsigned int* indices, int n, Vec3f axis[3], Vec3f& origin, BVH_REAL l[2], BVH_REAL& r);
/** \brief Compute the bounding volume extent and center for a set or subset of points.
* The bounding volume axes are known.
*/
void getExtentAndCenter(Vec3f* ps, Vec3f* ps2, unsigned int* indices, int n, Vec3f axis[3], Vec3f& center, Vec3f& extent);
/** \brief Compute the bounding volume extent and center for a set or subset of points.
* The bounding volume axes are known.
*/
void getExtentAndCenter(Vec3f* ps, Vec3f* ps2, Triangle* ts, unsigned int* indices, int n, Vec3f axis[3], Vec3f& center, Vec3f& extent);
}
#endif

View File

@ -0,0 +1,256 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_BV_FITTER_H
#define FCL_BV_FITTER_H
#include "fcl/BVH_internal.h"
#include "fcl/primitive.h"
#include "fcl/vec_3f.h"
#include "fcl/OBB.h"
#include "fcl/RSS.h"
#include <iostream>
/** \brief Main namespace */
namespace fcl
{
/** \brief Compute a bounding volume that fits a set of n points by merging them one by one */
template<typename BV>
void fit(Vec3f* ps, int n, BV& bv)
{
for(Vec3f* p = ps; p != ps + n; ++p)
{
bv += *p;
}
}
template<>
void fit<OBB>(Vec3f* ps, int n, OBB& bv);
template<>
void fit<RSS>(Vec3f* ps, int n, RSS& bv);
/** \brief Interface for fitting a bounding volume of type BV to a subset of geometry primitives */
template<typename BV>
class BVFitterBase
{
public:
/** \brief Virtual destructor: BVHModel holds fitters via
 * boost::shared_ptr<BVFitterBase<BV> >, so derived fitters may be destroyed
 * through a base-class pointer; a polymorphic base needs a virtual destructor. */
virtual ~BVFitterBase() {}
/** \brief Set the geometry data (single frame) used by fit() */
virtual void set(Vec3f* vertices_, Triangle* tri_indices_, BVHModelType type_) = 0;
/** \brief Set the geometry data of both the current and the previous frame */
virtual void set(Vec3f* vertices_, Vec3f* prev_vertices_, Triangle* tri_indices_, BVHModelType type_) = 0;
/** \brief Compute a BV that fits the primitives selected by primitive_indices */
virtual BV fit(unsigned int* primitive_indices, int num_primitives) = 0;
/** \brief Clear the geometry data set before */
virtual void clear() = 0;
};
/** \brief Default bounding-volume fitter: fits a BV to points or triangles by merging vertices */
template<typename BV>
class BVFitter : public BVFitterBase<BV>
{
public:
/** \brief Prepare the geometry primitive data for fitting a single frame */
void set(Vec3f* vertices_, Triangle* tri_indices_, BVHModelType type_)
{
vertices = vertices_;
prev_vertices = NULL;
tri_indices = tri_indices_;
type = type_;
}
/** \brief Prepare the geometry primitive data of both the current and the previous frame */
void set(Vec3f* vertices_, Vec3f* prev_vertices_, Triangle* tri_indices_, BVHModelType type_)
{
vertices = vertices_;
prev_vertices = prev_vertices_;
tri_indices = tri_indices_;
type = type_;
}
/** \brief Compute a bounding volume that fits the primitives (points or triangles) selected
 * by primitive_indices, using the geometry installed by a previous set() call */
BV fit(unsigned int* primitive_indices, int num_primitives)
{
BV bv;
for(int i = 0; i < num_primitives; ++i)
{
unsigned int index = primitive_indices[i];
if(type == BVH_MODEL_TRIANGLES) /* the primitive is a triangle */
{
const Triangle& t = tri_indices[index];
bv += vertices[t[0]];
bv += vertices[t[1]];
bv += vertices[t[2]];
if(prev_vertices) /* also cover the previous frame */
{
bv += prev_vertices[t[0]];
bv += prev_vertices[t[1]];
bv += prev_vertices[t[2]];
}
}
else if(type == BVH_MODEL_POINTCLOUD) /* the primitive is a point */
{
bv += vertices[index];
if(prev_vertices) /* also cover the previous frame */
bv += prev_vertices[index];
}
}
return bv;
}
/** \brief Forget the geometry data set before (pointers are reset, not freed) */
void clear()
{
vertices = NULL;
prev_vertices = NULL;
tri_indices = NULL;
type = BVH_MODEL_UNKNOWN;
}
private:
Vec3f* vertices; // current-frame vertex data
Vec3f* prev_vertices; // previous-frame vertex data, NULL when fitting a single frame
Triangle* tri_indices; // triangle indices; unused for point clouds
BVHModelType type; // kind of primitive stored (triangles / points / unknown)
};
/** \brief Specification of BVFitter for OBB bounding volume */
template<>
class BVFitter<OBB> : public BVFitterBase<OBB>
{
public:
/** \brief Prepare the geometry primitive data for fitting a single frame (prev_vertices is reset) */
void set(Vec3f* vertices_, Triangle* tri_indices_, BVHModelType type_)
{
vertices = vertices_;
prev_vertices = NULL;
tri_indices = tri_indices_;
type = type_;
}
/** \brief Prepare the geometry primitive data of both the current and the previous frame */
void set(Vec3f* vertices_, Vec3f* prev_vertices_, Triangle* tri_indices_, BVHModelType type_)
{
vertices = vertices_;
prev_vertices = prev_vertices_;
tri_indices = tri_indices_;
type = type_;
}
/** \brief Compute a bounding volume that fits a set of primitives (points or triangles).
 * The primitive data was set by set function and primitive_indices is the primitive index relative to the data.
 * Defined out of line. */
OBB fit(unsigned int* primitive_indices, int num_primitives);
/** \brief Clear the geometry primitive data (pointers are reset to NULL, not freed) */
void clear()
{
vertices = NULL;
prev_vertices = NULL;
tri_indices = NULL;
type = BVH_MODEL_UNKNOWN;
}
private:
Vec3f* vertices; // current-frame vertex data
Vec3f* prev_vertices; // previous-frame vertex data, NULL when fitting a single frame
Triangle* tri_indices; // triangle indices; unused for point clouds
BVHModelType type; // kind of primitive stored
};
/** \brief Specification of BVFitter for RSS bounding volume */
template<>
class BVFitter<RSS> : public BVFitterBase<RSS>
{
public:
/** \brief Prepare the geometry primitive data for fitting a single frame (prev_vertices is reset) */
void set(Vec3f* vertices_, Triangle* tri_indices_, BVHModelType type_)
{
vertices = vertices_;
prev_vertices = NULL;
tri_indices = tri_indices_;
type = type_;
}
/** \brief Prepare the geometry primitive data of both the current and the previous frame */
void set(Vec3f* vertices_, Vec3f* prev_vertices_, Triangle* tri_indices_, BVHModelType type_)
{
vertices = vertices_;
prev_vertices = prev_vertices_;
tri_indices = tri_indices_;
type = type_;
}
/** \brief Compute a bounding volume that fits a set of primitives (points or triangles).
 * The primitive data was set by set function and primitive_indices is the primitive index relative to the data.
 * Defined out of line. */
RSS fit(unsigned int* primitive_indices, int num_primitives);
/** \brief Clear the geometry primitive data (pointers are reset to NULL, not freed) */
void clear()
{
vertices = NULL;
prev_vertices = NULL;
tri_indices = NULL;
type = BVH_MODEL_UNKNOWN;
}
private:
Vec3f* vertices; // current-frame vertex data
Vec3f* prev_vertices; // previous-frame vertex data, NULL when fitting a single frame
Triangle* tri_indices; // triangle indices; unused for point clouds
BVHModelType type; // kind of primitive stored
};
}
#endif

View File

@ -0,0 +1,94 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_BV_NODE_H
#define FCL_BV_NODE_H
/** \brief Main namespace */
namespace fcl
{
/** \brief A class describing a bounding volume node: a BV plus the indexing scheme
 * that links it to its children and to the primitives it covers */
template<typename BV>
struct BVNode
{
/** \brief A bounding volume */
BV bv;
/** \brief An index for first child node or primitive
 * If the value is positive, it is the index of the first child bv node
 * If the value is negative, it is -(primitive index + 1)
 * Zero is not used.
 */
int first_child;
/** \brief The start id the primitive belonging to the current node. The index is referred to the primitive_indices in BVHModel and from that
 * we can obtain the primitive's index in original data indirectly.
 */
int first_primitive;
/** \brief The number of primitives belonging to the current node */
int num_primitives;
/** \brief Whether current node is a leaf node (i.e. contains a primitive index) */
bool isLeaf() const { return first_child < 0; }
/** \brief Return the primitive index. The index is referred to the original data (i.e. vertices or tri_indices) in BVHModel */
int primitiveId() const { return -(first_child + 1); }
/** \brief Return the index of the first child. The index is referred to the bounding volume array (i.e. bvs) in BVHModel */
int leftChild() const { return first_child; }
/** \brief Return the index of the second child. The index is referred to the bounding volume array (i.e. bvs) in BVHModel */
int rightChild() const { return first_child + 1; }
/** \brief Check whether two BVNode collide */
bool overlap(const BVNode& other) const
{
return bv.overlap(other.bv);
}
/** \brief Distance between the two nodes' bounding volumes; P1/P2, if non-NULL,
 * receive the closest points (delegates to the BV type's distance) */
BVH_REAL distance(const BVNode& other, Vec3f* P1 = NULL, Vec3f* P2 = NULL) const
{
return bv.distance(other.bv, P1, P2);
}
};
}
#endif

View File

@ -0,0 +1,291 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_BV_SPLITTER_H
#define FCL_BV_SPLITTER_H
#include "fcl/BVH_internal.h"
#include "fcl/primitive.h"
#include "fcl/vec_3f.h"
#include "fcl/OBB.h"
#include <vector>
#include <iostream>
/** \brief Main namespace */
namespace fcl
{
/** \brief Base class for BV splitting operation */
template<typename BV>
class BVSplitterBase
{
public:
/** \brief Virtual destructor: BVHModel holds splitters via
 * boost::shared_ptr<BVSplitterBase<BV> >, so derived splitters may be destroyed
 * through a base-class pointer; a polymorphic base needs a virtual destructor. */
virtual ~BVSplitterBase() {}
/** \brief Set the geometry data needed by the split rule */
virtual void set(Vec3f* vertices_, Triangle* tri_indices_, BVHModelType type_) = 0;
/** \brief Compute the split rule according to a subset of geometry and the corresponding BV node */
virtual void computeRule(const BV& bv, unsigned int* primitive_indices, int num_primitives) = 0;
/** \brief Apply the split rule on a given point */
virtual bool apply(const Vec3f& q) const = 0;
/** \brief Clear the geometry data set before */
virtual void clear() = 0;
};
/** \brief Split heuristics: mean of the primitives, median of the primitives, or the BV's own center */
enum SplitMethodType {SPLIT_METHOD_MEAN, SPLIT_METHOD_MEDIAN, SPLIT_METHOD_BV_CENTER};
/** \brief A class describing the split rule that splits each BV node */
template<typename BV>
class BVSplitter : public BVSplitterBase<BV>
{
public:
/** \brief Construct a splitter that uses the given split heuristic */
BVSplitter(SplitMethodType method) : split_method(method)
{
}
/** \brief Set the geometry data needed by the split rule (pointers are stored, not copied) */
void set(Vec3f* vertices_, Triangle* tri_indices_, BVHModelType type_)
{
vertices = vertices_;
tri_indices = tri_indices_;
type = type_;
}
/** \brief Compute the split rule for a subset of geometry and its BV node,
 * dispatching on the configured split heuristic */
void computeRule(const BV& bv, unsigned int* primitive_indices, int num_primitives)
{
if(split_method == SPLIT_METHOD_MEAN)
computeRule_mean(bv, primitive_indices, num_primitives);
else if(split_method == SPLIT_METHOD_MEDIAN)
computeRule_median(bv, primitive_indices, num_primitives);
else if(split_method == SPLIT_METHOD_BV_CENTER)
computeRule_bvcenter(bv, primitive_indices, num_primitives);
else
std::cerr << "Split method not supported" << std::endl;
}
/** \brief Apply the split rule on a given point */
bool apply(const Vec3f& q) const
{
return q[split_axis] > split_value;
}
/** \brief Clear the geometry data set before */
void clear()
{
vertices = NULL;
tri_indices = NULL;
type = BVH_MODEL_UNKNOWN;
}
private:
/** \brief The split axis */
int split_axis;
/** \brief The split threshold */
BVH_REAL split_value;
Vec3f* vertices;
Triangle* tri_indices;
BVHModelType type;
SplitMethodType split_method;
/** \brief Split the node from center */
void computeRule_bvcenter(const BV& bv, unsigned int* /*primitive_indices*/, int /*num_primitives*/)
{
Vec3f center = bv.center();
int axis = 2;
if(bv.width() >= bv.height() && bv.width() >= bv.depth())
axis = 0;
else if(bv.height() >= bv.width() && bv.height() >= bv.depth())
axis = 1;
split_axis = axis;
split_value = center[axis];
}
/** \brief Split the node according to the mean of the data contained */
void computeRule_mean(const BV& bv, unsigned int* primitive_indices, int num_primitives)
{
int axis = 2;
if(bv.width() >= bv.height() && bv.width() >= bv.depth())
axis = 0;
else if(bv.height() >= bv.width() && bv.height() >= bv.depth())
axis = 1;
split_axis = axis;
BVH_REAL sum = 0;
if(type == BVH_MODEL_TRIANGLES)
{
for(int i = 0; i < num_primitives; ++i)
{
const Triangle& t = tri_indices[primitive_indices[i]];
sum += ((vertices[t[0]][split_axis] + vertices[t[1]][split_axis] + vertices[t[2]][split_axis]) / 3);
}
}
else if(type == BVH_MODEL_POINTCLOUD)
{
for(int i = 0; i < num_primitives; ++i)
{
sum += vertices[primitive_indices[i]][split_axis];
}
}
split_value = sum / num_primitives;
}
/** \brief Split the node according to the median of the data contained */
void computeRule_median(const BV& bv, unsigned int* primitive_indices, int num_primitives)
{
int axis = 2;
if(bv.width() >= bv.height() && bv.width() >= bv.depth())
axis = 0;
else if(bv.height() >= bv.width() && bv.height() >= bv.depth())
axis = 1;
split_axis = axis;
std::vector<BVH_REAL> proj(num_primitives);
if(type == BVH_MODEL_TRIANGLES)
{
for(int i = 0; i < num_primitives; ++i)
{
const Triangle& t = tri_indices[primitive_indices[i]];
proj[i] = (vertices[t[0]][split_axis] + vertices[t[1]][split_axis] + vertices[t[2]][split_axis]) / 3;
}
}
else if(type == BVH_MODEL_POINTCLOUD)
{
for(int i = 0; i < num_primitives; ++i)
proj[i] = vertices[primitive_indices[i]][split_axis];
}
std::sort(proj.begin(), proj.end());
if(num_primitives % 2 == 1)
{
split_value = proj[(num_primitives - 1) / 2];
}
else
{
split_value = (proj[num_primitives / 2] + proj[num_primitives / 2 - 1]) / 2;
}
}
};
/** \brief BVSplitter specialization for OBB: uses the OBB's first axis as the
 * split direction, regardless of the requested heuristic */
template<>
class BVSplitter<OBB> : public BVSplitterBase<OBB>
{
public:
  /** \brief Construct with the requested heuristic (stored for interface parity) */
  BVSplitter(SplitMethodType method) : split_method(method)
  {
  }

  /** \brief Set the geometry data needed by the split rule
   * (arrays are borrowed, not owned) */
  void set(Vec3f* vertices_, Triangle* tri_indices_, BVHModelType type_)
  {
    type = type_;
    vertices = vertices_;
    tri_indices = tri_indices_;
  }

  /** \brief Compute the split rule: the OBB's first axis becomes the split
   * direction, the first component of the center the threshold */
  void computeRule(const OBB& bv, unsigned int* /*primitive_indices*/, int /*num_primitives*/)
  {
    split_vector = bv.axis[0];
    split_value = bv.center()[0];
  }

  /** \brief Apply the split rule on a given point: compare its projection on
   * the split direction with the threshold */
  bool apply(const Vec3f& q) const
  {
    const Vec3f p(q[0], q[1], q[2]);
    return split_vector.dot(p) > split_value;
  }

  /** \brief Clear the geometry data set before */
  void clear()
  {
    type = BVH_MODEL_UNKNOWN;
    vertices = NULL;
    tri_indices = NULL;
  }

private:
  /** \brief Split the node from center */
  void computeRule_bvcenter(const OBB& bv, unsigned int* primitive_indices, int num_primitives);
  /** \brief Split the node according to the mean of the data contained */
  void computeRule_mean(const OBB& bv, unsigned int* primitive_indices, int num_primitives);
  /** \brief Split the node according to the median of the data contained */
  void computeRule_median(const OBB& bv, unsigned int* primitive_indices, int num_primitives);

  BVH_REAL split_value;
  Vec3f split_vector;
  Vec3f* vertices;        // borrowed vertex array
  Triangle* tri_indices;  // borrowed triangle index array
  BVHModelType type;      // triangle mesh or point cloud
  SplitMethodType split_method;
};
}
#endif

View File

@ -0,0 +1,153 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_PQP_H
#define FCL_PQP_H
#include "fcl/BVH_internal.h"
#include "fcl/vec_3f.h"
/** \brief Main namespace */
namespace fcl
{
/** \brief Oriented bounding box (OBB): an orientation frame, a center and
 * half-extents along each local axis */
class OBB
{
public:
/** \brief Orientation of OBB */
Vec3f axis[3]; // axis[i] is the ith column of the orientation matrix, i.e. the ith axis of the OBB
/** \brief center of OBB */
Vec3f To;
/** \brief Half dimensions of OBB */
Vec3f extent;
/** \brief Default constructor: members are left uninitialized */
OBB() {}
/** \brief Check collision between two OBB */
bool overlap(const OBB& other) const;
/** \brief Check collision between two OBB and return the overlap part.
* For OBB, we return nothing, as the overlap part of two obbs usually is not an obb
*/
bool overlap(const OBB& other, OBB& /*overlap_part*/) const
{
return overlap(other);
}
/** \brief Check whether the OBB contains a point */
inline bool contain(const Vec3f& p) const;
/** \brief A simple way to merge the OBB and a point, not compact. */
OBB& operator += (const Vec3f& p);
/** \brief Merge the OBB and another OBB (delegates to operator+) */
OBB& operator += (const OBB& other)
{
*this = *this + other;
return *this;
}
/** \brief Return the merged OBB of current OBB and the other one */
OBB operator + (const OBB& other) const;
/** \brief Width of the OBB: full extent along the 0th local dimension */
inline BVH_REAL width() const
{
return 2 * extent[0];
}
/** \brief Height of the OBB: full extent along the 1st local dimension */
inline BVH_REAL height() const
{
return 2 * extent[1];
}
/** \brief Depth of the OBB: full extent along the 2nd local dimension */
inline BVH_REAL depth() const
{
return 2 * extent[2];
}
/** \brief Volume of the OBB */
inline BVH_REAL volume() const
{
return width() * height() * depth();
}
/** \brief Size of the OBB (squared length of the half-extent vector), used for split order */
inline BVH_REAL size() const
{
return extent.sqrLength();
}
/** \brief Center of the OBB */
inline Vec3f center() const
{
return To;
}
/** \brief The distance between two OBB
* Not implemented
*/
BVH_REAL distance(const OBB& other, Vec3f* P = NULL, Vec3f* Q = NULL) const;
private:
/** Compute the 8 vertices of a OBB */
void computeVertices(Vec3f vertex[8]) const;
/** \brief OBB merge method when the centers of two smaller OBB are far away */
static OBB merge_largedist(const OBB& b1, const OBB& b2);
/** \brief OBB merge method when the centers of two smaller OBB are close */
static OBB merge_smalldist(const OBB& b1, const OBB& b2);
public:
/** Kernel check whether two OBB are disjoint.
* NOTE(review): B/T presumably encode the relative orientation/translation
* between the two boxes and a/b their half extents — confirm against the
* implementation. */
static bool obbDisjoint(const Vec3f B[3], const Vec3f& T, const Vec3f& a, const Vec3f& b);
};
/** \brief Check collision between two OBB; R0/T0 presumably relate the frames
* of b1 and b2 — confirm against the implementation */
bool overlap(const Vec3f R0[3], const Vec3f& T0, const OBB& b1, const OBB& b2);
}
#endif

View File

@ -0,0 +1,178 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_RSS_H
#define FCL_RSS_H
#include "fcl/BVH_internal.h"
#include "fcl/vec_3f.h"
namespace fcl
{
/** \brief Rectangle swept sphere (RSS) bounding volume: a 3D rectangle plus a
 * sphere radius swept around it */
class RSS
{
public:
/** \brief Orientation of RSS */
Vec3f axis[3];
/** \brief position of rectangle (origin of the rectangle) */
Vec3f Tr;
/** \brief side lengths of rectangle */
BVH_REAL l[2];
/** \brief radius of sphere summed with rectangle to form RSS */
BVH_REAL r;
/** \brief Default constructor: members are left uninitialized */
RSS() {}
/** \brief Check collision between two RSS */
bool overlap(const RSS& other) const;
/** \brief Check collision between two RSS and return the overlap part.
* For RSS, we return nothing, as the overlap part of two RSSs usually is not a RSS
*/
bool overlap(const RSS& other, RSS& /*overlap_part*/) const
{
return overlap(other);
}
/** \brief Check whether the RSS contains a point */
inline bool contain(const Vec3f& p) const;
/** \brief A simple way to merge the RSS and a point, not compact.
* THIS FUNCTION STILL HAS PROBLEM!!
*/
RSS& operator += (const Vec3f& p);
/** \brief Merge the RSS and another RSS (delegates to operator+) */
inline RSS& operator += (const RSS& other)
{
*this = *this + other;
return *this;
}
/** \brief Return the merged RSS of current RSS and the other one */
RSS operator + (const RSS& other) const;
/** \brief Width of the RSS: rectangle side 0 plus the swept radius on both sides */
inline BVH_REAL width() const
{
return l[0] + 2 * r;
}
/** \brief Height of the RSS: rectangle side 1 plus the swept radius on both sides */
inline BVH_REAL height() const
{
return l[1] + 2 * r;
}
/** \brief Depth of the RSS: the swept sphere diameter */
inline BVH_REAL depth() const
{
return 2 * r;
}
/** \brief Volume of the RSS.
* NOTE(review): the sphere term is 4*pi*r^3, not (4/3)*pi*r^3, and edge
* cylinders are ignored — possibly a deliberate approximation; verify. */
inline BVH_REAL volume() const
{
return (l[0] * l[1] * 2 * r + 4 * 3.1415926 * r * r * r);
}
/** \brief Size of the RSS (rectangle diagonal plus sphere diameter), for split order */
inline BVH_REAL size() const
{
return (sqrt(l[0] * l[0] + l[1] * l[1]) + 2 * r);
}
/** \brief The RSS center.
* NOTE(review): returns Tr, the rectangle origin, which is not necessarily
* the geometric center of the volume — confirm intended semantics. */
inline Vec3f center() const
{
return Tr;
}
/** \brief the distance between two RSS */
BVH_REAL distance(const RSS& other, Vec3f* P = NULL, Vec3f* Q = NULL) const;
protected:
/** \brief Clip val between a and b */
static void clipToRange(BVH_REAL& val, BVH_REAL a, BVH_REAL b);
/** \brief Finds the parameters t & u corresponding to the two closest points on a pair of line segments.
* The first segment is defined as Pa + A*t, 0 <= t <= a, where "Pa" is one endpoint of the segment, "A" is a unit vector
* pointing to the other endpoint, and t is a scalar that produces all the points between the two endpoints. Since "A" is a unit
* vector, "a" is the segment's length.
* The second segment is defined as Pb + B*u, 0 <= u <= b.
* Many of the terms needed by the algorithm are already computed for other purposes,so we just pass these terms into the function
* instead of complete specifications of each segment. "T" in the dot products is the vector between Pa and Pb.
* Reference: "On fast computation of distance between line segments." Vladimir J. Lumelsky, in Information Processing Letters, no. 21, pages 55-61, 1985.
*/
static void segCoords(BVH_REAL& t, BVH_REAL& u, BVH_REAL a, BVH_REAL b, BVH_REAL A_dot_B, BVH_REAL A_dot_T, BVH_REAL B_dot_T);
/** \brief Returns whether the nearest point on rectangle edge
* Pb + B*u, 0 <= u <= b, to the rectangle edge,
* Pa + A*t, 0 <= t <= a, is within the half space
* determined by the point Pa and the direction Anorm.
*
* A,B, and Anorm are unit vectors.
* T is the vector between Pa and Pb.
*/
static bool inVoronoi(BVH_REAL a, BVH_REAL b, BVH_REAL Anorm_dot_B, BVH_REAL Anorm_dot_T, BVH_REAL A_dot_B, BVH_REAL A_dot_T, BVH_REAL B_dot_T);
public:
/** \brief distance between two oriented rectangles
* P and Q (optional return values) are the closest points in the rectangles, both are in the local frame of the first rectangle
*/
static BVH_REAL rectDistance(const Vec3f Rab[3], Vec3f const& Tab, const BVH_REAL a[2], const BVH_REAL b[2], Vec3f* P = NULL, Vec3f* Q = NULL);
};
/** \brief distance between two RSS bounding volumes
* P and Q (optional return values) are the closest points in the rectangles, not the RSS. But the direction P - Q is the correct direction for closest points
* Notice that P and Q are both in the local frame of the first RSS (not global frame and not even the local frame of object 1)
*/
BVH_REAL distance(const Vec3f R0[3], const Vec3f& T0, const RSS& b1, const RSS& b2, Vec3f* P = NULL, Vec3f* Q = NULL);
/** \brief Check collision between two RSS; R0/T0 presumably relate the two frames — confirm against the implementation */
bool overlap(const Vec3f R0[3], const Vec3f& T0, const RSS& b1, const RSS& b2);
}
#endif

View File

@ -0,0 +1,493 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_BROAD_PHASE_COLLISION_H
#define FCL_BROAD_PHASE_COLLISION_H
#include "fcl/collision_object.h"
#include "fcl/collision_data.h"
#include "fcl/AABB.h"
#include "fcl/interval_tree.h"
#include <vector>
#include <list>
namespace fcl
{
/** \brief Default collision callback usable with the managers below; cdata is
 * opaque query state (presumably a CollisionData* — verify against the
 * implementation) */
bool defaultCollisionFunction(CollisionObject* o1, CollisionObject* o2, void* cdata);
/** \brief Callback invoked for each candidate pair; return value is whether the query can stop now */
typedef bool (*CollisionCallBack)(CollisionObject* o1, CollisionObject* o2, void* cdata);
/** \brief Base class (interface) for broad phase collision managers. A
 * manager tracks a set of CollisionObject pointers (it does not own the
 * objects) and answers overlap queries through the collide() callbacks. */
class BroadPhaseCollisionManager
{
public:
  /** \brief Virtual destructor. The class is an abstract interface and
   * concrete managers (e.g. SaPCollisionManager, whose destructor frees the
   * endpoint lists) may be deleted through base pointers; previously this was
   * missing (undefined behavior on polymorphic delete). */
  virtual ~BroadPhaseCollisionManager() {}

  /** \brief add one object to the manager */
  virtual void registerObject(CollisionObject* obj) = 0;
  /** \brief remove one object from the manager */
  virtual void unregisterObject(CollisionObject* obj) = 0;
  /** \brief initialize the manager, related with the specific type of manager */
  virtual void setup() = 0;
  /** \brief update the condition of manager */
  virtual void update() = 0;
  /** \brief clear the manager */
  virtual void clear() = 0;
  /** \brief return the objects managed by the manager */
  virtual void getObjects(std::vector<CollisionObject*>& objs) const = 0;
  /** \brief perform collision test between one object and all the objects belonging to the manager */
  virtual void collide(CollisionObject* obj, void* cdata, CollisionCallBack callback) const = 0;
  /** \brief perform collision test for the objects belonging to the manager (i.e., N^2 self collision) */
  virtual void collide(void* cdata, CollisionCallBack callback) const = 0;
  /** \brief whether the manager is empty */
  virtual bool empty() const = 0;
  /** \brief the number of objects managed by the manager */
  virtual size_t size() const = 0;
};
/** \brief Brute force N-body collision manager: stores objects in a plain
 * list; self collision is the N^2 all-pairs test */
class NaiveCollisionManager : public BroadPhaseCollisionManager
{
public:
NaiveCollisionManager() {}
/** \brief add one object to the manager */
void registerObject(CollisionObject* obj);
/** \brief remove one object from the manager */
void unregisterObject(CollisionObject* obj);
/** \brief initialize the manager, related with the specific type of manager */
void setup();
/** \brief update the condition of manager */
void update();
/** \brief clear the manager */
void clear();
/** \brief return the objects managed by the manager */
void getObjects(std::vector<CollisionObject*>& objs) const;
/** \brief perform collision test between one object and all the objects belonging to the manager */
void collide(CollisionObject* obj, void* cdata, CollisionCallBack callback) const;
/** \brief perform collision test for the objects belonging to the manager (i.e., N^2 self collision) */
void collide(void* cdata, CollisionCallBack callback) const;
/** \brief whether the manager is empty */
bool empty() const;
/** \brief the number of objects managed by the manager */
inline size_t size() const { return objs.size(); }
protected:
/** \brief objects belonging to the manager are stored in a list structure */
std::list<CollisionObject*> objs;
};
/** \brief Rigorous sweep-and-prune (SAP) collision manager: maintains doubly
 * linked endpoint lists per axis and a list of candidate overlapping pairs */
class SaPCollisionManager : public BroadPhaseCollisionManager
{
public:
SaPCollisionManager()
{
elist[0] = NULL;
elist[1] = NULL;
elist[2] = NULL;
}
~SaPCollisionManager()
{
clear();
}
/** \brief add one object to the manager */
void registerObject(CollisionObject* obj);
/** \brief remove one object from the manager */
void unregisterObject(CollisionObject* obj);
/** \brief initialize the manager, related with the specific type of manager */
void setup();
/** \brief update the condition of manager */
void update();
/** \brief clear the manager */
void clear();
/** \brief return the objects managed by the manager */
void getObjects(std::vector<CollisionObject*>& objs) const;
/** \brief perform collision test between one object and all the objects belonging to the manager */
void collide(CollisionObject* obj, void* cdata, CollisionCallBack callback) const;
/** \brief perform collision test for the objects belonging to the manager (i.e., N^2 self collision) */
void collide(void* cdata, CollisionCallBack callback) const;
/** \brief whether the manager is empty */
bool empty() const;
/** \brief the number of objects managed by the manager */
inline size_t size() const { return AABB_arr.size(); }
protected:
struct EndPoint;
/** \brief SAP interval for one object */
struct SaPAABB
{
/** \brief object */
CollisionObject* obj;
/** \brief lower bound end point of the interval */
EndPoint* lo;
/** \brief higher bound end point of the interval */
EndPoint* hi;
/** \brief cached AABB value */
AABB cached;
};
/** \brief End point for an interval */
struct EndPoint
{
/** \brief tag for whether it is a lower bound or higher bound of an interval, 0 for lo, and 1 for hi */
char minmax;
/** \brief back pointer to SAP interval */
SaPAABB* aabb;
/** \brief the previous end point in the end point list (one link per axis) */
EndPoint* prev[3];
/** \brief the next end point in the end point list (one link per axis) */
EndPoint* next[3];
/** \brief read-only access to the end point value: the min or max corner of the cached AABB, depending on minmax */
const Vec3f& getVal() const
{
if(minmax == 0) return aabb->cached.min_;
else return aabb->cached.max_;
}
/** \brief mutable access to the end point value */
Vec3f& getVal()
{
if(minmax == 0) return aabb->cached.min_;
else return aabb->cached.max_;
}
};
/** \brief A pair of objects that are not culled away and should be further checked for collision */
struct SaPPair
{
SaPPair(CollisionObject* a, CollisionObject* b)
{
obj1 = a;
obj2 = b;
}
CollisionObject* obj1;
CollisionObject* obj2;
};
/** Functor to help unregister one object */
class isUnregistered
{
CollisionObject* obj;
public:
isUnregistered(CollisionObject* obj_)
{
obj = obj_;
}
/** \brief true if the pair refers to the object being unregistered */
bool operator() (const SaPPair& pair)
{
return (pair.obj1 == obj) || (pair.obj2 == obj);
}
};
/** Functor to help remove collision pairs no longer valid (i.e., should be culled away) */
class isNotValidPair
{
CollisionObject* obj1;
CollisionObject* obj2;
public:
isNotValidPair(CollisionObject* obj1_, CollisionObject* obj2_)
{
obj1 = obj1_;
obj2 = obj2_;
}
/** \brief true if the pair matches (obj1, obj2) in either order */
bool operator() (const SaPPair& pair)
{
return (pair.obj1 == obj1 && pair.obj2 == obj2) || (pair.obj1 == obj2 && pair.obj2 == obj1);
}
};
/** \brief End point list for x, y, z coordinates */
EndPoint* elist[3];
/** \brief SAP interval list */
std::list<SaPAABB*> AABB_arr;
/** \brief The pair of objects that should further check for collision */
std::list<SaPPair> overlap_pairs;
};
/** \brief Simple sweep-and-prune collision manager: keeps three vectors of
 * objects sorted by the lower x/y/z AABB bounds */
class SSaPCollisionManager : public BroadPhaseCollisionManager
{
public:
SSaPCollisionManager()
{
setup_ = false;
}
/** \brief add one object to the manager */
void registerObject(CollisionObject* obj);
/** \brief remove one object from the manager */
void unregisterObject(CollisionObject* obj);
/** \brief initialize the manager, related with the specific type of manager */
void setup();
/** \brief update the condition of manager */
void update();
/** \brief clear the manager */
void clear();
/** \brief return the objects managed by the manager */
void getObjects(std::vector<CollisionObject*>& objs) const;
/** \brief perform collision test between one object and all the objects belonging to the manager */
void collide(CollisionObject* obj, void* cdata, CollisionCallBack callback) const;
/** \brief perform collision test for the objects belonging to the manager (i.e., N^2 self collision) */
void collide(void* cdata, CollisionCallBack callback) const;
/** \brief whether the manager is empty */
bool empty() const;
/** \brief the number of objects managed by the manager */
inline size_t size() const { return objs_x.size(); }
protected:
/** \brief Functor sorting objects according to the AABB lower x bound */
struct SortByXLow
{
bool operator()(const CollisionObject* a, const CollisionObject* b) const
{
if(a->getAABB().min_[0] < b->getAABB().min_[0])
return true;
return false;
}
};
/** \brief Functor sorting objects according to the AABB lower y bound */
struct SortByYLow
{
bool operator()(const CollisionObject* a, const CollisionObject* b) const
{
if(a->getAABB().min_[1] < b->getAABB().min_[1])
return true;
return false;
}
};
/** \brief Functor sorting objects according to the AABB lower z bound */
struct SortByZLow
{
bool operator()(const CollisionObject* a, const CollisionObject* b) const
{
if(a->getAABB().min_[2] < b->getAABB().min_[2])
return true;
return false;
}
};
/** \brief Dummy collision object with a point AABB (presumably used as a
* search probe in the sorted vectors — verify against the implementation) */
class DummyCollisionObject : public CollisionObject
{
public:
DummyCollisionObject(const AABB& aabb_)
{
aabb = aabb_;
}
/** \brief no-op: the dummy's AABB is supplied directly at construction */
void computeLocalAABB() {}
};
/** \brief check collision between one object and a list of objects */
void checkColl(std::vector<CollisionObject*>::const_iterator pos_start, std::vector<CollisionObject*>::const_iterator pos_end,
CollisionObject* obj, void* cdata, CollisionCallBack callback) const;
/** \brief Objects sorted according to lower x value */
std::vector<CollisionObject*> objs_x;
/** \brief Objects sorted according to lower y value */
std::vector<CollisionObject*> objs_y;
/** \brief Objects sorted according to lower z value */
std::vector<CollisionObject*> objs_z;
/** \brief tag about whether the environment is maintained suitably (i.e., the objs_x, objs_y, objs_z are sorted correctly) */
bool setup_;
};
/** \brief Collision manager based on interval trees, one tree per axis */
class IntervalTreeCollisionManager : public BroadPhaseCollisionManager
{
public:
IntervalTreeCollisionManager()
{
setup_ = false;
for(int i = 0; i < 3; ++i)
interval_trees[i] = NULL;
}
/** \brief add one object to the manager */
void registerObject(CollisionObject* obj);
/** \brief remove one object from the manager */
void unregisterObject(CollisionObject* obj);
/** \brief initialize the manager, related with the specific type of manager */
void setup();
/** \brief update the condition of manager */
void update();
/** \brief clear the manager */
void clear();
/** \brief return the objects managed by the manager */
void getObjects(std::vector<CollisionObject*>& objs) const;
/** \brief perform collision test between one object and all the objects belonging to the manager */
void collide(CollisionObject* obj, void* cdata, CollisionCallBack callback) const;
/** \brief perform collision test for the objects belonging to the manager (i.e., N^2 self collision) */
void collide(void* cdata, CollisionCallBack callback) const;
/** \brief whether the manager is empty */
bool empty() const;
/** \brief the number of objects managed by the manager (each object contributes two endpoints per axis) */
inline size_t size() const { return endpoints[0].size() / 2; }
protected:
/** \brief check collision between one object and a list of objects */
void checkColl(std::vector<CollisionObject*>::const_iterator pos_start, std::vector<CollisionObject*>::const_iterator pos_end,
CollisionObject* obj, void* cdata, CollisionCallBack callback) const;
/** \brief SAP end point */
struct EndPoint
{
/** \brief object related with the end point */
CollisionObject* obj;
/** \brief end point value */
BVH_REAL value;
/** \brief tag for whether it is a lower bound or higher bound of an interval, 0 for lo, and 1 for hi */
char minmax;
};
/** \brief Functor for sorting end points by ascending value */
struct SortByValue
{
bool operator()(const EndPoint& a, const EndPoint& b) const
{
if(a.value < b.value)
return true;
return false;
}
};
/** \brief Extension of the interval tree's interval to a SAP interval, adding the owning object */
struct SAPInterval : public Interval
{
CollisionObject* obj;
SAPInterval(double low_, double high_, CollisionObject* obj_)
{
low = low_;
high = high_;
obj = obj_;
}
};
/** \brief vector stores all the end points, one vector per axis */
std::vector<EndPoint> endpoints[3];
/** \brief interval tree manages the intervals, one tree per axis */
IntervalTree* interval_trees[3];
/** \brief tag for whether the interval tree is maintained suitably */
bool setup_;
};
}
#endif

View File

@ -0,0 +1,59 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_COLLISION_H
#define FCL_COLLISION_H
#include "fcl/vec_3f.h"
#include "fcl/collision_object.h"
#include "fcl/collision_data.h"
/** \brief Main namespace */
namespace fcl
{
/** \brief Main collision interface: given two collision objects, and the requirements for contacts, including num of max contacts, whether to perform exhaustive collision (i.e.,
* returning all the contact points), whether to return detailed contact information (i.e., normal, contact point, depth; otherwise only contact primitive id is returned), this function
* performs the collision between them.
* Return value is the number of contacts returned
*/
int collide(const CollisionObject* o1, const CollisionObject* o2,
int num_max_contacts, bool exhaustive, bool enable_contact,
std::vector<Contact>& contacts);
}
#endif

View File

@ -0,0 +1,75 @@
#ifndef FCL_COLLISION_DATA_H
#define FCL_COLLISION_DATA_H
#include "fcl/collision_object.h"
#include "fcl/vec_3f.h"
#include <vector>
namespace fcl
{
struct Contact
{
BVH_REAL penetration_depth;
Vec3f normal;
Vec3f pos;
const CollisionObject* o1;
const CollisionObject* o2;
int b1;
int b2;
Contact()
{
o1 = NULL;
o2 = NULL;
b1 = -1;
b2 = -1;
}
Contact(const CollisionObject* o1_, const CollisionObject* o2_, int b1_, int b2_)
{
o1 = o1_;
o2 = o2_;
b1 = b1_;
b2 = b2_;
}
Contact(const CollisionObject* o1_, const CollisionObject* o2_, int b1_, int b2_,
const Vec3f& pos_, const Vec3f& normal_, BVH_REAL depth_)
{
o1 = o1_;
o2 = o2_;
b1 = b1_;
b2 = b2_;
normal = normal_;
pos = pos_;
penetration_depth = depth_;
}
};
/** \brief Query state for a collision run: termination/result flags, contact
 * collection options, and the contacts gathered so far */
struct CollisionData
{
  /** \brief Start a fresh query: not done, no collision found, collect at
   * most one contact, no detailed contact info, not exhaustive */
  CollisionData()
    : done(false)
    , is_collision(false)
    , exhaustive(false)
    , num_max_contacts(1)
    , enable_contact(false)
  {
  }

  /** \brief whether the query is finished (presumably set by the callback — verify callers) */
  bool done;
  /** \brief whether a collision has been found */
  bool is_collision;
  /** \brief whether all contacts should be returned */
  bool exhaustive;
  /** \brief maximum number of contacts to collect */
  unsigned int num_max_contacts;
  /** \brief whether detailed contact information is requested */
  bool enable_contact;
  /** \brief contacts accumulated so far */
  std::vector<Contact> contacts;
};
}
#endif

View File

@ -0,0 +1,63 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_COLLISION_FUNC_MATRIX_H
#define FCL_COLLISION_FUNC_MATRIX_H
#include "fcl/collision_object.h"
#include "fcl/collision_data.h"
namespace fcl
{
/** \brief Signature of a narrow-phase collision routine between two objects;
 * same contract as the top-level collide(): returns the number of contacts found. */
typedef int (*CollisionFunc)(const CollisionObject* o1, const CollisionObject* o2, int num_max_contacts, bool exhaustive, bool enable_contact, std::vector<Contact>& contacts);
/** \brief 14x14 dispatch table of collision routines, indexed by the NODE_TYPE
 * of each object (7 BV types + 7 geometric shape types); filled in by the constructor. */
struct CollisionFunctionMatrix
{
CollisionFunc collision_matrix[14][14];
CollisionFunctionMatrix();
};
}
#endif

View File

@ -0,0 +1,58 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_COLLISION_NODE_H
#define FCL_COLLISION_NODE_H
#include "fcl/traversal_node_base.h"
#include "fcl/traversal_node_bvhs.h"
#include "fcl/BVH_front.h"
namespace fcl
{
/** \brief Run a collision traversal on the given node; an optional BVH front list can be supplied/updated for incremental queries. */
void collide(CollisionTraversalNodeBase* node, BVHFrontList* front_list = NULL);
/** \brief Run a self-collision traversal on the given node. */
void selfCollide(CollisionTraversalNodeBase* node, BVHFrontList* front_list = NULL);
/** \brief Run a distance traversal; qsize controls the traversal queue size. */
void distance(DistanceTraversalNodeBase* node, BVHFrontList* front_list = NULL, int qsize = 2);
/** \brief Distance traversal specialized for RSS mesh distance nodes. */
void distance(MeshDistanceTraversalNodeRSS* node, BVHFrontList* front_list = NULL, int qsize = 2);
}
#endif

View File

@ -0,0 +1,147 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_COLLISION_OBJECT_BASE_H
#define FCL_COLLISION_OBJECT_BASE_H
#include "fcl/AABB.h"
#include "fcl/transform.h"
namespace fcl
{
/** \brief Broad category of a collision object: unknown, BVH-based, or basic geometry */
enum OBJECT_TYPE {OT_UNKNOWN, OT_BVH, OT_OBB, OT_GEOM};
/** \brief Concrete node type: one of the bounding-volume types or one of the geometric shapes */
enum NODE_TYPE {BV_UNKNOWN, BV_AABB, BV_OBB, BV_RSS, BV_KDOP16, BV_KDOP18, BV_KDOP24,
GEOM_BOX, GEOM_SPHERE, GEOM_CAPSULE, GEOM_CONE, GEOM_CYLINDER, GEOM_CONVEX, GEOM_PLANE};
/** \brief Base class for all objects participating in collision */
class CollisionObject
{
public:
virtual ~CollisionObject() {}
/** \brief Object category; overridden by subclasses (default: unknown) */
virtual OBJECT_TYPE getObjectType() const { return OT_UNKNOWN; }
/** \brief Concrete node type; overridden by subclasses (default: unknown) */
virtual NODE_TYPE getNodeType() const { return BV_UNKNOWN; }
/** \brief Compute aabb_local / aabb_center / aabb_radius; shape-specific */
virtual void computeLocalAABB() = 0;
/** \brief AABB in the global frame (valid after computeAABB()) */
inline const AABB& getAABB() const
{
return aabb;
}
/** \brief Recompute the global AABB as a box of half-extent aabb_radius around
 * the transformed local center -- a conservative bound that is valid for any
 * rotation, at the cost of being looser than a tightly rotated box. */
inline void computeAABB()
{
Vec3f center = t.transform(aabb_center);
Vec3f delta(aabb_radius, aabb_radius, aabb_radius);
aabb.min_ = center - delta;
aabb.max_ = center + delta;
}
/** \brief Current translation of the object */
inline const Vec3f& getTranslation() const
{
return t.getTranslation();
}
/** \brief Current rotation, as three row vectors */
inline const Vec3f* getRotation() const
{
return t.getRotation();
}
/** \brief Current rotation, as a quaternion */
inline const SimpleQuaternion& getQuatRotation() const
{
return t.getQuatRotation();
}
/** \brief Set rotation only; the stored AABB is NOT updated -- call computeAABB() afterwards */
void setRotation(const Vec3f R[3])
{
t.setRotation(R);
}
/** \brief Set translation only */
void setTranslation(const Vec3f& T)
{
t.setTranslation(T);
}
/** \brief Set rotation from a quaternion */
void setQuatRotation(const SimpleQuaternion& q)
{
t.setQuatRotation(q);
}
/** \brief Set rotation and translation together */
void setTransform(const Vec3f R[3], const Vec3f& T)
{
t.setTransform(R, T);
}
/** \brief Set rotation (quaternion) and translation together */
void setTransform(const SimpleQuaternion& q, const Vec3f& T)
{
t.setTransform(q, T);
}
/** \brief Whether the current transform is identity */
bool isIdentityTransform() const
{
return t.isIdentity();
}
/** \brief Reset the transform to identity */
void setIdentityTransform()
{
t.setIdentity();
}
protected:
/** AABB in global coordinate */
mutable AABB aabb;
/** AABB in local coordinate */
AABB aabb_local;
/** AABB center in local coordinate */
Vec3f aabb_center;
/** AABB radius */
BVH_REAL aabb_radius;
/** Current rigid transform (rotation + translation) of the object */
SimpleTransform t;
};
}
#endif

View File

@ -0,0 +1,61 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_CONSERVATIVE_ADVANCEMENT_H
#define FCL_CONSERVATIVE_ADVANCEMENT_H
#include "fcl/vec_3f.h"
#include "fcl/collision_object.h"
#include "fcl/collision_data.h"
#include "fcl/motion_base.h"
namespace fcl
{
/** \brief Conservative-advancement continuous collision query between two moving
 * objects: advances along the motions motion1/motion2 until first contact or the
 * end of the motion. Contact reporting follows the same convention as collide();
 * toc receives the time of contact. Return value: number of contacts found. */
template<typename BV>
int conservativeAdvancement(const CollisionObject* o1,
MotionBase<BV>* motion1,
const CollisionObject* o2,
MotionBase<BV>* motion2,
int num_max_contacts, bool exhaustive, bool enable_contact,
std::vector<Contact>& contacts,
BVH_REAL& toc);
}
#endif

View File

@ -0,0 +1,314 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef GEOMETRIC_SHAPE_TO_BVH_MODEL_H
#define GEOMETRIC_SHAPE_TO_BVH_MODEL_H
#include "fcl/geometric_shapes.h"
#include "fcl/BVH_model.h"
#include <boost/math/constants/constants.hpp>
/** \brief Main namespace */
namespace fcl
{
/** \brief Generate BVH model from box: 8 corner vertices, 12 triangles (two per
 * face), transformed by the shape's local frame and then its global pose. */
template<typename BV>
void generateBVHModel(BVHModel<BV>& model, const Box& shape)
{
  const double hx = 0.5 * shape.side[0];
  const double hy = 0.5 * shape.side[1];
  const double hz = 0.5 * shape.side[2];

  // The eight corners, top face (z = +hz) first, then bottom face.
  std::vector<Vec3f> points(8);
  points[0] = Vec3f( hx, -hy,  hz);
  points[1] = Vec3f( hx,  hy,  hz);
  points[2] = Vec3f(-hx,  hy,  hz);
  points[3] = Vec3f(-hx, -hy,  hz);
  points[4] = Vec3f( hx, -hy, -hz);
  points[5] = Vec3f( hx,  hy, -hz);
  points[6] = Vec3f(-hx,  hy, -hz);
  points[7] = Vec3f(-hx, -hy, -hz);

  // Two triangles per face, winding consistent with the original table.
  const int face[12][3] = {
    {0, 4, 1}, {1, 4, 5},
    {2, 6, 3}, {3, 6, 7},
    {3, 0, 2}, {2, 0, 1},
    {6, 5, 7}, {7, 5, 4},
    {1, 5, 2}, {2, 5, 6},
    {3, 7, 0}, {0, 7, 4}
  };
  std::vector<Triangle> tri_indices(12);
  for(int k = 0; k < 12; ++k)
    tri_indices[k] = Triangle(face[k][0], face[k][1], face[k][2]);

  // Apply the shape's local transform, then its global transform.
  for(unsigned int k = 0; k < points.size(); ++k)
  {
    Vec3f local = matMulVec(shape.getLocalRotation(), points[k]) + shape.getLocalTranslation();
    points[k] = matMulVec(shape.getRotation(), local) + shape.getTranslation();
  }

  model.beginModel();
  model.addSubModel(points, tri_indices);
  model.endModel();
  model.computeLocalAABB();
}
/** Generate BVH model from sphere: `ring` latitude rings of `seg` vertices each,
 * plus north/south pole vertices, tessellated into triangles and transformed by
 * the shape's local frame and global pose. */
template<typename BV>
void generateBVHModel(BVHModel<BV>& model, const Sphere& shape, unsigned int seg = 16, unsigned int ring = 16)
{
std::vector<Vec3f> points;
std::vector<Triangle> tri_indices;
double r = shape.radius;
// Longitude step: full circle divided into `seg` segments.
double phi, phid;
const double pi = boost::math::constants::pi<double>();
phid = pi * 2 / seg;
phi = 0;
// Latitude step: (ring + 1) intervals between the two poles.
double theta, thetad;
thetad = pi / (ring + 1);
theta = 0;
// Ring vertices: ring i lies at polar angle (i + 1) * thetad.
// Vertex index layout: ring i, segment j -> i * seg + j.
for(unsigned int i = 0; i < ring; ++i)
{
double theta_ = theta + thetad * (i + 1);
for(unsigned int j = 0; j < seg; ++j)
{
points.push_back(Vec3f(r * sin(theta_) * cos(phi + j * phid), r * sin(theta_) * sin(phi + j * phid), r * cos(theta_)));
}
}
// Poles: north pole at index ring * seg, south pole at ring * seg + 1.
points.push_back(Vec3f(0, 0, r));
points.push_back(Vec3f(0, 0, -r));
// Quads between adjacent rings, each split into two triangles; the `?:` terms
// wrap the last segment around to the first.
for(unsigned int i = 0; i < ring - 1; ++i)
{
for(unsigned int j = 0; j < seg; ++j)
{
unsigned int a, b, c, d;
a = i * seg + j;
b = (j == seg - 1) ? (i * seg) : (i * seg + j + 1);
c = (i + 1) * seg + j;
d = (j == seg - 1) ? ((i + 1) * seg) : ((i + 1) * seg + j + 1);
tri_indices.push_back(Triangle(a, c, b));
tri_indices.push_back(Triangle(b, c, d));
}
}
// Triangle fans closing the two poles against the first and last rings.
for(unsigned int j = 0; j < seg; ++j)
{
unsigned int a, b;
a = j;
b = (j == seg - 1) ? 0 : (j + 1);
tri_indices.push_back(Triangle(ring * seg, a, b));
a = (ring - 1) * seg + j;
b = (j == seg - 1) ? (ring - 1) * seg : ((ring - 1) * seg + j + 1);
tri_indices.push_back(Triangle(a, ring * seg + 1, b));
}
// Apply the shape's local transform, then its global transform.
for(unsigned int i = 0; i < points.size(); ++i)
{
Vec3f v = matMulVec(shape.getLocalRotation(), points[i]) + shape.getLocalTranslation();
v = matMulVec(shape.getRotation(), v) + shape.getTranslation();
points[i] = v;
}
model.beginModel();
model.addSubModel(points, tri_indices);
model.endModel();
model.computeLocalAABB();
}
/** \brief Generate BVH model from cylinder: `tot` vertices per circle; the number
 * of stacked circles (h_num + 1) is chosen so side quads are roughly square, i.e.
 * vertical spacing matches the circumferential edge length. */
template<typename BV>
void generateBVHModel(BVHModel<BV>& model, const Cylinder& shape, unsigned int tot = 16)
{
std::vector<Vec3f> points;
std::vector<Triangle> tri_indices;
double r = shape.radius;
double h = shape.lz;
double phi, phid;
const double pi = boost::math::constants::pi<double>();
phid = pi * 2 / tot;
phi = 0;
// Number of vertical subdivisions so each side quad has edge ~ circle_edge.
double circle_edge = phid * r;
unsigned int h_num = ceil(h / circle_edge);
double hd = h / h_num;
// Top circle at z = +h/2 (indices 0 .. tot-1).
for(unsigned int i = 0; i < tot; ++i)
points.push_back(Vec3f(r * cos(phi + phid * i), r * sin(phi + phid * i), h / 2));
// Intermediate circles, one per subdivision level.
for(unsigned int i = 0; i < h_num - 1; ++i)
{
for(unsigned int j = 0; j < tot; ++j)
{
points.push_back(Vec3f(r * cos(phi + phid * j), r * sin(phi + phid * j), h / 2 - (i + 1) * hd));
}
}
// Bottom circle at z = -h/2 (indices h_num * tot .. (h_num + 1) * tot - 1).
for(unsigned int i = 0; i < tot; ++i)
points.push_back(Vec3f(r * cos(phi + phid * i), r * sin(phi + phid * i), - h / 2));
// Cap centers: top at index (h_num + 1) * tot, bottom at (h_num + 1) * tot + 1.
points.push_back(Vec3f(0, 0, h / 2));
points.push_back(Vec3f(0, 0, -h / 2));
// Top cap fan.
for(unsigned int i = 0; i < tot; ++i)
{
Triangle tmp((h_num + 1) * tot, i, ((i == tot - 1) ? 0 : (i + 1)));
tri_indices.push_back(tmp);
}
// Bottom cap fan (opposite winding).
for(unsigned int i = 0; i < tot; ++i)
{
Triangle tmp((h_num + 1) * tot + 1, h_num * tot + ((i == tot - 1) ? 0 : (i + 1)), h_num * tot + i);
tri_indices.push_back(tmp);
}
// Side quads between consecutive circles, two triangles each; the `?:` terms
// wrap the last segment around to the first.
for(unsigned int i = 0; i < h_num; ++i)
{
for(unsigned int j = 0; j < tot; ++j)
{
int a, b, c, d;
a = j;
b = (j == tot - 1) ? 0 : (j + 1);
c = j + tot;
d = (j == tot - 1) ? tot : (j + 1 + tot);
int start = i * tot;
tri_indices.push_back(Triangle(start + b, start + a, start + c));
tri_indices.push_back(Triangle(start + b, start + c, start + d));
}
}
// Apply the shape's local transform, then its global transform.
for(unsigned int i = 0; i < points.size(); ++i)
{
Vec3f v = matMulVec(shape.getLocalRotation(), points[i]) + shape.getLocalTranslation();
v = matMulVec(shape.getRotation(), v) + shape.getTranslation();
points[i] = v;
}
model.beginModel();
model.addSubModel(points, tri_indices);
model.endModel();
model.computeLocalAABB();
}
/** \brief Generate BVH model from cone: an apex at z = +h/2, a base circle of
 * radius r at z = -h/2, intermediate rings whose radius shrinks linearly toward
 * the apex, and a base cap fan. `tot` is the number of vertices per ring. */
template<typename BV>
void generateBVHModel(BVHModel<BV>& model, const Cone& shape, unsigned int tot = 16)
{
  std::vector<Vec3f> points;
  std::vector<Triangle> tri_indices;

  double r = shape.radius;
  double h = shape.lz;

  double phi, phid;
  const double pi = boost::math::constants::pi<double>();
  phid = pi * 2 / tot;
  phi = 0;

  // Vertical subdivisions so side quads are roughly square at the base.
  double circle_edge = phid * r;
  unsigned int h_num = ceil(h / circle_edge);
  double hd = h / h_num;

  // Intermediate rings between apex and base.
  // Ring i, segment j -> vertex index i * tot + j.
  for(unsigned int i = 0; i < h_num - 1; ++i)
  {
    double h_i = h / 2 - (i + 1) * hd;
    // BUGFIX: the original used the full radius r for every ring, which produced
    // a cylinder with a conical cap instead of a cone; the lateral surface of a
    // cone tapers linearly from r at z = -h/2 to 0 at the apex z = +h/2.
    double r_i = r * (0.5 - h_i / h);
    for(unsigned int j = 0; j < tot; ++j)
    {
      points.push_back(Vec3f(r_i * cos(phi + phid * j), r_i * sin(phi + phid * j), h_i));
    }
  }
  // Base circle (indices (h_num - 1) * tot .. h_num * tot - 1).
  for(unsigned int i = 0; i < tot; ++i)
    points.push_back(Vec3f(r * cos(phi + phid * i), r * sin(phi + phid * i), - h / 2));
  // Apex at index h_num * tot, base center at h_num * tot + 1.
  points.push_back(Vec3f(0, 0, h / 2));
  points.push_back(Vec3f(0, 0, -h / 2));

  // Fan connecting the apex to the first ring (or directly to the base ring
  // when h_num == 1 and there are no intermediate rings).
  for(unsigned int i = 0; i < tot; ++i)
  {
    Triangle tmp(h_num * tot, i, (i == tot - 1) ? 0 : (i + 1));
    tri_indices.push_back(tmp);
  }
  // Base cap fan.
  for(unsigned int i = 0; i < tot; ++i)
  {
    // BUGFIX: the original expression
    //   (h_num - 1) * tot + (i == tot - 1) ? 0 : (i + 1)
    // lacked parentheses around the conditional, so `?:` (lowest precedence)
    // took the whole sum as its condition and the index was always 0 or i + 1.
    // Parenthesized as in the cylinder version above.
    Triangle tmp(h_num * tot + 1,
                 (h_num - 1) * tot + ((i == tot - 1) ? 0 : (i + 1)),
                 (h_num - 1) * tot + i);
    tri_indices.push_back(tmp);
  }
  // Side quads between consecutive rings, two triangles each.
  for(unsigned int i = 0; i < h_num - 1; ++i)
  {
    for(unsigned int j = 0; j < tot; ++j)
    {
      int a, b, c, d;
      a = j;
      b = (j == tot - 1) ? 0 : (j + 1);
      c = j + tot;
      d = (j == tot - 1) ? tot : (j + 1 + tot);
      int start = i * tot;
      tri_indices.push_back(Triangle(start + b, start + a, start + c));
      tri_indices.push_back(Triangle(start + b, start + c, start + d));
    }
  }
  // Apply the shape's local transform, then its global transform.
  for(unsigned int i = 0; i < points.size(); ++i)
  {
    Vec3f v = matMulVec(shape.getLocalRotation(), points[i]) + shape.getLocalTranslation();
    v = matMulVec(shape.getRotation(), v) + shape.getTranslation();
    points[i] = v;
  }
  model.beginModel();
  model.addSubModel(points, tri_indices);
  model.endModel();
  model.computeLocalAABB();
}
}
#endif

View File

@ -0,0 +1,332 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_GEOMETRIC_SHAPES_H
#define FCL_GEOMETRIC_SHAPES_H
#include "fcl/collision_object.h"
#include "fcl/vec_3f.h"
#include <string.h>
/** \brief Main namespace */
namespace fcl
{
/** \brief Base class for all basic geometric shapes. Adds a local frame
 * (rotation Rloc as three row vectors + translation Tloc) on top of the global
 * transform inherited from CollisionObject. */
class ShapeBase : public CollisionObject
{
public:
/** \brief Default Constructor */
ShapeBase()
{
// Set diagonal entries to 1 to form the identity rotation.
// NOTE(review): assumes Vec3f default-constructs to zero -- confirm in vec_3f.h.
Rloc[0][0] = 1;
Rloc[1][1] = 1;
Rloc[2][2] = 1;
}
/** \brief Set the local frame of the shape */
void setLocalTransform(const Vec3f R_[3], const Vec3f& T_)
{
Rloc[0] = R_[0];
Rloc[1] = R_[1];
Rloc[2] = R_[2];
Tloc = T_;
}
/** \brief Set the local orientation of the shape */
void setLocalRotation(const Vec3f R[3])
{
Rloc[0] = R[0];
Rloc[1] = R[1];
Rloc[2] = R[2];
}
/** \brief Set the local position of the shape */
void setLocalTranslation(const Vec3f& T)
{
Tloc = T;
}
/** \brief Append additional transform to the local transform:
 * Rloc <- R * Rloc, Tloc <- R * Tloc + T */
void appendLocalTransform(const Vec3f R[3], const Vec3f& T)
{
// Copy Rloc first: matMulMat writes into Rloc while reading R0.
Vec3f R0[3];
for(int i = 0; i < 3; ++i)
R0[i] = Rloc[i];
matMulMat(R, R0, Rloc);
Tloc = matMulVec(R, Tloc) + T;
}
/** \brief Get local transform */
void getLocalTransform(Vec3f R[3], Vec3f& T) const
{
T = Tloc;
R[0] = Rloc[0];
R[1] = Rloc[1];
R[2] = Rloc[2];
}
/** \brief Get local position */
inline const Vec3f& getLocalTranslation() const
{
return Tloc;
}
/** \brief Get local orientation */
inline const Vec3f* getLocalRotation() const
{
return Rloc;
}
/** \brief Get object type: a geometric shape */
OBJECT_TYPE getObjectType() const { return OT_GEOM; }
protected:
/** \brief Local rotation, stored as three row vectors */
Vec3f Rloc[3];
/** \brief Local translation */
Vec3f Tloc;
};
/** Center at zero point, axis aligned box */
class Box : public ShapeBase
{
public:
/** \brief Construct a box with side lengths x, y, z along the local axes */
Box(BVH_REAL x, BVH_REAL y, BVH_REAL z) : ShapeBase(), side(x, y, z) {}
/** box side length */
Vec3f side;
/** \brief Compute AABB */
void computeLocalAABB();
/** \brief Get node type: a box */
NODE_TYPE getNodeType() const { return GEOM_BOX; }
};
/** Center at zero point sphere */
class Sphere : public ShapeBase
{
public:
/** \brief Construct a sphere of the given radius */
Sphere(BVH_REAL radius_) : ShapeBase(), radius(radius_) {}
/** \brief Radius of the sphere */
BVH_REAL radius;
/** \brief Compute AABB */
void computeLocalAABB();
/** \brief Get node type: a sphere */
NODE_TYPE getNodeType() const { return GEOM_SPHERE; }
};
/** Center at zero point capsule */
class Capsule : public ShapeBase
{
public:
/** \brief Construct a capsule with the given radius and cylindrical length lz along the local z axis */
Capsule(BVH_REAL radius_, BVH_REAL lz_) : ShapeBase(), radius(radius_), lz(lz_) {}
/** \brief Radius of capsule */
BVH_REAL radius;
/** \brief Length along z axis */
BVH_REAL lz;
/** \brief Compute AABB */
void computeLocalAABB();
/** \brief Get node type: a capsule */
NODE_TYPE getNodeType() const { return GEOM_CAPSULE; }
};
/** Center at zero cone */
class Cone : public ShapeBase
{
public:
/** \brief Construct a cone with base radius `radius_` and height `lz_` along the local z axis */
Cone(BVH_REAL radius_, BVH_REAL lz_) : ShapeBase(), radius(radius_), lz(lz_) {}
/** \brief Radius of the cone */
BVH_REAL radius;
/** \brief Length along z axis */
BVH_REAL lz;
/** \brief Compute AABB */
void computeLocalAABB();
/** \brief Get node type: a cone */
NODE_TYPE getNodeType() const { return GEOM_CONE; }
};
/** Center at zero cylinder */
class Cylinder : public ShapeBase
{
public:
/** \brief Construct a cylinder with the given radius and length lz along the local z axis */
Cylinder(BVH_REAL radius_, BVH_REAL lz_) : ShapeBase(), radius(radius_), lz(lz_) {}
/** \brief Radius of the cylinder */
BVH_REAL radius;
/** \brief Length along z axis */
BVH_REAL lz;
/** \brief Compute AABB */
void computeLocalAABB();
/** \brief Get node type: a cylinder */
NODE_TYPE getNodeType() const { return GEOM_CYLINDER; }
};
/** Convex polytope */
class Convex : public ShapeBase
{
public:
/** Constructing a convex, providing normal and offset of each polytype surface, and the points and shape topology information */
Convex(Vec3f* plane_normals_,
BVH_REAL* plane_dis_,
int num_planes_,
Vec3f* points_,
int /*num_points_*/,
int* polygons_) : ShapeBase()
{
plane_normals = plane_normals_;
plane_dis = plane_dis_;
num_planes = num_planes_;
points = points_;
polygons = polygons_;
edges = NULL;
Vec3f sum;
for(int i = 0; i < num_points; ++i)
{
sum += points[i];
}
center = sum * (BVH_REAL)(1.0 / num_points);
fillEdges();
}
/** Copy constructor */
Convex(const Convex& other) : ShapeBase(other)
{
plane_normals = other.plane_normals;
plane_dis = other.plane_dis;
num_planes = other.num_planes;
points = other.points;
polygons = other.polygons;
edges = new Edge[other.num_edges];
memcpy(edges, other.edges, sizeof(Edge) * num_edges);
}
~Convex()
{
delete [] edges;
}
/** Compute AABB */
void computeLocalAABB();
/** Get node type: a conex polytope */
NODE_TYPE getNodeType() const { return GEOM_CONVEX; }
Vec3f* plane_normals;
BVH_REAL* plane_dis;
/** An array of indices to the points of each polygon, it should be the number of vertices
* followed by that amount of indices to "points" in counter clockwise order
*/
int* polygons;
Vec3f* points;
int num_points;
int num_edges;
int num_planes;
struct Edge
{
int first, second;
};
Edge* edges;
/** \brief center of the convex polytope, this is used for collision: center is guaranteed in the internal of the polytope (as it is convex) */
Vec3f center;
protected:
/** \brief Get edge information */
void fillEdges();
};
/** Infinite plane, represented as the point set { x : dot(n, x) = d } */
class Plane : public ShapeBase
{
public:
/** \brief Construct a plane with normal direction and offset */
Plane(const Vec3f& n_, BVH_REAL d_) : ShapeBase(), n(n_), d(d_) { unitNormalTest(); }
/** \brief Construct a plane with normal direction (a, b, c) and offset */
Plane(BVH_REAL a, BVH_REAL b, BVH_REAL c, BVH_REAL d_)
{
n = Vec3f(a, b, c);
d = d_;
unitNormalTest();
}
/** \brief Compute AABB */
void computeLocalAABB();
/** \brief Get node type: a plane */
NODE_TYPE getNodeType() const { return GEOM_PLANE; }
/** \brief Plane normal */
Vec3f n;
/** \brief Plane offset */
BVH_REAL d;
protected:
/** \brief Turn non-unit normal into unit */
void unitNormalTest();
};
}
#endif

View File

@ -0,0 +1,206 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_GEOMETRIC_SHAPES_INTERSECT_H
#define FCL_GEOMETRIC_SHAPES_INTERSECT_H
#include "fcl/geometric_shapes.h"
#include "fcl/transform.h"
#include <ccd/ccd.h>
#include <ccd/quat.h>
namespace fcl
{
/** \brief recall function used by GJK algorithm */
typedef void (*GJKSupportFunction)(const void* obj, const ccd_vec3_t* dir_, ccd_vec3_t* v);
typedef void (*GJKCenterFunction)(const void* obj, ccd_vec3_t* c);
/** \brief Initialize GJK machinery for a shape type. The primary template is a
 * do-nothing fallback (all members return NULL); supported shapes provide
 * explicit specializations below. */
template<typename T>
class GJKInitializer
{
public:
/** \brief Get GJK support function */
static GJKSupportFunction getSupportFunction() { return NULL; }
/** \brief Get GJK center function */
static GJKCenterFunction getCenterFunction() { return NULL; }
/** \brief Get GJK object from a shape
 * Notice that only local transformation is applied.
 * Global transformation is considered later
 */
static void* createGJKObject(const T& /*s*/) { return NULL; }
/** \brief Delete GJK object */
static void deleteGJKObject(void* /*o*/) {}
};
/** \brief GJK initialization for Cylinder */
template<>
class GJKInitializer<Cylinder>
{
public:
static GJKSupportFunction getSupportFunction();
static GJKCenterFunction getCenterFunction();
static void* createGJKObject(const Cylinder& s);
static void deleteGJKObject(void* o);
};
/** \brief GJK initialization for Sphere */
template<>
class GJKInitializer<Sphere>
{
public:
static GJKSupportFunction getSupportFunction();
static GJKCenterFunction getCenterFunction();
static void* createGJKObject(const Sphere& s);
static void deleteGJKObject(void* o);
};
/** \brief GJK initialization for Box */
template<>
class GJKInitializer<Box>
{
public:
static GJKSupportFunction getSupportFunction();
static GJKCenterFunction getCenterFunction();
static void* createGJKObject(const Box& s);
static void deleteGJKObject(void* o);
};
/** \brief GJK initialization for Capsule */
template<>
class GJKInitializer<Capsule>
{
public:
static GJKSupportFunction getSupportFunction();
static GJKCenterFunction getCenterFunction();
static void* createGJKObject(const Capsule& s);
static void deleteGJKObject(void* o);
};
/** \brief GJK initialization for Cone */
template<>
class GJKInitializer<Cone>
{
public:
static GJKSupportFunction getSupportFunction();
static GJKCenterFunction getCenterFunction();
static void* createGJKObject(const Cone& s);
static void deleteGJKObject(void* o);
};
/** \brief GJK initialization for Convex */
template<>
class GJKInitializer<Convex>
{
public:
static GJKSupportFunction getSupportFunction();
static GJKCenterFunction getCenterFunction();
static void* createGJKObject(const Convex& s);
static void deleteGJKObject(void* o);
};
/** \brief GJK support/center functions for a triangle primitive */
GJKSupportFunction triGetSupportFunction();
GJKCenterFunction triGetCenterFunction();
/** \brief Build a GJK triangle object from three vertices (optionally pre-transformed by R, T) */
void* triCreateGJKObject(const Vec3f& P1, const Vec3f& P2, const Vec3f& P3);
void* triCreateGJKObject(const Vec3f& P1, const Vec3f& P2, const Vec3f& P3, const Vec3f R[3], const Vec3f& T);
void triDeleteGJKObject(void* o);
/** \brief GJK collision algorithm: returns whether the two support-mapped
 * objects intersect; optionally fills contact point, penetration depth and normal. */
bool GJKCollide(void* obj1, ccd_support_fn supp1, ccd_center_fn cen1,
void* obj2, ccd_support_fn supp2, ccd_center_fn cen2,
Vec3f* contact_points, BVH_REAL* penetration_depth, Vec3f* normal);
/** Intersection checking between two shapes via GJK. Optionally fills contact
 * point, penetration depth and normal. Returns whether the shapes intersect. */
template<typename S1, typename S2>
bool shapeIntersect(const S1& s1, const S2& s2, Vec3f* contact_points = NULL, BVH_REAL* penetration_depth = NULL, Vec3f* normal = NULL)
{
  void* o1 = GJKInitializer<S1>::createGJKObject(s1);
  void* o2 = GJKInitializer<S2>::createGJKObject(s2);

  bool res = GJKCollide(o1, GJKInitializer<S1>::getSupportFunction(), GJKInitializer<S1>::getCenterFunction(),
                        o2, GJKInitializer<S2>::getSupportFunction(), GJKInitializer<S2>::getCenterFunction(),
                        contact_points, penetration_depth, normal);

  // BUGFIX: these delete calls were originally placed after the return
  // statement, so they were never executed and both GJK objects leaked on
  // every call.
  GJKInitializer<S1>::deleteGJKObject(o1);
  GJKInitializer<S2>::deleteGJKObject(o2);

  return res;
}
/** \brief Intersection checking between one shape and a triangle (P1, P2, P3)
 * via GJK. Optionally fills contact point, penetration depth and normal. */
template<typename S>
bool shapeTriangleIntersect(const S& s, const Vec3f& P1, const Vec3f& P2, const Vec3f& P3, Vec3f* contact_points = NULL, BVH_REAL* penetration_depth = NULL, Vec3f* normal = NULL)
{
  void* o1 = GJKInitializer<S>::createGJKObject(s);
  void* o2 = triCreateGJKObject(P1, P2, P3);

  bool res = GJKCollide(o1, GJKInitializer<S>::getSupportFunction(), GJKInitializer<S>::getCenterFunction(),
                        o2, triGetSupportFunction(), triGetCenterFunction(),
                        contact_points, penetration_depth, normal);

  // BUGFIX: originally placed after the return statement (dead code), leaking
  // both GJK objects on every call.
  GJKInitializer<S>::deleteGJKObject(o1);
  triDeleteGJKObject(o2);

  return res;
}
/** \brief Intersection checking between one shape and a triangle, with an
 * additional rigid transform (R, T) applied to the triangle, via GJK. */
template<typename S>
bool shapeTriangleIntersect(const S& s, const Vec3f& P1, const Vec3f& P2, const Vec3f& P3, const Vec3f R[3], const Vec3f& T,
                            Vec3f* contact_points = NULL, BVH_REAL* penetration_depth = NULL, Vec3f* normal = NULL)
{
  void* o1 = GJKInitializer<S>::createGJKObject(s);
  void* o2 = triCreateGJKObject(P1, P2, P3, R, T);

  bool res = GJKCollide(o1, GJKInitializer<S>::getSupportFunction(), GJKInitializer<S>::getCenterFunction(),
                        o2, triGetSupportFunction(), triGetCenterFunction(),
                        contact_points, penetration_depth, normal);

  // BUGFIX: originally placed after the return statement (dead code), leaking
  // both GJK objects on every call.
  GJKInitializer<S>::deleteGJKObject(o1);
  triDeleteGJKObject(o2);

  return res;
}
}
#endif

View File

@ -0,0 +1,116 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_GEOMETRIC_SHAPES_UTILITY_H
#define FCL_GEOMETRIC_SHAPES_UTILITY_H
#include "fcl/geometric_shapes.h"
#include "fcl/BV.h"
namespace fcl
{
/* Generic computeBV overloads are intentional no-op stubs: they leave bv
 * untouched. Real bounds are only computed by the AABB/OBB specializations
 * declared below — presumably callers are expected to instantiate only
 * those pairs (TODO confirm: a non-specialized pair silently yields an
 * unmodified bounding volume). */
template<typename BV>
void computeBV(const Box& /*s*/, BV& /*bv*/) {}
template<typename BV>
void computeBV(const Sphere& /*s*/, BV& /*bv*/) {}
template<typename BV>
void computeBV(const Capsule& /*s*/, BV& /*bv*/) {}
template<typename BV>
void computeBV(const Cone& /*s*/, BV& /*bv*/) {}
template<typename BV>
void computeBV(const Cylinder& /*s*/, BV& /*bv*/) {}
template<typename BV>
void computeBV(const Convex& /*s*/, BV& /*bv*/) {}
/** the bounding volume for half space back of plane
 * for OBB, it is the plane itself
 */
template<typename BV>
void computeBV(const Plane& /*s*/, BV& /*bv*/) {}
/** Specializations computing an AABB bound for each primitive shape */
template<>
void computeBV<AABB>(const Box& s, AABB& bv);
template<>
void computeBV<AABB>(const Sphere& s, AABB& bv);
template<>
void computeBV<AABB>(const Capsule& s, AABB& bv);
template<>
void computeBV<AABB>(const Cone& s, AABB& bv);
template<>
void computeBV<AABB>(const Cylinder& s, AABB& bv);
template<>
void computeBV<AABB>(const Convex& s, AABB& bv);
template<>
void computeBV<AABB>(const Plane& s, AABB& bv);
/** Specializations computing an OBB bound for each primitive shape */
template<>
void computeBV<OBB>(const Box& s, OBB& bv);
template<>
void computeBV<OBB>(const Sphere& s, OBB& bv);
template<>
void computeBV<OBB>(const Capsule& s, OBB& bv);
template<>
void computeBV<OBB>(const Cone& s, OBB& bv);
template<>
void computeBV<OBB>(const Cylinder& s, OBB& bv);
template<>
void computeBV<OBB>(const Convex& s, OBB& bv);
template<>
void computeBV<OBB>(const Plane& s, OBB& bv);
// TODO: implement computeBV for RSS and KDOP
}
#endif

View File

@ -0,0 +1,336 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_INTERSECT_H
#define FCL_INTERSECT_H
#include "fcl/vec_3f.h"
#include "fcl/BVH_internal.h"
#include "fcl/primitive.h"
#if USE_SVMLIGHT
extern "C"
{
# include <svm_light/svm_common.h>
# include <svm_light/svm_learn.h>
}
#endif
/** \brief Main namespace */
namespace fcl
{
/** \brief A class that solves polynomial equations of degree 1, 2 and 3.
 * Each solver writes the real roots into s and returns how many were found. */
class PolySolver
{
public:
/** \brief Solve a linear equation with coefficients c, return roots s and number of roots */
static int solveLinear(BVH_REAL c[2], BVH_REAL s[1]);
/** \brief Solve a quadratic function with coefficients c, return roots s and number of roots */
static int solveQuadric(BVH_REAL c[3], BVH_REAL s[2]);
/** \brief Solve a cubic function with coefficients c, return roots s and number of roots */
static int solveCubic(BVH_REAL c[4], BVH_REAL s[3]);
private:
/** \brief Check whether v is zero (within NEAR_ZERO_THRESHOLD, presumably — defined out of view) */
static inline bool isZero(BVH_REAL v);
/** \brief Compute v^{1/3}. NOTE(review): declared to return bool, not BVH_REAL —
 * looks inconsistent with the stated purpose; confirm against the definition. */
static inline bool cbrt(BVH_REAL v);
/** \brief Tolerance used by isZero (value defined out of view) */
static const BVH_REAL NEAR_ZERO_THRESHOLD;
};
#if USE_SVMLIGHT
/** \brief Parameter bundle for SVM-light based point-cloud classification:
 * learning and kernel parameters, initialized by the constructor (defined out of view). */
class CloudClassifierParam
{
public:
LEARN_PARM learn_parm;
KERNEL_PARM kernel_parm;
CloudClassifierParam();
};
#endif
/** \brief CCD (continuous collision detection) intersect kernel among
 * primitives: vertex-face, edge-edge, vertex-edge and triangle-triangle
 * tests, plus SVM-light based point-cloud tests when USE_SVMLIGHT is set.
 * All members are static; the class is a namespace-like grouping. */
class Intersect
{
public:
/** \brief CCD intersect between one vertex and one face
 * [a0, b0, c0] and [a1, b1, c1] are points for the triangle face in time t0 and t1
 * p0 and p1 are points for vertex in time t0 and t1
 * p_i returns the coordinate of the collision point
 */
static bool intersect_VF(const Vec3f& a0, const Vec3f& b0, const Vec3f& c0, const Vec3f& p0,
const Vec3f& a1, const Vec3f& b1, const Vec3f& c1, const Vec3f& p1,
BVH_REAL* collision_time, Vec3f* p_i, bool useNewton = true);
/** \brief CCD intersect between two edges
 * [a0, b0] and [a1, b1] are points for one edge in time t0 and t1
 * [c0, d0] and [c1, d1] are points for the other edge in time t0 and t1
 * p_i returns the coordinate of the collision point
 */
static bool intersect_EE(const Vec3f& a0, const Vec3f& b0, const Vec3f& c0, const Vec3f& d0,
const Vec3f& a1, const Vec3f& b1, const Vec3f& c1, const Vec3f& d1,
BVH_REAL* collision_time, Vec3f* p_i, bool useNewton = true);
/** \brief CCD intersect between one vertex and one face, using additional filter */
static bool intersect_VF_filtered(const Vec3f& a0, const Vec3f& b0, const Vec3f& c0, const Vec3f& p0,
const Vec3f& a1, const Vec3f& b1, const Vec3f& c1, const Vec3f& p1,
BVH_REAL* collision_time, Vec3f* p_i, bool useNewton = true);
/** \brief CCD intersect between two edges, using additional filter */
static bool intersect_EE_filtered(const Vec3f& a0, const Vec3f& b0, const Vec3f& c0, const Vec3f& d0,
const Vec3f& a1, const Vec3f& b1, const Vec3f& c1, const Vec3f& d1,
BVH_REAL* collision_time, Vec3f* p_i, bool useNewton = true);
/** \brief CCD intersect between one vertex and one edge */
static bool intersect_VE(const Vec3f& a0, const Vec3f& b0, const Vec3f& p0,
const Vec3f& a1, const Vec3f& b1, const Vec3f& p1,
const Vec3f& L);
/** \brief CD (discrete) intersect between two triangles [P1, P2, P3] and [Q1, Q2, Q3] */
static bool intersect_Triangle(const Vec3f& P1, const Vec3f& P2, const Vec3f& P3,
const Vec3f& Q1, const Vec3f& Q2, const Vec3f& Q3,
Vec3f* contact_points = NULL,
unsigned int* num_contact_points = NULL,
BVH_REAL* penetration_depth = NULL,
Vec3f* normal = NULL);
/** \brief Same as above, with the second triangle given in its own frame and
 * transformed by rotation R and translation T */
static bool intersect_Triangle(const Vec3f& P1, const Vec3f& P2, const Vec3f& P3,
const Vec3f& Q1, const Vec3f& Q2, const Vec3f& Q3,
const Vec3f R[3], const Vec3f& T,
Vec3f* contact_points = NULL,
unsigned int* num_contact_points = NULL,
BVH_REAL* penetration_depth = NULL,
Vec3f* normal = NULL);
#if USE_SVMLIGHT
/** \brief Collision measures between uncertain point clouds, computed with an
 * SVM-light classifier (definitions out of view) */
static BVH_REAL intersect_PointClouds(Vec3f* cloud1, Uncertainty* uc1, int size_cloud1,
Vec3f* cloud2, Uncertainty* uc2, int size_cloud2,
const CloudClassifierParam& solver, bool scaling = true);
static BVH_REAL intersect_PointClouds(Vec3f* cloud1, Uncertainty* uc1, int size_cloud1,
Vec3f* cloud2, Uncertainty* uc2, int size_cloud2,
const Vec3f R[3], const Vec3f& T, const CloudClassifierParam& solver, bool scaling = true);
static BVH_REAL intersect_PointCloudsTriangle(Vec3f* cloud1, Uncertainty* uc1, int size_cloud1,
const Vec3f& Q1, const Vec3f& Q2, const Vec3f& Q3);
static BVH_REAL intersect_PointCloudsTriangle(Vec3f* cloud1, Uncertainty* uc1, int size_cloud1,
const Vec3f& Q1, const Vec3f& Q2, const Vec3f& Q3,
const Vec3f R[3], const Vec3f& T);
#endif
private:
/** \brief Project function used in intersect_Triangle() */
static int project6(const Vec3f& ax,
const Vec3f& p1, const Vec3f& p2, const Vec3f& p3,
const Vec3f& q1, const Vec3f& q2, const Vec3f& q3);
/** \brief Check whether one value is zero */
static inline bool isZero(BVH_REAL v);
/** \brief Solve the cubic function using Newton method, also satisfies the interval restriction */
static bool solveCubicWithIntervalNewton(const Vec3f& a0, const Vec3f& b0, const Vec3f& c0, const Vec3f& d0,
const Vec3f& va, const Vec3f& vb, const Vec3f& vc, const Vec3f& vd,
BVH_REAL& l, BVH_REAL& r, bool bVF, BVH_REAL coeffs[], Vec3f* data = NULL);
/** \brief Check whether one point p is within triangle [a, b, c] */
static bool insideTriangle(const Vec3f& a, const Vec3f& b, const Vec3f& c, const Vec3f&p);
/** \brief Check whether one point p is within a line segment [a, b] */
static bool insideLineSegment(const Vec3f& a, const Vec3f& b, const Vec3f& p);
/** \brief Calculate the line segment papb that is the shortest route between
 * two lines p1p2 and p3p4. Calculate also the values of mua and mub where
 * pa = p1 + mua (p2 - p1)
 * pb = p3 + mub (p4 - p3)
 * return FALSE if no solution exists.
 */
static bool linelineIntersect(const Vec3f& p1, const Vec3f& p2, const Vec3f& p3, const Vec3f& p4,
Vec3f* pa, Vec3f* pb, BVH_REAL* mua, BVH_REAL* mub);
/** \brief Check whether a root for VF intersection is valid (i.e. within the triangle at intersection time t) */
static bool checkRootValidity_VF(const Vec3f& a0, const Vec3f& b0, const Vec3f& c0, const Vec3f& p0,
const Vec3f& va, const Vec3f& vb, const Vec3f& vc, const Vec3f& vp,
BVH_REAL t);
/** \brief Check whether a root for EE intersection is valid (i.e. within the two edges intersected at the given time) */
static bool checkRootValidity_EE(const Vec3f& a0, const Vec3f& b0, const Vec3f& c0, const Vec3f& d0,
const Vec3f& va, const Vec3f& vb, const Vec3f& vc, const Vec3f& vd,
BVH_REAL t, Vec3f* q_i = NULL);
/** \brief Check whether a root for VE intersection is valid */
static bool checkRootValidity_VE(const Vec3f& a0, const Vec3f& b0, const Vec3f& p0,
const Vec3f& va, const Vec3f& vb, const Vec3f& vp,
BVH_REAL t);
/** \brief Solve a square function for EE intersection (with interval restriction) */
static bool solveSquare(BVH_REAL a, BVH_REAL b, BVH_REAL c,
const Vec3f& a0, const Vec3f& b0, const Vec3f& c0, const Vec3f& d0,
const Vec3f& va, const Vec3f& vb, const Vec3f& vc, const Vec3f& vd,
bool bVF,
BVH_REAL* ret);
/** \brief Solve a square function for VE intersection (with interval restriction) */
static bool solveSquare(BVH_REAL a, BVH_REAL b, BVH_REAL c,
const Vec3f& a0, const Vec3f& b0, const Vec3f& p0,
const Vec3f& va, const Vec3f& vb, const Vec3f& vp);
/** \brief Compute the cubic coefficients for VF intersection
 * See Paper "Interactive Continuous Collision Detection between Deformable Models using Connectivity-Based Culling", Equation 1.
 */
static void computeCubicCoeff_VF(const Vec3f& a0, const Vec3f& b0, const Vec3f& c0, const Vec3f& p0,
const Vec3f& va, const Vec3f& vb, const Vec3f& vc, const Vec3f& vp,
BVH_REAL* a, BVH_REAL* b, BVH_REAL* c, BVH_REAL* d);
/** \brief Compute the cubic coefficients for EE intersection */
static void computeCubicCoeff_EE(const Vec3f& a0, const Vec3f& b0, const Vec3f& c0, const Vec3f& d0,
const Vec3f& va, const Vec3f& vb, const Vec3f& vc, const Vec3f& vd,
BVH_REAL* a, BVH_REAL* b, BVH_REAL* c, BVH_REAL* d);
/** \brief Compute the cubic coefficients for VE intersection */
static void computeCubicCoeff_VE(const Vec3f& a0, const Vec3f& b0, const Vec3f& p0,
const Vec3f& va, const Vec3f& vb, const Vec3f& vp,
const Vec3f& L,
BVH_REAL* a, BVH_REAL* b, BVH_REAL* c);
/** \brief filter for intersection, works for both VF and EE */
static bool intersectPreFiltering(const Vec3f& a0, const Vec3f& b0, const Vec3f& c0, const Vec3f& d0,
const Vec3f& a1, const Vec3f& b1, const Vec3f& c1, const Vec3f& d1);
/** \brief distance of point v to a plane n * x - t = 0 */
static BVH_REAL distanceToPlane(const Vec3f& n, BVH_REAL t, const Vec3f& v);
/** \brief check whether points v1, v2, v3 are on the same side of plane n * x - t = 0 */
static bool sameSideOfPlane(const Vec3f& v1, const Vec3f& v2, const Vec3f& v3, const Vec3f& n, BVH_REAL t);
/** \brief clip triangle v1, v2, v3 by the prism made by t1, t2 and t3. The normal of the prism is tn and it is cut off at offset to */
static void clipTriangleByTriangleAndEdgePlanes(const Vec3f& v1, const Vec3f& v2, const Vec3f& v3,
const Vec3f& t1, const Vec3f& t2, const Vec3f& t3,
const Vec3f& tn, BVH_REAL to,
Vec3f clipped_points[], unsigned int* num_clipped_points, bool clip_triangle = false);
/** \brief build a plane passed through triangle v1 v2 v3 */
static bool buildTrianglePlane(const Vec3f& v1, const Vec3f& v2, const Vec3f& v3, Vec3f* n, BVH_REAL* t);
/** \brief build a plane pass through edge v1 and v2, normal is tn */
static bool buildEdgePlane(const Vec3f& v1, const Vec3f& v2, const Vec3f& tn, Vec3f* n, BVH_REAL* t);
/** \brief compute the points which has deepest penetration depth */
static void computeDeepestPoints(Vec3f* clipped_points, unsigned int num_clipped_points, const Vec3f& n, BVH_REAL t, BVH_REAL* penetration_depth, Vec3f* deepest_points, unsigned int* num_deepest_points);
/** \brief clip polygon by plane */
static void clipPolygonByPlane(Vec3f* polygon_points, unsigned int num_polygon_points, const Vec3f& n, BVH_REAL t, Vec3f clipped_points[], unsigned int* num_clipped_points);
/** \brief clip a line segment by plane */
static void clipSegmentByPlane(const Vec3f& v1, const Vec3f& v2, const Vec3f& n, BVH_REAL t, Vec3f* clipped_point);
/** \brief compute the standard-normal cdf(x) via the complementary error function */
static BVH_REAL gaussianCDF(BVH_REAL x)
{
return 0.5 * erfc(-x / sqrt(2.0));
}
#if USE_SVMLIGHT
/** \brief compute the d K(x0, x) / dx, where x is one 3d point on or near the classification surface, and K is a composite kernel */
static void kernelGradient(KERNEL_PARM *kernel_parm, DOC *a, DOC *b, Vec3f& g);
/** \brief compute the d K(x0, x) / dx, where x is one 3d point on or near the classification surface, and K is a single kernel */
static void singleKernelGradient(KERNEL_PARM *kernel_parm, SVECTOR *a, SVECTOR *b, Vec3f& g);
#endif
// Numeric tolerances and limits; values are defined out of view.
static const BVH_REAL EPSILON;
static const BVH_REAL NEAR_ZERO_THRESHOLD;
static const BVH_REAL CCD_RESOLUTION;
/** \brief maximum number of clipped points kept by the triangle clipping routines */
static const unsigned int MAX_TRIANGLE_CLIPS = 8;
};
/** \brief Static helpers computing distances and closest points between
 * segments and triangles. */
class TriangleDistance
{
public:
/** \brief Returns closest points between a segment pair.
 * The first segment is P + t * A
 * The second segment is Q + t * B
 * X, Y are the closest points on the two segments
 * VEC is the vector between X and Y
 */
static void segPoints(const Vec3f& P, const Vec3f& A, const Vec3f& Q, const Vec3f& B,
Vec3f& VEC, Vec3f& X, Vec3f& Y);
/** \brief Compute the closest points on two triangles given their absolute coordinates, and return the distance between them
 * S and T are two triangles
 * If the triangles are disjoint, P and Q give the closest points of S and T respectively. However,
 * if the triangles overlap, P and Q are basically a random pair of points from the triangles, not
 * coincident points on the intersection of the triangles, as might be expected.
 */
static BVH_REAL triDistance(const Vec3f S[3], const Vec3f T[3], Vec3f& P, Vec3f& Q);
static BVH_REAL triDistance(const Vec3f& S1, const Vec3f& S2, const Vec3f& S3,
const Vec3f& T1, const Vec3f& T2, const Vec3f& T3,
Vec3f& P, Vec3f& Q);
/** \brief Compute the closest points on two triangles given the relative transform between them, and return the distance between them
 * S and T are two triangles
 * If the triangles are disjoint, P and Q give the closest points of S and T respectively. However,
 * if the triangles overlap, P and Q are basically a random pair of points from the triangles, not
 * coincident points on the intersection of the triangles, as might be expected.
 * The returned P and Q are both in the coordinate of the first triangle's coordinate
 */
static BVH_REAL triDistance(const Vec3f S[3], const Vec3f T[3],
const Vec3f R[3], const Vec3f& Tl,
Vec3f& P, Vec3f& Q);
static BVH_REAL triDistance(const Vec3f& S1, const Vec3f& S2, const Vec3f& S3,
const Vec3f& T1, const Vec3f& T2, const Vec3f& T3,
const Vec3f R[3], const Vec3f& Tl,
Vec3f& P, Vec3f& Q);
};
}
#endif

View File

@ -0,0 +1,171 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_INTERVAL_TREE_H
#define FCL_INTERVAL_TREE_H
#include <deque>
#include <limits>
/** \brief Main namespace */
namespace fcl
{
/** \brief Interval trees implemented using red-black-trees as described in
 * the book Introduction_To_Algorithms_ by Cormen, Leisserson, and Rivest.
 * Can be replaced in part by boost::icl::interval_set, which is only supported after boost 1.46 and does not support delete node routine.
 */
struct Interval
{
public:
Interval() {}
virtual ~Interval() {}
/** \brief Debug hook: subclasses may override to print their payload; base is a no-op. */
virtual void print() {}
/** \brief Endpoints of the interval; not initialized by the default constructor. */
double low, high;
};
/** \brief One red-black tree node holding an Interval; internals are managed
 * exclusively by the friend class IntervalTree. */
class IntervalTreeNode
{
friend class IntervalTree;
public:
/** \brief Print the interval node information: set left = nil and right = root */
void print(IntervalTreeNode* left, IntervalTreeNode* right) const;
IntervalTreeNode();
IntervalTreeNode(Interval* new_interval);
~IntervalTreeNode();
protected:
/** \brief interval stored in this node (not owned by the node — see IntervalTree::deleteNode, which returns it) */
Interval* stored_interval;
/** \brief sort key — presumably the interval's low endpoint; confirm against the constructor definition */
double key;
/** \brief this node's interval high endpoint */
double high;
/** \brief maximum high value in the subtree rooted here (maintained by IntervalTree::fixupMaxHigh) */
double max_high;
bool red; /* if red = false then the node is black */
IntervalTreeNode* left;
IntervalTreeNode* right;
IntervalTreeNode* parent;
};
/** \brief Class describes the information needed when we take the
 * right branch in searching for intervals but possibly come back
 * and check the left branch as well.
 */
struct it_recursion_node
{
public:
/** \brief node whose left branch may still need to be visited */
IntervalTreeNode* start_node;
/** \brief index of the parent entry on the manual recursion stack */
unsigned int parent_index;
/** \brief whether the right branch remains to be tried */
bool try_right_branch;
};
/** \brief Interval tree: a red-black tree of Interval objects supporting
 * insertion, deletion, predecessor/successor queries and stabbing queries
 * over a range [low, high]. Uses a sentinel nil node and an explicit
 * recursion stack for query traversal. */
class IntervalTree
{
public:
IntervalTree();
~IntervalTree();
/** \brief Print the whole interval tree */
void print() const;
/** \brief Delete one node of the interval tree; returns the stored Interval so the caller can dispose of it */
Interval* deleteNode(IntervalTreeNode* node);
/** \brief Insert one node of the interval tree */
IntervalTreeNode* insert(Interval* new_interval);
/** \brief get the predecessor of a given node */
IntervalTreeNode* getPredecessor(IntervalTreeNode* node) const;
/** \brief Get the successor of a given node */
IntervalTreeNode* getSuccessor(IntervalTreeNode* node) const;
/** \brief Return all stored intervals overlapping [low, high] */
std::deque<Interval*> query(double low, double high);
protected:
/** \brief tree root; nil is the shared sentinel leaf/parent node */
IntervalTreeNode* root;
IntervalTreeNode* nil;
/** \brief left rotation of tree node */
void leftRotate(IntervalTreeNode* node);
/** \brief right rotation of tree node */
void rightRotate(IntervalTreeNode* node);
/** \brief recursively insert a node */
void recursiveInsert(IntervalTreeNode* node);
/** \brief recursively print a subtree */
void recursivePrint(IntervalTreeNode* node) const;
/** \brief Travels up to the root fixing the max_high fields after an insertion or deletion */
void fixupMaxHigh(IntervalTreeNode* node);
/** \brief Restores red-black invariants after deleteNode */
void deleteFixup(IntervalTreeNode* node);
private:
// Manual stack used by query() instead of call recursion.
unsigned int recursion_node_stack_size;
it_recursion_node* recursion_node_stack;
unsigned int current_parent;
unsigned int recursion_node_stack_top;
};
}
#endif

View File

@ -0,0 +1,294 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_KDOP_H
#define FCL_KDOP_H
#include "fcl/BVH_internal.h"
#include "fcl/vec_3f.h"
#include <cstdlib>
#include <limits>
#include <iostream>
/** \brief Main namespace */
namespace fcl
{
/** \brief Write the smaller of a and b into minv and the larger into maxv */
inline void minmax(BVH_REAL a, BVH_REAL b, BVH_REAL& minv, BVH_REAL& maxv)
{
  const bool a_is_larger = a > b;
  minv = a_is_larger ? b : a;
  maxv = a_is_larger ? a : b;
}
/** \brief Grow the interval [minv, maxv] in place so that it also covers the value p */
inline void minmax(BVH_REAL p, BVH_REAL& minv, BVH_REAL& maxv)
{
  if(maxv < p) maxv = p;
  if(minv > p) minv = p;
}
/** \brief Compute the distances to planes with normals from KDOP vectors except those of AABB face planes.
 * Generic template is a no-op stub: only the N = 5, 6 and 9 specializations below
 * fill d. N must be given explicitly at the call site — it cannot be deduced
 * from the array parameter. */
template<size_t N>
void getDistances(const Vec3f& p, BVH_REAL d[]) {}
/** \brief Specialization of getDistances for 5 extra diagonal planes (KDOP<16>):
 * projections of p onto the non-axis-aligned plane normals. */
template<>
inline void getDistances<5>(const Vec3f& p, BVH_REAL d[])
{
d[0] = p[0] + p[1];
d[1] = p[0] + p[2];
d[2] = p[1] + p[2];
d[3] = p[0] - p[1];
d[4] = p[0] - p[2];
}
/** \brief Specialization of getDistances for 6 extra diagonal planes (KDOP<18>) */
template<>
inline void getDistances<6>(const Vec3f& p, BVH_REAL d[])
{
d[0] = p[0] + p[1];
d[1] = p[0] + p[2];
d[2] = p[1] + p[2];
d[3] = p[0] - p[1];
d[4] = p[0] - p[2];
d[5] = p[1] - p[2];
}
/** \brief Specialization of getDistances for 9 extra diagonal planes (KDOP<24>) */
template<>
inline void getDistances<9>(const Vec3f& p, BVH_REAL d[])
{
d[0] = p[0] + p[1];
d[1] = p[0] + p[2];
d[2] = p[1] + p[2];
d[3] = p[0] - p[1];
d[4] = p[0] - p[2];
d[5] = p[1] - p[2];
d[6] = p[0] + p[1] - p[2];
d[7] = p[0] + p[2] - p[1];
d[8] = p[1] + p[2] - p[0];
}
/** \brief KDOP class describes the KDOP collision structures. K is set as the template parameter, which should be 16, 18, or 24
The KDOP structure is defined by some pairs of parallel planes defined by some axes.
For K = 18, the planes are 6 AABB planes and 12 diagonal planes that cut off some space of the edges:
(-1,0,0) and (1,0,0) -> indices 0 and 9
(0,-1,0) and (0,1,0) -> indices 1 and 10
(0,0,-1) and (0,0,1) -> indices 2 and 11
(-1,-1,0) and (1,1,0) -> indices 3 and 12
(-1,0,-1) and (1,0,1) -> indices 4 and 13
(0,-1,-1) and (0,1,1) -> indices 5 and 14
(-1,1,0) and (1,-1,0) -> indices 6 and 15
(-1,0,1) and (1,0,-1) -> indices 7 and 16
(0,-1,1) and (0,1,-1) -> indices 8 and 17
*/
template<size_t N>
class KDOP
{
public:
  /** \brief Empty KDOP: every lower distance at +max and every upper distance
   * at -max, so merging any point yields a valid bound. */
  KDOP()
  {
    BVH_REAL real_max = std::numeric_limits<BVH_REAL>::max();
    for(size_t i = 0; i < N / 2; ++i)
    {
      dist_[i] = real_max;
      dist_[i + N / 2] = -real_max;
    }
  }
  /** \brief KDOP enclosing a single point v */
  KDOP(const Vec3f& v)
  {
    for(size_t i = 0; i < 3; ++i)
    {
      dist_[i] = dist_[N / 2 + i] = v[i];
    }
    BVH_REAL d[(N - 6) / 2];
    getDistances<(N - 6) / 2>(v, d);
    for(size_t i = 0; i < (N - 6) / 2; ++i)
    {
      dist_[3 + i] = dist_[3 + i + N / 2] = d[i];
    }
  }
  /** \brief KDOP enclosing the two points a and b */
  KDOP(const Vec3f& a, const Vec3f& b)
  {
    for(size_t i = 0; i < 3; ++i)
    {
      minmax(a[i], b[i], dist_[i], dist_[i + N / 2]);
    }
    BVH_REAL ad[(N - 6) / 2], bd[(N - 6) / 2];
    getDistances<(N - 6) / 2>(a, ad);
    getDistances<(N - 6) / 2>(b, bd);
    for(size_t i = 0; i < (N - 6) / 2; ++i)
    {
      minmax(ad[i], bd[i], dist_[3 + i], dist_[3 + i + N / 2]);
    }
  }
  /** \brief Check whether two KDOPs overlap: disjoint iff any slab pair is separated */
  inline bool overlap(const KDOP<N>& other) const
  {
    for(size_t i = 0; i < N / 2; ++i)
    {
      if(dist_[i] > other.dist_[i + N / 2]) return false;
      if(dist_[i + N / 2] < other.dist_[i]) return false;
    }
    return true;
  }
  /** \brief Check whether one point is inside the KDOP */
  inline bool inside(const Vec3f& p) const
  {
    for(size_t i = 0; i < 3; ++i)
    {
      if(p[i] < dist_[i] || p[i] > dist_[i + N / 2])
        return false;
    }
    BVH_REAL d[(N - 6) / 2];
    // Fix: the explicit template argument is required here. N cannot be
    // deduced from the decayed array parameter, so the original call
    // getDistances(p, d) failed to compile when this method was instantiated.
    getDistances<(N - 6) / 2>(p, d);
    for(size_t i = 0; i < (N - 6) / 2; ++i)
    {
      if(d[i] < dist_[3 + i] || d[i] > dist_[3 + i + N / 2])
        return false;
    }
    return true;
  }
  /** \brief Merge the point p into the KDOP */
  inline KDOP<N>& operator += (const Vec3f& p)
  {
    for(size_t i = 0; i < 3; ++i)
    {
      minmax(p[i], dist_[i], dist_[N / 2 + i]);
    }
    BVH_REAL pd[(N - 6) / 2];
    getDistances<(N - 6) / 2>(p, pd);
    for(size_t i = 0; i < (N - 6) / 2; ++i)
    {
      minmax(pd[i], dist_[3 + i], dist_[3 + N / 2 + i]);
    }
    return *this;
  }
  /** \brief Merge another KDOP into this one */
  inline KDOP<N>& operator += (const KDOP<N>& other)
  {
    for(size_t i = 0; i < N / 2; ++i)
    {
      dist_[i] = std::min(other.dist_[i], dist_[i]);
      dist_[i + N / 2] = std::max(other.dist_[i + N / 2], dist_[i + N / 2]);
    }
    return *this;
  }
  /** \brief Create a KDOP by merging two KDOPs */
  inline KDOP<N> operator + (const KDOP<N>& other) const
  {
    KDOP<N> res(*this);
    return res += other;
  }
  /** \brief The (AABB) width */
  inline BVH_REAL width() const
  {
    return dist_[N / 2] - dist_[0];
  }
  /** \brief The (AABB) height */
  inline BVH_REAL height() const
  {
    return dist_[N / 2 + 1] - dist_[1];
  }
  /** \brief The (AABB) depth */
  inline BVH_REAL depth() const
  {
    return dist_[N / 2 + 2] - dist_[2];
  }
  /** \brief The (AABB) volume */
  inline BVH_REAL volume() const
  {
    return width() * height() * depth();
  }
  /** \brief Squared diagonal length of the AABB part of the KDOP */
  inline BVH_REAL size() const
  {
    return width() * width() + height() * height() + depth() * depth();
  }
  /** \brief The (AABB) center */
  inline Vec3f center() const
  {
    return Vec3f(dist_[0] + dist_[N / 2], dist_[1] + dist_[N / 2 + 1], dist_[2] + dist_[N / 2 + 2]) * 0.5;
  }
  /** \brief The distance between two KDOP<N>.
   * Not implemented: warns on stderr and returns 0.
   */
  BVH_REAL distance(const KDOP<N>& other, Vec3f* P = NULL, Vec3f* Q = NULL) const
  {
    std::cerr << "KDOP distance not implemented!" << std::endl;
    return 0.0;
  }
private:
  /** \brief distances to N KDOP planes: dist_[0 .. N/2-1] are lower bounds,
   * dist_[N/2 .. N-1] the matching upper bounds */
  BVH_REAL dist_[N];
};
}
#endif

View File

@ -0,0 +1,660 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_MOTION_H
#define FCL_MOTION_H
#include "fcl/vec_3f.h"
#include "fcl/RSS.h"
#include "fcl/transform.h"
#include "fcl/motion_base.h"
#include "fcl/intersect.h"
#include <iostream>
#include <vector>
namespace fcl
{
template<typename BV>
class SplineMotion : public MotionBase<BV>
{
public:
/** \brief Construct motion from 4 deBoor control points.
 * Td[0..3] are translation control points, Rd[0..3] rotation control points.
 * Precomputes all pairwise dot products of the Rd's and the cubic polynomial
 * coefficients (TA,TB,TC / RA,RB,RC) of the uniform cubic B-spline, then
 * evaluates the transform at t = 0. */
SplineMotion(const Vec3f& Td0, const Vec3f& Td1, const Vec3f& Td2, const Vec3f& Td3,
const Vec3f& Rd0, const Vec3f& Rd1, const Vec3f& Rd2, const Vec3f& Rd3)
{
Td[0] = Td0;
Td[1] = Td1;
Td[2] = Td2;
Td[3] = Td3;
Rd[0] = Rd0;
Rd[1] = Rd1;
Rd[2] = Rd2;
Rd[3] = Rd3;
// Cached dot products of the rotation control points, presumably used by
// computeDWMax() / the motion-bound computation (definition out of view).
Rd0Rd0 = Rd[0].dot(Rd[0]);
Rd0Rd1 = Rd[0].dot(Rd[1]);
Rd0Rd2 = Rd[0].dot(Rd[2]);
Rd0Rd3 = Rd[0].dot(Rd[3]);
Rd1Rd1 = Rd[1].dot(Rd[1]);
Rd1Rd2 = Rd[1].dot(Rd[2]);
Rd1Rd3 = Rd[1].dot(Rd[3]);
Rd2Rd2 = Rd[2].dot(Rd[2]);
Rd2Rd3 = Rd[2].dot(Rd[3]);
Rd3Rd3 = Rd[3].dot(Rd[3]);
// Polynomial coefficients of the cubic B-spline in power form.
TA = Td[1] * 3 - Td[2] * 3 + Td[3] - Td[0];
TB = (Td[0] - Td[1] * 2 + Td[2]) * 3;
TC = (Td[2] - Td[0]) * 3;
RA = Rd[1] * 3 - Rd[2] * 3 + Rd[3] - Rd[0];
RB = (Rd[0] - Rd[1] * 2 + Rd[2]) * 3;
RC = (Rd[2] - Rd[0]) * 3;
// Initialize tf / tf_t to the pose at parameter 0.
integrate(0.0);
}
/** \brief Integrate the motion from 0 to dt
 * We compute the current transformation from zero point instead of from last integrate time, for precision.
 * dt is clamped to [.., 1]; always returns true.
 */
bool integrate(double dt)
{
if(dt > 1) dt = 1;
// Evaluate translation and rotation-vector splines with the B-spline basis weights.
Vec3f cur_T = Td[0] * getWeight0(dt) + Td[1] * getWeight1(dt) + Td[2] * getWeight2(dt) + Td[3] * getWeight3(dt);
Vec3f cur_w = Rd[0] * getWeight0(dt) + Rd[1] * getWeight1(dt) + Rd[2] * getWeight2(dt) + Rd[3] * getWeight3(dt);
// Rotation vector encodes axis * angle; NOTE(review): if cur_w is zero-length
// the normalize() result depends on Vec3f's handling of a zero vector — confirm.
BVH_REAL cur_angle = cur_w.length();
cur_w.normalize();
SimpleQuaternion cur_q;
cur_q.fromAxisAngle(cur_w, cur_angle);
tf.setTransform(cur_q, cur_T);
tf_t = dt;
return true;
}
/** \brief Compute the motion bound for a bounding volume along a given direction n
* For general BV, not implemented so return trivial 0
*/
BVH_REAL computeMotionBound(const BV& bv, const Vec3f& n) const { return 0.0; }
BVH_REAL computeMotionBound(const Vec3f& a, const Vec3f& b, const Vec3f& c, const Vec3f& n) const
{
BVH_REAL T_bound = computeTBound(n);
BVH_REAL R_bound = fabs(a.dot(n)) + a.length() + (a.cross(n)).length();
BVH_REAL R_bound_tmp = fabs(b.dot(n)) + b.length() + (b.cross(n)).length();
if(R_bound_tmp > R_bound) R_bound = R_bound_tmp;
R_bound_tmp = fabs(c.dot(n)) + c.length() + (c.cross(n)).length();
if(R_bound_tmp > R_bound) R_bound = R_bound_tmp;
BVH_REAL dWdW_max = computeDWMax();
BVH_REAL ratio = std::min(1 - tf_t, dWdW_max);
R_bound *= 2 * ratio;
// std::cout << R_bound << " " << T_bound << std::endl;
return R_bound + T_bound;
}
/** \brief Get the rotation and translation in current step */
void getCurrentTransform(Vec3f R[3], Vec3f& T) const
{
for(int i = 0; i < 3; ++i)
{
R[i] = tf.getRotation()[i];
}
T = tf.getTranslation();
}
void getCurrentRotation(Vec3f R[3]) const
{
for(int i = 0; i < 3; ++i)
R[i] = tf.getRotation()[i];
}
void getCurrentTranslation(Vec3f& T) const
{
T = tf.getTranslation();
}
protected:
void computeSplineParameter()
{
}
BVH_REAL getWeight0(BVH_REAL t) const
{
return (1 - 3 * t + 3 * t * t - t * t * t) / 6.0;
}
BVH_REAL getWeight1(BVH_REAL t) const
{
return (4 - 6 * t * t + 3 * t * t * t) / 6.0;
}
BVH_REAL getWeight2(BVH_REAL t) const
{
return (1 + 3 * t + 3 * t * t - 3 * t * t * t) / 6.0;
}
BVH_REAL getWeight3(BVH_REAL t) const
{
return t * t * t / 6.0;
}
BVH_REAL computeTBound(const Vec3f& n) const
{
BVH_REAL Ta = TA.dot(n);
BVH_REAL Tb = TB.dot(n);
BVH_REAL Tc = TC.dot(n);
std::vector<BVH_REAL> T_potential;
T_potential.push_back(tf_t);
T_potential.push_back(1);
if(Tb * Tb - 3 * Ta * Tc >= 0)
{
if(Ta == 0)
{
if(Tb != 0)
{
BVH_REAL tmp = -Tc / (2 * Tb);
if(tmp < 1 && tmp > tf_t)
T_potential.push_back(tmp);
}
}
else
{
BVH_REAL tmp_delta = sqrt(Tb * Tb - 3 * Ta * Tc);
BVH_REAL tmp1 = (-Tb + tmp_delta) / (3 * Ta);
BVH_REAL tmp2 = (-Tb - tmp_delta) / (3 * Ta);
if(tmp1 < 1 && tmp1 > tf_t)
T_potential.push_back(tmp1);
if(tmp2 < 1 && tmp2 > tf_t)
T_potential.push_back(tmp2);
}
}
BVH_REAL T_bound = Ta * T_potential[0] * T_potential[0] * T_potential[0] + Tb * T_potential[0] * T_potential[0] + Tc * T_potential[0];
for(unsigned int i = 1; i < T_potential.size(); ++i)
{
BVH_REAL T_bound_tmp = Ta * T_potential[i] * T_potential[i] * T_potential[i] + Tb * T_potential[i] * T_potential[i] + Tc * T_potential[i];
if(T_bound_tmp > T_bound) T_bound = T_bound_tmp;
}
BVH_REAL cur_delta = Ta * tf_t * tf_t * tf_t + Tb * tf_t * tf_t + Tc * tf_t;
T_bound -= cur_delta;
T_bound /= 6.0;
return T_bound;
}
BVH_REAL computeDWMax() const
{
// first compute ||w'||
int a00[5] = {1,-4,6,-4,1};
int a01[5] = {-3,10,-11,4,0};
int a02[5] = {3,-8,6,0,-1};
int a03[5] = {-1,2,-1,0,0};
int a11[5] = {9,-24,16,0,0};
int a12[5] = {-9,18,-5,-4,0};
int a13[5] = {3,-4,0,0,0};
int a22[5] = {9,-12,-2,4,1};
int a23[5] = {-3,2,1,0,0};
int a33[5] = {1,0,0,0,0};
BVH_REAL a[5];
for(int i = 0; i < 5; ++i)
{
a[i] = Rd0Rd0 * a00[i] + Rd0Rd1 * a01[i] + Rd0Rd2 * a02[i] + Rd0Rd3 * a03[i]
+ Rd0Rd1 * a01[i] + Rd1Rd1 * a11[i] + Rd1Rd2 * a12[i] + Rd1Rd3 * a13[i]
+ Rd0Rd2 * a02[i] + Rd1Rd2 * a12[i] + Rd2Rd2 * a22[i] + Rd2Rd3 * a23[i]
+ Rd0Rd3 * a03[i] + Rd1Rd3 * a13[i] + Rd2Rd3 * a23[i] + Rd3Rd3 * a33[i];
a[i] /= 4.0;
}
// compute polynomial for ||w'||'
int da00[4] = {4,-12,12,-4};
int da01[4] = {-12,30,-22,4};
int da02[4] = {12,-24,12,0};
int da03[4] = {-4,6,-2,0};
int da11[4] = {36,-72,32,0};
int da12[4] = {-36,54,-10,-4};
int da13[4] = {12,-12,0,0};
int da22[4] = {36,-36,-4,4};
int da23[4] = {-12,6,2,0};
int da33[4] = {4,0,0,0};
BVH_REAL da[4];
for(int i = 0; i < 4; ++i)
{
da[i] = Rd0Rd0 * da00[i] + Rd0Rd1 * da01[i] + Rd0Rd2 * da02[i] + Rd0Rd3 * da03[i]
+ Rd0Rd1 * da01[i] + Rd1Rd1 * da11[i] + Rd1Rd2 * da12[i] + Rd1Rd3 * da13[i]
+ Rd0Rd2 * da02[i] + Rd1Rd2 * da12[i] + Rd2Rd2 * da22[i] + Rd2Rd3 * da23[i]
+ Rd0Rd3 * da03[i] + Rd1Rd3 * da13[i] + Rd2Rd3 * da23[i] + Rd3Rd3 * da33[i];
da[i] /= 4.0;
}
BVH_REAL roots[3];
int root_num = PolySolver::solveCubic(da, roots);
BVH_REAL dWdW_max = a[0] * tf_t * tf_t * tf_t + a[1] * tf_t * tf_t * tf_t + a[2] * tf_t * tf_t + a[3] * tf_t + a[4];
BVH_REAL dWdW_1 = a[0] + a[1] + a[2] + a[3] + a[4];
if(dWdW_max < dWdW_1) dWdW_max = dWdW_1;
for(int i = 0; i < root_num; ++i)
{
BVH_REAL v = roots[i];
if(v >= tf_t && v <= 1)
{
BVH_REAL value = a[0] * v * v * v * v + a[1] * v * v * v + a[2] * v * v + a[3] * v + a[4];
if(value > dWdW_max) dWdW_max = value;
}
}
return sqrt(dWdW_max);
}
Vec3f Td[4];
Vec3f Rd[4];
Vec3f TA, TB, TC;
Vec3f RA, RB, RC;
BVH_REAL Rd0Rd0, Rd0Rd1, Rd0Rd2, Rd0Rd3, Rd1Rd1, Rd1Rd2, Rd1Rd3, Rd2Rd2, Rd2Rd3, Rd3Rd3;
/** \brief The transformation at current time t */
SimpleTransform tf;
/** \brief The time related with tf */
BVH_REAL tf_t;
};
/** \brief Screw motion: the rigid displacement from tf1 to tf2 is expressed
 * as a rotation about, plus a translation along, one fixed spatial axis
 * (Chasles' decomposition). Integration at time dt in [0, 1] applies the
 * fraction dt of both the angular and linear travel.
 */
template<typename BV>
class ScrewMotion : public MotionBase<BV>
{
public:
  /** Default transformations are all identities */
  ScrewMotion()
  {
    /** Default angular velocity is zero */
    axis = Vec3f(1, 0, 0);
    angular_vel = 0;

    /** Default reference point is local zero point */

    /** Default linear velocity is zero */
    linear_vel = 0;
  }

  /** \brief Construct motion from the initial rotation/translation and goal rotation/translation */
  ScrewMotion(const Vec3f R1[3], const Vec3f& T1,
              const Vec3f R2[3], const Vec3f& T2)
  {
    tf1 = SimpleTransform(R1, T1);
    tf2 = SimpleTransform(R2, T2);

    /** Current time is zero, so the transformation is t1 */
    tf = tf1;

    // Derive axis, angular_vel, linear_vel and axis point p from (tf1, tf2).
    computeScrewParameter();
  }

  /** \brief Integrate the motion from 0 to dt
   * We compute the current transformation from zero point instead of from last integrate time, for precision.
   */
  bool integrate(double dt)
  {
    if(dt > 1) dt = 1;

    tf.setQuatRotation(absoluteRotation(dt));

    // Translation: slide dt*linear_vel along the axis, and rotate the initial
    // origin about the axis point p by the partial rotation.
    SimpleQuaternion delta_rot = deltaRotation(dt);
    tf.setTranslation(p + axis * (dt * linear_vel) + delta_rot.transform(tf1.getTranslation() - p));

    return true;
  }

  /** \brief Compute the motion bound for a bounding volume along a given direction n
   * For general BV, not implemented so return trivial 0
   */
  BVH_REAL computeMotionBound(const BV& bv, const Vec3f& n) const { return 0.0; }

  /** \brief Compute the motion bound for a triangle (a, b, c) along direction n:
   * mu = |v . n| term along the axis plus ||w x n|| times the largest distance
   * of the (currently transformed) vertices from the screw axis.
   */
  BVH_REAL computeMotionBound(const Vec3f& a, const Vec3f& b, const Vec3f& c, const Vec3f& n) const
  {
    // Largest squared distance of the three transformed vertices from the axis
    // through p (|(x - p) x axis| is that distance since axis is normalized by
    // toAxisAngle).
    BVH_REAL proj_max = ((tf.getQuatRotation().transform(a) + tf.getTranslation() - p).cross(axis)).sqrLength();
    BVH_REAL tmp;
    tmp = ((tf.getQuatRotation().transform(b) + tf.getTranslation() - p).cross(axis)).sqrLength();
    if(tmp > proj_max) proj_max = tmp;
    tmp = ((tf.getQuatRotation().transform(c) + tf.getTranslation() - p).cross(axis)).sqrLength();
    if(tmp > proj_max) proj_max = tmp;

    proj_max = sqrt(proj_max);

    BVH_REAL v_dot_n = axis.dot(n) * linear_vel;
    BVH_REAL w_cross_n = (axis.cross(n)).length() * angular_vel;
    BVH_REAL mu = v_dot_n + w_cross_n * proj_max;

    return mu;
  }

  /** \brief Get the rotation and translation in current step */
  void getCurrentTransform(Vec3f R[3], Vec3f& T) const
  {
    for(int i = 0; i < 3; ++i)
    {
      R[i] = tf.getRotation()[i];
    }

    T = tf.getTranslation();
  }

  /** \brief Get only the rotation part of the current transform */
  void getCurrentRotation(Vec3f R[3]) const
  {
    for(int i = 0; i < 3; ++i)
      R[i] = tf.getRotation()[i];
  }

  /** \brief Get only the translation part of the current transform */
  void getCurrentTranslation(Vec3f& T) const
  {
    T = tf.getTranslation();
  }

protected:
  /** \brief Recover screw parameters (axis, p, angular_vel, linear_vel) from
   * the endpoint transforms tf1 and tf2.
   */
  void computeScrewParameter()
  {
    // Relative rotation from tf1 to tf2, as axis-angle.
    SimpleQuaternion deltaq = tf2.getQuatRotation() * tf1.getQuatRotation().inverse();
    deltaq.toAxisAngle(axis, angular_vel);
    // Canonicalize to a non-negative angle.
    if(angular_vel < 0)
    {
      angular_vel = -angular_vel;
      axis = -axis;
    }

    if(angular_vel < 1e-10)
    {
      // (Nearly) pure translation: the "axis" is the displacement direction.
      angular_vel = 0;
      axis = tf2.getTranslation() - tf1.getTranslation();
      linear_vel = axis.length();
      p = tf1.getTranslation();
    }
    else
    {
      // General screw: locate a point p on the axis and the slide distance.
      Vec3f o = tf2.getTranslation() - tf1.getTranslation();
      p = (tf1.getTranslation() + tf2.getTranslation() + axis.cross(o) * (1.0 / tan(angular_vel / 2.0))) * 0.5;
      linear_vel = o.dot(axis);
    }
  }

  /** \brief Rotation by the fraction dt of the total angle, about the axis */
  SimpleQuaternion deltaRotation(BVH_REAL dt) const
  {
    SimpleQuaternion res;
    res.fromAxisAngle(axis, (BVH_REAL)(dt * angular_vel));
    return res;
  }

  /** \brief Absolute orientation at time dt: partial rotation applied to tf1 */
  SimpleQuaternion absoluteRotation(BVH_REAL dt) const
  {
    SimpleQuaternion delta_t = deltaRotation(dt);
    return delta_t * tf1.getQuatRotation();
  }

  /** \brief The transformation at time 0 */
  SimpleTransform tf1;

  /** \brief The transformation at time 1 */
  SimpleTransform tf2;

  /** \brief The transformation at current time t */
  SimpleTransform tf;

  /** \brief screw axis */
  Vec3f axis;

  /** \brief A point on the axis S */
  Vec3f p;

  /** \brief linear velocity along the axis */
  BVH_REAL linear_vel;

  /** \brief angular velocity */
  BVH_REAL angular_vel;
};
/** \brief Compute the motion bound for a bounding volume along a given direction n
* according to mu < |v * n| + ||w x n||(r + max(||ci*||)) where ||ci*|| = ||R0(ci) x w||. w is the angular axis (normalized)
* and ci are the endpoints of the generator primitives of RSS.
* Notice that all bv parameters are in the local frame of the object, but n should be in the global frame (the reason is that the motion (t1, t2 and t) is in global frame)
*/
template<>
BVH_REAL ScrewMotion<RSS>::computeMotionBound(const RSS& bv, const Vec3f& n) const;
/** \brief Linear interpolation motion
 * Each Motion is assumed to have constant linear velocity and angular velocity
 * The motion is R(t)(p - p_ref) + p_ref + T(t)
 * Therefore, R(0) = R0, R(1) = R1
 * T(0) = T0 + R0 p_ref - p_ref
 * T(1) = T1 + R1 p_ref - p_ref
 */
template<typename BV>
class InterpMotion : public MotionBase<BV>
{
public:
  /** \brief Default transformations are all identities */
  InterpMotion()
  {
    /** Default angular velocity is zero */
    angular_axis = Vec3f(1, 0, 0);
    angular_vel = 0;

    /** Default reference point is local zero point */

    /** Default linear velocity is zero */
  }

  /** \brief Construct motion from the initial rotation/translation and goal rotation/translation */
  InterpMotion(const Vec3f R1[3], const Vec3f& T1,
               const Vec3f R2[3], const Vec3f& T2)
  {
    tf1 = SimpleTransform(R1, T1);
    tf2 = SimpleTransform(R2, T2);

    /** Current time is zero, so the transformation is t1 */
    tf = tf1;

    /** Default reference point is local zero point */

    /** Compute the velocities for the motion */
    computeVelocity();
  }

  /** \brief Construct motion from the initial rotation/translation and goal rotation/translation related to some rotation center
   */
  InterpMotion(const Vec3f R1[3], const Vec3f& T1,
               const Vec3f R2[3], const Vec3f& T2,
               const Vec3f& O)
  {
    tf1 = SimpleTransform(R1, T1);
    tf2 = SimpleTransform(R2, T2);
    tf = tf1;

    // The motion rotates about this point (in the object's local frame).
    reference_p = O;

    /** Compute the velocities for the motion */
    computeVelocity();
  }

  /** \brief Integrate the motion from 0 to dt
   * We compute the current transformation from zero point instead of from last integrate time, for precision.
   */
  bool integrate(double dt)
  {
    if(dt > 1) dt = 1;

    tf.setQuatRotation(absoluteRotation(dt));
    // Translation chosen so the reference point moves along the straight
    // segment from tf1.transform(reference_p) at constant velocity linear_vel.
    tf.setTranslation(linear_vel * dt + tf1.transform(reference_p) - tf.getQuatRotation().transform(reference_p));

    return true;
  }

  /** \brief Compute the motion bound for a bounding volume along a given direction n
   * For general BV, not implemented so return trivial 0
   */
  BVH_REAL computeMotionBound(const BV& bv, const Vec3f& n) const { return 0.0; }

  /** \brief Compute the motion bound for a triangle along a given direction n
   * according to mu < |v * n| + ||w x n||(max||ci*||) where ||ci*|| = ||R0(ci) x w|| / \|w\|. w is the angular velocity
   * and ci are the triangle vertex coordinates.
   * Notice that the triangle is in the local frame of the object, but n should be in the global frame (the reason is that the motion (t1, t2 and t) is in global frame)
   */
  BVH_REAL computeMotionBound(const Vec3f& a, const Vec3f& b, const Vec3f& c, const Vec3f& n) const
  {
    // Largest squared distance of the rotated vertices (relative to the
    // reference point) from the angular axis.
    BVH_REAL proj_max = ((tf.getQuatRotation().transform(a - reference_p)).cross(angular_axis)).sqrLength();
    BVH_REAL tmp;
    tmp = ((tf.getQuatRotation().transform(b - reference_p)).cross(angular_axis)).sqrLength();
    if(tmp > proj_max) proj_max = tmp;
    tmp = ((tf.getQuatRotation().transform(c - reference_p)).cross(angular_axis)).sqrLength();
    if(tmp > proj_max) proj_max = tmp;

    proj_max = sqrt(proj_max);

    BVH_REAL v_dot_n = linear_vel.dot(n);
    BVH_REAL w_cross_n = (angular_axis.cross(n)).length() * angular_vel;
    BVH_REAL mu = v_dot_n + w_cross_n * proj_max;

    return mu;
  }

  /** \brief Get the rotation and translation in current step */
  void getCurrentTransform(Vec3f R[3], Vec3f& T) const
  {
    for(int i = 0; i < 3; ++i)
    {
      R[i] = tf.getRotation()[i];
    }

    T = tf.getTranslation();
  }

  /** \brief Get only the rotation part of the current transform */
  void getCurrentRotation(Vec3f R[3]) const
  {
    for(int i = 0; i < 3; ++i)
      R[i] = tf.getRotation()[i];
  }

  /** \brief Get only the translation part of the current transform */
  void getCurrentTranslation(Vec3f& T) const
  {
    T = tf.getTranslation();
  }

protected:
  /** \brief Derive the constant linear velocity of the reference point and
   * the constant angular velocity (axis + speed) from tf1 and tf2.
   */
  void computeVelocity()
  {
    linear_vel = tf2.transform(reference_p) - tf1.transform(reference_p);
    // Relative rotation from tf1 to tf2, as axis-angle.
    SimpleQuaternion deltaq = tf2.getQuatRotation() * tf1.getQuatRotation().inverse();
    deltaq.toAxisAngle(angular_axis, angular_vel);
    // Canonicalize to a non-negative angular speed.
    if(angular_vel < 0)
    {
      angular_vel = -angular_vel;
      angular_axis = -angular_axis;
    }
  }

  /** \brief Rotation by the fraction dt of the total angle */
  SimpleQuaternion deltaRotation(BVH_REAL dt) const
  {
    SimpleQuaternion res;
    res.fromAxisAngle(angular_axis, (BVH_REAL)(dt * angular_vel));
    return res;
  }

  /** \brief Absolute orientation at time dt: partial rotation applied to tf1 */
  SimpleQuaternion absoluteRotation(BVH_REAL dt) const
  {
    SimpleQuaternion delta_t = deltaRotation(dt);
    return delta_t * tf1.getQuatRotation();
  }

  /** \brief The transformation at time 0 */
  SimpleTransform tf1;

  /** \brief The transformation at time 1 */
  SimpleTransform tf2;

  /** \brief The transformation at current time t */
  SimpleTransform tf;

  /** \brief Linear velocity */
  Vec3f linear_vel;

  /** \brief Angular speed */
  BVH_REAL angular_vel;

  /** \brief Angular velocity axis */
  Vec3f angular_axis;

  /** \brief Reference point for the motion (in the object's local frame) */
  Vec3f reference_p;
};
/** \brief Compute the motion bound for a bounding volume along a given direction n
* according to mu < |v * n| + ||w x n||(r + max(||ci*||)) where ||ci*|| = ||R0(ci) x w||. w is the angular axis (normalized)
* and ci are the endpoints of the generator primitives of RSS.
* Notice that all bv parameters are in the local frame of the object, but n should be in the global frame (the reason is that the motion (t1, t2 and t) is in global frame)
*/
template<>
BVH_REAL InterpMotion<RSS>::computeMotionBound(const RSS& bv, const Vec3f& n) const;
}
#endif

View File

@ -0,0 +1,72 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_MOTION_BASE_H
#define FCL_MOTION_BASE_H
#include "fcl/vec_3f.h"
#include "fcl/RSS.h"
namespace fcl
{
/** \brief Abstract interface for a rigid-body motion over normalized time
 * [0, 1], used by continuous collision queries: integrate to a time point,
 * query conservative motion bounds, and read the current transform.
 */
template<typename BV>
class MotionBase
{
public:
  virtual ~MotionBase() {}

  /** \brief Integrate the motion from 0 to dt */
  virtual bool integrate(double dt) = 0;

  /** \brief Compute the motion bound for a bounding volume, given the closest direction n between two query objects */
  virtual BVH_REAL computeMotionBound(const BV& bv, const Vec3f& n) const = 0;

  /** \brief Compute the motion bound for a triangle, given the closest direction n between two query objects */
  virtual BVH_REAL computeMotionBound(const Vec3f& a, const Vec3f& b, const Vec3f& c, const Vec3f& n) const = 0;

  /** \brief Get the rotation and translation in current step */
  virtual void getCurrentTransform(Vec3f R[3], Vec3f& T) const = 0;

  /** \brief Get only the rotation part of the current transform */
  virtual void getCurrentRotation(Vec3f R[3]) const = 0;

  /** \brief Get only the translation part of the current transform */
  virtual void getCurrentTranslation(Vec3f& T) const = 0;
};
}
#endif

View File

@ -0,0 +1,167 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef COLLISION_CHECKING_PRIMITIVE_H
#define COLLISION_CHECKING_PRIMITIVE_H
#include "fcl/BVH_internal.h"
#include "fcl/vec_3f.h"
/** \brief Main namespace */
namespace fcl
{
/** \brief Uncertainty information: a 3x3 covariance matrix together with its
 * eigen decomposition (eigenvalues sigma[] and eigenvectors axis[]).
 */
struct Uncertainty
{
  Uncertainty() {}

  /** \brief Build from a covariance matrix and immediately decompose it */
  Uncertainty(Vec3f Sigma_[3])
  {
    for(int r = 0; r < 3; ++r)
      Sigma[r] = Sigma_[r];
    preprocess();
  }

  /** preprocess performs the eigen decomposition on the Sigma matrix */
  void preprocess()
  {
    matEigen(Sigma, sigma, axis);
  }

  /** sqrt performs the sqrt of Sigma matrix based on the eigen decomposition result, this is useful when the uncertainty matrix is initialized
   * as a square variation matrix
   */
  void sqrt()
  {
    // Clamp negative eigenvalues to zero, then take their square roots.
    for(int k = 0; k < 3; ++k)
    {
      if(sigma[k] < 0) sigma[k] = 0;
      sigma[k] = std::sqrt(sigma[k]);
    }

    // Rebuild Sigma = sum_k sigma[k] * axis[k] * axis[k]^T from the
    // square-rooted eigenvalues.
    for(int r = 0; r < 3; ++r)
    {
      for(int c = 0; c < 3; ++c)
      {
        Sigma[r][c] = sigma[0] * axis[0][r] * axis[0][c]
                    + sigma[1] * axis[1][r] * axis[1][c]
                    + sigma[2] * axis[2][r] * axis[2][c];
      }
    }
  }

  /** \brief Variation matrix for uncertainty */
  Vec3f Sigma[3];

  /** \brief Variations along the eigen axes */
  BVH_REAL sigma[3];

  /** \brief eigen axes of uncertainty matrix */
  Vec3f axis[3];
};
/** \brief Simple triangle with 3 indices for points */
struct Triangle
{
  /** \brief Indices of the triangle's three vertices */
  unsigned int vids[3];

  /** \brief Uninitialized triangle */
  Triangle() {}

  /** \brief Triangle over vertices p1, p2, p3 */
  Triangle(unsigned int p1, unsigned int p2, unsigned int p3)
  {
    set(p1, p2, p3);
  }

  /** \brief Reassign all three vertex indices */
  inline void set(unsigned int p1, unsigned int p2, unsigned int p3)
  {
    vids[0] = p1;
    vids[1] = p2;
    vids[2] = p3;
  }

  /** \brief Read the i-th vertex index */
  inline unsigned int operator[](int i) const { return vids[i]; }

  /** \brief Mutable access to the i-th vertex index */
  inline unsigned int& operator[](int i) { return vids[i]; }
};
/** \brief Simple edge with two indices for its endpoints */
struct Edge
{
  /** \brief Endpoint vertex indices ((unsigned)-1 means unset) */
  unsigned int vids[2];

  /** \brief Indices of up to two incident faces ((unsigned)-1 means unset) */
  unsigned int fids[2];

  /** \brief All fields start as (unsigned)-1, i.e. unset */
  Edge()
  {
    vids[0] = -1; vids[1] = -1;
    fids[0] = -1; fids[1] = -1;
  }

  /** \brief Edge from vid0 to vid1 belonging to face fid.
   * The second face slot is marked unset.
   */
  Edge(unsigned int vid0, unsigned int vid1, unsigned int fid)
  {
    vids[0] = vid0;
    vids[1] = vid1;
    fids[0] = fid;
    fids[1] = -1;  // FIX: previously left uninitialized (UB when read later)
  }

  /** \brief Whether two edges are the same, assuming belongs to the same object */
  bool operator == (const Edge& other) const
  {
    return (vids[0] == other.vids[0]) && (vids[1] == other.vids[1]);
  }

  /** \brief Lexicographic order on (vids[0], vids[1]), e.g. for std::set */
  bool operator < (const Edge& other) const
  {
    if(vids[0] == other.vids[0])
      return vids[1] < other.vids[1];

    return vids[0] < other.vids[0];
  }
};
}
#endif

View File

@ -0,0 +1,648 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_SIMPLE_SETUP_H
#define FCL_SIMPLE_SETUP_H
#include "fcl/traversal_node_bvhs.h"
#include "fcl/traversal_node_shapes.h"
#include "fcl/traversal_node_bvh_shape.h"
#include "fcl/BVH_utility.h"
/** \brief Main namespace */
namespace fcl
{
/** \brief Initialize traversal node for collision between two geometric shapes */
template<typename S1, typename S2>
bool initialize(ShapeCollisionTraversalNode<S1, S2>& node, const S1& shape1, const S2& shape2, bool enable_contact = false)
{
  // Pure wiring: store the query flag and the two shape pointers on the node.
  // The shapes are not copied; they must outlive the traversal.
  node.enable_contact = enable_contact;
  node.model1 = &shape1;
  node.model2 = &shape2;

  return true;  // always succeeds for shape-shape setup
}
/** \brief Initialize traversal node for collision between one mesh and one shape, given current object transform.
 * Returns false unless model1 is a triangle mesh. NOTE: mutates model1 — its
 * current pose is baked into the vertices (and the BVH is refit or rebuilt),
 * after which its transform is reset to identity.
 */
template<typename BV, typename S>
bool initialize(MeshShapeCollisionTraversalNode<BV, S>& node, BVHModel<BV>& model1, const S& model2,
                int num_max_contacts = 1, bool exhaustive = false, bool enable_contact = false,
                bool use_refit = false, bool refit_bottomup = false)
{
  if(model1.getModelType() != BVH_MODEL_TRIANGLES)
    return false;

  node.model1 = &model1;
  node.model2 = &model2;

  if(!model1.isIdentityTransform())
  {
    // Transform every vertex into the world frame.
    std::vector<Vec3f> vertices_transformed(model1.num_vertices);
    for(int i = 0; i < model1.num_vertices; ++i)
    {
      Vec3f& p = model1.vertices[i];
      Vec3f new_v = matMulVec(model1.getRotation(), p) + model1.getTranslation();
      vertices_transformed[i] = new_v;
    }

    // Replace the mesh geometry and update its BVH (refit or full rebuild).
    model1.beginReplaceModel();
    model1.replaceSubModel(vertices_transformed);
    model1.endReplaceModel(use_refit, refit_bottomup);

    // Pose is now baked into the vertices.
    model1.setIdentityTransform();
  }

  node.vertices = model1.vertices;
  node.tri_indices = model1.tri_indices;

  node.num_max_contacts = num_max_contacts;
  node.exhaustive = exhaustive;
  node.enable_contact = enable_contact;

  return true;
}
/** \brief Initialize traversal node for collision between one mesh and one shape, given current object transform.
 * Mirror of the mesh-shape overload with the argument roles swapped: returns
 * false unless model2 is a triangle mesh. NOTE: mutates model2 — its pose is
 * baked into the vertices and its transform reset to identity.
 */
template<typename S, typename BV>
bool initialize(ShapeMeshCollisionTraversalNode<S, BV>& node, const S& model1, BVHModel<BV>& model2,
                int num_max_contacts = 1, bool exhaustive = false, bool enable_contact = false,
                bool use_refit = false, bool refit_bottomup = false)
{
  if(model2.getModelType() != BVH_MODEL_TRIANGLES)
    return false;

  node.model1 = &model1;
  node.model2 = &model2;

  if(!model2.isIdentityTransform())
  {
    // Transform every vertex into the world frame.
    std::vector<Vec3f> vertices_transformed(model2.num_vertices);
    for(int i = 0; i < model2.num_vertices; ++i)
    {
      Vec3f& p = model2.vertices[i];
      Vec3f new_v = matMulVec(model2.getRotation(), p) + model2.getTranslation();
      vertices_transformed[i] = new_v;
    }

    // Replace the mesh geometry and update its BVH (refit or full rebuild).
    model2.beginReplaceModel();
    model2.replaceSubModel(vertices_transformed);
    model2.endReplaceModel(use_refit, refit_bottomup);

    // Pose is now baked into the vertices.
    model2.setIdentityTransform();
  }

  node.vertices = model2.vertices;
  node.tri_indices = model2.tri_indices;

  node.num_max_contacts = num_max_contacts;
  node.exhaustive = exhaustive;
  node.enable_contact = enable_contact;

  return true;
}
/** \brief Initialize the traversal node for collision between one mesh and one shape, specialized for OBB type.
 * Unlike the generic overload, the mesh is NOT flattened: its rotation and
 * translation are copied onto the node (R, T) and applied during traversal,
 * so model1 can stay const.
 */
template<typename S>
bool initialize(MeshShapeCollisionTraversalNodeOBB<S>& node, const BVHModel<OBB>& model1, const S& model2, int num_max_contacts = 1, bool exhaustive = false, bool enable_contact = false)
{
  if(model1.getModelType() != BVH_MODEL_TRIANGLES)
    return false;

  node.model1 = &model1;
  node.model2 = &model2;

  node.vertices = model1.vertices;
  node.tri_indices = model1.tri_indices;
  node.num_max_contacts = num_max_contacts;
  node.exhaustive = exhaustive;
  node.enable_contact = enable_contact;

  // Store the mesh's current pose on the node instead of baking it in.
  for(int i = 0; i < 3; ++i)
    node.R[i] = model1.getRotation()[i];
  node.T = model1.getTranslation();

  return true;
}
/** \brief Initialize the traversal node for collision between one mesh and one shape, specialized for OBB type.
 * Mirror of the OBB mesh-shape overload with argument roles swapped; the
 * mesh (model2) is not mutated — its pose is stored on the node (R, T).
 */
template<typename S>
bool initialize(ShapeMeshCollisionTraversalNodeOBB<S>& node, const S& model1, const BVHModel<OBB>& model2, int num_max_contacts = 1, bool exhaustive = false, bool enable_contact = false)
{
  if(model2.getModelType() != BVH_MODEL_TRIANGLES)
    return false;

  node.model1 = &model1;
  node.model2 = &model2;

  node.vertices = model2.vertices;
  node.tri_indices = model2.tri_indices;
  node.num_max_contacts = num_max_contacts;
  node.exhaustive = exhaustive;
  node.enable_contact = enable_contact;

  // Store the mesh's current pose on the node instead of baking it in.
  for(int i = 0; i < 3; ++i)
    node.R[i] = model2.getRotation()[i];
  node.T = model2.getTranslation();

  return true;
}
/** \brief Initialize traversal node for collision between two meshes, given the current transforms.
 * Returns false unless both models are triangle meshes. NOTE: mutates both
 * models — each mesh's pose is baked into its vertices (with a BVH refit or
 * rebuild) and its transform reset to identity.
 */
template<typename BV>
bool initialize(MeshCollisionTraversalNode<BV>& node, BVHModel<BV>& model1, BVHModel<BV>& model2,
                int num_max_contacts = 1, bool exhaustive = false, bool enable_contact = false,
                bool use_refit = false, bool refit_bottomup = false)
{
  if(model1.getModelType() != BVH_MODEL_TRIANGLES || model2.getModelType() != BVH_MODEL_TRIANGLES)
    return false;

  if(!model1.isIdentityTransform())
  {
    // Bake model1's pose into its vertices.
    std::vector<Vec3f> vertices_transformed1(model1.num_vertices);
    for(int i = 0; i < model1.num_vertices; ++i)
    {
      Vec3f& p = model1.vertices[i];
      Vec3f new_v = matMulVec(model1.getRotation(), p) + model1.getTranslation();
      vertices_transformed1[i] = new_v;
    }

    model1.beginReplaceModel();
    model1.replaceSubModel(vertices_transformed1);
    model1.endReplaceModel(use_refit, refit_bottomup);

    model1.setIdentityTransform();
  }

  if(!model2.isIdentityTransform())
  {
    // Bake model2's pose into its vertices.
    std::vector<Vec3f> vertices_transformed2(model2.num_vertices);
    for(int i = 0; i < model2.num_vertices; ++i)
    {
      Vec3f& p = model2.vertices[i];
      Vec3f new_v = matMulVec(model2.getRotation(), p) + model2.getTranslation();
      vertices_transformed2[i] = new_v;
    }

    model2.beginReplaceModel();
    model2.replaceSubModel(vertices_transformed2);
    model2.endReplaceModel(use_refit, refit_bottomup);

    model2.setIdentityTransform();
  }

  node.model1 = &model1;
  node.model2 = &model2;

  node.vertices1 = model1.vertices;
  node.vertices2 = model2.vertices;

  node.tri_indices1 = model1.tri_indices;
  node.tri_indices2 = model2.tri_indices;

  node.num_max_contacts = num_max_contacts;
  node.exhaustive = exhaustive;
  node.enable_contact = enable_contact;

  return true;
}
/** \brief Initialize traversal node for collision between two meshes, specialized for OBB type */
bool initialize(MeshCollisionTraversalNodeOBB& node, const BVHModel<OBB>& model1, const BVHModel<OBB>& model2,
int num_max_contacts = 1, bool exhaustive = false, bool enable_contact = false);
bool initialize(MeshCollisionTraversalNodeRSS& node, const BVHModel<RSS>& model1, const BVHModel<RSS>& model2,
int num_max_contacts = 1, bool exhaustive = false, bool enable_contact = false);
#if USE_SVMLIGHT
/** \brief Initialize traversal node for collision between two point clouds, given current transforms.
 * Accepts triangle-mesh or point-cloud models (their vertices are used as a
 * point cloud). NOTE: mutates both models — poses are baked into vertices,
 * per-vertex sampling uncertainties are estimated, and each BVH is expanded
 * by expand_r times the uncertainty. use_refit/refit_bottomup are template
 * parameters here (compile-time), unlike the mesh overloads.
 */
template<typename BV, bool use_refit, bool refit_bottomup>
bool initialize(PointCloudCollisionTraversalNode<BV>& node, BVHModel<BV>& model1, BVHModel<BV>& model2,
                BVH_REAL collision_prob_threshold = 0.5,
                int leaf_size_threshold = 1,
                int num_max_contacts = 1,
                bool exhaustive = false,
                bool enable_contact = false,
                BVH_REAL expand_r = 1)
{
  if(!(model1.getModelType() == BVH_MODEL_TRIANGLES || model1.getModelType() == BVH_MODEL_POINTCLOUD)
      || !(model2.getModelType() == BVH_MODEL_TRIANGLES || model2.getModelType() == BVH_MODEL_POINTCLOUD))
    return false;

  if(!model1.isIdentityTransform())
  {
    // Bake model1's pose into its vertices.
    std::vector<Vec3f> vertices_transformed1(model1.num_vertices);
    for(int i = 0; i < model1.num_vertices; ++i)
    {
      Vec3f& p = model1.vertices[i];
      Vec3f new_v = matMulVec(model1.getRotation(), p) + model1.getTranslation();
      vertices_transformed1[i] = new_v;
    }

    model1.beginReplaceModel();
    model1.replaceSubModel(vertices_transformed1);
    model1.endReplaceModel(use_refit, refit_bottomup);

    model1.setIdentityTransform();
  }

  if(!model2.isIdentityTransform())
  {
    // Bake model2's pose into its vertices.
    std::vector<Vec3f> vertices_transformed2(model2.num_vertices);
    for(int i = 0; i < model2.num_vertices; ++i)
    {
      Vec3f& p = model2.vertices[i];
      Vec3f new_v = matMulVec(model2.getRotation(), p) + model2.getTranslation();
      vertices_transformed2[i] = new_v;
    }

    model2.beginReplaceModel();
    model2.replaceSubModel(vertices_transformed2);
    model2.endReplaceModel(use_refit, refit_bottomup);

    model2.setIdentityTransform();
  }

  node.model1 = &model1;
  node.model2 = &model2;

  node.vertices1 = model1.vertices;
  node.vertices2 = model2.vertices;

  // Per-vertex uncertainty, estimated from point sampling density, then used
  // to conservatively inflate both hierarchies.
  node.uc1.reset(new Uncertainty[model1.num_vertices]);
  node.uc2.reset(new Uncertainty[model2.num_vertices]);

  estimateSamplingUncertainty(model1.vertices, model1.num_vertices, node.uc1.get());
  estimateSamplingUncertainty(model2.vertices, model2.num_vertices, node.uc2.get());

  BVHExpand(model1, node.uc1.get(), expand_r);
  BVHExpand(model2, node.uc2.get(), expand_r);

  node.num_max_contacts = num_max_contacts;
  node.exhaustive = exhaustive;
  node.enable_contact = enable_contact;
  node.collision_prob_threshold = collision_prob_threshold;
  node.leaf_size_threshold = leaf_size_threshold;

  return true;
}
/** \brief Initialize traversal node for collision between two point clouds, given current transforms, specialized for OBB type */
// Out-of-line specialization. Both models are taken by non-const reference, so
// assume they may be mutated by the definition (TODO confirm against the .cpp).
bool initialize(PointCloudCollisionTraversalNodeOBB& node, BVHModel<OBB>& model1, BVHModel<OBB>& model2,
                BVH_REAL collision_prob_threshold = 0.5,
                int leaf_size_threshold = 1,
                int num_max_contacts = 1,
                bool exhaustive = false,
                bool enable_contact = false,
                BVH_REAL expand_r = 1);
/** \brief Initialize traversal node for collision between two point clouds, given current transforms, specialized for RSS type */
// Same contract as the OBB specialization above, for RSS bounding volumes.
bool initialize(PointCloudCollisionTraversalNodeRSS& node, BVHModel<RSS>& model1, BVHModel<RSS>& model2,
                BVH_REAL collision_prob_threshold = 0.5,
                int leaf_size_threshold = 1,
                int num_max_contacts = 1,
                bool exhaustive = false,
                bool enable_contact = false,
                BVH_REAL expand_r = 1);
/** \brief Initialize traversal node for collision between one point cloud and one mesh, given current transforms */
// model1 supplies the point cloud (a triangle model is also accepted as a
// vertex source); model2 must be a triangle mesh. Both models get their
// current pose baked into their vertex data and are then reset to the
// identity transform -- i.e. this call mutates both models.
template<typename BV, bool use_refit, bool refit_bottomup>
bool initialize(PointCloudMeshCollisionTraversalNode<BV>& node, BVHModel<BV>& model1, BVHModel<BV>& model2,
                BVH_REAL collision_prob_threshold = 0.5,
                int leaf_size_threshold = 1,
                int num_max_contacts = 1,
                bool exhaustive = false,
                bool enable_contact = false,
                BVH_REAL expand_r = 1)
{
  // Supported combination: (point cloud | mesh) vs. mesh only.
  if(!(model1.getModelType() == BVH_MODEL_TRIANGLES || model1.getModelType() == BVH_MODEL_POINTCLOUD) || model2.getModelType() != BVH_MODEL_TRIANGLES)
    return false;
  // Bake model1's transform into its vertices, rebuild/refit its BVH, then
  // reset its pose to identity so traversal can assume untransformed data.
  if(!model1.isIdentityTransform())
  {
    std::vector<Vec3f> vertices_transformed1(model1.num_vertices);
    for(int i = 0; i < model1.num_vertices; ++i)
    {
      Vec3f& p = model1.vertices[i];
      Vec3f new_v = matMulVec(model1.getRotation(), p) + model1.getTranslation();
      vertices_transformed1[i] = new_v;
    }
    model1.beginReplaceModel();
    model1.replaceSubModel(vertices_transformed1);
    model1.endReplaceModel(use_refit, refit_bottomup);
    model1.setIdentityTransform();
  }
  // Same baking step for model2.
  if(!model2.isIdentityTransform())
  {
    std::vector<Vec3f> vertices_transformed2(model2.num_vertices);
    for(int i = 0; i < model2.num_vertices; ++i)
    {
      Vec3f& p = model2.vertices[i];
      Vec3f new_v = matMulVec(model2.getRotation(), p) + model2.getTranslation();
      vertices_transformed2[i] = new_v;
    }
    model2.beginReplaceModel();
    model2.replaceSubModel(vertices_transformed2);
    model2.endReplaceModel(use_refit, refit_bottomup);
    model2.setIdentityTransform();
  }
  node.model1 = &model1;
  node.model2 = &model2;
  node.vertices1 = model1.vertices;
  node.vertices2 = model2.vertices;
  // Only the mesh side contributes triangle indices.
  node.tri_indices2 = model2.tri_indices;
  // Per-vertex sampling uncertainty is estimated for the point-cloud side
  // only, and its bounding volumes are expanded by expand_r accordingly.
  node.uc1.reset(new Uncertainty[model1.num_vertices]);
  estimateSamplingUncertainty(model1.vertices, model1.num_vertices, node.uc1.get());
  BVHExpand(model1, node.uc1.get(), expand_r);
  node.num_max_contacts = num_max_contacts;
  node.exhaustive = exhaustive;
  node.enable_contact = enable_contact;
  node.collision_prob_threshold = collision_prob_threshold;
  node.leaf_size_threshold = leaf_size_threshold;
  return true;
}
/** \brief Initialize traversal node for collision between one point cloud and one mesh, given current transforms, specialized for OBB type */
// model1 (the point cloud) is non-const and may be mutated by the out-of-line
// definition; the mesh model2 is const (TODO confirm against the .cpp).
bool initialize(PointCloudMeshCollisionTraversalNodeOBB& node, BVHModel<OBB>& model1, const BVHModel<OBB>& model2,
                BVH_REAL collision_prob_threshold = 0.5,
                int leaf_size_threshold = 1,
                int num_max_contacts = 1,
                bool exhaustive = false,
                bool enable_contact = false,
                BVH_REAL expand_r = 1);
/** \brief Initialize traversal node for collision between one point cloud and one mesh, given current transforms, specialized for RSS type */
// Same contract as the OBB specialization above, for RSS bounding volumes.
bool initialize(PointCloudMeshCollisionTraversalNodeRSS& node, BVHModel<RSS>& model1, const BVHModel<RSS>& model2,
                BVH_REAL collision_prob_threshold = 0.5,
                int leaf_size_threshold = 1,
                int num_max_contacts = 1,
                bool exhaustive = false,
                bool enable_contact = false,
                BVH_REAL expand_r = 1);
#endif
/** \brief Initialize traversal node for distance computation between two meshes, given the current transforms */
// Both models must be triangle meshes. Each model's current pose is baked into
// its vertex data (after which the pose is reset to identity), so this call
// mutates both models. Returns false for unsupported model types.
template<typename BV>
bool initialize(MeshDistanceTraversalNode<BV>& node, BVHModel<BV>& model1, BVHModel<BV>& model2,
                bool use_refit = false, bool refit_bottomup = false)
{
  if(!(model1.getModelType() == BVH_MODEL_TRIANGLES && model2.getModelType() == BVH_MODEL_TRIANGLES))
    return false;
  // Bake model1's pose into its vertices so traversal can assume identity.
  if(!model1.isIdentityTransform())
  {
    std::vector<Vec3f> baked1(model1.num_vertices);
    for(int idx = 0; idx < model1.num_vertices; ++idx)
      baked1[idx] = matMulVec(model1.getRotation(), model1.vertices[idx]) + model1.getTranslation();
    model1.beginReplaceModel();
    model1.replaceSubModel(baked1);
    model1.endReplaceModel(use_refit, refit_bottomup);
    model1.setIdentityTransform();
  }
  // Same treatment for model2.
  if(!model2.isIdentityTransform())
  {
    std::vector<Vec3f> baked2(model2.num_vertices);
    for(int idx = 0; idx < model2.num_vertices; ++idx)
      baked2[idx] = matMulVec(model2.getRotation(), model2.vertices[idx]) + model2.getTranslation();
    model2.beginReplaceModel();
    model2.replaceSubModel(baked2);
    model2.endReplaceModel(use_refit, refit_bottomup);
    model2.setIdentityTransform();
  }
  // Wire the node to the (now identity-posed) models.
  node.model1 = &model1;
  node.model2 = &model2;
  node.vertices1 = model1.vertices;
  node.vertices2 = model2.vertices;
  node.tri_indices1 = model1.tri_indices;
  node.tri_indices2 = model2.tri_indices;
  return true;
}
/** \brief Initialize traversal node for distance computation between two meshes, given the current transforms */
// Out-of-line RSS specialization. The models are const here, so the poses are
// presumably handled inside the node rather than baked into the vertices --
// confirm against the definition.
bool initialize(MeshDistanceTraversalNodeRSS& node, const BVHModel<RSS>& model1, const BVHModel<RSS>& model2);
/** \brief Initialize traversal node for continuous collision detection between two meshes */
// Both operands must be triangle meshes. The node captures the current and
// previous-frame vertex arrays of both models; the models themselves are not
// modified. Returns false for unsupported model types.
template<typename BV>
bool initialize(MeshContinuousCollisionTraversalNode<BV>& node, const BVHModel<BV>& model1, const BVHModel<BV>& model2, int num_max_contacts = 1, bool exhaustive = false, bool enable_contact = false)
{
  if(!(model1.getModelType() == BVH_MODEL_TRIANGLES && model2.getModelType() == BVH_MODEL_TRIANGLES))
    return false;
  // First mesh: current geometry plus last frame's vertex positions.
  node.model1 = &model1;
  node.vertices1 = model1.vertices;
  node.tri_indices1 = model1.tri_indices;
  node.prev_vertices1 = model1.prev_vertices;
  // Second mesh, same wiring.
  node.model2 = &model2;
  node.vertices2 = model2.vertices;
  node.tri_indices2 = model2.tri_indices;
  node.prev_vertices2 = model2.prev_vertices;
  // Query controls.
  node.num_max_contacts = num_max_contacts;
  node.exhaustive = exhaustive;
  node.enable_contact = enable_contact;
  return true;
}
/** \brief Initialize traversal node for continuous collision detection between one mesh and one point cloud */
// model1 must be a triangle mesh; model2 may be a point cloud or a mesh used
// only as a vertex source (its triangle indices are not consumed).
template<typename BV>
bool initialize(MeshPointCloudContinuousCollisionTraversalNode<BV>& node, const BVHModel<BV>& model1, const BVHModel<BV>& model2, int num_max_contacts = 1, bool exhaustive = false, bool enable_contact = false)
{
  if(model1.getModelType() != BVH_MODEL_TRIANGLES || !(model2.getModelType() == BVH_MODEL_TRIANGLES || model2.getModelType() == BVH_MODEL_POINTCLOUD))
    return false;
  node.model1 = &model1;
  node.model2 = &model2;
  node.vertices1 = model1.vertices;
  node.vertices2 = model2.vertices;
  // Only the mesh side contributes triangle indices.
  node.tri_indices1 = model1.tri_indices;
  // Continuous checks need previous-frame vertex positions of both models.
  node.prev_vertices1 = model1.prev_vertices;
  node.prev_vertices2 = model2.prev_vertices;
  node.num_max_contacts = num_max_contacts;
  node.exhaustive = exhaustive;
  node.enable_contact = enable_contact;
  return true;
}
/** \brief Initialize traversal node for continuous collision detection between one point cloud and one mesh */
// model1 may be a point cloud or a mesh used only as a vertex source;
// model2 must be a triangle mesh. The models are not modified.
template<typename BV>
bool initialize(PointCloudMeshContinuousCollisionTraversalNode<BV>& node, const BVHModel<BV>& model1, const BVHModel<BV>& model2, int num_max_contacts = 1, bool exhaustive = false, bool enable_contact = false)
{
  const bool cloud_ok = (model1.getModelType() == BVH_MODEL_TRIANGLES) || (model1.getModelType() == BVH_MODEL_POINTCLOUD);
  const bool mesh_ok = (model2.getModelType() == BVH_MODEL_TRIANGLES);
  if(!cloud_ok || !mesh_ok)
    return false;
  // Point-cloud side: vertices only, current and previous frame.
  node.model1 = &model1;
  node.vertices1 = model1.vertices;
  node.prev_vertices1 = model1.prev_vertices;
  // Mesh side: vertices plus triangle indices.
  node.model2 = &model2;
  node.vertices2 = model2.vertices;
  node.tri_indices2 = model2.tri_indices;
  node.prev_vertices2 = model2.prev_vertices;
  // Query controls.
  node.num_max_contacts = num_max_contacts;
  node.exhaustive = exhaustive;
  node.enable_contact = enable_contact;
  return true;
}
/** \brief Initialize traversal node for conservative advancement computation between two meshes, given the current transforms */
// Unlike the discrete initializers above, the supplied poses (R1,T1)/(R2,T2)
// are baked into the vertex data unconditionally (no isIdentityTransform
// check), and the models' own transforms are NOT reset afterwards -- there is
// no setIdentityTransform call here. Both models are mutated.
template<typename BV>
bool initialize(MeshConservativeAdvancementTraversalNode<BV>& node, BVHModel<BV>& model1, BVHModel<BV>& model2,
                const Vec3f R1[3], const Vec3f& T1, const Vec3f R2[3], const Vec3f& T2, BVH_REAL w = 1,
                bool use_refit = false, bool refit_bottomup = false)
{
  if(model1.getModelType() != BVH_MODEL_TRIANGLES || model2.getModelType() != BVH_MODEL_TRIANGLES)
    return false;
  // Transform model1's vertices by (R1, T1).
  std::vector<Vec3f> vertices_transformed1(model1.num_vertices);
  for(int i = 0; i < model1.num_vertices; ++i)
  {
    Vec3f& p = model1.vertices[i];
    Vec3f new_v = matMulVec(R1, p) + T1;
    vertices_transformed1[i] = new_v;
  }
  // Transform model2's vertices by (R2, T2).
  std::vector<Vec3f> vertices_transformed2(model2.num_vertices);
  for(int i = 0; i < model2.num_vertices; ++i)
  {
    Vec3f& p = model2.vertices[i];
    Vec3f new_v = matMulVec(R2, p) + T2;
    vertices_transformed2[i] = new_v;
  }
  // Rebuild (or refit) both hierarchies over the transformed vertices.
  model1.beginReplaceModel();
  model1.replaceSubModel(vertices_transformed1);
  model1.endReplaceModel(use_refit, refit_bottomup);
  model2.beginReplaceModel();
  model2.replaceSubModel(vertices_transformed2);
  model2.endReplaceModel(use_refit, refit_bottomup);
  node.model1 = &model1;
  node.model2 = &model2;
  node.vertices1 = model1.vertices;
  node.vertices2 = model2.vertices;
  node.tri_indices1 = model1.tri_indices;
  node.tri_indices2 = model2.tri_indices;
  // w: weight stored on the node; its semantics are defined by the traversal
  // implementation that consumes it.
  node.w = w;
  return true;
}
/** \brief Initialize traversal node for conservative advancement computation between two meshes, given the current transforms, specialized for RSS */
// RSS variant: instead of baking poses into the vertices, the relative pose
// between the two given transforms is computed and stored on the node
// (node.R, node.T). The models are const and are not modified.
inline bool initialize(MeshConservativeAdvancementTraversalNodeRSS& node, const BVHModel<RSS>& model1, const BVHModel<RSS>& model2,
                       const Vec3f R1[3], const Vec3f& T1, const Vec3f R2[3], const Vec3f& T2, BVH_REAL w = 1)
{
  if(!(model1.getModelType() == BVH_MODEL_TRIANGLES && model2.getModelType() == BVH_MODEL_TRIANGLES))
    return false;
  // First mesh.
  node.model1 = &model1;
  node.vertices1 = model1.vertices;
  node.tri_indices1 = model1.tri_indices;
  // Second mesh.
  node.model2 = &model2;
  node.vertices2 = model2.vertices;
  node.tri_indices2 = model2.tri_indices;
  node.w = w;
  // Store the pose of the second frame expressed against the first.
  relativeTransform(R1, T1, R2, T2, node.R, node.T);
  return true;
}
}
#endif

View File

@ -0,0 +1,228 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_TRANSFORM_H
#define FCL_TRANSFORM_H
#include "fcl/vec_3f.h"
namespace fcl
{
/** \brief Quaternion used locally by InterpMotion */
// Storage layout: data[0] = w (scalar part), data[1..3] = x, y, z (vector part).
class SimpleQuaternion
{
public:
  /** \brief Default quaternion is identity rotation */
  SimpleQuaternion()
  {
    data[0] = 1;
    data[1] = 0;
    data[2] = 0;
    data[3] = 0;
  }
  /** \brief Construct from components: a = w, b = x, c = y, d = z */
  SimpleQuaternion(BVH_REAL a, BVH_REAL b, BVH_REAL c, BVH_REAL d)
  {
    data[0] = a; // w
    data[1] = b; // x
    data[2] = c; // y
    data[3] = d; // z
  }
  /** \brief Matrix to quaternion */
  void fromRotation(const Vec3f R[3]);
  /** \brief Quaternion to matrix */
  void toRotation(Vec3f R[3]) const;
  /** \brief Axes to quaternion */
  void fromAxes(const Vec3f axis[3]);
  /** \brief Quaternion to axes */
  void toAxes(Vec3f axis[3]) const;
  /** \brief Axis and angle to quaternion */
  void fromAxisAngle(const Vec3f& axis, BVH_REAL angle);
  /** \brief Quaternion to axis and angle */
  void toAxisAngle(Vec3f& axis, BVH_REAL& angle) const;
  /** \brief Dot product between quaternions */
  BVH_REAL dot(const SimpleQuaternion& other) const;
  /** \brief addition */
  SimpleQuaternion operator + (const SimpleQuaternion& other) const;
  /** \brief minus */
  SimpleQuaternion operator - (const SimpleQuaternion& other) const;
  /** \brief multiplication */
  SimpleQuaternion operator * (const SimpleQuaternion& other) const;
  /** \brief unary negation */
  SimpleQuaternion operator - () const;
  /** \brief scalar multiplication */
  SimpleQuaternion operator * (BVH_REAL t) const;
  /** \brief conjugate */
  SimpleQuaternion conj() const;
  /** \brief inverse */
  SimpleQuaternion inverse() const;
  /** \brief rotate a vector */
  Vec3f transform(const Vec3f& v) const;
  // Component accessors (const and mutable variants).
  inline const BVH_REAL& getW() const { return data[0]; }
  inline const BVH_REAL& getX() const { return data[1]; }
  inline const BVH_REAL& getY() const { return data[2]; }
  inline const BVH_REAL& getZ() const { return data[3]; }
  inline BVH_REAL& getW() { return data[0]; }
  inline BVH_REAL& getX() { return data[1]; }
  inline BVH_REAL& getY() { return data[2]; }
  inline BVH_REAL& getZ() { return data[3]; }
private:
  BVH_REAL data[4];
};
/** \brief Simple transform class used locally by InterpMotion */
// Keeps the rotation in both matrix form (R) and quaternion form (q); every
// setter keeps the two representations synchronized. T is the translation.
class SimpleTransform
{
  /** \brief Rotation matrix and translation vector */
  Vec3f R[3];
  Vec3f T;
  /** \brief Quaternion representation for R */
  SimpleQuaternion q;
public:
  /** \brief Default transform is no movement */
  // Only the diagonal of R is set explicitly -- assumes Vec3f
  // default-constructs to zero (TODO confirm); q defaults to identity.
  SimpleTransform()
  {
    R[0][0] = 1; R[1][1] = 1; R[2][2] = 1;
  }
  /** \brief Construct from a rotation matrix and translation; q is derived from R_ */
  SimpleTransform(const Vec3f R_[3], const Vec3f& T_)
  {
    for(int i = 0; i < 3; ++i)
      R[i] = R_[i];
    T = T_;
    q.fromRotation(R_);
  }
  inline const Vec3f& getTranslation() const
  {
    return T;
  }
  inline const Vec3f* getRotation() const
  {
    return R;
  }
  inline const SimpleQuaternion& getQuatRotation() const
  {
    return q;
  }
  /** \brief Set rotation (matrix form) and translation; q is re-derived from R_ */
  inline void setTransform(const Vec3f R_[3], const Vec3f& T_)
  {
    for(int i = 0; i < 3; ++i)
      R[i] = R_[i];
    T = T_;
    q.fromRotation(R_);
  }
  /** \brief Set rotation (quaternion form) and translation; R is re-derived from q_ */
  inline void setTransform(const SimpleQuaternion& q_, const Vec3f& T_)
  {
    q = q_;
    T = T_;
    q.toRotation(R);
  }
  inline void setRotation(const Vec3f R_[3])
  {
    for(int i = 0; i < 3; ++i)
      R[i] = R_[i];
    q.fromRotation(R_);
  }
  inline void setTranslation(const Vec3f& T_)
  {
    T = T_;
  }
  inline void setQuatRotation(const SimpleQuaternion& q_)
  {
    q = q_;
    q.toRotation(R);
  }
  /** \brief Apply the transform to a point: rotate by q, then translate by T */
  Vec3f transform(const Vec3f& v) const
  {
    return q.transform(v) + T;
  }
  // Exact floating-point comparison against the identity; only true when the
  // matrix/translation were set to these literal values (no tolerance).
  bool isIdentity() const
  {
    return (R[0][0] == 1) && (R[0][1] == 0) && (R[0][2] == 0) && (R[1][0] == 0) && (R[1][1] == 1) && (R[1][2] == 0) && (R[2][0] == 0) && (R[2][1] == 0) && (R[2][2] == 1)
    && (T[0] == 0) && (T[1] == 0) && (T[2] == 0);
  }
  /** \brief Reset to the identity transform (R, T and q all reset) */
  void setIdentity()
  {
    R[0] = Vec3f(1, 0, 0);
    R[1] = Vec3f(0, 1, 0);
    R[2] = Vec3f(0, 0, 1);
    T = Vec3f();
    q = SimpleQuaternion();
  }
};
}
#endif

View File

@ -0,0 +1,117 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_TRAVERSAL_NODE_BASE_H
#define FCL_TRAVERSAL_NODE_BASE_H
#include "fcl/vec_3f.h"
#include "fcl/primitive.h"
#include "fcl/BVH_front.h"
#include "fcl/BVH_model.h"
#include <vector>
/** \brief Main namespace */
namespace fcl
{
/** \brief Base class for traversal over a pair of bounding volume hierarchies.
 *  Derived nodes override the structural queries below; the base versions are
 *  defined out of line. */
class TraversalNodeBase
{
public:
  // Statistics gathering is off by default.
  TraversalNodeBase() : enable_statistics(false) {}
  virtual ~TraversalNodeBase();
  /** \brief Whether b is a leaf node in the first BVH tree */
  virtual bool isFirstNodeLeaf(int b) const;
  /** \brief Whether b is a leaf node in the second BVH tree */
  virtual bool isSecondNodeLeaf(int b) const;
  /** \brief Traverse the subtree of the node in the first tree first */
  virtual bool firstOverSecond(int b1, int b2) const;
  /** \brief Get the left child of the node b in the first tree */
  virtual int getFirstLeftChild(int b) const;
  /** \brief Get the right child of the node b in the first tree */
  virtual int getFirstRightChild(int b) const;
  /** \brief Get the left child of the node b in the second tree */
  virtual int getSecondLeftChild(int b) const;
  /** \brief Get the right child of the node b in the second tree */
  virtual int getSecondRightChild(int b) const;
  /** \brief Enable or disable per-query statistics counting */
  void enableStatistics(bool enable) { enable_statistics = enable; }
  // When true, derived nodes count BV/leaf tests during traversal.
  bool enable_statistics;
};
/** \brief Base class for collision queries: adds the BV test, the leaf test
 *  and the early-termination check to the structural traversal interface. */
class CollisionTraversalNodeBase : public TraversalNodeBase
{
public:
  CollisionTraversalNodeBase() : TraversalNodeBase() {}
  virtual ~CollisionTraversalNodeBase();
  /** \brief BV test between b1 and b2 */
  virtual bool BVTesting(int b1, int b2) const;
  /** \brief Leaf test between node b1 and b2, if they are both leafs */
  virtual void leafTesting(int b1, int b2) const;
  /** \brief Check whether the traversal can stop */
  virtual bool canStop() const;
};
/** \brief Base class for distance queries. BVTesting returns a BVH_REAL here
 *  (a distance-like value) instead of a boolean, and canStop takes the
 *  current best value c. */
class DistanceTraversalNodeBase : public TraversalNodeBase
{
public:
  DistanceTraversalNodeBase() : TraversalNodeBase() {}
  virtual ~DistanceTraversalNodeBase();
  /** \brief BV test between b1 and b2; returns a distance-like value */
  virtual BVH_REAL BVTesting(int b1, int b2) const;
  /** \brief Leaf test between node b1 and b2, if they are both leafs */
  virtual void leafTesting(int b1, int b2) const;
  /** \brief Whether traversal can stop, given the current best value c */
  virtual bool canStop(BVH_REAL c) const;
};
}
#endif

View File

@ -0,0 +1,423 @@
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/** \author Jia Pan */
#ifndef FCL_TRAVERSAL_NODE_MESH_SHAPE_H
#define FCL_TRAVERSAL_NODE_MESH_SHAPE_H
#include "fcl/geometric_shapes.h"
#include "fcl/traversal_node_base.h"
#include "fcl/BVH_model.h"
#include "fcl/geometric_shapes_utility.h"
namespace fcl
{
/** \brief Collision traversal between a BVH model (first object) and a
 *  geometric shape S (second object). Only the first tree is descended;
 *  b2 is unused in the tests. */
template<typename BV, typename S>
class BVHShapeCollisionTraversalNode : public CollisionTraversalNodeBase
{
public:
  BVHShapeCollisionTraversalNode()
  {
    model1 = NULL;
    model2 = NULL;
    num_bv_tests = 0;
    num_leaf_tests = 0;
    query_time_seconds = 0.0;
  }
  /** \brief Whether b is a leaf of the BVH side */
  bool isFirstNodeLeaf(int b) const
  {
    return model1->getBV(b).isLeaf();
  }
  int getFirstLeftChild(int b) const
  {
    return model1->getBV(b).leftChild();
  }
  int getFirstRightChild(int b) const
  {
    return model1->getBV(b).rightChild();
  }
  /** \brief True iff the BVH node's volume and the shape's BV do NOT overlap
   *  (i.e. the pair can be pruned). */
  // NOTE(review): the shape's BV is recomputed on every call; it could be
  // computed once at initialization and cached.
  bool BVTesting(int b1, int /*b2*/) const
  {
    if(this->enable_statistics) num_bv_tests++;
    BV bv_shape;
    computeBV(*model2, bv_shape);
    return !model1->getBV(b1).bv.overlap(bv_shape);
  }
  const BVHModel<BV>* model1; // BVH side
  const S* model2;            // shape side
  // Statistics counters; mutable because the tests are const.
  mutable int num_bv_tests;
  mutable int num_leaf_tests;
  mutable BVH_REAL query_time_seconds;
};
/** \brief Collision traversal between a geometric shape S (first object) and
 *  a BVH model (second object). Mirror image of
 *  BVHShapeCollisionTraversalNode: only the second tree is descended. */
template<typename S, typename BV>
class ShapeBVHCollisionTraversalNode : public CollisionTraversalNodeBase
{
public:
  ShapeBVHCollisionTraversalNode()
  {
    model1 = NULL;
    model2 = NULL;
    num_bv_tests = 0;
    num_leaf_tests = 0;
    query_time_seconds = 0.0;
  }
  /** \brief Always descend the second (BVH) side; the shape has no hierarchy */
  bool firstOverSecond(int, int) const
  {
    return false;
  }
  bool isSecondNodeLeaf(int b) const
  {
    return model2->getBV(b).isLeaf();
  }
  int getSecondLeftChild(int b) const
  {
    return model2->getBV(b).leftChild();
  }
  int getSecondRightChild(int b) const
  {
    return model2->getBV(b).rightChild();
  }
  /** \brief True iff the shape's BV and the BVH node's volume do NOT overlap
   *  (prune). b1 is unused. */
  // NOTE(review): as in the BVH-shape variant, the shape's BV is recomputed
  // on every call.
  bool BVTesting(int b1, int b2) const
  {
    if(this->enable_statistics) num_bv_tests++;
    BV bv_shape;
    computeBV(*model1, bv_shape);
    return !model2->getBV(b2).bv.overlap(bv_shape);
  }
  const S* model1;            // shape side
  const BVHModel<BV>* model2; // BVH side
  // Statistics counters; mutable because the tests are const.
  mutable int num_bv_tests;
  mutable int num_leaf_tests;
  mutable BVH_REAL query_time_seconds;
};
/** \brief The indices of in-collision primitives of objects */
// One contact record: the BVH primitive involved, plus (when contact data is
// enabled) the contact normal, point and penetration depth. Default
// constructor leaves all members uninitialized.
struct BVHShapeCollisionPair
{
  BVHShapeCollisionPair() {}
  /** \brief Record only the in-collision primitive id */
  BVHShapeCollisionPair(int id_) : id(id_) {}
  /** \brief Record the primitive id together with full contact data */
  BVHShapeCollisionPair(int id_, const Vec3f& n, const Vec3f& contactp, BVH_REAL depth) : id(id_),
  normal(n), contact_point(contactp), penetration_depth(depth) {}
  /** \brief The index of BVH's in-collision primitive */
  int id;
  /** \brief Contact normal */
  Vec3f normal;
  /** \brief Contact points */
  Vec3f contact_point;
  /** \brief Penetration depth for two triangles */
  BVH_REAL penetration_depth;
};
/** \brief Strict weak ordering on BVHShapeCollisionPair by primitive id,
 *  for sorting contact lists. */
struct BVHShapeCollisionPairComp
{
  // const-qualified so the comparator can be invoked through a const instance
  // or const reference, as standard algorithms such as std::sort may do.
  bool operator()(const BVHShapeCollisionPair& a, const BVHShapeCollisionPair& b) const
  {
    return a.id < b.id;
  }
};
/** \brief Collision traversal between a triangle mesh (BVH side) and a shape;
 *  leaf tests intersect individual triangles against the shape and collect
 *  the results in pairs. */
template<typename BV, typename S>
class MeshShapeCollisionTraversalNode : public BVHShapeCollisionTraversalNode<BV, S>
{
public:
  MeshShapeCollisionTraversalNode() : BVHShapeCollisionTraversalNode<BV, S>()
  {
    vertices = NULL;
    tri_indices = NULL;
    num_max_contacts = 1;
    exhaustive = false;
    enable_contact = false;
  }
  /** \brief Intersect the triangle of leaf b1 with the shape; b2 is unused.
   *  Appends a BVHShapeCollisionPair on intersection (with contact data only
   *  when enable_contact is set). */
  void leafTesting(int b1, int /*b2*/) const
  {
    if(this->enable_statistics) this->num_leaf_tests++;
    const BVNode<BV>& node = this->model1->getBV(b1);
    int primitive_id = node.primitiveId();
    const Triangle& tri_id = tri_indices[primitive_id];
    const Vec3f& p1 = vertices[tri_id[0]];
    const Vec3f& p2 = vertices[tri_id[1]];
    const Vec3f& p3 = vertices[tri_id[2]];
    BVH_REAL penetration;
    Vec3f normal;
    Vec3f contactp;
    if(!enable_contact) // only interested in collision or not
    {
      if(shapeTriangleIntersect(*(this->model2), p1, p2, p3))
      {
        pairs.push_back(BVHShapeCollisionPair(primitive_id));
      }
    }
    else
    {
      if(shapeTriangleIntersect(*(this->model2), p1, p2, p3, &contactp, &penetration, &normal))
      {
        pairs.push_back(BVHShapeCollisionPair(primitive_id, normal, contactp, penetration));
      }
    }
  }
  /** \brief Stop once enough contacts were found, unless exhaustive is set */
  bool canStop() const
  {
    return (pairs.size() > 0) && (!exhaustive) && (num_max_contacts <= (int)pairs.size());
  }
  Vec3f* vertices;       // mesh vertex array (not owned)
  Triangle* tri_indices; // mesh triangle index array (not owned)
  int num_max_contacts;  // contact budget when not exhaustive
  bool exhaustive;       // collect every contact, ignoring the budget
  bool enable_contact;   // compute contact point/normal/depth
  // Collected contacts; mutable because leafTesting is const.
  mutable std::vector<BVHShapeCollisionPair> pairs;
};
/** \brief OBB specialization of mesh-shape collision traversal: the mesh's
 *  pose (R, T) is applied on the fly during BV and leaf tests rather than
 *  being baked into the vertex data. */
template<typename S>
class MeshShapeCollisionTraversalNodeOBB : public MeshShapeCollisionTraversalNode<OBB, S>
{
public:
  // R starts as the identity; T relies on Vec3f's default construction
  // (presumably zero -- TODO confirm).
  MeshShapeCollisionTraversalNodeOBB() : MeshShapeCollisionTraversalNode<OBB, S>()
  {
    R[0] = Vec3f(1, 0, 0);
    R[1] = Vec3f(0, 1, 0);
    R[2] = Vec3f(0, 0, 1);
  }
  /** \brief OBB overlap test under the relative pose (R, T); true = prune */
  bool BVTesting(int b1, int /*b2*/) const
  {
    if(this->enable_statistics) this->num_bv_tests++;
    OBB bv_shape;
    computeBV(*this->model2, bv_shape);
    return !overlap(R, T, bv_shape, this->model1->getBV(b1).bv);
  }
  /** \brief Same as the base leaf test, but passes (R, T) to the
   *  triangle-shape intersection so the mesh pose is honored. */
  void leafTesting(int b1, int /*b2*/) const
  {
    if(this->enable_statistics) this->num_leaf_tests++;
    const BVNode<OBB>& node = this->model1->getBV(b1);
    int primitive_id = node.primitiveId();
    const Triangle& tri_id = this->tri_indices[primitive_id];
    const Vec3f& p1 = this->vertices[tri_id[0]];
    const Vec3f& p2 = this->vertices[tri_id[1]];
    const Vec3f& p3 = this->vertices[tri_id[2]];
    BVH_REAL penetration;
    Vec3f normal;
    Vec3f contactp;
    if(!this->enable_contact) // only interested in collision or not
    {
      if(shapeTriangleIntersect(*(this->model2), p1, p2, p3, R, T))
      {
        this->pairs.push_back(BVHShapeCollisionPair(primitive_id));
      }
    }
    else
    {
      if(shapeTriangleIntersect(*(this->model2), p1, p2, p3, R, T, &contactp, &penetration, &normal))
      {
        this->pairs.push_back(BVHShapeCollisionPair(primitive_id, normal, contactp, penetration));
      }
    }
  }
  // R, T is the transformation of bvh model
  Vec3f R[3];
  Vec3f T;
};
/** \brief Collision traversal between a shape (first object) and a triangle
 *  mesh (BVH side, second object); leaf tests intersect the shape against
 *  individual triangles of model2 and collect results in pairs. */
template<typename S, typename BV>
class ShapeMeshCollisionTraversalNode : public ShapeBVHCollisionTraversalNode<S, BV>
{
public:
  ShapeMeshCollisionTraversalNode() : ShapeBVHCollisionTraversalNode<S, BV>()
  {
    vertices = NULL;
    tri_indices = NULL;
    num_max_contacts = 1;
    exhaustive = false;
    enable_contact = false;
  }
  /** \brief Intersect the shape with the triangle of leaf b2; b1 is unused.
   *  The recorded id is model2's primitive id. */
  void leafTesting(int b1, int b2) const
  {
    if(this->enable_statistics) this->num_leaf_tests++;
    const BVNode<BV>& node = this->model2->getBV(b2);
    int primitive_id = node.primitiveId();
    const Triangle& tri_id = tri_indices[primitive_id];
    const Vec3f& p1 = vertices[tri_id[0]];
    const Vec3f& p2 = vertices[tri_id[1]];
    const Vec3f& p3 = vertices[tri_id[2]];
    BVH_REAL penetration;
    Vec3f normal;
    Vec3f contactp;
    if(!enable_contact) // only interested in collision or not
    {
      if(shapeTriangleIntersect(*(this->model1), p1, p2, p3))
      {
        pairs.push_back(BVHShapeCollisionPair(primitive_id));
      }
    }
    else
    {
      if(shapeTriangleIntersect(*(this->model1), p1, p2, p3, &contactp, &penetration, &normal))
      {
        pairs.push_back(BVHShapeCollisionPair(primitive_id, normal, contactp, penetration));
      }
    }
  }
  /** \brief Stop once enough contacts were found, unless exhaustive is set */
  bool canStop() const
  {
    return (pairs.size() > 0) && (!exhaustive) && (num_max_contacts <= (int)pairs.size());
  }
  Vec3f* vertices;       // mesh (model2) vertex array (not owned)
  Triangle* tri_indices; // mesh (model2) triangle index array (not owned)
  int num_max_contacts;  // contact budget when not exhaustive
  bool exhaustive;       // collect every contact, ignoring the budget
  bool enable_contact;   // compute contact point/normal/depth
  // Collected contacts; mutable because leafTesting is const.
  mutable std::vector<BVHShapeCollisionPair> pairs;
};
/** \brief OBB specialization of shape-mesh collision traversal: the mesh's
 *  pose (R, T) is applied on the fly during BV and leaf tests rather than
 *  being baked into the vertex data. */
template<typename S>
class ShapeMeshCollisionTraversalNodeOBB : public ShapeMeshCollisionTraversalNode<S, OBB>
{
public:
  // R starts as the identity; T relies on Vec3f's default construction
  // (presumably zero -- TODO confirm).
  ShapeMeshCollisionTraversalNodeOBB() : ShapeMeshCollisionTraversalNode<S, OBB>()
  {
    R[0] = Vec3f(1, 0, 0);
    R[1] = Vec3f(0, 1, 0);
    R[2] = Vec3f(0, 0, 1);
  }
  /** \brief OBB overlap test under the relative pose (R, T); b1 is unused;
   *  true = prune */
  bool BVTesting(int b1, int b2) const
  {
    if(this->enable_statistics) this->num_bv_tests++;
    OBB bv_shape;
    computeBV(*this->model1, bv_shape);
    return !overlap(R, T, bv_shape, this->model2->getBV(b2).bv);
  }
  /** \brief Same as the base leaf test, but passes (R, T) to the
   *  triangle-shape intersection so the mesh pose is honored. */
  void leafTesting(int b1, int b2) const
  {
    if(this->enable_statistics) this->num_leaf_tests++;
    const BVNode<OBB>& node = this->model2->getBV(b2);
    int primitive_id = node.primitiveId();
    const Triangle& tri_id = this->tri_indices[primitive_id];
    const Vec3f& p1 = this->vertices[tri_id[0]];
    const Vec3f& p2 = this->vertices[tri_id[1]];
    const Vec3f& p3 = this->vertices[tri_id[2]];
    BVH_REAL penetration;
    Vec3f normal;
    Vec3f contactp;
    if(!this->enable_contact) // only interested in collision or not
    {
      if(shapeTriangleIntersect(*(this->model1), p1, p2, p3, R, T))
      {
        this->pairs.push_back(BVHShapeCollisionPair(primitive_id));
      }
    }
    else
    {
      if(shapeTriangleIntersect(*(this->model1), p1, p2, p3, R, T, &contactp, &penetration, &normal))
      {
        this->pairs.push_back(BVHShapeCollisionPair(primitive_id, normal, contactp, penetration));
      }
    }
  }
  // R, T is the transformation of bvh model
  Vec3f R[3];
  Vec3f T;
};
}
#endif

Some files were not shown because too many files have changed in this diff Show More