diff --git a/.gitignore b/.gitignore index 53c1fb056bb..ea8f1b22cce 100644 --- a/.gitignore +++ b/.gitignore @@ -78,6 +78,7 @@ distribute/* python/caffe/proto/ cmake_build .cmake_build +nbproject/* # Generated documentation docs/_site @@ -93,3 +94,7 @@ LOCK LOG* CURRENT MANIFEST-* + +# Ignore MKL files (except prepare_mkl.sh) +/external/mkl/* +!/external/mkl/prepare_mkl.sh diff --git a/CMakeLists.txt b/CMakeLists.txt index da7142c9b3c..49d8ee81020 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -27,7 +27,10 @@ include(cmake/ConfigGen.cmake) # ---[ Options caffe_option(CPU_ONLY "Build Caffe without CUDA support" OFF) # TODO: rename to USE_CUDA +caffe_option(USE_OPENMP "Build Caffe with OpenMP support" ON ) caffe_option(USE_CUDNN "Build Caffe with cuDNN library support" ON IF NOT CPU_ONLY) +caffe_option(USE_MKL2017_AS_DEFAULT_ENGINE "Use MKL2017 primitives for supported layers" ON) +caffe_option(USE_MKLDNN_AS_DEFAULT_ENGINE "Use MKL-DNN primitives for supported layers" OFF) caffe_option(BUILD_SHARED_LIBS "Build shared libraries" ON) caffe_option(BUILD_python "Build Python wrapper" ON) set(python_version "2" CACHE STRING "Specify which Python version to use") @@ -38,13 +41,53 @@ caffe_option(USE_OPENCV "Build with OpenCV support" ON) caffe_option(USE_LEVELDB "Build with levelDB" ON) caffe_option(USE_LMDB "Build with lmdb" ON) caffe_option(ALLOW_LMDB_NOLOCK "Allow MDB_NOLOCK when reading LMDB files (only if necessary)" OFF) - +caffe_option(USE_SYSTEMTAP "Build for SystemTap" OFF) +caffe_option(PERFORMANCE_MONITORING "Build Caffe with PERFORMANCE_MONITORING " OFF) +#caffe_option(USE_GITHUB_MKLDNN "Download and use MKL-DNN available on github" OFF) + # ---[ Dependencies +include(cmake/MKLDNN.cmake) include(cmake/Dependencies.cmake) # ---[ Flags if(UNIX OR APPLE) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall") + + # Linker flags. + if( ${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU" OR ${CMAKE_CXX_COMPILER_ID} STREQUAL "Intel") + # GCC specific flags. 
ICC is compatible with them. + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -z noexecstack -z relro -z now") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -z noexecstack -z relro -z now") + elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang") + # In Clang, -z flags are not compatible, they need to be passed to linker via -Wl. + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now") + endif() + + # Compiler flags. + if( ${CMAKE_CXX_COMPILER_ID} STREQUAL "GNU") + # GCC specific flags. + if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 4.9 OR CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL 4.9) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector-strong") + else() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector") + endif() + elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "Clang") + # Clang is compatbile with some of the flags. + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIE -fstack-protector") + elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "Intel" ) + # Same as above, with exception that ICC compilation crashes with -fPIE option, even + # though it uses -pie linker option that require -fPIE during compilation. Checksec + # shows that it generates correct PIE anyway if only -pie is provided. + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstack-protector") + endif() + + # Generic flags. 
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -fno-operator-names -Wformat -Wformat-security -Wall") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") + # Dot not forward c++11 flag to GPU beucause it is not supported + set( CUDA_PROPAGATE_HOST_FLAGS OFF ) + set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -D_FORTIFY_SOURCE=2") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie") endif() caffe_set_caffe_link() @@ -65,16 +108,20 @@ configure_file(cmake/Templates/caffe_config.h.in "${PROJECT_BINARY_DIR}/caffe_co # ---[ Includes set(Caffe_INCLUDE_DIR ${PROJECT_SOURCE_DIR}/include) include_directories(${Caffe_INCLUDE_DIR} ${PROJECT_BINARY_DIR}) -include_directories(BEFORE src) # This is needed for gtest. +include_directories(BEFORE src/gtest/include src/gmock/include) # This is needed for gtest. # ---[ Subdirectories -add_subdirectory(src/gtest) +#add_subdirectory(src/gtest) +set(BUILD_SHARED_LIBS off) +add_subdirectory(src/gmock) +set(BUILD_SHARED_LIBS on) add_subdirectory(src/caffe) add_subdirectory(tools) add_subdirectory(examples) add_subdirectory(python) add_subdirectory(matlab) add_subdirectory(docs) +add_subdirectory(scripts/SystemTap) # ---[ Linter target add_custom_target(lint COMMAND ${CMAKE_COMMAND} -P ${PROJECT_SOURCE_DIR}/cmake/lint.cmake) diff --git a/LICENSE b/LICENSE index d69d16f5bc7..3316218f5ca 100644 --- a/LICENSE +++ b/LICENSE @@ -1,5 +1,14 @@ + COPYRIGHT +All modification made by Intel Corporation: © 2017 Intel Corporation. + +All new contributions compared to the original branch: +Copyright (c) 2015, 2016 Wei Liu (UNC Chapel Hill), Dragomir Anguelov (Zoox), +Dumitru Erhan (Google), Christian Szegedy (Google), Scott Reed (UMich Ann Arbor), +Cheng-Yang Fu (UNC Chapel Hill), Alexander C. Berg (UNC Chapel Hill). +All rights reserved. + All contributions by the University of California: Copyright (c) 2014, 2015, The Regents of the University of California (Regents) All rights reserved. 
diff --git a/Makefile b/Makefile index 403e00a38a1..fd6e78bc808 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,41 @@ -PROJECT := caffe +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +PROJECT := caffe CONFIG_FILE := Makefile.config # Explicitly check for the config file, otherwise make -k will proceed anyway. ifeq ($(wildcard $(CONFIG_FILE)),) @@ -24,6 +60,33 @@ else OTHER_BUILD_DIR := $(DEBUG_BUILD_DIR) endif + +#################### MLSL #################### + +ifeq ($(USE_MLSL), 1) + RETURN_STRING=$(shell ./external/mlsl/prepare_mlsl.sh) + MLSL_ROOT=$(firstword $(RETURN_STRING)) + MLSL_LDFLAGS=$(lastword $(RETURN_STRING)) + COMMON_FLAGS += -DUSE_MLSL=1 + LIBRARIES += mlsl + INCLUDE_DIRS += $(MLSL_ROOT)/intel64/include + LIBRARY_DIRS += $(MLSL_ROOT)/intel64/lib + +ifeq ($(CAFFE_PER_LAYER_TIMINGS), 1) + COMMON_FLAGS += -DCAFFE_PER_LAYER_TIMINGS +endif + +ifeq ($(CAFFE_MLSL_SHUFFLE), 1) + COMMON_FLAGS += -DCAFFE_MLSL_SHUFFLE +endif + +ifeq ($(FW_OVERLAP_OPT), 1) + COMMON_FLAGS += -DFW_OVERLAP_OPT +endif +endif +#################### MLSL #################### + + # All of the directories containing code. SRC_DIRS := $(shell find * -type d -exec bash -c "find {} -maxdepth 1 \ \( -name '*.cpp' -o -name '*.proto' \) | grep -q ." 
\; -print) @@ -53,7 +116,7 @@ TEST_MAIN_SRC := src/$(PROJECT)/test/test_caffe_main.cpp TEST_SRCS := $(shell find src/$(PROJECT) -name "test_*.cpp") TEST_SRCS := $(filter-out $(TEST_MAIN_SRC), $(TEST_SRCS)) TEST_CU_SRCS := $(shell find src/$(PROJECT) -name "test_*.cu") -GTEST_SRC := src/gtest/gtest-all.cpp +GTEST_SRCS := $(shell find src/gtest src/gmock -name "*all*.cpp") # TOOL_SRCS are the source files for the tool binaries TOOL_SRCS := $(shell find tools -name "*.cpp") # EXAMPLE_SRCS are the source files for the example binaries @@ -122,7 +185,7 @@ TEST_CU_BUILD_DIR := $(BUILD_DIR)/cuda/src/$(PROJECT)/test TEST_CXX_OBJS := $(addprefix $(BUILD_DIR)/, ${TEST_SRCS:.cpp=.o}) TEST_CU_OBJS := $(addprefix $(BUILD_DIR)/cuda/, ${TEST_CU_SRCS:.cu=.o}) TEST_OBJS := $(TEST_CXX_OBJS) $(TEST_CU_OBJS) -GTEST_OBJ := $(addprefix $(BUILD_DIR)/, ${GTEST_SRC:.cpp=.o}) +GTEST_OBJS := $(addprefix $(BUILD_DIR)/, ${GTEST_SRCS:.cpp=.o}) EXAMPLE_OBJS := $(addprefix $(BUILD_DIR)/, ${EXAMPLE_SRCS:.cpp=.o}) # Output files for automatic dependency generation DEPS := ${CXX_OBJS:.o=.d} ${CU_OBJS:.o=.d} ${TEST_CXX_OBJS:.o=.d} \ @@ -172,13 +235,16 @@ endif CUDA_LIB_DIR += $(CUDA_DIR)/lib INCLUDE_DIRS += $(BUILD_INCLUDE_DIR) ./src ./include +INCLUDE_DIRS += ./src/gtest/include ./src/gtest/ +INCLUDE_DIRS += ./src/gmock/include ./src/gmock/ ifneq ($(CPU_ONLY), 1) INCLUDE_DIRS += $(CUDA_INCLUDE_DIR) LIBRARY_DIRS += $(CUDA_LIB_DIR) LIBRARIES := cudart cublas curand endif -LIBRARIES += glog gflags protobuf boost_system boost_filesystem m hdf5_hl hdf5 +LIBRARIES += glog gflags protobuf m hdf5_hl hdf5 +BOOST_LIBRARIES += boost_system boost_filesystem boost_regex # handle IO dependencies USE_LEVELDB ?= 1 @@ -195,9 +261,9 @@ ifeq ($(USE_OPENCV), 1) LIBRARIES += opencv_core opencv_highgui opencv_imgproc ifeq ($(OPENCV_VERSION), 3) - LIBRARIES += opencv_imgcodecs + LIBRARIES += opencv_imgcodecs opencv_videoio endif - + endif PYTHON_LIBRARIES ?= boost_python python2.7 WARNINGS := -Wall -Wno-sign-compare @@ 
-304,11 +370,53 @@ ifdef CUSTOM_CXX CXX := $(CUSTOM_CXX) endif +# Compiler flags +ifneq (,$(findstring icpc,$(CXX))) + CXX_HARDENING_FLAGS += -fstack-protector +else ifneq (,$(findstring clang++,$(CXX))) + CXX_HARDENING_FLAGS += -fPIE -fstack-protector +else ifneq (,$(findstring g++,$(CXX))) + ifeq ($(shell echo | awk '{exit $(GCCVERSION) >= 4.9;}'), 1) + CXX_HARDENING_FLAGS += -fPIE -fstack-protector-strong + else + CXX_HARDENING_FLAGS += -fPIE -fstack-protector + endif +endif + +# Linker flags +ifneq (,$(findstring clang++,$(CXX))) + # In Clang, -z flags are not compatible, they need to be passed to linker via -Wl. + LINKER_SHARED_HARDENING_FLAGS += -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now + LINKER_EXEC_HARDENING_FLAGS += -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now +else + # GCC specific flags. ICC is compatible with them. + LINKER_SHARED_HARDENING_FLAGS += -z noexecstack -z relro -z now + LINKER_EXEC_HARDENING_FLAGS += -z noexecstack -z relro -z now +endif + +# Generic flags +CXX_HARDENING_FLAGS += -fPIC -fno-operator-names -Wformat -Wformat-security -Wall +LINKER_EXEC_HARDENING_FLAGS += -pie + +# Release-only flag +ifneq ($(DEBUG), 1) + CXX_HARDENING_FLAGS += -D_FORTIFY_SOURCE=2 +endif + +# Disable unused-local-typedefs warning for g++ +ifneq (,$(findstring g++,$(CXX))) + CXXFLAGS += -Wno-unused-local-typedefs +endif + # Static linking ifneq (,$(findstring clang++,$(CXX))) STATIC_LINK_COMMAND := -Wl,-force_load $(STATIC_NAME) else ifneq (,$(findstring g++,$(CXX))) STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive +else ifneq (,$(findstring mpi,$(CXX))) + STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive +else ifneq (,$(findstring icpc,$(CXX))) + STATIC_LINK_COMMAND := -Wl,--whole-archive $(STATIC_NAME) -Wl,--no-whole-archive else # The following line must not be indented with a tab, since we are not inside a target $(error Cannot static link with the $(CXX) compiler) @@ -318,8 +426,10 @@ endif ifeq 
($(DEBUG), 1) COMMON_FLAGS += -DDEBUG -g -O0 NVCCFLAGS += -G +else ifneq (,$(findstring icpc,$(CXX))) + COMMON_FLAGS += -DNDEBUG -O3 -xCORE-AVX2 -no-prec-div -fp-model fast=2 else - COMMON_FLAGS += -DNDEBUG -O2 + COMMON_FLAGS += -DNDEBUG -O3 endif # cuDNN acceleration configuration. @@ -358,21 +468,55 @@ ifeq ($(WITH_PYTHON_LAYER), 1) LIBRARIES += $(PYTHON_LIBRARIES) endif -# BLAS configuration (default = ATLAS) -BLAS ?= atlas +# Performance monitoring +ifeq ($(PERFORMANCE_MONITORING), 1) + CXXFLAGS += -DPERFORMANCE_MONITORING +endif + +include Makefile.mkldnn +ifeq ($(USE_MKLDNN_AS_DEFAULT_ENGINE), 1) + CXXFLAGS += -DUSE_MKLDNN_AS_DEFAULT_ENGINE +endif + +# BOOST configuration +# detect support for custom boost version +BOOST_LDFLAGS += $(foreach boost_lib,$(BOOST_LIBRARIES),-l$(boost_lib)) +ifneq ($(origin BOOST_ROOT), undefined) + INCLUDE_DIRS += $(BOOST_ROOT) + BOOST_LDFLAGS+=-L$(BOOST_ROOT)/stage/lib -Wl,-rpath,$(BOOST_ROOT)/stage/lib +endif + +# BLAS configuration (default = MKL) +MKL_LDFLAGS= +MKL_EXTERNAL := 0 +BLAS ?= mkl ifeq ($(BLAS), mkl) # MKL - LIBRARIES += mkl_rt + RETURN_STRING=$(shell ./external/mkl/prepare_mkl.sh) + MKLROOT=$(firstword $(RETURN_STRING)) + MKL_LDFLAGS=-l$(word 2, $(RETURN_STRING)) + MKL_EXTERNAL=$(lastword $(RETURN_STRING)) +ifeq ($(MKL_EXTERNAL), 1) + MKL_LDFLAGS+=-Wl,-rpath,$(MKLROOT)/lib +endif + COMMON_FLAGS += -DUSE_MKL - MKLROOT ?= /opt/intel/mkl BLAS_INCLUDE ?= $(MKLROOT)/include BLAS_LIB ?= $(MKLROOT)/lib $(MKLROOT)/lib/intel64 + + # detect support for mkl2017 primitives + ifneq ("$(wildcard $(BLAS_INCLUDE)/mkl_dnn.h)","") + CXXFLAGS += -DMKL2017_SUPPORTED + ifeq ($(USE_MKL2017_AS_DEFAULT_ENGINE), 1) + CXXFLAGS += -DUSE_MKL2017_AS_DEFAULT_ENGINE + endif + endif else ifeq ($(BLAS), open) # OpenBLAS LIBRARIES += openblas else # ATLAS - ifeq ($(LINUX), 1) + ifeq ($(LINUX), 0) ifeq ($(BLAS), atlas) # Linux simply has cblas and atlas LIBRARIES += cblas atlas @@ -395,6 +539,9 @@ endif INCLUDE_DIRS += $(BLAS_INCLUDE) LIBRARY_DIRS 
+= $(BLAS_LIB) +INCLUDE_DIRS += $(MKLDNN_INCLUDE) +LIBRARY_DIRS += $(MKLDNN_LIB) + LIBRARY_DIRS += $(LIB_BUILD_DIR) # Automatic dependency generation (nvcc is handled separately) @@ -402,7 +549,7 @@ CXXFLAGS += -MMD -MP # Complete build flags. COMMON_FLAGS += $(foreach includedir,$(INCLUDE_DIRS),-I$(includedir)) -CXXFLAGS += -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) +CXXFLAGS += -std=c++11 -pthread -fPIC $(COMMON_FLAGS) $(WARNINGS) NVCCFLAGS += -ccbin=$(CXX) -Xcompiler -fPIC $(COMMON_FLAGS) # mex may invoke an older gcc that is too liberal with -Wuninitalized MATLAB_CXXFLAGS := $(CXXFLAGS) -Wno-uninitialized @@ -415,7 +562,8 @@ else PKG_CONFIG := endif LDFLAGS += $(foreach librarydir,$(LIBRARY_DIRS),-L$(librarydir)) $(PKG_CONFIG) \ - $(foreach library,$(LIBRARIES),-l$(library)) + $(foreach library,$(LIBRARIES),-l$(library)) -Wl,--as-needed + PYTHON_LDFLAGS := $(LDFLAGS) $(foreach library,$(PYTHON_LIBRARIES),-l$(library)) # 'superclean' target recursively* deletes all files ending with an extension @@ -441,8 +589,42 @@ endif ############################## .PHONY: all lib test clean docs linecount lint lintclean tools examples $(DIST_ALIASES) \ py mat py$(PROJECT) mat$(PROJECT) proto runtest \ - superclean supercleanlist supercleanfiles warn everything + superclean supercleanlist supercleanfiles warn everything mkldnn mkldnn_clean + +.DEFAULT_GOAL := all + +# Following section detects if compiler supports OpenMP and updated compilation/linking flags accordingly +# if no openmp is supported in compiler then openmp compiler flags are not to be updated +# TODO: FIX for ICC? +USE_OPENMP ?= 1 +ifeq ($(USE_OPENMP), 1) + DUMMY_OPENMP_BINARY := $(shell mktemp) + DUMMY_OPENMP_FILE := $(shell mktemp).cpp + ifeq ($(MKL_EXTERNAL), 1) + INTEL_OMP_DIR ?= $(shell find ${MKLROOT} -readable -name libiomp5.so 2>/dev/null | grep -v mic | xargs dirname) + endif + INTEL_OMP_DIR ?= $(shell find ${MKLROOT}/.. 
-readable -name libiomp5.so 2>/dev/null | grep -v mic | grep -m 1 intel64 | xargs dirname) + define OPENMP_VERIFYING_CODE + "#include \n int main() { \n #ifdef _OPENMP \n return 0; \n #else \n break_if_openmp_not_supported \n #endif \n }" + endef + ifeq ($(BLAS), mkl) + OPENMP_VERIFYING_COMPILE_FLAGS = -Wl,--as-needed -liomp5 -L$(INTEL_OMP_DIR) + endif + OPENMP_VERIFYING_COMPILE_COMMAND = $(CXX) -fopenmp $(DUMMY_OPENMP_FILE) $(OPENMP_VERIFYING_COMPILE_FLAGS) -o $(DUMMY_OPENMP_BINARY) 2>/dev/null + OPENMP_VERIFYING_COMMAND = printf $(OPENMP_VERIFYING_CODE) > $(DUMMY_OPENMP_FILE) && $(OPENMP_VERIFYING_COMPILE_COMMAND) && echo 1 || echo 0 + IS_OPENMP_PRESENT = $(shell $(OPENMP_VERIFYING_COMMAND)) + + ifeq ($(IS_OPENMP_PRESENT), 1) + CXXFLAGS += -fopenmp + LINKFLAGS += -fopenmp + ifeq ($(BLAS), mkl) + LIBRARIES += iomp5 + LIBRARY_DIRS += $(INTEL_OMP_DIR) + endif + endif +endif +# set_env should be at the end all: lib tools examples lib: $(STATIC_NAME) $(DYNAMIC_NAME) @@ -484,6 +666,7 @@ $(LINT_OUTPUTS): $(LINT_OUTPUT_DIR)/%.lint.txt : % $(LINT_SCRIPT) | $(LINT_OUTPU > $@ \ || true + test: $(TEST_ALL_BIN) $(TEST_ALL_DYNLINK_BIN) $(TEST_BINS) tools: $(TOOL_BINS) $(TOOL_BIN_LINKS) @@ -496,8 +679,8 @@ py: $(PY$(PROJECT)_SO) $(PROTO_GEN_PY) $(PY$(PROJECT)_SO): $(PY$(PROJECT)_SRC) $(PY$(PROJECT)_HXX) | $(DYNAMIC_NAME) @ echo CXX/LD -o $@ $< - $(Q)$(CXX) -shared -o $@ $(PY$(PROJECT)_SRC) \ - -o $@ $(LINKFLAGS) -l$(LIBRARY_NAME) $(PYTHON_LDFLAGS) \ + $(Q)$(CXX) -std=c++11 -shared -o $@ $(PY$(PROJECT)_SRC) \ + -o $@ $(LINKFLAGS) $(CXX_HARDENING_FLAGS) $(LINKER_SHARED_HARDENING_FLAGS) -l$(LIBRARY_NAME) $(PYTHON_LDFLAGS) \ -Wl,-rpath,$(ORIGIN)/../../build/lib mat$(PROJECT): mat @@ -561,23 +744,23 @@ $(ALL_BUILD_DIRS): | $(BUILD_DIR_LINK) $(DYNAMIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) @ echo LD -o $@ - $(Q)$(CXX) -shared -o $@ $(OBJS) $(VERSIONFLAGS) $(LINKFLAGS) $(LDFLAGS) + $(Q)$(CXX) -shared -o $@ $(OBJS) $(VERSIONFLAGS) $(BOOST_LDFLAGS) $(LINKFLAGS) $(MKL_LDFLAGS) 
$(MKLDNN_LDFLAGS) $(CXX_HARDENING_FLAGS) $(LINKER_SHARED_HARDENING_FLAGS) $(LDFLAGS) @ cd $(BUILD_DIR)/lib; rm -f $(DYNAMIC_NAME_SHORT); ln -s $(DYNAMIC_VERSIONED_NAME_SHORT) $(DYNAMIC_NAME_SHORT) $(STATIC_NAME): $(OBJS) | $(LIB_BUILD_DIR) @ echo AR -o $@ $(Q)ar rcs $@ $(OBJS) -$(BUILD_DIR)/%.o: %.cpp | $(ALL_BUILD_DIRS) +$(BUILD_DIR)/%.o: %.cpp | mkldnn $(ALL_BUILD_DIRS) @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + $(Q)$(CXX) $< $(CXX_HARDENING_FLAGS) $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ || (cat $@.$(WARNS_EXT); exit 1) @ cat $@.$(WARNS_EXT) $(PROTO_BUILD_DIR)/%.pb.o: $(PROTO_BUILD_DIR)/%.pb.cc $(PROTO_GEN_HEADER) \ | $(PROTO_BUILD_DIR) @ echo CXX $< - $(Q)$(CXX) $< $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ + $(Q)$(CXX) $< $(CXX_HARDENING_FLAGS) $(CXXFLAGS) -c -o $@ 2> $@.$(WARNS_EXT) \ || (cat $@.$(WARNS_EXT); exit 1) @ cat $@.$(WARNS_EXT) @@ -589,23 +772,23 @@ $(BUILD_DIR)/cuda/%.o: %.cu | $(ALL_BUILD_DIRS) || (cat $@.$(WARNS_EXT); exit 1) @ cat $@.$(WARNS_EXT) -$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ +$(TEST_ALL_BIN): $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJS) \ | $(DYNAMIC_NAME) $(TEST_BIN_DIR) @ echo CXX/LD -o $@ $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(LIBRARY_NAME) -Wl,-rpath,$(ORIGIN)/../lib + $(Q)$(CXX) -std=c++11 $(TEST_MAIN_SRC) $(TEST_OBJS) $(GTEST_OBJS) \ + -o $@ $(BOOST_LDFLAGS) $(LINKFLAGS) $(MKL_LDFLAGS) $(MKLDNN_LDFLAGS) $(CXX_HARDENING_FLAGS) $(LINKER_EXEC_HARDENING_FLAGS) $(LDFLAGS) -l$(LIBRARY_NAME) -Wl,-rpath,$(ORIGIN)/../lib $(TEST_CU_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CU_BUILD_DIR)/%.o \ - $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + $(GTEST_OBJS) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) @ echo LD $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(LIBRARY_NAME) -Wl,-rpath,$(ORIGIN)/../lib + $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJS) \ + -o $@ $(LINKFLAGS) $(CXX_HARDENING_FLAGS) 
$(LINKER_EXEC_HARDENING_FLAGS) $(LDFLAGS) -l$(LIBRARY_NAME) -Wl,-rpath,$(ORIGIN)/../lib $(TEST_CXX_BINS): $(TEST_BIN_DIR)/%.testbin: $(TEST_CXX_BUILD_DIR)/%.o \ - $(GTEST_OBJ) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) + $(GTEST_OBJS) | $(DYNAMIC_NAME) $(TEST_BIN_DIR) @ echo LD $< - $(Q)$(CXX) $(TEST_MAIN_SRC) $< $(GTEST_OBJ) \ - -o $@ $(LINKFLAGS) $(LDFLAGS) -l$(LIBRARY_NAME) -Wl,-rpath,$(ORIGIN)/../lib + $(Q)$(CXX) -std=c++11 $(TEST_MAIN_SRC) $< $(GTEST_OBJS) \ + -o $@ $(BOOST_LDFLAGS) $(LINKFLAGS) $(MKL_LDFLAGS) $(MKLDNN_LDFLAGS) $(CXX_HARDENING_FLAGS) $(LINKER_EXEC_HARDENING_FLAGS) $(LDFLAGS) -l$(LIBRARY_NAME) -Wl,-rpath,$(ORIGIN)/../lib # Target for extension-less symlinks to tool binaries with extension '*.bin'. $(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR) @@ -614,12 +797,12 @@ $(TOOL_BUILD_DIR)/%: $(TOOL_BUILD_DIR)/%.bin | $(TOOL_BUILD_DIR) $(TOOL_BINS): %.bin : %.o | $(DYNAMIC_NAME) @ echo CXX/LD -o $@ - $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(LIBRARY_NAME) $(LDFLAGS) \ + $(Q)$(CXX) $< -o $@ $(BOOST_LDFLAGS) $(LINKFLAGS) $(MKL_LDFLAGS) $(MKLDNN_LDFLAGS) $(CXX_HARDENING_FLAGS) $(LINKER_EXEC_HARDENING_FLAGS) -l$(LIBRARY_NAME) $(LDFLAGS) \ -Wl,-rpath,$(ORIGIN)/../lib $(EXAMPLE_BINS): %.bin : %.o | $(DYNAMIC_NAME) @ echo CXX/LD -o $@ - $(Q)$(CXX) $< -o $@ $(LINKFLAGS) -l$(LIBRARY_NAME) $(LDFLAGS) \ + $(Q)$(CXX) $< -o $@ $(BOOST_LDFLAGS) $(LINKFLAGS) $(MKL_LDFLAGS) $(MKLDNN_LDFLAGS) $(CXX_HARDENING_FLAGS) $(LINKER_EXEC_HARDENING_FLAGS) -l$(LIBRARY_NAME) $(LDFLAGS) \ -Wl,-rpath,$(ORIGIN)/../../lib proto: $(PROTO_GEN_CC) $(PROTO_GEN_HEADER) @@ -637,7 +820,7 @@ $(PY_PROTO_BUILD_DIR)/%_pb2.py : $(PROTO_SRC_DIR)/%.proto \ $(PY_PROTO_INIT): | $(PY_PROTO_BUILD_DIR) touch $(PY_PROTO_INIT) -clean: +clean: mkldnn_clean @- $(RM) -rf $(ALL_BUILD_DIRS) @- $(RM) -rf $(OTHER_BUILD_DIR) @- $(RM) -rf $(BUILD_DIR_LINK) diff --git a/Makefile.config.example b/Makefile.config.example index 07bed63ae40..8bfcc57a3b7 100644 --- a/Makefile.config.example +++ 
b/Makefile.config.example @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# ## Refer to http://caffe.berkeleyvision.org/installation.html # Contributions simplifying and improving our build system are welcome! @@ -5,7 +41,19 @@ # USE_CUDNN := 1 # CPU-only switch (uncomment to build without GPU support). -# CPU_ONLY := 1 +CPU_ONLY := 1 + +USE_MKL2017_AS_DEFAULT_ENGINE := 1 +# or put this at the top your train_val.protoxt or solver.prototxt file: +# engine: "MKL2017" +# or use this option with caffe tool: +# -engine "MKL2017" + +# USE_MKLDNN_AS_DEFAULT_ENGINE flag is OBSOLETE +# Put this at the top your train_val.protoxt or solver.prototxt file: +# engine: "MKLDNN" +# or use this option with caffe tool: +# -engine "MKLDNN" # uncomment to disable IO dependencies and corresponding data layers # USE_OPENCV := 0 @@ -24,6 +72,14 @@ # N.B. the default for Linux is g++ and the default for OSX is clang++ # CUSTOM_CXX := g++ +# If you use Intel compiler define a path to newer boost if not used +# already. +# BOOST_ROOT := + +# Intel(r) Machine Learning Scaling Library (uncomment to build +# with MLSL for multi-node training) +# USE_MLSL :=1 + # CUDA directory contains bin/ and lib/ directories that we need. CUDA_DIR := /usr/local/cuda # On Ubuntu 14.04, if cuda tools are installed via @@ -33,17 +89,17 @@ CUDA_DIR := /usr/local/cuda # CUDA architecture setting: going with all of them. # For CUDA < 6.0, comment the *_50 lines for compatibility. 
CUDA_ARCH := -gencode arch=compute_20,code=sm_20 \ - -gencode arch=compute_20,code=sm_21 \ - -gencode arch=compute_30,code=sm_30 \ - -gencode arch=compute_35,code=sm_35 \ - -gencode arch=compute_50,code=sm_50 \ - -gencode arch=compute_50,code=compute_50 + -gencode arch=compute_20,code=sm_21 \ + -gencode arch=compute_30,code=sm_30 \ + -gencode arch=compute_35,code=sm_35 \ + -gencode arch=compute_50,code=sm_50 \ + -gencode arch=compute_50,code=compute_50 # BLAS choice: # atlas for ATLAS (default) # mkl for MKL # open for OpenBlas -BLAS := atlas +BLAS := mkl # Custom (MKL/ATLAS/OpenBLAS) include and lib directories. # Leave commented to accept the defaults for your choice of BLAS # (which should work)! @@ -102,9 +158,15 @@ LIBRARY_DIRS := $(PYTHON_LIB) /usr/local/lib /usr/lib BUILD_DIR := build DISTRIBUTE_DIR := distribute +# Uncomment to enable training performance monitoring +# PERFORMANCE_MONITORING := 1 + # Uncomment for debugging. Does not work on OSX due to https://github.com/BVLC/caffe/issues/171 # DEBUG := 1 +# Uncomment to disable OpenMP support. +# USE_OPENMP := 0 + # The ID of the GPU that 'make runtest' will use to run unit tests. 
TEST_GPUID := 0 diff --git a/Makefile.mkldnn b/Makefile.mkldnn new file mode 100644 index 00000000000..ec1a70bc505 --- /dev/null +++ b/Makefile.mkldnn @@ -0,0 +1,64 @@ +CAFFE_ROOTDIR := $(shell pwd) +MKLDNN_ROOTDIR := external/mkldnn +MKLDNN_TMPDIR := $(MKLDNN_ROOTDIR)/tmp +MKLDNN_SRCDIR := $(MKLDNN_ROOTDIR)/src +MKLDNN_BUILDDIR := $(MKLDNN_ROOTDIR)/build +MKLDNN_INSTALLDIR := $(MKLDNN_ROOTDIR)/install +MKLDNN_COMMIT := `cat ${CAFFE_ROOTDIR}/mkldnn.commit` +MKLDNN_CXX := $(CXX) +MKLDNN_CC := $(CC) + +RETURN_STRING=$(shell ./external/mkl/prepare_mkl.sh) +MKLROOT=$(firstword $(RETURN_STRING)) +MKL_ROOTDIR := $(MKLROOT) + +# We do this because earlier versions of CMake have problems with ccache +ifneq (,$(findstring ccache,$(CXX))) + MKLDNN_CXX := $(lastword $(CXX)) +endif + +ifneq (,$(findstring ccache,$(CC))) + MKLDNN_CC := $(lastword $(CC)) +endif + +MKLDNN_GITHUB := https://github.com/01org/mkl-dnn.git +MKLDNN_CMAKE_FLAGS += $(MKLDNN_SRCDIR) -DCMAKE_INSTALL_PREFIX=$(CAFFE_ROOTDIR)/$(MKLDNN_INSTALLDIR) -DMKLROOT=${MKL_ROOTDIR} -B$(CAFFE_ROOTDIR)/$(MKLDNN_BUILDDIR) -DCMAKE_CXX_COMPILER="$(MKLDNN_CXX)" -DCMAKE_C_COMPILER="$(MKLDNN_CC)" + +ifeq ("$(wildcard $(MKLDNN_INSTALLDIR)/include/mkldnn.hpp)", "") +mkldnn_download: + git clone --no-checkout $(MKLDNN_GITHUB) $(MKLDNN_TMPDIR) + rsync -a $(MKLDNN_TMPDIR)/ $(MKLDNN_SRCDIR) && rm -rf $(MKLDNN_TMPDIR) + cd $(MKLDNN_SRCDIR) && git reset --hard $(MKLDNN_COMMIT) + +mkldnn_build: mkldnn_download + cmake $(MKLDNN_CMAKE_FLAGS) + make -C $(CAFFE_ROOTDIR)/$(MKLDNN_BUILDDIR) -j$(shell cat /proc/cpuinfo |grep 'processor'|wc -l) + make -C $(CAFFE_ROOTDIR)/$(MKLDNN_BUILDDIR) install +else +mkldnn_download: +mkldnn_build: +endif + +mkldnn_clean: + @rm -rf $(MKLDNN_SRCDIR) $(MKLDNN_BUILDDIR) $(MKLDNN_INSTALLDIR) $(MKLDNN_TMPDIR) + +mkldnnroot_set: + $(eval CXXFLAGS += -DMKLDNN_SUPPORTED) \ + $(eval INCLUDE_DIRS += $(MKLDNNROOT)/include) + $(eval MKLDNN_LDFLAGS += -lmkldnn -L$(MKLDNNROOT)/lib) \ + $(eval MKLDNN_LDFLAGS += 
-Wl,-rpath,$(MKLDNNROOT)/lib) \ + + +mkldnnroot_notset: mkldnn_build + $(eval CXXFLAGS += -DMKLDNN_SUPPORTED) \ + $(eval INCLUDE_DIRS += $(MKLDNN_INSTALLDIR)/include) \ + $(eval MKLDNN_LDFLAGS += -lmkldnn) \ + $(eval MKLDNN_LDFLAGS += -L$(MKLDNN_INSTALLDIR)/lib -Wl,-rpath,${MKLDNN_INSTALLDIR}/lib) + +ifneq ($(origin MKLDNNROOT), undefined) +ifdef MKLDNNROOT +mkldnn: mkldnnroot_set +endif +else +mkldnn: mkldnnroot_notset +endif diff --git a/README.md b/README.md index 44b9e62c157..dbc117df6ef 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ # Caffe - [![Build Status](https://travis-ci.org/BVLC/caffe.svg?branch=master)](https://travis-ci.org/BVLC/caffe) [![License](https://img.shields.io/badge/license-BSD-blue.svg)](LICENSE) @@ -7,7 +6,6 @@ Caffe is a deep learning framework made with expression, speed, and modularity i It is developed by the Berkeley Vision and Learning Center ([BVLC](http://bvlc.eecs.berkeley.edu)) and community contributors. Check out the [project site](http://caffe.berkeleyvision.org) for all the details like - - [DIY Deep Learning for Vision with Caffe](https://docs.google.com/presentation/d/1UeKXVgRvvxg9OUdh_UiC5G71UMscNPlvArsWER41PsU/edit#slide=id.p) - [Tutorial Documentation](http://caffe.berkeleyvision.org/tutorial/) - [BVLC reference models](http://caffe.berkeleyvision.org/model_zoo.html) and the [community model zoo](https://github.com/BVLC/caffe/wiki/Model-Zoo) @@ -22,8 +20,43 @@ Framework development discussions and thorough bug reports are collected on [Iss Happy brewing! 
-## License and Citation +# SSD: Single Shot MultiBox Detector +This repository contains merged code issued as pull request to BVLC caffe written by: +[Wei Liu](http://www.cs.unc.edu/~wliu/), [Dragomir Anguelov](https://www.linkedin.com/in/dragomiranguelov), [Dumitru Erhan](http://research.google.com/pubs/DumitruErhan.html), [Christian Szegedy](http://research.google.com/pubs/ChristianSzegedy.html), [Scott Reed](http://www-personal.umich.edu/~reedscot/), [Cheng-Yang Fu](http://www.cs.unc.edu/~cyfu/), [Alexander C. Berg](http://acberg.com). + +Original branch can be found at https://github.com/weiliu89/caffe/tree/ssd. + +Read our [wiki page](https://github.com/intel/caffe/wiki/SSD:-Single-Shot-MultiBox-Detector) for more details. + +# Intel® Distribution of Caffe* +This fork is dedicated to improving Caffe performance when running on CPU, in particular Intel® Xeon processors (HSW, BDW, Xeon Phi) + +## Building +Build procedure is the same as on bvlc-caffe-master branch. Both Make and CMake can be used. +When OpenMP is available will be used automatically. + +## Running +Run procedure is the same as on bvlc-caffe-master branch. + +Current implementation uses OpenMP threads. By default the number of OpenMP threads is set +to the number of CPU cores. Each one thread is bound to a single core to achieve best +performance results. It is however possible to use own configuration by providing right +one through OpenMP environmental variables like OMP_NUM_THREADS or GOMP_CPU_AFFINITY. + +If some system tool like numactl is used to control CPU affinity, by default caffe will prevent +to use more than one thread per core. When less than required cores are specified, caffe will +limit execution of OpenMP threads to specified cores only. + +## Best performance solution +Please read [our Wiki](https://github.com/intel/caffe/wiki/Recommendations-to-achieve-best-performance) for our recommendations and configuration to achieve best performance on Intel CPUs. 
+ +## Multinode Training +Intel® Distribution of Caffe* multi-node allows you to execute deep neural network training on multiple machines. + +To understand how it works and read some tutorials, go to our Wiki. Start from [Multinode guide](https://github.com/intel/caffe/wiki/Multinode-guide). + +## License and Citation Caffe is released under the [BSD 2-Clause license](https://github.com/BVLC/caffe/blob/master/LICENSE). The BVLC reference models are released for unrestricted use. @@ -35,3 +68,9 @@ Please cite Caffe in your publications if it helps your research: Title = {Caffe: Convolutional Architecture for Fast Feature Embedding}, Year = {2014} } + +*** + *Other names and brands may be claimed as the property of others + + + diff --git a/cmake/ConfigGen.cmake b/cmake/ConfigGen.cmake index 056371110b5..5bfda8c1a1a 100644 --- a/cmake/ConfigGen.cmake +++ b/cmake/ConfigGen.cmake @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# ################################################################################################ # Helper function to fetch caffe includes which will be passed to dependent projects diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake index c7b6a17aa69..67adf4ba701 100644 --- a/cmake/Dependencies.cmake +++ b/cmake/Dependencies.cmake @@ -2,7 +2,7 @@ set(Caffe_LINKER_LIBS "") # ---[ Boost -find_package(Boost 1.46 REQUIRED COMPONENTS system thread filesystem) +find_package(Boost 1.46 REQUIRED COMPONENTS system thread filesystem regex) include_directories(SYSTEM ${Boost_INCLUDE_DIR}) list(APPEND Caffe_LINKER_LIBS ${Boost_LIBRARIES}) @@ -10,6 +10,21 @@ list(APPEND Caffe_LINKER_LIBS ${Boost_LIBRARIES}) find_package(Threads REQUIRED) list(APPEND Caffe_LINKER_LIBS ${CMAKE_THREAD_LIBS_INIT}) +# ---[ OpenMP +if(USE_OPENMP) + find_package(OpenMP) + if(OPENMP_FOUND) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}") + else() + set(USE_OPENMP "OFF") # compiler is not supporting OpenMP 
then do not use it + endif() +endif() + +# ---[ PERFORMANCE_MONITORING +if(PERFORMANCE_MONITORING) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DPERFORMANCE_MONITORING") +endif() + # ---[ Google-glog include("cmake/External/glog.cmake") include_directories(SYSTEM ${GLOG_INCLUDE_DIRS}) @@ -26,7 +41,7 @@ include(cmake/ProtoBuf.cmake) # ---[ HDF5 find_package(HDF5 COMPONENTS HL REQUIRED) include_directories(SYSTEM ${HDF5_INCLUDE_DIRS} ${HDF5_HL_INCLUDE_DIR}) -list(APPEND Caffe_LINKER_LIBS ${HDF5_LIBRARIES}) +list(APPEND Caffe_LINKER_LIBS ${HDF5_LIBRARIES} ${HDF5_HL_LIBRARIES}) # ---[ LMDB if(USE_LMDB) @@ -69,7 +84,7 @@ endif() # ---[ OpenCV if(USE_OPENCV) - find_package(OpenCV QUIET COMPONENTS core highgui imgproc imgcodecs) + find_package(OpenCV QUIET COMPONENTS core highgui imgproc imgcodecs videoio) if(NOT OpenCV_FOUND) # if not OpenCV 3.x, then imgcodecs are not found find_package(OpenCV REQUIRED COMPONENTS core highgui imgproc) endif() @@ -79,9 +94,44 @@ if(USE_OPENCV) add_definitions(-DUSE_OPENCV) endif() +# ---[ MLSL +if(USE_MLSL) + #--find mlsl in external/mkl + set(script_cmd "./external/mlsl/prepare_mlsl.sh" ) + execute_process(COMMAND ${script_cmd} + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} + RESULT_VARIABLE script_result + OUTPUT_VARIABLE RETURN_STRING) + separate_arguments(RETURN_STRING) + list(GET RETURN_STRING 0 MLSL_ROOT_DIR) + list(GET RETURN_STRING 1 MLSL_LIBRARIES) + set(MLSL_ROOT "${MLSL_ROOT_DIR}") + #set(MLSL_ROOT "$ENV{MLSL_ROOT}") + if(NOT MLSL_ROOT) + message(FATAL_ERROR "Unable to find MLSL package installation directory!") + endif() + message(STATUS "Machine Learning Scaling Library (MLSL) found (${MLSL_ROOT}/intel64)") + add_definitions("-DUSE_MLSL=1") + include_directories(SYSTEM "${MLSL_ROOT}/intel64/include") + link_directories(SYSTEM "${MLSL_ROOT}/intel64/lib") + list(APPEND Caffe_LINKER_LIBS mlsl) + + if(CAFFE_PER_LAYER_TIMINGS) + add_definitions("-DCAFFE_PER_LAYER_TIMINGS") + endif() + if(CAFFE_MLSL_SHUFFLE) + 
add_definitions("-DCAFFE_MLSL_SHUFFLE") + endif() + if(FW_OVERLAP_OPT) + message(STATUS "Forward overlapping optimization is enabled!") + add_definitions("-DFW_OVERLAP_OPT") + endif() +endif() + # ---[ BLAS +set(MKL_EXTERNAL "0") if(NOT APPLE) - set(BLAS "Atlas" CACHE STRING "Selected BLAS library") + set(BLAS "MKL" CACHE STRING "Selected BLAS library") set_property(CACHE BLAS PROPERTY STRINGS "Atlas;Open;MKL") if(BLAS STREQUAL "Atlas" OR BLAS STREQUAL "atlas") @@ -93,15 +143,99 @@ if(NOT APPLE) include_directories(SYSTEM ${OpenBLAS_INCLUDE_DIR}) list(APPEND Caffe_LINKER_LIBS ${OpenBLAS_LIB}) elseif(BLAS STREQUAL "MKL" OR BLAS STREQUAL "mkl") - find_package(MKL REQUIRED) - include_directories(SYSTEM ${MKL_INCLUDE_DIR}) + #--find mkl in external/mkl + set(script_cmd "./external/mkl/prepare_mkl.sh" ) + execute_process(COMMAND ${script_cmd} + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} + RESULT_VARIABLE script_result + OUTPUT_VARIABLE RETURN_STRING) + separate_arguments(RETURN_STRING) + list(GET RETURN_STRING 0 MKL_ROOT_DIR) + list(GET RETURN_STRING 1 MKL_LIBRARIES) + list(GET RETURN_STRING 2 MKL_EXTERNAL) + set(MKL_INCLUDE_DIR "${MKL_ROOT_DIR}/include/") + if( ${MKL_EXTERNAL} EQUAL 1 ) + set(MKL_LIBRARIES "${MKL_ROOT_DIR}/lib/lib${MKL_LIBRARIES}.so") + else() + set(MKL_LIBRARIES "${MKL_ROOT_DIR}/lib/intel64/lib${MKL_LIBRARIES}.so") + endif() + message(STATUS "Found MKL: ${MKL_INCLUDE_DIR}") + message(STATUS "Found MKL (include: ${MKL_INCLUDE_DIR}, lib: ${MKL_LIBRARIES}") + include_directories(SYSTEM ${MKL_INCLUDE_DIR}) list(APPEND Caffe_LINKER_LIBS ${MKL_LIBRARIES}) add_definitions(-DUSE_MKL) + # If MKL and OpenMP is to be used then use Intel OpenMP + if(OPENMP_FOUND) + list(APPEND Caffe_LINKER_LIBS -Wl,--as-needed iomp5) + endif() endif() elseif(APPLE) find_package(vecLib REQUIRED) include_directories(SYSTEM ${vecLib_INCLUDE_DIR}) list(APPEND Caffe_LINKER_LIBS ${vecLib_LINKER_LIBS}) + + if(VECLIB_FOUND) + if(NOT vecLib_INCLUDE_DIR MATCHES 
"^/System/Library/Frameworks/vecLib.framework.*") + add_definitions(-DUSE_ACCELERATE) + endif() + endif() +endif() + +# ---[ MKL2017 +if(BLAS STREQUAL "MKL" OR BLAS STREQUAL "mkl") + if(EXISTS ${MKL_INCLUDE_DIR}/mkl_dnn.h) + message(STATUS "Found MKL2017") + set(MKL2017_SUPPORTED ON) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMKL2017_SUPPORTED") + if(USE_MKL2017_AS_DEFAULT_ENGINE) + message(STATUS "MKL2017 engine will be used as a default engine") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_MKL2017_AS_DEFAULT_ENGINE") + endif() + else() + message(STATUS "MKL2017 not found") + set(MKL2017_SUPPORTED OFF) + if(USE_MKL2017_AS_DEFAULT_ENGINE) + message(WARNING "Flag USE_MKL2017_AS_DEFAULT_ENGINE was set, but MKL2017 not found") + endif() + endif() +endif() + +# ---[ MKLDNN +if(DEFINED ENV{MKLDNNROOT}) + set(MKLDNNROOT_DIR $ENV{MKLDNNROOT}) + if(NOT ${MKLDNNROOT_DIR} STREQUAL "") + set(MKLDNNROOT_INCLUDE_DIR "${MKLDNNROOT_DIR}/include/") + if(EXISTS ${MKLDNNROOT_INCLUDE_DIR}/mkldnn.hpp) + message(STATUS "Found MKLDNN: ${MKLDNNROOT_DIR}") + set(MKLDNN_SUPPORTED ON) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMKLDNN_SUPPORTED -std=c++11") + if(USE_MKLDNN_AS_DEFAULT_ENGINE) + message(STATUS "MKLDNN engine will be used as a default engine") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_MKLDNN_AS_DEFAULT_ENGINE") + endif() + list(APPEND Caffe_LINKER_LIBS "${MKLDNNROOT_DIR}/lib/libmkldnn.so") + include_directories(SYSTEM ${MKLDNNROOT_INCLUDE_DIR}) + else() + message(STATUS "MKLDNN not found. 
MKLDNN_INCLUDE_DIR = ${MKLDNNROOT_INCLUDE_DIR}") + set(MKLDNN_SUPPORTED OFF) + if(USE_MKLDNN_AS_DEFAULT_ENGINE) + message(WARNING "Flag USE_MKLDNN_AS_DEFAULT_ENGINE was set, but MKLDNN not found") + endif() + endif() + endif() +else() + Download_MKLDNN() + + message(STATUS "MKLDNN will be downloaded from github and installed in ${MKLDNN_INSTALL_DIR}") + message(STATUS "MKLDNN include directory: ${MKLDNN_INCLUDE_DIR}") + set(MKLDNN_SUPPORTED ON) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMKLDNN_SUPPORTED -std=c++11") + if(USE_MKLDNN_AS_DEFAULT_ENGINE) + message(STATUS "MKLDNN engine will be used as a default engine") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_MKLDNN_AS_DEFAULT_ENGINE") + endif() + list(APPEND Caffe_LINKER_LIBS "${MKLDNN_INSTALL_DIR}/lib/libmkldnn.so") + include_directories(${MKLDNN_INCLUDE_DIR}) endif() # ---[ Python diff --git a/cmake/MKLDNN.cmake b/cmake/MKLDNN.cmake new file mode 100644 index 00000000000..43c51f7eebe --- /dev/null +++ b/cmake/MKLDNN.cmake @@ -0,0 +1,41 @@ + +function(Download_MKLDNN) + set(EXTERNAL_DIR ${CMAKE_SOURCE_DIR}/external) + set(MKLDNN_DIR ${EXTERNAL_DIR}/mkldnn) + set(MKLDNN_SOURCE_DIR ${MKLDNN_DIR}/src) + set(MKLDNN_BUILD_DIR ${MKLDNN_DIR}/build) + set(MKLDNN_INSTALL_DIR ${MKLDNN_DIR}/install CACHE PATH "Installation path of MKLDNN") + execute_process(COMMAND cat mkldnn.commit + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} + OUTPUT_VARIABLE MKLDNN_COMMIT) + + include(ProcessorCount) + ProcessorCount(NCORE) + if(NOT NCORE EQUAL 0) + set(CTEST_BUILD_FLAGS -j${NCORE}) + set(ctest_test_args ${ctest_test_args} PARALLEL_LEVEL ${NCORE}) + endif() + + ExternalProject_add(MKLDNN_Build + SOURCE_DIR ${MKLDNN_SOURCE_DIR} + CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX=${MKLDNN_INSTALL_DIR} -DMKLROOT=${MKL_ROOT_DIR} +#--Download step + GIT_REPOSITORY https://github.com/01org/mkl-dnn.git + GIT_TAG ${MKLDNN_COMMIT} +#--Build step + BINARY_DIR ${MKLDNN_BUILD_DIR} + BUILD_COMMAND cmake ${MKLDNN_SOURCE_DIR} 
+#--Install step + INSTALL_DIR ${MKLDNN_INSTALL_DIR} + INSTALL_COMMAND make install -j${NCORE} + LOG_CONFIGURE 1 + LOG_BUILD 1 + LOG_INSTALL 1 + ) + + set(MKLDNN_INCLUDE_DIR ${MKLDNN_INSTALL_DIR}/include CACHE PATH "Include files for MKLDNN") + set(MKLDNN_LIB_DIR ${MKLDNN_INSTALL_DIR}/lib) + add_library(mkldnn SHARED IMPORTED ${MKLDNN_INSTALL_DIR}) + set_property(TARGET mkldnn PROPERTY IMPORTED_LOCATION ${MKLDNN_LIB_DIR}/libmkldnn.so) + add_dependencies(mkldnn MKLDNN_Build) +endfunction(Download_MKLDNN) diff --git a/cmake/Misc.cmake b/cmake/Misc.cmake index 9dd2609b36a..090f92da565 100644 --- a/cmake/Misc.cmake +++ b/cmake/Misc.cmake @@ -11,6 +11,10 @@ if("${CMAKE_BUILD_TYPE}" STREQUAL "") set(CMAKE_BUILD_TYPE Release) endif() +if("${DETERMINISTIC}" STREQUAL "ON") + add_definitions(-DDETERMINISTIC) +endif() + if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang") set(CMAKE_COMPILER_IS_CLANGXX TRUE) endif() diff --git a/cmake/Modules/FindAtlas.cmake b/cmake/Modules/FindAtlas.cmake index 6e1564351c7..9c665a47bd5 100644 --- a/cmake/Modules/FindAtlas.cmake +++ b/cmake/Modules/FindAtlas.cmake @@ -26,9 +26,9 @@ set(Atlas_LIB_SEARCH_PATHS find_path(Atlas_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS}) find_path(Atlas_CLAPACK_INCLUDE_DIR NAMES clapack.h PATHS ${Atlas_INCLUDE_SEARCH_PATHS}) -find_library(Atlas_CBLAS_LIBRARY NAMES ptcblas_r ptcblas cblas_r cblas PATHS ${Atlas_LIB_SEARCH_PATHS}) -find_library(Atlas_BLAS_LIBRARY NAMES atlas_r atlas PATHS ${Atlas_LIB_SEARCH_PATHS}) -find_library(Atlas_LAPACK_LIBRARY NAMES alapack_r alapack lapack_atlas PATHS ${Atlas_LIB_SEARCH_PATHS}) +find_library(Atlas_CBLAS_LIBRARY NAMES ptcblas_r ptcblas cblas_r cblas PATHS ${Atlas_LIB_SEARCH_PATHS}) +find_library(Atlas_BLAS_LIBRARY NAMES atlas_r atlas PATHS ${Atlas_LIB_SEARCH_PATHS}) +find_library(Atlas_LAPACK_LIBRARY NAMES lapack alapack_r alapack lapack_atlas PATHS ${Atlas_LIB_SEARCH_PATHS}) set(LOOKED_FOR Atlas_CBLAS_INCLUDE_DIR diff --git a/cmake/Modules/FindMKL.cmake 
b/cmake/Modules/FindMKL.cmake deleted file mode 100644 index 5ab93b2d6b6..00000000000 --- a/cmake/Modules/FindMKL.cmake +++ /dev/null @@ -1,110 +0,0 @@ -# Find the MKL libraries -# -# Options: -# -# MKL_USE_SINGLE_DYNAMIC_LIBRARY : use single dynamic library interface -# MKL_USE_STATIC_LIBS : use static libraries -# MKL_MULTI_THREADED : use multi-threading -# -# This module defines the following variables: -# -# MKL_FOUND : True mkl is found -# MKL_INCLUDE_DIR : unclude directory -# MKL_LIBRARIES : the libraries to link against. - - -# ---[ Options -caffe_option(MKL_USE_SINGLE_DYNAMIC_LIBRARY "Use single dynamic library interface" ON) -caffe_option(MKL_USE_STATIC_LIBS "Use static libraries" OFF IF NOT MKL_USE_SINGLE_DYNAMIC_LIBRARY) -caffe_option(MKL_MULTI_THREADED "Use multi-threading" ON IF NOT MKL_USE_SINGLE_DYNAMIC_LIBRARY) - -# ---[ Root folders -set(INTEL_ROOT "/opt/intel" CACHE PATH "Folder contains intel libs") -find_path(MKL_ROOT include/mkl.h PATHS $ENV{MKLROOT} ${INTEL_ROOT}/mkl - DOC "Folder contains MKL") - -# ---[ Find include dir -find_path(MKL_INCLUDE_DIR mkl.h PATHS ${MKL_ROOT} PATH_SUFFIXES include) -set(__looked_for MKL_INCLUDE_DIR) - -# ---[ Find libraries -if(CMAKE_SIZEOF_VOID_P EQUAL 4) - set(__path_suffixes lib lib/ia32) -else() - set(__path_suffixes lib lib/intel64) -endif() - -set(__mkl_libs "") -if(MKL_USE_SINGLE_DYNAMIC_LIBRARY) - list(APPEND __mkl_libs rt) -else() - if(CMAKE_SIZEOF_VOID_P EQUAL 4) - if(WIN32) - list(APPEND __mkl_libs intel_c) - else() - list(APPEND __mkl_libs intel gf) - endif() - else() - list(APPEND __mkl_libs intel_lp64 gf_lp64) - endif() - - if(MKL_MULTI_THREADED) - list(APPEND __mkl_libs intel_thread) - else() - list(APPEND __mkl_libs sequential) - endif() - - list(APPEND __mkl_libs core cdft_core) -endif() - - -foreach (__lib ${__mkl_libs}) - set(__mkl_lib "mkl_${__lib}") - string(TOUPPER ${__mkl_lib} __mkl_lib_upper) - - if(MKL_USE_STATIC_LIBS) - set(__mkl_lib "lib${__mkl_lib}.a") - endif() - - 
find_library(${__mkl_lib_upper}_LIBRARY - NAMES ${__mkl_lib} - PATHS ${MKL_ROOT} "${MKL_INCLUDE_DIR}/.." - PATH_SUFFIXES ${__path_suffixes} - DOC "The path to Intel(R) MKL ${__mkl_lib} library") - mark_as_advanced(${__mkl_lib_upper}_LIBRARY) - - list(APPEND __looked_for ${__mkl_lib_upper}_LIBRARY) - list(APPEND MKL_LIBRARIES ${${__mkl_lib_upper}_LIBRARY}) -endforeach() - - -if(NOT MKL_USE_SINGLE_DYNAMIC_LIBRARY) - if (MKL_USE_STATIC_LIBS) - set(__iomp5_libs iomp5 libiomp5mt.lib) - else() - set(__iomp5_libs iomp5 libiomp5md.lib) - endif() - - if(WIN32) - find_path(INTEL_INCLUDE_DIR omp.h PATHS ${INTEL_ROOT} PATH_SUFFIXES include) - list(APPEND __looked_for INTEL_INCLUDE_DIR) - endif() - - find_library(MKL_RTL_LIBRARY ${__iomp5_libs} - PATHS ${INTEL_RTL_ROOT} ${INTEL_ROOT}/compiler ${MKL_ROOT}/.. ${MKL_ROOT}/../compiler - PATH_SUFFIXES ${__path_suffixes} - DOC "Path to Path to OpenMP runtime library") - - list(APPEND __looked_for MKL_RTL_LIBRARY) - list(APPEND MKL_LIBRARIES ${MKL_RTL_LIBRARY}) -endif() - - -include(FindPackageHandleStandardArgs) -find_package_handle_standard_args(MKL DEFAULT_MSG ${__looked_for}) - -if(MKL_FOUND) - message(STATUS "Found MKL (include: ${MKL_INCLUDE_DIR}, lib: ${MKL_LIBRARIES}") -endif() - -caffe_clear_vars(__looked_for __mkl_libs __path_suffixes __lib_suffix __iomp5_libs) diff --git a/cmake/Summary.cmake b/cmake/Summary.cmake index ba025cf81e0..ec25efac285 100644 --- a/cmake/Summary.cmake +++ b/cmake/Summary.cmake @@ -101,26 +101,33 @@ function(caffe_print_configuration_summary) caffe_status("") caffe_status("******************* Caffe Configuration Summary *******************") caffe_status("General:") - caffe_status(" Version : ${CAFFE_TARGET_VERSION}") - caffe_status(" Git : ${Caffe_GIT_VERSION}") - caffe_status(" System : ${CMAKE_SYSTEM_NAME}") - caffe_status(" C++ compiler : ${CMAKE_CXX_COMPILER}") - caffe_status(" Release CXX flags : ${__flags_rel}") - caffe_status(" Debug CXX flags : ${__flags_deb}") - caffe_status(" Build type : 
${CMAKE_BUILD_TYPE}") + caffe_status(" Version : ${CAFFE_TARGET_VERSION}") + caffe_status(" Git : ${Caffe_GIT_VERSION}") + caffe_status(" System : ${CMAKE_SYSTEM_NAME}") + caffe_status(" C++ compiler : ${CMAKE_CXX_COMPILER}") + caffe_status(" Release CXX flags : ${__flags_rel}") + caffe_status(" Debug CXX flags : ${__flags_deb}") + caffe_status(" Build type : ${CMAKE_BUILD_TYPE}") caffe_status("") - caffe_status(" BUILD_SHARED_LIBS : ${BUILD_SHARED_LIBS}") - caffe_status(" BUILD_python : ${BUILD_python}") - caffe_status(" BUILD_matlab : ${BUILD_matlab}") - caffe_status(" BUILD_docs : ${BUILD_docs}") - caffe_status(" CPU_ONLY : ${CPU_ONLY}") - caffe_status(" USE_OPENCV : ${USE_OPENCV}") - caffe_status(" USE_LEVELDB : ${USE_LEVELDB}") - caffe_status(" USE_LMDB : ${USE_LMDB}") - caffe_status(" ALLOW_LMDB_NOLOCK : ${ALLOW_LMDB_NOLOCK}") + caffe_status(" BUILD_SHARED_LIBS : ${BUILD_SHARED_LIBS}") + caffe_status(" BUILD_python : ${BUILD_python}") + caffe_status(" BUILD_matlab : ${BUILD_matlab}") + caffe_status(" BUILD_docs : ${BUILD_docs}") + caffe_status(" CPU_ONLY : ${CPU_ONLY}") + caffe_status(" USE_OPENMP : ${USE_OPENMP}") + caffe_status(" USE_OPENCV : ${USE_OPENCV}") + caffe_status(" USE_LEVELDB : ${USE_LEVELDB}") + caffe_status(" USE_LMDB : ${USE_LMDB}") + caffe_status(" ALLOW_LMDB_NOLOCK : ${ALLOW_LMDB_NOLOCK}") + caffe_status(" USE_SYSTEMTAP : ${USE_SYSTEMTAP}") + caffe_status(" PERFORMANCE_MONITORING : ${PERFORMANCE_MONITORING}") caffe_status("") caffe_status("Dependencies:") caffe_status(" BLAS : " APPLE THEN "Yes (vecLib)" ELSE "Yes (${BLAS})") + if(BLAS STREQUAL "MKL" OR BLAS STREQUAL "mkl") + caffe_status(" MKL2017_SUPPORTED : " MKL2017_SUPPORTED AND USE_MKL2017_AS_DEFAULT_ENGINE THEN "ON, is a default engine" ELSE " ${MKL2017_SUPPORTED}") + endif() + caffe_status(" MKLDNN_SUPPORTED : " MKLDNN_SUPPORTED AND USE_MKLDNN_AS_DEFAULT_ENGINE THEN "ON, is a default engine" ELSE " ${MKLDNN_SUPPORTED}") caffe_status(" Boost : Yes (ver. 
${Boost_MAJOR_VERSION}.${Boost_MINOR_VERSION})") caffe_status(" glog : Yes") caffe_status(" gflags : Yes") diff --git a/cmake/Targets.cmake b/cmake/Targets.cmake index a796d00548f..1d2df2690c2 100644 --- a/cmake/Targets.cmake +++ b/cmake/Targets.cmake @@ -52,15 +52,16 @@ endfunction() # caffe_pickup_caffe_sources() function(caffe_pickup_caffe_sources root) # put all files in source groups (visible as subfolder in many IDEs) - caffe_source_group("Include" GLOB "${root}/include/caffe/*.h*") - caffe_source_group("Include\\Util" GLOB "${root}/include/caffe/util/*.h*") - caffe_source_group("Include" GLOB "${PROJECT_BINARY_DIR}/caffe_config.h*") - caffe_source_group("Source" GLOB "${root}/src/caffe/*.cpp") - caffe_source_group("Source\\Util" GLOB "${root}/src/caffe/util/*.cpp") - caffe_source_group("Source\\Layers" GLOB "${root}/src/caffe/layers/*.cpp") - caffe_source_group("Source\\Cuda" GLOB "${root}/src/caffe/layers/*.cu") - caffe_source_group("Source\\Cuda" GLOB "${root}/src/caffe/util/*.cu") - caffe_source_group("Source\\Proto" GLOB "${root}/src/caffe/proto/*.proto") + caffe_source_group("Include" GLOB "${root}/include/caffe/*.h*") + caffe_source_group("Include\\Util" GLOB "${root}/include/caffe/util/*.h*") + caffe_source_group("Include\\Layers" GLOB "${root}/include/caffe/layers/*.h*") + caffe_source_group("Include" GLOB "${PROJECT_BINARY_DIR}/caffe_config.h*") + caffe_source_group("Source" GLOB "${root}/src/caffe/*.cpp") + caffe_source_group("Source\\Util" GLOB "${root}/src/caffe/util/*.cpp") + caffe_source_group("Source\\Layers" GLOB "${root}/src/caffe/layers/*.cpp") + caffe_source_group("Source\\Cuda" GLOB "${root}/src/caffe/layers/*.cu") + caffe_source_group("Source\\Cuda" GLOB "${root}/src/caffe/util/*.cu") + caffe_source_group("Source\\Proto" GLOB "${root}/src/caffe/proto/*.proto") # source groups for test target caffe_source_group("Include" GLOB "${root}/include/caffe/test/test_*.h*") diff --git a/data/VOC0712/coco_voc_map.txt b/data/VOC0712/coco_voc_map.txt 
new file mode 100644 index 00000000000..7ff84d19b61 --- /dev/null +++ b/data/VOC0712/coco_voc_map.txt @@ -0,0 +1,21 @@ +0,0,background +5,1,aeroplane +2,2,bicycle +15,3,bird +9,4,boat +40,5,bottle +6,6,bus +3,7,car +16,8,cat +57,9,chair +20,10,cow +61,11,diningtable +17,12,dog +18,13,horse +4,14,motorbike +1,15,person +59,16,pottedplant +19,17,sheep +58,18,sofa +7,19,train +63,20,tvmonitor diff --git a/data/VOC0712/create_data.sh b/data/VOC0712/create_data.sh new file mode 100755 index 00000000000..41037e8972a --- /dev/null +++ b/data/VOC0712/create_data.sh @@ -0,0 +1,25 @@ +cur_dir=$(cd $( dirname ${BASH_SOURCE[0]} ) && pwd ) +root_dir=$cur_dir/../.. + +cd $root_dir + +redo=1 +data_root_dir="$DATAPATH/data/VOCdevkit" +dataset_name="VOC0712" +mapfile="$root_dir/data/$dataset_name/labelmap_voc.prototxt" +anno_type="detection" +db="lmdb" +min_dim=0 +max_dim=0 +width=0 +height=0 + +extra_cmd="--encode-type=jpg --encoded" +if [ $redo ] +then + extra_cmd="$extra_cmd --redo" +fi +for subset in test trainval +do + python $root_dir/scripts/create_annoset.py --anno-type=$anno_type --label-map-file=$mapfile --min-dim=$min_dim --max-dim=$max_dim --resize-width=$width --resize-height=$height --check-label $extra_cmd $data_root_dir $root_dir/data/$dataset_name/$subset.txt $data_root_dir/$dataset_name/$db/$dataset_name"_"$subset"_"$db examples/$dataset_name +done diff --git a/data/VOC0712/create_list.sh b/data/VOC0712/create_list.sh new file mode 100755 index 00000000000..fce802e6397 --- /dev/null +++ b/data/VOC0712/create_list.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +root_dir="$DATAPATH/data/VOCdevkit" +sub_dir=ImageSets/Main +bash_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +for dataset in trainval test +do + dst_file=$bash_dir/$dataset.txt + if [ -f $dst_file ] + then + rm -f $dst_file + fi + for name in VOC2007 VOC2012 + do + if [[ $dataset == "test" && $name == "VOC2012" ]] + then + continue + fi + echo "Create list for $name $dataset..." 
+    dataset_file=$root_dir/$name/$sub_dir/$dataset.txt
+
+    img_file=$bash_dir/$dataset"_img.txt"
+    cp $dataset_file $img_file
+    sed -i "s/^/$name\/JPEGImages\//g" $img_file
+    sed -i "s/$/.jpg/g" $img_file
+
+    label_file=$bash_dir/$dataset"_label.txt"
+    cp $dataset_file $label_file
+    sed -i "s/^/$name\/Annotations\//g" $label_file
+    sed -i "s/$/.xml/g" $label_file
+
+    paste -d' ' $img_file $label_file >> $dst_file
+
+    rm -f $label_file
+    rm -f $img_file
+  done
+
+  # Generate image name and size information.
+  if [ $dataset == "test" ]
+  then
+    $bash_dir/../../build/tools/get_image_size $root_dir $dst_file $bash_dir/$dataset"_name_size.txt"
+  fi
+
+  # Shuffle trainval file.
+  if [ $dataset == "trainval" ]
+  then
+    rand_file=$dst_file.random
+    cat $dst_file | perl -MList::Util=shuffle -e 'print shuffle(<STDIN>);' > $rand_file
+    mv $rand_file $dst_file
+  fi
+done
diff --git a/data/VOC0712/labelmap_voc.prototxt b/data/VOC0712/labelmap_voc.prototxt
new file mode 100644
index 00000000000..b5c177b7245
--- /dev/null
+++ b/data/VOC0712/labelmap_voc.prototxt
@@ -0,0 +1,105 @@
+item {
+  name: "none_of_the_above"
+  label: 0
+  display_name: "background"
+}
+item {
+  name: "aeroplane"
+  label: 1
+  display_name: "aeroplane"
+}
+item {
+  name: "bicycle"
+  label: 2
+  display_name: "bicycle"
+}
+item {
+  name: "bird"
+  label: 3
+  display_name: "bird"
+}
+item {
+  name: "boat"
+  label: 4
+  display_name: "boat"
+}
+item {
+  name: "bottle"
+  label: 5
+  display_name: "bottle"
+}
+item {
+  name: "bus"
+  label: 6
+  display_name: "bus"
+}
+item {
+  name: "car"
+  label: 7
+  display_name: "car"
+}
+item {
+  name: "cat"
+  label: 8
+  display_name: "cat"
+}
+item {
+  name: "chair"
+  label: 9
+  display_name: "chair"
+}
+item {
+  name: "cow"
+  label: 10
+  display_name: "cow"
+}
+item {
+  name: "diningtable"
+  label: 11
+  display_name: "diningtable"
+}
+item {
+  name: "dog"
+  label: 12
+  display_name: "dog"
+}
+item {
+  name: "horse"
+  label: 13
+  display_name: "horse"
+}
+item {
+  name: "motorbike"
+  label: 
14 + display_name: "motorbike" +} +item { + name: "person" + label: 15 + display_name: "person" +} +item { + name: "pottedplant" + label: 16 + display_name: "pottedplant" +} +item { + name: "sheep" + label: 17 + display_name: "sheep" +} +item { + name: "sofa" + label: 18 + display_name: "sofa" +} +item { + name: "train" + label: 19 + display_name: "train" +} +item { + name: "tvmonitor" + label: 20 + display_name: "tvmonitor" +} diff --git a/data/coco/README.md b/data/coco/README.md new file mode 100644 index 00000000000..ae8c3c5211a --- /dev/null +++ b/data/coco/README.md @@ -0,0 +1,46 @@ +### Preparation +1. Download Images and Annotations from [MSCOCO](http://mscoco.org/dataset/#download). By default, we assume the data is stored in `$DATAPATH/data/coco` +make sure extracted image directories (train2014, val2014 test2015) are inside +'/images/' directory next to Annotations directory. +And download also: +http://www.cs.berkeley.edu/~rbg/faster-rcnn-data/instances_minival2014.json.zip +http://www.cs.berkeley.edu/~rbg/faster-rcnn-data/instances_valminusminival2014.json.zip +unzip instances_minival2014.json.zip +unzip instances_valminusminival2014.json.zip +mv instances_minival2014.json instances_valminusminival2014.json ./coco/annotations/ +2. Get the coco code. We will call the directory that you cloned coco into `$COCO_ROOT` + ```Shell + git clone https://github.com/weiliu89/coco.git + cd coco + git checkout dev + git apply $CAFFE_ROOT/data/coco/diff.patch + ``` + +3. Build the coco code. + ```Shell + cd PythonAPI + python setup.py build_ext --inplace + ``` + +4. Split the annotation to many files per image and get the image size info. + ```Shell + # Check scripts/batch_split_annotation.py and change settings accordingly. + python scripts/batch_split_annotation.py + # Create the minival2014_name_size.txt and test-dev2015_name_size.txt in $CAFFE_ROOT/data/coco + python scripts/batch_get_image_size.py + ``` + +5. Create the LMDB file. 
+ ```Shell + cd $CAFFE_ROOT + # Create the minival.txt, testdev.txt, test.txt, train.txt in data/coco/ + python data/coco/create_list.py + # You can modify the parameters in create_data.sh if needed. + # It will create lmdb files for minival, testdev, test, and train with encoded original image: + # - $DATAPATH/data/coco/lmdb/coco_minival_lmdb + # - $DATAPATH/data/coco/lmdb/coco_testdev_lmdb + # - $DATAPATH/data/coco/lmdb/coco_test_lmdb + # - $DATAPATH/data/coco/lmdb/coco_train_lmdb + # and make soft links at examples/coco/ + ./data/coco/create_data.sh + ``` diff --git a/data/coco/create_data.sh b/data/coco/create_data.sh new file mode 100755 index 00000000000..52185961dfb --- /dev/null +++ b/data/coco/create_data.sh @@ -0,0 +1,26 @@ +cur_dir=$(cd $( dirname ${BASH_SOURCE[0]} ) && pwd ) +root_dir=$cur_dir/../.. + +cd $root_dir + +redo=false +data_root_dir="$DATAPATH/data/coco" +dataset_name="coco" +mapfile="$root_dir/data/$dataset_name/labelmap_coco.prototxt" +anno_type="detection" +label_type="json" +db="lmdb" +min_dim=0 +max_dim=0 +width=0 +height=0 + +extra_cmd="--encode-type=jpg --encoded" +if $redo +then + extra_cmd="$extra_cmd --redo" +fi +for subset in minival testdev train test +do + python $root_dir/scripts/create_annoset.py --anno-type=$anno_type --label-type=$label_type --label-map-file=$mapfile --min-dim=$min_dim --max-dim=$max_dim --resize-width=$width --resize-height=$height --check-label $extra_cmd $data_root_dir $root_dir/data/$dataset_name/$subset.txt $data_root_dir/$db/$dataset_name"_"$subset"_"$db examples/$dataset_name 2>&1 | tee $root_dir/data/$dataset_name/$subset.log +done diff --git a/data/coco/create_list.py b/data/coco/create_list.py new file mode 100644 index 00000000000..f4fa23f2e33 --- /dev/null +++ b/data/coco/create_list.py @@ -0,0 +1,121 @@ +import argparse +import os +from random import shuffle +import shutil +import subprocess +import sys + +datapath = os.environ['DATAPATH'] +CURDIR = os.path.dirname(os.path.realpath(__file__)) + 
+# If true, re-create all list files.
+redo = True
+# The root directory which holds all information of the dataset.
+data_dir = "{}/data/coco".format(datapath)
+# The directory name which holds the image sets.
+imgset_dir = "ImageSets"
+# The directory which contains the images.
+img_dir = "images"
+img_ext = "jpg"
+# The directory which contains the annotations.
+anno_dir = "Annotations"
+anno_ext = "json"
+
+train_list_file = "{}/train.txt".format(CURDIR)
+minival_list_file = "{}/minival.txt".format(CURDIR)
+testdev_list_file = "{}/testdev.txt".format(CURDIR)
+test_list_file = "{}/test.txt".format(CURDIR)
+
+# Create training set.
+# We follow Ross Girshick's split.
+if redo or not os.path.exists(train_list_file):
+  datasets = ["train2014", "valminusminival2014"]
+  img_files = []
+  anno_files = []
+  for dataset in datasets:
+    imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset)
+    with open(imgset_file, "r") as f:
+      for line in f.readlines():
+        name = line.strip("\n")
+        subset = name.split("_")[1]
+        img_file = "{}/{}/{}.{}".format(img_dir, subset, name, img_ext)
+        assert os.path.exists("{}/{}".format(data_dir, img_file)), \
+            "{}/{} does not exist".format(data_dir, img_file)
+        anno_file = "{}/{}/{}.{}".format(anno_dir, subset, name, anno_ext)
+        assert os.path.exists("{}/{}".format(data_dir, anno_file)), \
+            "{}/{} does not exist".format(data_dir, anno_file)
+        img_files.append(img_file)
+        anno_files.append(anno_file)
+  # Shuffle the images. 
+ idx = [i for i in xrange(len(img_files))] + shuffle(idx) + with open(train_list_file, "w") as f: + for i in idx: + f.write("{} {}\n".format(img_files[i], anno_files[i])) + +if redo or not os.path.exists(minival_list_file): + datasets = ["minival2014"] + subset = "val2014" + img_files = [] + anno_files = [] + for dataset in datasets: + imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset) + with open(imgset_file, "r") as f: + for line in f.readlines(): + name = line.strip("\n") + img_file = "{}/{}/{}.{}".format(img_dir, subset, name, img_ext) + assert os.path.exists("{}/{}".format(data_dir, img_file)), \ + "{}/{} does not exist".format(data_dir, img_file) + anno_file = "{}/{}/{}.{}".format(anno_dir, subset, name, anno_ext) + assert os.path.exists("{}/{}".format(data_dir, anno_file)), \ + "{}/{} does not exist".format(data_dir, anno_file) + img_files.append(img_file) + anno_files.append(anno_file) + with open(minival_list_file, "w") as f: + for i in xrange(len(img_files)): + f.write("{} {}\n".format(img_files[i], anno_files[i])) + +if redo or not os.path.exists(testdev_list_file): + datasets = ["test-dev2015"] + subset = "test2015" + img_files = [] + anno_files = [] + for dataset in datasets: + imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset) + with open(imgset_file, "r") as f: + for line in f.readlines(): + name = line.strip("\n") + img_file = "{}/{}/{}.{}".format(img_dir, subset, name, img_ext) + assert os.path.exists("{}/{}".format(data_dir, img_file)), \ + "{}/{} does not exist".format(data_dir, img_file) + anno_file = "{}/{}/{}.{}".format(anno_dir, subset, name, anno_ext) + assert os.path.exists("{}/{}".format(data_dir, anno_file)), \ + "{}/{} does not exist".format(data_dir, anno_file) + img_files.append(img_file) + anno_files.append(anno_file) + with open(testdev_list_file, "w") as f: + for i in xrange(len(img_files)): + f.write("{} {}\n".format(img_files[i], anno_files[i])) + +if redo or not os.path.exists(test_list_file): + 
datasets = ["test2015"] + subset = "test2015" + img_files = [] + anno_files = [] + for dataset in datasets: + imgset_file = "{}/{}/{}.txt".format(data_dir, imgset_dir, dataset) + with open(imgset_file, "r") as f: + for line in f.readlines(): + name = line.strip("\n") + img_file = "{}/{}/{}.{}".format(img_dir, subset, name, img_ext) + assert os.path.exists("{}/{}".format(data_dir, img_file)), \ + "{}/{} does not exist".format(data_dir, img_file) + anno_file = "{}/{}/{}.{}".format(anno_dir, subset, name, anno_ext) + assert os.path.exists("{}/{}".format(data_dir, anno_file)), \ + "{}/{} does not exist".format(data_dir, anno_file) + img_files.append(img_file) + anno_files.append(anno_file) + with open(test_list_file, "w") as f: + for i in xrange(len(img_files)): + f.write("{} {}\n".format(img_files[i], anno_files[i])) + diff --git a/data/coco/diff.patch b/data/coco/diff.patch new file mode 100644 index 00000000000..75fe7d659df --- /dev/null +++ b/data/coco/diff.patch @@ -0,0 +1,276 @@ +diff --git a/PythonAPI/pycocoDemo.ipynb b/PythonAPI/pycocoDemo.ipynb +index 8bb9639..10bf57c 100644 +--- a/PythonAPI/pycocoDemo.ipynb ++++ b/PythonAPI/pycocoDemo.ipynb +@@ -4,7 +4,7 @@ + "cell_type": "code", + "execution_count": 3, + "metadata": { +- "collapsed": true ++ "collapsed": True + }, + "outputs": [], + "source": [ +@@ -21,7 +21,7 @@ + "cell_type": "code", + "execution_count": 4, + "metadata": { +- "collapsed": true ++ "collapsed": True + }, + "outputs": [], + "source": [ +@@ -34,7 +34,7 @@ + "cell_type": "code", + "execution_count": 5, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [ + { +@@ -57,8 +57,8 @@ + "cell_type": "code", + "execution_count": 6, + "metadata": { +- "collapsed": false, +- "scrolled": true ++ "collapsed": False, ++ "scrolled": True + }, + "outputs": [ + { +@@ -87,7 +87,7 @@ + "cell_type": "code", + "execution_count": 7, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [], + "source": [ +@@ -101,7 
+101,7 @@ + "cell_type": "code", + "execution_count": 8, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [ + { +@@ -129,7 +129,7 @@ + "cell_type": "code", + "execution_count": 9, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [ + { +@@ -155,7 +155,7 @@ + "cell_type": "code", + "execution_count": 10, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [ + { +@@ -179,7 +179,7 @@ + "cell_type": "code", + "execution_count": 11, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [ + { +@@ -206,7 +206,7 @@ + "cell_type": "code", + "execution_count": 12, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [ + { +@@ -230,7 +230,7 @@ + "cell_type": "code", + "execution_count": 13, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [ + { +diff --git a/PythonAPI/pycocoEvalDemo.ipynb b/PythonAPI/pycocoEvalDemo.ipynb +index 8b2ff08..1c5b95e 100644 +--- a/PythonAPI/pycocoEvalDemo.ipynb ++++ b/PythonAPI/pycocoEvalDemo.ipynb +@@ -4,7 +4,7 @@ + "cell_type": "code", + "execution_count": 1, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [], + "source": [ +@@ -22,7 +22,7 @@ + "cell_type": "code", + "execution_count": 2, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [ + { +@@ -44,7 +44,7 @@ + "cell_type": "code", + "execution_count": 3, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [ + { +@@ -70,7 +70,7 @@ + "cell_type": "code", + "execution_count": 4, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [ + { +@@ -95,7 +95,7 @@ + "cell_type": "code", + "execution_count": 5, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [], + "source": [ +@@ -108,7 +108,7 @@ + "cell_type": "code", + "execution_count": 6, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + 
"outputs": [ + { +diff --git a/PythonAPI/pycocoViewDemo.ipynb b/PythonAPI/pycocoViewDemo.ipynb +index 9acd13e..e894277 100644 +--- a/PythonAPI/pycocoViewDemo.ipynb ++++ b/PythonAPI/pycocoViewDemo.ipynb +@@ -4,7 +4,7 @@ + "cell_type": "code", + "execution_count": 1, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [], + "source": [ +@@ -23,7 +23,7 @@ + "cell_type": "code", + "execution_count": 2, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [ + { +@@ -44,7 +44,7 @@ + "cell_type": "code", + "execution_count": 3, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [ + { +@@ -71,7 +71,7 @@ + "cell_type": "code", + "execution_count": 4, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [], + "source": [ +@@ -83,7 +83,7 @@ + "cell_type": "code", + "execution_count": 5, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [ + { +@@ -118,7 +118,7 @@ + "cell_type": "code", + "execution_count": 6, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [], + "source": [ +@@ -134,7 +134,7 @@ + "cell_type": "code", + "execution_count": 7, + "metadata": { +- "collapsed": false ++ "collapsed": False + }, + "outputs": [ + { +diff --git a/PythonAPI/scripts/batch_get_image_size.py b/PythonAPI/scripts/batch_get_image_size.py +index bd08ad5..8244ad2 100644 +--- a/PythonAPI/scripts/batch_get_image_size.py ++++ b/PythonAPI/scripts/batch_get_image_size.py +@@ -2,16 +2,16 @@ import os + import subprocess + import sys + +-HOMEDIR = os.path.expanduser("~") ++# The caffe root. ++CAFFE_ROOT = os.environ['CAFFE_ROOT'] ++datapath = os.environ['DATAPATH'] + CURDIR = os.path.dirname(os.path.realpath(__file__)) + + ### Modify the address and parameters accordingly ### + # If true, redo the whole thing. + redo = True +-# The caffe root. 
+-CAFFE_ROOT = "{}/projects/caffe".format(HOMEDIR) + # The root directory which stores the coco images, annotations, etc. +-coco_data_dir = "{}/data/coco".format(HOMEDIR) ++coco_data_dir = "{}/data/coco".format(datapath) + # The sets that we want to get the size info. + anno_sets = ["image_info_test-dev2015", "instances_minival2014"] + # The directory which contains the full annotation files for each set. +diff --git a/PythonAPI/scripts/batch_split_annotation.py b/PythonAPI/scripts/batch_split_annotation.py +index 6081fd5..dec0083 100644 +--- a/PythonAPI/scripts/batch_split_annotation.py ++++ b/PythonAPI/scripts/batch_split_annotation.py +@@ -2,14 +2,14 @@ import os + import subprocess + import sys + +-HOMEDIR = os.path.expanduser("~") ++datapath = os.environ['DATAPATH'] + CURDIR = os.path.dirname(os.path.realpath(__file__)) + + ### Modify the address and parameters accordingly ### + # If true, redo the whole thing. + redo = True + # The root directory which stores the coco images, annotations, etc. +-coco_data_dir = "{}/data/coco".format(HOMEDIR) ++coco_data_dir = "{}/data/coco".format(datapath) + # The sets that we want to split. These can be downloaded at: http://mscoco.org + # Unzip all the files after download. 
+ anno_sets = ["image_info_test2014", "image_info_test2015", "image_info_test-dev2015", diff --git a/data/coco/labelmap_coco.prototxt b/data/coco/labelmap_coco.prototxt new file mode 100644 index 00000000000..82252d2e9dc --- /dev/null +++ b/data/coco/labelmap_coco.prototxt @@ -0,0 +1,405 @@ +item { + name: "none_of_the_above" + label: 0 + display_name: "background" +} +item { + name: "1" + label: 1 + display_name: "person" +} +item { + name: "2" + label: 2 + display_name: "bicycle" +} +item { + name: "3" + label: 3 + display_name: "car" +} +item { + name: "4" + label: 4 + display_name: "motorcycle" +} +item { + name: "5" + label: 5 + display_name: "airplane" +} +item { + name: "6" + label: 6 + display_name: "bus" +} +item { + name: "7" + label: 7 + display_name: "train" +} +item { + name: "8" + label: 8 + display_name: "truck" +} +item { + name: "9" + label: 9 + display_name: "boat" +} +item { + name: "10" + label: 10 + display_name: "traffic light" +} +item { + name: "11" + label: 11 + display_name: "fire hydrant" +} +item { + name: "13" + label: 12 + display_name: "stop sign" +} +item { + name: "14" + label: 13 + display_name: "parking meter" +} +item { + name: "15" + label: 14 + display_name: "bench" +} +item { + name: "16" + label: 15 + display_name: "bird" +} +item { + name: "17" + label: 16 + display_name: "cat" +} +item { + name: "18" + label: 17 + display_name: "dog" +} +item { + name: "19" + label: 18 + display_name: "horse" +} +item { + name: "20" + label: 19 + display_name: "sheep" +} +item { + name: "21" + label: 20 + display_name: "cow" +} +item { + name: "22" + label: 21 + display_name: "elephant" +} +item { + name: "23" + label: 22 + display_name: "bear" +} +item { + name: "24" + label: 23 + display_name: "zebra" +} +item { + name: "25" + label: 24 + display_name: "giraffe" +} +item { + name: "27" + label: 25 + display_name: "backpack" +} +item { + name: "28" + label: 26 + display_name: "umbrella" +} +item { + name: "31" + label: 27 + display_name: 
"handbag" +} +item { + name: "32" + label: 28 + display_name: "tie" +} +item { + name: "33" + label: 29 + display_name: "suitcase" +} +item { + name: "34" + label: 30 + display_name: "frisbee" +} +item { + name: "35" + label: 31 + display_name: "skis" +} +item { + name: "36" + label: 32 + display_name: "snowboard" +} +item { + name: "37" + label: 33 + display_name: "sports ball" +} +item { + name: "38" + label: 34 + display_name: "kite" +} +item { + name: "39" + label: 35 + display_name: "baseball bat" +} +item { + name: "40" + label: 36 + display_name: "baseball glove" +} +item { + name: "41" + label: 37 + display_name: "skateboard" +} +item { + name: "42" + label: 38 + display_name: "surfboard" +} +item { + name: "43" + label: 39 + display_name: "tennis racket" +} +item { + name: "44" + label: 40 + display_name: "bottle" +} +item { + name: "46" + label: 41 + display_name: "wine glass" +} +item { + name: "47" + label: 42 + display_name: "cup" +} +item { + name: "48" + label: 43 + display_name: "fork" +} +item { + name: "49" + label: 44 + display_name: "knife" +} +item { + name: "50" + label: 45 + display_name: "spoon" +} +item { + name: "51" + label: 46 + display_name: "bowl" +} +item { + name: "52" + label: 47 + display_name: "banana" +} +item { + name: "53" + label: 48 + display_name: "apple" +} +item { + name: "54" + label: 49 + display_name: "sandwich" +} +item { + name: "55" + label: 50 + display_name: "orange" +} +item { + name: "56" + label: 51 + display_name: "broccoli" +} +item { + name: "57" + label: 52 + display_name: "carrot" +} +item { + name: "58" + label: 53 + display_name: "hot dog" +} +item { + name: "59" + label: 54 + display_name: "pizza" +} +item { + name: "60" + label: 55 + display_name: "donut" +} +item { + name: "61" + label: 56 + display_name: "cake" +} +item { + name: "62" + label: 57 + display_name: "chair" +} +item { + name: "63" + label: 58 + display_name: "couch" +} +item { + name: "64" + label: 59 + display_name: "potted plant" +} 
+item { + name: "65" + label: 60 + display_name: "bed" +} +item { + name: "67" + label: 61 + display_name: "dining table" +} +item { + name: "70" + label: 62 + display_name: "toilet" +} +item { + name: "72" + label: 63 + display_name: "tv" +} +item { + name: "73" + label: 64 + display_name: "laptop" +} +item { + name: "74" + label: 65 + display_name: "mouse" +} +item { + name: "75" + label: 66 + display_name: "remote" +} +item { + name: "76" + label: 67 + display_name: "keyboard" +} +item { + name: "77" + label: 68 + display_name: "cell phone" +} +item { + name: "78" + label: 69 + display_name: "microwave" +} +item { + name: "79" + label: 70 + display_name: "oven" +} +item { + name: "80" + label: 71 + display_name: "toaster" +} +item { + name: "81" + label: 72 + display_name: "sink" +} +item { + name: "82" + label: 73 + display_name: "refrigerator" +} +item { + name: "84" + label: 74 + display_name: "book" +} +item { + name: "85" + label: 75 + display_name: "clock" +} +item { + name: "86" + label: 76 + display_name: "vase" +} +item { + name: "87" + label: 77 + display_name: "scissors" +} +item { + name: "88" + label: 78 + display_name: "teddy bear" +} +item { + name: "89" + label: 79 + display_name: "hair drier" +} +item { + name: "90" + label: 80 + display_name: "toothbrush" +} diff --git a/data/coco/labels.txt b/data/coco/labels.txt new file mode 100644 index 00000000000..146dd8daae0 --- /dev/null +++ b/data/coco/labels.txt @@ -0,0 +1,80 @@ +1,1,person +2,2,bicycle +3,3,car +4,4,motorcycle +5,5,airplane +6,6,bus +7,7,train +8,8,truck +9,9,boat +10,10,traffic light +11,11,fire hydrant +13,12,stop sign +14,13,parking meter +15,14,bench +16,15,bird +17,16,cat +18,17,dog +19,18,horse +20,19,sheep +21,20,cow +22,21,elephant +23,22,bear +24,23,zebra +25,24,giraffe +27,25,backpack +28,26,umbrella +31,27,handbag +32,28,tie +33,29,suitcase +34,30,frisbee +35,31,skis +36,32,snowboard +37,33,sports ball +38,34,kite +39,35,baseball bat +40,36,baseball glove +41,37,skateboard 
+42,38,surfboard +43,39,tennis racket +44,40,bottle +46,41,wine glass +47,42,cup +48,43,fork +49,44,knife +50,45,spoon +51,46,bowl +52,47,banana +53,48,apple +54,49,sandwich +55,50,orange +56,51,broccoli +57,52,carrot +58,53,hot dog +59,54,pizza +60,55,donut +61,56,cake +62,57,chair +63,58,couch +64,59,potted plant +65,60,bed +67,61,dining table +70,62,toilet +72,63,tv +73,64,laptop +74,65,mouse +75,66,remote +76,67,keyboard +77,68,cell phone +78,69,microwave +79,70,oven +80,71,toaster +81,72,sink +82,73,refrigerator +84,74,book +85,75,clock +86,76,vase +87,77,scissors +88,78,teddy bear +89,79,hair drier +90,80,toothbrush diff --git a/docker/Makefile b/docker/Makefile index 3a6575b0c43..722718ada8e 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -1,26 +1,39 @@ # A makefile to build the docker images for caffe. -# Two caffe images will be built: -# caffe:cpu --> A CPU-only build of caffe. +# Three caffe images can be built: +# caffe:cpu-centos --> A CPU-only build of caffe in CentOS-based image. +# caffe:cpu-ubuntu --> A CPU-only build of caffe in Ubuntu-based image. # caffe:gpu --> A GPU-enabled build using the latest CUDA and CUDNN versions. 
DOCKER ?= docker +ifneq ($(http_proxy),) +DOCKER_BUILD_ARG += --build-arg http_proxy=$(http_proxy) +endif +ifneq ($(https_proxy),) +DOCKER_BUILD_ARG += --build-arg https_proxy=$(https_proxy) +endif + + all: docker_files standalone .PHONY: standalone devel +cpu_standalone: cpu-centos cpu-ubuntu standalone: cpu_standalone gpu_standalone +cpu-centos: standalone/cpu-centos/Dockerfile + $(DOCKER) build $(DOCKER_BUILD_ARG) -t caffe:cpu-centos standalone/cpu-centos -cpu_standalone: standalone/cpu/Dockerfile - $(DOCKER) build -t caffe:cpu standalone/cpu +cpu-ubuntu: standalone/cpu-ubuntu/Dockerfile + $(DOCKER) build $(DOCKER_BUILD_ARG) -t caffe:cpu-ubuntu standalone/cpu-ubuntu gpu_standalone: standalone/gpu/Dockerfile - $(DOCKER) build -t caffe:gpu standalone/gpu + $(DOCKER) build $(DOCKER_BUILD_ARG) -t caffe:gpu standalone/gpu + docker_files: standalone_files -standalone_files: standalone/cpu/Dockerfile standalone/gpu/Dockerfile +standalone_files: standalone/cpu-centos/Dockerfile standalone/cpu-ubuntu/Dockerfile standalone/gpu/Dockerfile FROM_GPU = "nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04" FROM_CPU = "ubuntu:14.04" diff --git a/docker/standalone/cpu-centos/Dockerfile b/docker/standalone/cpu-centos/Dockerfile new file mode 100644 index 00000000000..b372316e4c2 --- /dev/null +++ b/docker/standalone/cpu-centos/Dockerfile @@ -0,0 +1,53 @@ +FROM centos:7 +MAINTAINER caffe-maint@googlegroups.com + +RUN rpm -iUvh http://download.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-8.noarch.rpm + +RUN yum install -y \ + redhat-rpm-config \ + tar \ + findutils \ + make \ + gcc-c++ \ + cmake \ + git \ + wget \ + atlas-devel \ + boost-devel \ + gflags-devel \ + glog-devel \ + hdf5-devel \ + leveldb-devel \ + lmdb-devel \ + opencv-devel \ + protobuf-devel \ + snappy-devel \ + protobuf-compiler \ + freetype-devel \ + libpng-devel \ + python-devel \ + python-numpy \ + python-pip \ + python-scipy \ + gcc-gfortran \ + libjpeg-turbo-devel + +RUN yum clean all +ENV CAFFE_ROOT=/opt/caffe 
+WORKDIR $CAFFE_ROOT + +# FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this. +ENV CLONE_TAG=master + +RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/intel/caffe.git . && \ + for req in $(cat python/requirements.txt) pydot; do pip --no-cache-dir install $req; done && \ + mkdir build && cd build && \ + cmake -DCPU_ONLY=1 -DCMAKE_BUILD_TYPE=Release .. && \ + make all -j"$(nproc)" + +ENV PYCAFFE_ROOT $CAFFE_ROOT/python +ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH +ENV PATH $CAFFE_ROOT/build/tools:$PYCAFFE_ROOT:$PATH +RUN echo "$CAFFE_ROOT/build/lib" >> /etc/ld.so.conf.d/caffe.conf && ldconfig + +WORKDIR /workspace diff --git a/docker/standalone/cpu/Dockerfile b/docker/standalone/cpu-ubuntu/Dockerfile similarity index 86% rename from docker/standalone/cpu/Dockerfile rename to docker/standalone/cpu-ubuntu/Dockerfile index 4fef25aa6a1..388654d7250 100644 --- a/docker/standalone/cpu/Dockerfile +++ b/docker/standalone/cpu-ubuntu/Dockerfile @@ -6,7 +6,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ cmake \ git \ wget \ - libatlas-base-dev \ libboost-all-dev \ libgflags-dev \ libgoogle-glog-dev \ @@ -29,11 +28,11 @@ WORKDIR $CAFFE_ROOT # FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this. ENV CLONE_TAG=master -RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/BVLC/caffe.git . && \ +RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/intel/caffe.git . && \ for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \ mkdir build && cd build && \ - cmake -DCPU_ONLY=1 .. && \ - make -j"$(nproc)" + cmake -DCPU_ONLY=1 -DCMAKE_BUILD_TYPE=Release .. 
&& \ + make all -j"$(nproc)" ENV PYCAFFE_ROOT $CAFFE_ROOT/python ENV PYTHONPATH $PYCAFFE_ROOT:$PYTHONPATH diff --git a/docker/standalone/gpu/Dockerfile b/docker/standalone/gpu/Dockerfile index daf6a7223ff..102923637a6 100644 --- a/docker/standalone/gpu/Dockerfile +++ b/docker/standalone/gpu/Dockerfile @@ -29,7 +29,7 @@ WORKDIR $CAFFE_ROOT # FIXME: clone a specific git tag and use ARG instead of ENV once DockerHub supports this. ENV CLONE_TAG=master -RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/BVLC/caffe.git . && \ +RUN git clone -b ${CLONE_TAG} --depth 1 https://github.com/intel/caffe.git . && \ for req in $(cat python/requirements.txt) pydot; do pip install $req; done && \ mkdir build && cd build && \ cmake -DUSE_CUDNN=1 .. && \ diff --git a/docs/installation.md b/docs/installation_old.md similarity index 57% rename from docs/installation.md rename to docs/installation_old.md index 4aac7c42d27..5c16607f41f 100644 --- a/docs/installation.md +++ b/docs/installation_old.md @@ -1,3 +1,4 @@ + --- title: Installation --- @@ -5,19 +6,8 @@ title: Installation # Installation Prior to installing, have a glance through this guide and take note of the details for your platform. -We install and run Caffe on Ubuntu 16.04–12.04, OS X 10.11–10.8, and through Docker and AWS. -The official Makefile and `Makefile.config` build are complemented by a [community CMake build](#cmake-build). - -**Step-by-step Instructions**: - -- [Docker setup](https://github.com/BVLC/caffe/tree/master/docker) *out-of-the-box brewing* -- [Ubuntu installation](install_apt.html) *the standard platform* -- [OS X installation](install_osx.html) -- [RHEL / CentOS / Fedora installation](install_yum.html) -- [Windows](https://github.com/BVLC/caffe/tree/windows) *see the Windows branch led by Microsoft* -- [OpenCL](https://github.com/BVLC/caffe/tree/opencl) *see the OpenCL branch led by Fabian Tschopp* - -**Overview**: +We install and run Caffe on Ubuntu 14.04, CentOS (7.0, 7.1, 7.2), and AWS. 
+The official Makefile and `Makefile.config` build are complemented by an automatic CMake build from the community. - [Prerequisites](#prerequisites) - [Compilation](#compilation) @@ -27,45 +17,58 @@ When updating Caffe, it's best to `make clean` before re-compiling. ## Prerequisites -Caffe has several dependencies: +Before building Caffe make sure that the following dependencies are available on target system: -* [CUDA](https://developer.nvidia.com/cuda-zone) is required for GPU mode. - * library version 7+ and the latest driver version are recommended, but 6.* is fine too - * 5.5, and 5.0 are compatible but considered legacy -* [BLAS](http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) via ATLAS, MKL, or OpenBLAS. +* [BLAS library](http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms) + * [Intel® Math Kernel Library (Intel ® MKL)](https://software.intel.com/en-us/intel-mkl) + * [Open BLAS](http://www.openblas.net) + * [ATLAS](http://math-atlas.sourceforge.net) * [Boost](http://www.boost.org/) >= 1.55 * `protobuf`, `glog`, `gflags`, `hdf5` -Optional dependencies: +For additional capabilities and acceleration the following dependencies might be necessary: * [OpenCV](http://opencv.org/) >= 2.4 including 3.0 * IO libraries: `lmdb`, `leveldb` (note: leveldb requires `snappy`) -* cuDNN for GPU acceleration (v5) +* For GPU mode + * [CUDA](https://developer.nvidia.com/cuda-zone) + * cuDNN -Pycaffe and Matcaffe interfaces have their own natural needs. +* For Pycaffe + * `Python 2.7` or `Python 3.3+` + * `numpy (>= 1.7)` + * boost-provided `boost.python` -* For Python Caffe: `Python 2.7` or `Python 3.3+`, `numpy (>= 1.7)`, boost-provided `boost.python` -* For MATLAB Caffe: MATLAB with the `mex` compiler. +* For Matcaffe + * MATLAB with the `mex` compiler. -**cuDNN Caffe**: for fastest operation Caffe is accelerated by drop-in integration of [NVIDIA cuDNN](https://developer.nvidia.com/cudnn). 
To speed up your Caffe models, install cuDNN then uncomment the `USE_CUDNN := 1` flag in `Makefile.config` when installing Caffe. Acceleration is automatic. The current version is cuDNN v5; older versions are supported in older Caffe. +### Building for Intel® Architecture -**CPU-only Caffe**: for cold-brewed CPU-only Caffe uncomment the `CPU_ONLY := 1` flag in `Makefile.config` to configure and build Caffe without CUDA. This is helpful for cloud or cluster deployment. +This version of Caffe is optimized for Intel® Xeon processors and Intel® Xeon Phi™ processors. To achieve the best performance results on Intel Architecture we recommend building Caffe with [Intel MKL](http://software.intel.com/en-us/intel-mkl) and enabling OpenMP support. If you don't have Intel MKL yet you can download it [free of charge](https://software.intel.com/en-us/articles/free_mkl). The following configuration changes are recommended: -### CUDA and BLAS +* Set `BLAS := mkl` in `Makefile.config` +* If you don't need GPU optimizations `CPU_ONLY := 1` flag in `Makefile.config` to configure and build Caffe without CUDA. +[Intel MKL 2017 Beta Update 1](https://software.intel.com/en-us/forums/intel-math-kernel-library/topic/623305) introduces optimized Deep Neural Network (DNN) performance primitives that allow to accelerate the most popular image recognition topologies. Caffe can take advantage of these primitives and get significantly better performance results compared to the previous versions of Intel MKL. There are two ways to take advantage of the new primitives: + +* At Caffe build time add `USE_MKL2017_AS_DEFAULT_ENGINE := 1` to `Makefile.config` or add `-DUSE_MKL2017_AS_DEFAULT_ENGINE=ON` to your commandline when invoking `cmake`. All layers will use new primitives by default. +* Set layer engine to `MKL2017` in model configuration. Only this specific layer will be accelerated with new primitives. 
+ +#### Recommendations +* For Better performance please disable Hyperthreading on your platoform. + +### Building for GPU Caffe requires the CUDA `nvcc` compiler to compile its GPU code and CUDA driver for GPU operation. To install CUDA, go to the [NVIDIA CUDA website](https://developer.nvidia.com/cuda-downloads) and follow installation instructions there. Install the library and the latest standalone driver separately; the driver bundled with the library is usually out-of-date. **Warning!** The 331.* CUDA driver series has a critical performance issue: do not use it. -For best performance, Caffe can be accelerated by [NVIDIA cuDNN](https://developer.nvidia.com/cudnn). Register for free at the cuDNN site, install it, then continue with these installation instructions. To compile with cuDNN set the `USE_CUDNN := 1` flag set in your `Makefile.config`. +For best performance on GPU, Caffe can be accelerated by [NVIDIA cuDNN](https://developer.nvidia.com/cudnn). Register for free at the cuDNN site, install it, then continue with these installation instructions. To compile with cuDNN set the `USE_CUDNN := 1` flag set in your `Makefile.config`. -Caffe requires BLAS as the backend of its matrix and vector computations. -There are several implementations of this library. The choice is yours: +Caffe requires BLAS as the backend of its matrix and vector computations. There are several implementations of this library. The choice is yours: * [ATLAS](http://math-atlas.sourceforge.net/): free, open source, and so the default for Caffe. -* [Intel MKL](http://software.intel.com/en-us/intel-mkl): commercial and optimized for Intel CPUs, with a free trial and [student](http://software.intel.com/en-us/intel-education-offerings) licenses. - 1. Install MKL. - 2. Set up MKL environment (Details: [Linux](https://software.intel.com/en-us/node/528499), [OS X](https://software.intel.com/en-us/node/528659)). Example: *source /opt/intel/mkl/bin/mklvars.sh intel64* - 3. 
Set `BLAS := mkl` in `Makefile.config` +* [Intel MKL](http://software.intel.com/en-us/intel-mkl): free performance library for Intel Architecture + 1. Install Intel MKL. Free options [are available](https://software.intel.com/en-us/articles/free_mkl) + 2. Set `BLAS := mkl` in `Makefile.config` * [OpenBLAS](http://www.openblas.net/): free and open source; this optimized and parallel BLAS could require more effort to install, although it might offer a speedup. 1. Install OpenBLAS 2. Set `BLAS := open` in `Makefile.config` @@ -134,10 +137,13 @@ The basic steps are as follows: See [PR #1667](https://github.com/BVLC/caffe/pull/1667) for options and details. ## Hardware +### Intel Architecture +This software supports the following hardware: +* Intel® Xeon processor E5-xxxx v3 (codename Haswell) and Intel® Xeon processor E5-xxxx v4 (codename Broadwell) +* Next generation Intel® Xeon Phi™ product family (codenamed Knights Landing) -**Laboratory Tested Hardware**: Berkeley Vision runs Caffe with Titan Xs, K80s, GTX 980s, K40s, K20s, Titans, and GTX 770s including models at ImageNet/ILSVRC scale. We have not encountered any trouble in-house with devices with CUDA capability >= 3.0. All reported hardware issues thus-far have been due to GPU configuration, overheating, and the like. - -**CUDA compute capability**: devices with compute capability <= 2.0 may have to reduce CUDA thread numbers and batch sizes due to hardware constraints. Brew with caution; we recommend compute capability >= 3.0. +### GPU +Berkeley Vision runs Caffe with K40s, K20s, and Titans including models at ImageNet/ILSVRC scale. We also run on GTX series cards (980s and 770s) and GPU-equipped MacBook Pros. We have not encountered any trouble in-house with devices with CUDA capability >= 3.0. All reported hardware issues thus-far have been due to GPU configuration, overheating, and the like. 
Once installed, check your times against our [reference performance numbers](performance_hardware.html) to make sure everything is configured properly. diff --git a/docs/performance_hardware.md b/docs/performance_hardware.md deleted file mode 100644 index cdd4b361dea..00000000000 --- a/docs/performance_hardware.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -title: Performance and Hardware Configuration ---- - -# Performance and Hardware Configuration - -To measure performance on different NVIDIA GPUs we use CaffeNet, the Caffe reference ImageNet model. - -For training, each time point is 20 iterations/minibatches of 256 images for 5,120 images total. For testing, a 50,000 image validation set is classified. - -**Acknowledgements**: BVLC members are very grateful to NVIDIA for providing several GPUs to conduct this research. - -## NVIDIA K40 - -Performance is best with ECC off and boost clock enabled. While ECC makes a negligible difference in speed, disabling it frees ~1 GB of GPU memory. - -Best settings with ECC off and maximum clock speed in standard Caffe: - -* Training is 26.5 secs / 20 iterations (5,120 images) -* Testing is 100 secs / validation set (50,000 images) - -Best settings with Caffe + [cuDNN acceleration](http://nvidia.com/cudnn): - -* Training is 19.2 secs / 20 iterations (5,120 images) -* Testing is 60.7 secs / validation set (50,000 images) - -Other settings: - -* ECC on, max speed: training 26.7 secs / 20 iterations, test 101 secs / validation set -* ECC on, default speed: training 31 secs / 20 iterations, test 117 secs / validation set -* ECC off, default speed: training 31 secs / 20 iterations, test 118 secs / validation set - -### K40 configuration tips - -For maximum K40 performance, turn off ECC and boost the clock speed (at your own risk). - -To turn off ECC, do - - sudo nvidia-smi -i 0 --ecc-config=0 # repeat with -i x for each GPU ID - -then reboot. 
- -Set the "persistence" mode of the GPU settings by - - sudo nvidia-smi -pm 1 - -and then set the clock speed with - - sudo nvidia-smi -i 0 -ac 3004,875 # repeat with -i x for each GPU ID - -but note that this configuration resets across driver reloading / rebooting. Include these commands in a boot script to initialize these settings. For a simple fix, add these commands to `/etc/rc.local` (on Ubuntu). - -## NVIDIA Titan - -Training: 26.26 secs / 20 iterations (5,120 images). -Testing: 100 secs / validation set (50,000 images). - -cuDNN Training: 20.25 secs / 20 iterations (5,120 images). -cuDNN Testing: 66.3 secs / validation set (50,000 images). - - -## NVIDIA K20 - -Training: 36.0 secs / 20 iterations (5,120 images). -Testing: 133 secs / validation set (50,000 images). - -## NVIDIA GTX 770 - -Training: 33.0 secs / 20 iterations (5,120 images). -Testing: 129 secs / validation set (50,000 images). - -cuDNN Training: 24.3 secs / 20 iterations (5,120 images). -cuDNN Testing: 104 secs / validation set (50,000 images). diff --git a/docs/release_notes.md b/docs/release_notes.md new file mode 100644 index 00000000000..f1bea198097 --- /dev/null +++ b/docs/release_notes.md @@ -0,0 +1,307 @@ +``` +All modification made by Intel Corporation: © 2016 Intel Corporation + + All contributions by the University of California: + Copyright (c) 2014, 2015, The Regents of the University of California (Regents) + All rights reserved. + + All other contributions: + Copyright (c) 2014, 2015, the respective contributors + All rights reserved. + For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+``` +# Release Notes + +## Table Of Contents +- [Introduction](#Introduction) +- [Installation](#Installation) + - [Prerequisites](#Prerequisites) + - [Building for Intel® Architecture](#Building) + - [Building for GPU](#Building) + - [Compilation](#Compilation) +- [Configurations](#Configurations) + - [Hardware](#hardware) + - [Software](#Software) +- [Known issues and limitations](#Known) +- [Instructions](#Instructions) + - [How to measure performance](#performance) + - [How to train singlenode](#singlenode) + - [How to train multinode](#multinode) + - [How to contribute](#contribute) +- [License](#License) + + +## Introduction + +This fork is dedicated to improving Caffe performance when running on CPU, in particular Intel® Xeon processors (Haswell, Broadwell, Xenon Phi) + +## Installation + +Prior to installing, have a glance through this guide and take note of the details for your platform. +We build and test Intel® Distribution of Caffe* on CentOS (7.0, 7.1, 7.2). +The official Makefile and `Makefile.config` build are complemented by an automatic CMake build from the community. + +When updating Intel® Distribution of Caffe*, it's best to `make clean` before re-compiling. + +### Prerequisites + +Before building Caffe make sure that the following dependencies are available on target system: + +* [Boost](http://www.boost.org/) >= 1.55 +* `protobuf`, `glog`, `gflags`, `hdf5` + +For additional capabilities and acceleration the following dependencies might be necessary: + +* [OpenCV](http://opencv.org/) >= 2.4 including 3.0 +* IO libraries: `lmdb`, `leveldb` (note: leveldb requires `snappy`) +* For GPU mode + * [CUDA](https://developer.nvidia.com/cuda-zone) + * cuDNN + +* For Pycaffe + * `Python 2.7` or `Python 3.3+` + * `numpy (>= 1.7)` + * boost-provided `boost.python` + +* For Matcaffe + * MATLAB with the `mex` compiler. + +#### Python and/or MATLAB Caffe (optional) + +##### Python + +The main requirements are `numpy` and `boost.python` (provided by boost). 
`pandas` is useful too and needed for some examples.
+
+You can install the dependencies with
+
+    for req in $(cat requirements.txt); do pip install $req; done
+
+but we suggest first installing the [Anaconda](https://store.continuum.io/cshop/anaconda/) Python distribution, which provides most of the necessary packages, as well as the `hdf5` library dependency.
+
+To import the `caffe` Python module after completing the installation, add the module directory to your `$PYTHONPATH` by `export PYTHONPATH=/path/to/caffe/python:$PYTHONPATH` or the like. You should not import the module in the `caffe/python/caffe` directory!
+
+*Caffe's Python interface works with Python 2.7. Python 3.3+ should work out of the box without protobuf support. For protobuf support please install protobuf 3.0 alpha (https://developers.google.com/protocol-buffers/). Earlier Pythons are your own adventure.*
+
+##### MATLAB
+
+Install MATLAB, and make sure that its `mex` is in your `$PATH`.
+
+*Caffe's MATLAB interface works with versions 2015a, 2014a/b, 2013a/b, and 2012b.*
+
+
+
+### Building for Intel® Architecture
+
+This version of Caffe is optimized for Intel® Xeon processors and Intel® Xeon Phi™ processors. To achieve the best performance results on Intel Architecture we recommend building Intel® Distribution of Caffe* with [Intel® MKL](http://software.intel.com/en-us/intel-mkl) and enabling OpenMP support.
+This Caffe version is self-contained. This means that the newest version of Intel MKL will be downloaded and installed during compilation of Intel® Distribution of Caffe*.
+
+* Set `BLAS := mkl` in `Makefile.config`
+* If you don't need GPU optimizations, set the `CPU_ONLY := 1` flag in `Makefile.config` to configure and build Intel® Distribution of Caffe* without CUDA.
+
+[Intel MKL 2017] introduces optimized Deep Neural Network (DNN) performance primitives that accelerate the most popular image recognition topologies.
Intel® Distribution of Caffe* can take advantage of these primitives and get significantly better performance results compared to the previous versions of Intel MKL. There are two ways to take advantage of the new primitives: + +* Set layer engine to `MKL2017` in prototxt file (model). Only this specific layer will be accelerated with new primitives. +* Use -engine = MKL2017 in command line as an option during execution of caffe (training, scoring, benchmark) + +### Building for GPU +Caffe requires the CUDA `nvcc` compiler to compile its GPU code and CUDA driver for GPU operation. +To install CUDA, go to the [NVIDIA CUDA website](https://developer.nvidia.com/cuda-downloads) and follow installation instructions there. Install the library and the latest standalone driver separately; the driver bundled with the library is usually out-of-date. **Warning!** The 331.* CUDA driver series has a critical performance issue: do not use it. + +For best performance on GPU, Caffe can be accelerated by [NVIDIA cuDNN](https://developer.nvidia.com/cudnn). Register for free at the cuDNN site, install it, then continue with these installation instructions. To compile with cuDNN set the `USE_CUDNN := 1` flag set in your `Makefile.config`. + +Caffe requires BLAS as the backend of its matrix and vector computations. There are several implementations of this library. The choice is yours: + +* [ATLAS](http://math-atlas.sourceforge.net/): free, open source, and so the default for Caffe. +* [Intel MKL](http://software.intel.com/en-us/intel-mkl): free performance library for Intel Architecture + 1. Install Intel MKL. Free options [are available](https://software.intel.com/en-us/articles/free_mkl) + 2. Set `BLAS := mkl` in `Makefile.config` +* [OpenBLAS](http://www.openblas.net/): free and open source; this optimized and parallel BLAS could require more effort to install, although it might offer a speedup. + 1. Install OpenBLAS + 2. 
Set `BLAS := open` in `Makefile.config`
+
+### Compilation
+
+Caffe can be compiled with either Make or CMake. Make is officially supported while CMake is supported by the community. The build procedure is the same as on the bvlc-caffe-master branch. When OpenMP is available, it will be used automatically.
+
+#### Compilation with Make
+
+Configure the build by copying and modifying the example `Makefile.config` for your setup. The defaults should work, but uncomment the relevant lines if using Anaconda Python.
+
+    cp Makefile.config.example Makefile.config
+    # Adjust Makefile.config (for example, if using Anaconda Python, or if cuDNN is desired)
+    make all
+    make test
+    make runtest
+
+- For CPU & GPU accelerated Caffe, no changes are needed.
+- For cuDNN acceleration using NVIDIA's proprietary cuDNN software, uncomment the `USE_CUDNN := 1` switch in `Makefile.config`. cuDNN is sometimes but not always faster than Caffe's GPU acceleration.
+- For CPU-only Caffe, uncomment `CPU_ONLY := 1` in `Makefile.config`.
+
+To compile the Python and MATLAB wrappers do `make pycaffe` and `make matcaffe` respectively.
+Be sure to set your MATLAB and Python paths in `Makefile.config` first!
+
+**Distribution**: run `make distribute` to create a `distribute` directory with all the Caffe headers, compiled libraries, binaries, etc. needed for distribution to other machines.
+
+**Speed**: for a faster build, compile in parallel by doing `make all -j8` where 8 is the number of parallel threads for compilation (a good choice for the number of threads is the number of cores in your machine).
+
+Now that you have installed Caffe, check out the [MNIST tutorial](gathered/examples/mnist.html) and the [reference ImageNet model tutorial](gathered/examples/imagenet.html).
+
+#### Compilation with CMake
+
+In lieu of manually editing `Makefile.config` to configure the build, Caffe offers an unofficial CMake build thanks to @Nerei, @akosiorek, and other members of the community.
It requires CMake version >= 2.8.7. +The basic steps are as follows: + + mkdir build + cd build + cmake .. + make all + make install + make runtest + +See [PR #1667](https://github.com/BVLC/caffe/pull/1667) for options and details. + +## Configurations + +### Hardware + +Ask hardware questions on the [caffe-users group](https://groups.google.com/forum/#!forum/caffe-users). + +#### Intel Architecture +This software supports the following hardware: +* Intel Xeon processor E5-xxxx v3 (codename: Haswell) and Intel Xeon processor E5-xxxx v4 (codename: Broadwell) +* Next generation Intel Xeon Phi™ product family (codename: Knights Landing) + +#### GPU +Berkeley Vision runs Caffe with K40s, K20s, and Titans including models at ImageNet/ILSVRC scale. We also run on GTX series cards (980s and 770s) and GPU-equipped MacBook Pros. We have not encountered any trouble in-house with devices with CUDA capability >= 3.0. All reported hardware issues thus-far have been due to GPU configuration, overheating, and the like. + +### Software + +#### Linux + +• Linux CentOS 7.0 (or newer) +• gcc 4.8.5 (or newer) +• cmake 2.8.7 (or newer) + + +#### Windows + +There is an unofficial Windows port of Caffe at [niuzhiheng/caffe:windows](https://github.com/niuzhiheng/caffe). Thanks [@niuzhiheng](https://github.com/niuzhiheng)! 
+
+## Change log
+25-01-2017
+* integration with MKL2017 update2 (providing better performance solution)
+* new multinode solution with better scaling on higher number of nodes (32+): [wiki instructions](https://github.com/intel/caffe/wiki/Multinode-guide)
+* old MPI multinode solution was removed
+* new engine selection functionality: [wiki instructions](https://github.com/intel/caffe/blob/master/docs/tutorial/interfaces.md)
+* new multiphase training functionality
+* fixed problems with batch normalization
+* new BKM (Best Known Method) and examples for achieving best performance [wiki instructions](https://github.com/intel/caffe/wiki/Recommendations-to-achieve-best-performance)
+* other minor performance and functional improvements
+* new prototxt solvers with example hyper-parameters optimized to achieve best Time To Train performance (both multi node and single node).
+
+03-11-2016
+* integration with MKL2017 update1
+* minor changes to provide optimal performance on default prototxt files describing topologies (for AlexNet, GoogleNet v2).
+* fixed Dockerfiles - for Ubuntu and Centos.
+
+1-09-2016
+* added RNN support
+* moved from MKL2017 beta update 1 engine to MKL2017
+* added official support for ResNet50, GoogleNet v2, VGG-19. (List of currently supported topologies: AlexNet, GoogleNet, GoogleNet v2, ResNet50, VGG-19)
+* added official support for multinode on GoogleNet with MKL2017 engine
+* added DataLayer optimizations
+* added support for compressed LMDB
+* initial integration with MKLDNN
+
+
+## Known issues and limitations
+* Intel MKL 2017 DNN primitives used by MKL2017 compute engine are optimized for processors with Intel Advanced Vector Extensions 2 (Intel AVX2) and Intel Advanced Vector Extensions 512 (Intel AVX512) support.
+Workaround: For older processors use MKL2017 GEMM engine: use `-engine = CAFFE` as a parameter during execution and make sure that in the prototxt file you do not have the line: `engine:=MKL2017`.
+
+* Local response normalization (LRN) within channel is not supported in MKL2017 engine and will result in a runtime error.
+Workaround: Use GEMM engine in normalization layer (in prototxt file set `engine:=caffe` for that layer) for topologies that use LRN within channel like cifar.
+
+* Performance results may be lower when Data Layer is provided in txt files (uncompressed list of jpg files)
+Workaround: We recommend always using a compressed LMDB Data Layer
+
+* LeNet, Cifar, SqueezeNet currently are not optimized in terms of performance in Intel MKL2017
+Workaround: better performance results might be achieved with GEMM engine: use `-engine = CAFFE` as a parameter during execution.
+
+## Recommendations to achieve best performance
+
+At our wiki page we present our recommendations and tuning guide to achieve best performance.
+[https://github.com/intel/caffe/wiki/Recommendations-to-achieve-best-performance](https://github.com/intel/caffe/wiki/Recommendations-to-achieve-best-performance)
+
+## Instructions:
+
+For instructions and tutorials please visit: [https://github.com/intel/caffe/wiki](https://github.com/intel/caffe/wiki)
+
+### Caffe Benchmark to measure performance
+1. Make sure that you implemented recommendations to achieve best performance
+2. Prepare `Makefile.config` configuration as described in Building for Intel Architecture section
+3. Check in train_val.prototxt file what Data Layer type is used. For best results don't use data layer (or use LMDB)
+4. Execute commands:
+`source /opt/intel/mkl/bin/mklvars.sh intel64`
+`make all test -j 80`
+`./build/tools/caffe time --model=models/bvlc_alexnet/train_val.prototxt -iterations 100
+./build/tools/caffe time --model=models/bvlc_googlenet/train_val.prototxt -iterations 100`
+or edit commands and provide other optimized topologies.
+5. As a result you will get log like:
+
+ `Average Forward pass: 109.978 ms.
+ Average Backward pass: 172.952 ms.
+ Average Forward-Backward: 283.39 ms.`
+6. 
To achieve results in `images/s` follow the equation:
+
+` [Images/s] = batchsize * 1000 / Average Forward-Backward [ms]`
+
+### How to train singlenode
+
+1. Prepare `Makefile.config` configuration as described in Building for Intel Architecture section.
+2. Compile code as described in Compilation with CMake section.
+3. Copy the data set that you wish to use for training and provide a link to it in `/models/[chosen topology folder]/train_val.prototxt` file
+4. Execute command:
+`./build/tools/caffe train --solver=models/[chosen topology folder]/solver.prototxt`
+
+### How to train multinode
+
+Tutorials and training instructions are available at: [https://github.com/intel/caffe/wiki/Multinode-guide](https://github.com/intel/caffe/wiki/Multinode-guide)
+
+### How to contribute
+
+If you want to contribute code follow the instructions provided in the `/docs/development.md` file.
+
+### How to create LMDB
+
+In the folder `/examples/imagenet/` we provide scripts and instructions in `readme.md` on how to create LMDB.
+
+
+## License
+
+Caffe is released under the [BSD 2-Clause license](https://github.com/BVLC/caffe/blob/master/LICENSE). The BVLC reference models are released for unrestricted use.
+ +*** + *Other names and brands may be claimed as the property of others diff --git a/docs/tutorial/interfaces.md b/docs/tutorial/interfaces.md index d7ff378239d..cdec7b84104 100644 --- a/docs/tutorial/interfaces.md +++ b/docs/tutorial/interfaces.md @@ -19,8 +19,8 @@ For example, you can run: # train LeNet caffe train -solver examples/mnist/lenet_solver.prototxt - # train on GPU 2 - caffe train -solver examples/mnist/lenet_solver.prototxt -gpu 2 + # train LeNet using CAFFE engine + caffe train -solver examples/mnist/lenet_solver.prototxt -engine CAFFE # resume training from the half-way point snapshot caffe train -solver examples/mnist/lenet_solver.prototxt -snapshot examples/mnist/lenet_iter_5000.solverstate @@ -33,29 +33,65 @@ For a full example of fine-tuning, see examples/finetuning_on_flickr_style, but # score the learned LeNet model on the validation set as defined in the # model architeture lenet_train_test.prototxt - caffe test -model examples/mnist/lenet_train_test.prototxt -weights examples/mnist/lenet_iter_10000.caffemodel -gpu 0 -iterations 100 + caffe test -model examples/mnist/lenet_train_test.prototxt -weights examples/mnist/lenet_iter_10000.caffemodel -iterations 100 **Benchmarking**: `caffe time` benchmarks model execution layer-by-layer through timing and synchronization. This is useful to check system performance and measure relative execution times for models. # (These example calls require you complete the LeNet / MNIST example first.) 
# time LeNet training on CPU for 10 iterations caffe time -model examples/mnist/lenet_train_test.prototxt -iterations 10 - # time LeNet training on GPU for the default 50 iterations - caffe time -model examples/mnist/lenet_train_test.prototxt -gpu 0 - # time a model architecture with the given weights on the first GPU for 10 iterations - caffe time -model examples/mnist/lenet_train_test.prototxt -weights examples/mnist/lenet_iter_10000.caffemodel -gpu 0 -iterations 10 - -**Diagnostics**: `caffe device_query` reports GPU details for reference and checking device ordinals for running on a given device in multi-GPU machines. - - # query the first device - caffe device_query -gpu 0 - -**Parallelism**: the `-gpu` flag to the `caffe` tool can take a comma separated list of IDs to run on multiple GPUs. A solver and net will be instantiated for each GPU so the batch size is effectively multiplied by the number of GPUs. To reproduce single GPU training, reduce the batch size in the network definition accordingly. - - # train on GPUs 0 & 1 (doubling the batch size) - caffe train -solver examples/mnist/lenet_solver.prototxt -gpu 0,1 - # train on all GPUs (multiplying batch size by number of devices) - caffe train -solver examples/mnist/lenet_solver.prototxt -gpu all + # it is very useful to combine "time" with engine option as it will allow to get measures for diffrent engines + caffe time -model examples/mnist/lenet_train_test.prototxt -engine MKL2017 -iterations 10 + caffe time -model examples/mnist/lenet_train_test.prototxt -engine CAFFE -iterations 10 + caffe time -model examples/mnist/lenet_train_test.prototxt -engine MKLDNN -iterations 10 + # time LeNet forward pass only for the default 50 iterations using engine: MKLDNN + caffe time -model examples/mnist/lenet_train_test.prototxt -forward_only -engine MKLDNN + +## C++ + +To use caffe from C++ code you would need headers and caffe lib (libcaffe.so). 
All of this is provided in convenient way when "make distribute" (Makefiles) or make install (cmake builds) targets are executed. + +Example of c++ program using caffe (classification done using already trained Lenet model, using MKL2017 engine): + + #include "caffe/blob.hpp" + #include "caffe/common.hpp" + #include "caffe/net.hpp" + #include "caffe/proto/caffe.pb.h" + #include "caffe/util/db.hpp" + #include "caffe/util/io.hpp" + + #include + + using namespace caffe; + + int main(void) { + + // Lenet model and weights of trained model + const std::string model_path = "/examples/mnist/lenet.prototxt"; + const std::string weights_path = "/examples/mnist/lenet_iter_10000.caffemodel"; + + // Engine to be used (default is CAFFE) + const std::string engine_name = std::string("MKL2017"); + //const std::string engine_name = std::string("MKLDNN"); + const std::vector stages(1,""); + const int level = 0; + + caffe::Caffe::set_mode(Caffe::CPU); + std::unique_ptr> net{ + new caffe::Net(model_path, caffe::TEST, level, &stages, NULL, engine_name)}; + + net->CopyTrainedLayersFrom(weights_path); + + const boost::shared_ptr> input_blob{ + net->blob_by_name("data")}; + + // Fill input data container with some data + float* input_data = input_blob->mutable_cpu_data(); + + net->Forward(); + } + +More examples can be found in __examples/cpp_classification/classification.cpp__ ## Python diff --git a/examples/LRCN_activity_recognition/README.txt b/examples/LRCN_activity_recognition/README.txt new file mode 100644 index 00000000000..631d900f7c2 --- /dev/null +++ b/examples/LRCN_activity_recognition/README.txt @@ -0,0 +1,7 @@ +This code should help you reimplement the experiments in: + +Donahue, J., Hendricks, L. A., Guadarrama, S., Rohrbach, M., Venugopalan, S., Saenko, K., & Darrell, T. (2014). Long-term recurrent convolutional networks for visual recognition and description. arXiv preprint arXiv:1411.4389. 
+Chicago + +Please see http://www.eecs.berkeley.edu/~lisa_anne/LRCN_video for detailed instructions on how to reimplement experiments and download pre-trained models. + diff --git a/examples/LRCN_activity_recognition/classify_video.py b/examples/LRCN_activity_recognition/classify_video.py new file mode 100755 index 00000000000..a0e9762fa1a --- /dev/null +++ b/examples/LRCN_activity_recognition/classify_video.py @@ -0,0 +1,159 @@ +#!/bin/env python +#classify_video.py will classify a video using (1) singleFrame RGB model (2) singleFrame flow model (3) 0.5/0.5 singleFrame RGB/singleFrame flow fusion (4) 0.33/0.67 singleFrame RGB/singleFrame flow fusion (5) LRCN RGB model (6) LRCN flow model (7) 0.5/0.5 LRCN RGB/LRCN flow model (8) 0.33/0.67 LRCN RGB/LRCN flow model +#Before using, change RGB_video_path and flow_video_path. +#Use: classify_video.py video, where video is the video you wish to classify. If no video is specified, the video "v_Archery_g01_c01" will be classified. + +import numpy as np +import glob +caffe_root = '../../' +import sys +sys.path.insert(0,caffe_root + 'python') +import caffe +caffe.set_mode_cpu() +import pickle + +flow_video_path = 'flow_images/' +if len(sys.argv) > 1: + video = sys.argv[1] + if len(sys.argv) > 2: + RGB_video_path = sys.argv[2] + else: + RGB_video_path = 'frames/' +else: + video = 'v_Archery_g01_c01' + +#Initialize transformers + +def initialize_transformer(image_mean): + shape = (10*16, 3, 227, 227) + transformer = caffe.io.Transformer({'data': shape}) + channel_mean = np.zeros((3,227,227)) + for channel_index, mean_val in enumerate(image_mean): + channel_mean[channel_index, ...] 
= mean_val + transformer.set_mean('data', channel_mean) + transformer.set_raw_scale('data', 255) + transformer.set_channel_swap('data', (2, 1, 0)) + transformer.set_transpose('data', (2, 0, 1)) + return transformer + + +ucf_mean_RGB = np.zeros((3,1,1)) +ucf_mean_flow = np.zeros((3,1,1)) +ucf_mean_flow[:,:,:] = 128 +ucf_mean_RGB[0,:,:] = 103.939 +ucf_mean_RGB[1,:,:] = 116.779 +ucf_mean_RGB[2,:,:] = 128.68 + +transformer_RGB = initialize_transformer(ucf_mean_RGB) + +# Extract list of frames in video +RGB_frames = glob.glob('%s%s/*.jpg' %(RGB_video_path, video)) +flow_frames = glob.glob('%s%s/*.jpg' %(flow_video_path, video)) + +#classify video with LRCN model +def LRCN_classify_video(frames, net, transformer): + clip_length = 16 + offset = 8 + input_images = [] + for im in frames: + input_im = caffe.io.load_image(im) + if (input_im.shape[0] < 240): + input_im = caffe.io.resize_image(input_im, (240,320)) + input_images.append(input_im) + vid_length = len(input_images) + input_data = [] + for i in range(0,vid_length,offset): + if (i + clip_length) < vid_length: + input_data.extend(input_images[i:i+clip_length]) + else: #video may not be divisible by clip_length + input_data.extend(input_images[-clip_length:]) + output_predictions = np.zeros((len(input_data),101)) + for i in range(0,len(input_data),clip_length): + clip_input = input_data[i:i+clip_length] + clip_input = caffe.io.oversample(clip_input,[227,227]) + clip_clip_markers = np.ones((clip_input.shape[0],1,1,1)) + clip_clip_markers[0:10,:,:,:] = 0 +# if is_flow: #need to negate the values when mirroring +# clip_input[5:,:,:,0] = 1 - clip_input[5:,:,:,0] + caffe_in = np.zeros(np.array(clip_input.shape)[[0,3,1,2]], dtype=np.float32) + for ix, inputs in enumerate(clip_input): + caffe_in[ix] = transformer.preprocess('data',inputs) + out = net.forward_all(data=caffe_in, clip_markers=np.array(clip_clip_markers)) + output_predictions[i:i+clip_length] = np.mean(out['probs'],1) + return 
np.mean(output_predictions,0).argmax(), output_predictions + +#classify video with singleFrame model +def singleFrame_classify_video(frames, net, transformer, is_flow): + batch_size = 16 + input_images = [] + for im in frames: + input_im = caffe.io.load_image(im) + if (input_im.shape[0] < 240): + input_im = caffe.io.resize_image(input_im, (240,320)) + input_images.append(input_im) + vid_length = len(input_images) + + output_predictions = np.zeros((len(input_images),101)) + for i in range(0,len(input_images), batch_size): + clip_input = input_images[i:min(i+batch_size, len(input_images))] + clip_input = caffe.io.oversample(clip_input,[227,227]) + clip_clip_markers = np.ones((clip_input.shape[0],1,1,1)) + clip_clip_markers[0:10,:,:,:] = 0 + if is_flow: #need to negate the values when mirroring + clip_input[5:,:,:,0] = 1 - clip_input[5:,:,:,0] + caffe_in = np.zeros(np.array(clip_input.shape)[[0,3,1,2]], dtype=np.float32) + for ix, inputs in enumerate(clip_input): + caffe_in[ix] = transformer.preprocess('data',inputs) + net.blobs['data'].reshape(caffe_in.shape[0], caffe_in.shape[1], caffe_in.shape[2], caffe_in.shape[3]) + out = net.forward_all(data=caffe_in) + output_predictions[i:i+batch_size] = np.mean(out['probs'].reshape(10,caffe_in.shape[0]/10,101),0) + return np.mean(output_predictions,0).argmax(), output_predictions + +#Models and weights +singleFrame_model = 'deploy_singleFrame.prototxt' +lstm_model = 'deploy_lstm.prototxt' +RGB_singleFrame = 'single_frame_all_layers_hyb_RGB_iter_5000.caffemodel' +flow_singleFrame = 'single_frame_all_layers_hyb_flow_iter_50000.caffemodel' +RGB_lstm = 'RGB_lstm_model_iter_30000.caffemodel' +flow_lstm = 'flow_lstm_model_iter_50000.caffemodel' + +#RGB_singleFrame_net = caffe.Net(singleFrame_model, RGB_singleFrame, caffe.TEST) +#class_RGB_singleFrame, predictions_RGB_singleFrame = \ + #singleFrame_classify_video(RGB_frames, RGB_singleFrame_net, transformer_RGB, False) +#del RGB_singleFrame_net + +#flow_singleFrame_net = 
caffe.Net(singleFrame_model, flow_singleFrame, caffe.TEST) +#class_flow_singleFrame, predictions_flow_singleFrame = \ + #singleFrame_classify_video(flow_frames, flow_singleFrame_net, transformer_flow, True) +#del flow_singleFrame_net + +RGB_lstm_net = caffe.Net(lstm_model, RGB_lstm, caffe.TEST) +class_RGB_LRCN, predictions_RGB_LRCN = \ + LRCN_classify_video(RGB_frames, RGB_lstm_net, transformer_RGB) +del RGB_lstm_net + +#flow_lstm_net = caffe.Net(lstm_model, flow_lstm, caffe.TEST) +#class_flow_LRCN, predictions_flow_LRCN = \ + #LRCN_classify_video(flow_frames, flow_lstm_net, transformer_flow, True) +#del flow_lstm_net + +#def compute_fusion(RGB_pred, flow_pred, p): + #return np.argmax(p*np.mean(RGB_pred,0) + (1-p)*np.mean(flow_pred,0)) + +#Load activity label hash +action_hash = pickle.load(open('action_hash_rev.p','rb')) + +#print "RGB single frame model classified video as: %s.\n" %(action_hash[class_RGB_singleFrame]) +#print "Flow single frame model classified video as: %s.\n" %(action_hash[class_flow_singleFrame]) +print "RGB LRCN model classified video as: %s.\n" %(action_hash[class_RGB_LRCN]) +#print "Flow LRCN frame model classified video as: %s.\n" %(action_hash[class_flow_LRCN]) +#print "0.5/0.5 single frame fusion model classified video as: %s. \n" %(action_hash[compute_fusion(predictions_RGB_singleFrame, predictions_flow_singleFrame, 0.5)]) +#print "0.33/0.67 single frame fusion model classified video as: %s. \n" %(action_hash[compute_fusion(predictions_RGB_singleFrame, predictions_flow_singleFrame, 0.33)]) +#print "0.5/0.5 LRCN fusion model classified video as: %s. \n" %(action_hash[compute_fusion(predictions_RGB_LRCN, predictions_flow_LRCN, 0.5)]) +#print "0.33/0.67 LRCN fusion model classified video as: %s. 
\n" %(action_hash[compute_fusion(predictions_RGB_LRCN, predictions_flow_LRCN, 0.33)]) + + + + + + diff --git a/examples/LRCN_activity_recognition/deploy_lstm.prototxt b/examples/LRCN_activity_recognition/deploy_lstm.prototxt new file mode 100644 index 00000000000..39b54ab7501 --- /dev/null +++ b/examples/LRCN_activity_recognition/deploy_lstm.prototxt @@ -0,0 +1,302 @@ +name: "Hyb2Net-LSTM" +input: "data" +input_dim: 160 +input_dim: 3 +input_dim: 227 +input_dim: 227 +input: "clip_markers" +input_dim: 160 +input_dim: 1 +input_dim: 1 +input_dim: 1 + +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + convolution_param { + num_output: 96 + kernel_size: 7 + stride: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "conv1" + top: "conv1" +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "norm1" + type: "LRN" + bottom: "pool1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "norm1" + top: "conv2" + convolution_param { + num_output: 384 + kernel_size: 5 + group: 2 + stride: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "conv2" + top: "conv2" +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "norm2" + type: "LRN" + bottom: "pool2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv3" + type: "Convolution" + bottom: "norm2" + top: "conv3" + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "gaussian" + std: 0.01 + } + 
bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "conv3" + top: "conv3" +} +layer { + name: "conv4" + type: "Convolution" + bottom: "conv3" + top: "conv4" + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu4" + type: "ReLU" + bottom: "conv4" + top: "conv4" +} +layer { + name: "conv5" + type: "Convolution" + bottom: "conv4" + top: "conv5" + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu5" + type: "ReLU" + bottom: "conv5" + top: "conv5" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "fc6" + type: "InnerProduct" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer{ + name: "reshape-data" + type: "Reshape" + bottom: "fc6" + top: "fc6-reshape" + reshape_param{ + shape{ + dim: 16 + dim: 10 + dim: 4096 + } + } + include: { phase: TEST} +} +layer{ + name: "reshape-cm" + type: "Reshape" + bottom: "clip_markers" + top: "reshape-cm" + reshape_param{ + shape{ + dim: 16 + dim: 10 + } + } + include: { phase: TEST } +} +layer { + name: "lstm1" + type: "LSTM" + bottom: "fc6-reshape" + bottom: "reshape-cm" + top: "lstm1" + recurrent_param { + num_output: 256 + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc8-final" + type: "InnerProduct" + bottom: "lstm1" + top: "fc8-final" + 
param { + lr_mult: 10 + decay_mult: 1 + } + param { + lr_mult: 20 + decay_mult: 0 + } + inner_product_param { + num_output: 101 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + axis: 2 + } +} +layer { + name: "probs" + type: "Softmax" + bottom: "fc8-final" + top: "probs" + softmax_param { + axis: 2 + } +} diff --git a/examples/LRCN_activity_recognition/extract_frames.sh b/examples/LRCN_activity_recognition/extract_frames.sh new file mode 100755 index 00000000000..a366e016483 --- /dev/null +++ b/examples/LRCN_activity_recognition/extract_frames.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +EXPECTED_ARGS=2 +E_BADARGS=65 + +if [ $# -lt $EXPECTED_ARGS ] +then + echo "Usage: `basename $0` video frames/sec [size=256]" + exit $E_BADARGS +fi + +NAME=${1%.*} +FRAMES=$2 +BNAME=`basename $NAME` +echo $BNAME +mkdir -m 755 $BNAME + +ffmpeg -i $1 -r $FRAMES $BNAME/$BNAME.%4d.jpg diff --git a/examples/LRCN_activity_recognition/lstm_solver_RGB.prototxt b/examples/LRCN_activity_recognition/lstm_solver_RGB.prototxt new file mode 100644 index 00000000000..2deca526fa8 --- /dev/null +++ b/examples/LRCN_activity_recognition/lstm_solver_RGB.prototxt @@ -0,0 +1,19 @@ +net: "train_test_lstm_RGB.prototxt" +test_iter: 100 +test_state: { stage: 'test-on-test' } +test_interval: 100 +base_lr: 0.001 +lr_policy: "step" +gamma: 0.1 +stepsize: 10000 +display: 20 +max_iter: 30000 +momentum: 0.9 +weight_decay: 0.005 +snapshot: 5000 +snapshot_prefix: "snapshots_lstm_RGB" +solver_mode: CPU +device_id: 0 +random_seed: 1701 +average_loss: 1000 +clip_gradients: 5 diff --git a/examples/LRCN_activity_recognition/run_lstm_RGB.sh b/examples/LRCN_activity_recognition/run_lstm_RGB.sh new file mode 100755 index 00000000000..7aa94b47f5c --- /dev/null +++ b/examples/LRCN_activity_recognition/run_lstm_RGB.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +TOOLS=../../build/tools + +export HDF5_DISABLE_VERSION_CHECK=1 +export PYTHONPATH=. 
+ +GLOG_logtostderr=1 $TOOLS/caffe train -solver lstm_solver_RGB.prototxt -weights single_frame_all_layers_hyb_RGB_iter_5000.caffemodel +echo "Done." diff --git a/examples/LRCN_activity_recognition/run_singleFrame_RGB.sh b/examples/LRCN_activity_recognition/run_singleFrame_RGB.sh new file mode 100755 index 00000000000..c3ed9c0c1e4 --- /dev/null +++ b/examples/LRCN_activity_recognition/run_singleFrame_RGB.sh @@ -0,0 +1,5 @@ +#!/bin/sh +TOOLS=../../build/tools + +GLOG_logtostderr=1 $TOOLS/caffe train -solver singleFrame_solver_RGB.prototxt -weights caffe_imagenet_hyb2_wr_rc_solver_sqrt_iter_310000 +echo 'Done.' diff --git a/examples/LRCN_activity_recognition/sequence_input_layer.py b/examples/LRCN_activity_recognition/sequence_input_layer.py new file mode 100755 index 00000000000..bea60a0590d --- /dev/null +++ b/examples/LRCN_activity_recognition/sequence_input_layer.py @@ -0,0 +1,316 @@ +#!/usr/bin/env python + +#Data layer for video. Change flow_frames and RGB_frames to be the path to the flow and RGB frames. 
+ +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import sys +sys.path.append('../../python') +import caffe +import io +from PIL import Image +import numpy as np +import scipy.misc +import time +import pdb +import glob +import pickle as pkl +import random +import h5py +from multiprocessing import Pool +from threading import Thread +import skimage.io +import copy + +flow_frames = 'flow_images/' +RGB_frames = 'frames/' +test_frames = 16 +train_frames = 16 +test_buffer = 3 +train_buffer = 24 + +def processImageCrop(im_info, transformer, flow): + im_path = im_info[0] + im_crop = im_info[1] + im_reshape = im_info[2] + im_flip = im_info[3] + data_in = caffe.io.load_image(im_path) + if (data_in.shape[0] < im_reshape[0]) | (data_in.shape[1] < im_reshape[1]): + data_in = caffe.io.resize_image(data_in, im_reshape) + if im_flip: + data_in = caffe.io.flip_image(data_in, 1, flow) + data_in = data_in[im_crop[0]:im_crop[2], im_crop[1]:im_crop[3], :] + processed_image = transformer.preprocess('data_in',data_in) + return processed_image + +class ImageProcessorCrop(object): + def __init__(self, transformer, flow): + self.transformer = transformer + self.flow = flow + def __call__(self, im_info): + return processImageCrop(im_info, self.transformer, self.flow) + +class sequenceGeneratorVideo(object): + def __init__(self, buffer_size, clip_length, num_videos, video_dict, video_order): + self.buffer_size = buffer_size + self.clip_length = clip_length + self.N = self.buffer_size*self.clip_length + self.num_videos = num_videos + self.video_dict = video_dict + self.video_order = video_order + self.idx = 0 + + def __call__(self): + label_r = [] + im_paths = [] + im_crop = [] + im_reshape = [] + im_flip = [] + + if self.idx + self.buffer_size >= self.num_videos: + idx_list = range(self.idx, self.num_videos) + idx_list.extend(range(0, self.buffer_size-(self.num_videos-self.idx))) + else: + idx_list = range(self.idx, self.idx+self.buffer_size) + + + for i in 
idx_list: + key = self.video_order[i] + label = self.video_dict[key]['label'] + video_reshape = self.video_dict[key]['reshape'] + video_crop = self.video_dict[key]['crop'] + label_r.extend([label]*self.clip_length) + + im_reshape.extend([(video_reshape)]*self.clip_length) + r0 = int(random.random()*(video_reshape[0] - video_crop[0])) + r1 = int(random.random()*(video_reshape[1] - video_crop[1])) + im_crop.extend([(r0, r1, r0+video_crop[0], r1+video_crop[1])]*self.clip_length) + f = random.randint(0,1) + im_flip.extend([f]*self.clip_length) + rand_frame = int(random.random()*(self.video_dict[key]['num_frames']-self.clip_length)+1+1) + frames = [] + + for i in range(rand_frame,rand_frame+self.clip_length): + frames.append(self.video_dict[key]['frames'] %i) + + im_paths.extend(frames) + + + im_info = zip(im_paths,im_crop, im_reshape, im_flip) + + self.idx += self.buffer_size + if self.idx >= self.num_videos: + self.idx = self.idx - self.num_videos + + return label_r, im_info + +def advance_batch(result, sequence_generator, image_processor, pool): + + label_r, im_info = sequence_generator() + tmp = image_processor(im_info[0]) + result['data'] = pool.map(image_processor, im_info) + result['label'] = label_r + cm = np.ones(len(label_r)) + cm[0::16] = 0 + result['clip_markers'] = cm + +class BatchAdvancer(): + def __init__(self, result, sequence_generator, image_processor, pool): + self.result = result + self.sequence_generator = sequence_generator + self.image_processor = image_processor + self.pool = pool + + def __call__(self): + return advance_batch(self.result, self.sequence_generator, self.image_processor, self.pool) + +class videoRead(caffe.Layer): + + def initialize(self): + self.train_or_test = 'test' + self.flow = False + self.buffer_size = test_buffer #num videos processed per batch + self.frames = test_frames #length of processed clip + self.N = self.buffer_size*self.frames + self.idx = 0 + self.channels = 3 + self.height = 227 + self.width = 227 + 
self.path_to_images = RGB_frames + self.video_list = 'ucf101_split1_testVideos.txt' + + def setup(self, bottom, top): + random.seed(10) + self.initialize() + f = open(self.video_list, 'r') + f_lines = f.readlines() + f.close() + + video_dict = {} + current_line = 0 + self.video_order = [] + for ix, line in enumerate(f_lines): + video = line.split(' ')[0].split('/')[1] + l = int(line.split(' ')[1]) + frames = glob.glob('%s%s/*.jpg' %(self.path_to_images, video)) + num_frames = len(frames) + video_dict[video] = {} + video_dict[video]['frames'] = frames[0].split('.')[0] + '.%04d.jpg' + video_dict[video]['reshape'] = (240,320) + video_dict[video]['crop'] = (227, 227) + video_dict[video]['num_frames'] = num_frames + video_dict[video]['label'] = l + self.video_order.append(video) + + self.video_dict = video_dict + self.num_videos = len(video_dict.keys()) + + #set up data transformer + shape = (self.N, self.channels, self.height, self.width) + + self.transformer = caffe.io.Transformer({'data_in': shape}) + self.transformer.set_raw_scale('data_in', 255) + if self.flow: + image_mean = [128, 128, 128] +# TODO: No flow support in transformer currently +# self.transformer.set_is_flow('data_in', True) + else: + image_mean = [103.939, 116.779, 128.68] +# TODO: No flow support in transformer currently +# self.transformer.set_is_flow('data_in', False) + channel_mean = np.zeros((3,227,227)) + for channel_index, mean_val in enumerate(image_mean): + channel_mean[channel_index, ...] 
= mean_val + self.transformer.set_mean('data_in', channel_mean) + self.transformer.set_channel_swap('data_in', (2, 1, 0)) + self.transformer.set_transpose('data_in', (2, 0, 1)) + + self.thread_result = {} + self.thread = None + pool_size = 24 + + self.image_processor = ImageProcessorCrop(self.transformer, self.flow) + self.sequence_generator = sequenceGeneratorVideo(self.buffer_size, self.frames, self.num_videos, self.video_dict, self.video_order) + + self.pool = Pool(processes=pool_size) + self.batch_advancer = BatchAdvancer(self.thread_result, self.sequence_generator, self.image_processor, self.pool) + self.dispatch_worker() + self.top_names = ['data', 'label','clip_markers'] + print 'Outputs:', self.top_names + if len(top) != len(self.top_names): + raise Exception('Incorrect number of outputs (expected %d, got %d)' % + (len(self.top_names), len(top))) + self.join_worker() + for top_index, name in enumerate(self.top_names): + if name == 'data': + shape = (self.N, self.channels, self.height, self.width) + elif name == 'label': + shape = (self.N,) + elif name == 'clip_markers': + shape = (self.N,) + top[top_index].reshape(*shape) + + def reshape(self, bottom, top): + pass + + def forward(self, bottom, top): + + if self.thread is not None: + self.join_worker() + + #rearrange the data: The LSTM takes inputs as [video0_frame0, video1_frame0,...] but the data is currently arranged as [video0_frame0, video0_frame1, ...] 
+ new_result_data = [None]*len(self.thread_result['data']) + new_result_label = [None]*len(self.thread_result['label']) + new_result_cm = [None]*len(self.thread_result['clip_markers']) + for i in range(self.frames): + for ii in range(self.buffer_size): + old_idx = ii*self.frames + i + new_idx = i*self.buffer_size + ii + new_result_data[new_idx] = self.thread_result['data'][old_idx] + new_result_label[new_idx] = self.thread_result['label'][old_idx] + new_result_cm[new_idx] = self.thread_result['clip_markers'][old_idx] + + for top_index, name in zip(range(len(top)), self.top_names): + if name == 'data': + for i in range(self.N): + top[top_index].data[i, ...] = new_result_data[i] + elif name == 'label': + top[top_index].data[...] = new_result_label + elif name == 'clip_markers': + top[top_index].data[...] = new_result_cm + + self.dispatch_worker() + + def dispatch_worker(self): + assert self.thread is None + self.thread = Thread(target=self.batch_advancer) + self.thread.start() + + def join_worker(self): + assert self.thread is not None + self.thread.join() + self.thread = None + + def backward(self, top, propagate_down, bottom): + pass + +class videoReadTrain_flow(videoRead): + + def initialize(self): + self.train_or_test = 'train' + self.flow = True + self.buffer_size = train_buffer #num videos processed per batch + self.frames = train_frames #length of processed clip + self.N = self.buffer_size*self.frames + self.idx = 0 + self.channels = 3 + self.height = 227 + self.width = 227 + self.path_to_images = flow_frames + self.video_list = 'ucf101_split1_trainVideos.txt' + +class videoReadTest_flow(videoRead): + + def initialize(self): + self.train_or_test = 'test' + self.flow = True + self.buffer_size = test_buffer #num videos processed per batch + self.frames = test_frames #length of processed clip + self.N = self.buffer_size*self.frames + self.idx = 0 + self.channels = 3 + self.height = 227 + self.width = 227 + self.path_to_images = flow_frames + self.video_list = 
'ucf101_split1_testVideos.txt' + +class videoReadTrain_RGB(videoRead): + + def initialize(self): + self.train_or_test = 'train' + self.flow = False + self.buffer_size = train_buffer #num videos processed per batch + self.frames = train_frames #length of processed clip + self.N = self.buffer_size*self.frames + self.idx = 0 + self.channels = 3 + self.height = 227 + self.width = 227 + self.path_to_images = RGB_frames + self.video_list = 'ucf101_split1_trainVideos.txt' + +class videoReadTest_RGB(videoRead): + + def initialize(self): + self.train_or_test = 'test' + self.flow = False + self.buffer_size = test_buffer #num videos processed per batch + self.frames = test_frames #length of processed clip + self.N = self.buffer_size*self.frames + self.idx = 0 + self.channels = 3 + self.height = 227 + self.width = 227 + self.path_to_images = RGB_frames + self.video_list = 'ucf101_split1_testVideos.txt' diff --git a/examples/LRCN_activity_recognition/singleFrame_solver_RGB.prototxt b/examples/LRCN_activity_recognition/singleFrame_solver_RGB.prototxt new file mode 100644 index 00000000000..0b70b227926 --- /dev/null +++ b/examples/LRCN_activity_recognition/singleFrame_solver_RGB.prototxt @@ -0,0 +1,17 @@ +net: "train_test_singleFrame_RGB.prototxt" +test_iter: 75 +test_state: { stage: 'test-on-test' } +test_interval: 100 +base_lr: 0.001 +lr_policy: "step" +gamma: 0.1 +stepsize: 3000 +display: 20 +max_iter: 5000 +momentum: 0.9 +weight_decay: 0.005 +snapshot: 5000 +snapshot_prefix: "snapshots_singleFrame_RGB" +solver_mode: GPU +device_id: 0 +random_seed: 1701 diff --git a/examples/LRCN_activity_recognition/train_test_lstm_RGB.prototxt b/examples/LRCN_activity_recognition/train_test_lstm_RGB.prototxt new file mode 100644 index 00000000000..760157487e1 --- /dev/null +++ b/examples/LRCN_activity_recognition/train_test_lstm_RGB.prototxt @@ -0,0 +1,443 @@ +name: "lstm_joints" +layer { + name: "data" + type: "Python" + top: "data" + top: "label" + top: "clip_markers" + python_param { + 
module: "sequence_input_layer" + layer: "videoReadTrain_RGB" + } + include: { phase: TRAIN } +} + +layer { + name: "data" + type: "Python" + top: "data" + top: "label" + top: "clip_markers" + python_param { + module: "sequence_input_layer" + layer: "videoReadTest_RGB" + } + include: { phase: TEST stage: "test-on-test" } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 7 + stride: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "conv1" + top: "conv1" +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "norm1" + type: "LRN" + bottom: "pool1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "norm1" + top: "conv2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + kernel_size: 5 + group: 2 + stride: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "conv2" + top: "conv2" +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "norm2" + type: "LRN" + bottom: "pool2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv3" + type: "Convolution" + bottom: "norm2" + top: "conv3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: 
"gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "conv3" + top: "conv3" +} +layer { + name: "conv4" + type: "Convolution" + bottom: "conv3" + top: "conv4" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu4" + type: "ReLU" + bottom: "conv4" + top: "conv4" +} +layer { + name: "conv5" + type: "Convolution" + bottom: "conv4" + top: "conv5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu5" + type: "ReLU" + bottom: "conv5" + top: "conv5" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "fc6" + type: "InnerProduct" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "drop6" + type: "Dropout" + bottom: "fc6" + top: "fc6" + dropout_param { + dropout_ratio: 0.9 + } +} +layer{ + name: "reshape-data" + type: "Reshape" + bottom: "fc6" + top: "fc6-reshape" + reshape_param{ + shape{ + dim: 16 + dim: 24 + dim: 4096 + } + } + include: { phase: TRAIN } +} +layer{ + name: "reshape-data" + type: "Reshape" + bottom: "fc6" + top: "fc6-reshape" + reshape_param{ + shape{ + dim: 16 + dim: 3 + dim: 4096 
+ } + } + include: { phase: TEST stage: "test-on-test" } +} +layer{ + name: "reshape-label" + type: "Reshape" + bottom: "label" + top: "reshape-label" + reshape_param{ + shape{ + dim: 16 + dim: 24 + } + } + include: { phase: TRAIN } +} +layer{ + name: "reshape-label" + type: "Reshape" + bottom: "label" + top: "reshape-label" + reshape_param{ + shape{ + dim: 16 + dim: 3 + } + } + include: { phase: TEST stage: "test-on-test" } +} +layer{ + name: "reshape-cm" + type: "Reshape" + bottom: "clip_markers" + top: "reshape-cm" + reshape_param{ + shape{ + dim: 16 + dim: 24 + } + } + include: { phase: TRAIN } +} +layer{ + name: "reshape-cm" + type: "Reshape" + bottom: "clip_markers" + top: "reshape-cm" + reshape_param{ + shape{ + dim: 16 + dim: 3 + } + } + include: { phase: TEST stage: "test-on-test" } +} +layer { + name: "lstm1" + type: "LSTM" + bottom: "fc6-reshape" + bottom: "reshape-cm" + top: "lstm1" + recurrent_param { + num_output: 256 + weight_filler { + type: "uniform" + min: -0.01 + max: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "lstm1-drop" + type: "Dropout" + bottom: "lstm1" + top: "lstm1-drop" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc8-final" + type: "InnerProduct" + bottom: "lstm1-drop" + top: "fc8-final" + param { + lr_mult: 10 + decay_mult: 1 + } + param { + lr_mult: 20 + decay_mult: 0 + } + inner_product_param { + num_output: 101 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + axis: 2 + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "fc8-final" + bottom: "reshape-label" + top: "loss" + softmax_param { + axis: 2 + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "fc8-final" + bottom: "reshape-label" + top: "accuracy" + accuracy_param { + axis: 2 + } +} diff --git a/examples/LRCN_activity_recognition/train_test_singleFrame_RGB.prototxt b/examples/LRCN_activity_recognition/train_test_singleFrame_RGB.prototxt 
new file mode 100644 index 00000000000..26e4ddc555d --- /dev/null +++ b/examples/LRCN_activity_recognition/train_test_singleFrame_RGB.prototxt @@ -0,0 +1,390 @@ +name: "singleFrame_RGB" +layer { + name: "data" + type: "ImageData" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 227 + mean_value: 103.939 + mean_value: 116.779 + mean_value: 123.68 + } + image_data_param { + source: "ucf101_singleFrame_RGB_train_split1.txt" + root_folder: "frames/" + batch_size: 128 + new_height: 240 + new_width: 320 + } +} +layer { + name: "data" + type: "ImageData" + top: "data" + top: "label" + include { + phase: TEST + stage: "test-on-test" + } + transform_param { + mirror: true + crop_size: 227 + mean_value: 103.939 + mean_value: 116.779 + mean_value: 123.68 + } + image_data_param { + source: "ucf101_singleFrame_RGB_test_split1.txt" + root_folder: "frames/" + batch_size: 128 + new_height: 240 + new_width: 320 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 7 + stride: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "conv1" + top: "conv1" +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "norm1" + type: "LRN" + bottom: "pool1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "norm1" + top: "conv2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + kernel_size: 5 + group: 2 + stride: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: 
"constant" + value: 0.1 + } + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "conv2" + top: "conv2" +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "norm2" + type: "LRN" + bottom: "pool2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv3" + type: "Convolution" + bottom: "norm2" + top: "conv3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "conv3" + top: "conv3" +} +layer { + name: "conv4" + type: "Convolution" + bottom: "conv3" + top: "conv4" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu4" + type: "ReLU" + bottom: "conv4" + top: "conv4" +} +layer { + name: "conv5" + type: "Convolution" + bottom: "conv4" + top: "conv5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu5" + type: "ReLU" + bottom: "conv5" + top: "conv5" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "fc6" + type: "InnerProduct" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param 
{ + num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "drop6" + type: "Dropout" + bottom: "fc6" + top: "fc6" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc7" + type: "InnerProduct" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + name: "drop7" + type: "Dropout" + bottom: "fc7" + top: "fc7" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc8-ucf" + type: "InnerProduct" + bottom: "fc7" + top: "fc8-ucf" + param { + lr_mult: 10 + decay_mult: 1 + } + param { + lr_mult: 20 + decay_mult: 0 + } + inner_product_param { + num_output: 101 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "fc8-ucf" + bottom: "label" + top: "loss" +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "fc8-ucf" + bottom: "label" + top: "accuracy" +} diff --git a/examples/LRCN_activity_recognition/ucf101_split1_testVideos.txt b/examples/LRCN_activity_recognition/ucf101_split1_testVideos.txt new file mode 100644 index 00000000000..29bc86188c7 --- /dev/null +++ b/examples/LRCN_activity_recognition/ucf101_split1_testVideos.txt @@ -0,0 +1,3783 @@ +JugglingBalls/v_JugglingBalls_g01_c02 45 +MilitaryParade/v_MilitaryParade_g05_c03 52 +Skiing/v_Skiing_g05_c03 80 +Diving/v_Diving_g06_c03 25 +BrushingTeeth/v_BrushingTeeth_g05_c05 19 +PlayingDhol/v_PlayingDhol_g04_c06 60 +CuttingInKitchen/v_CuttingInKitchen_g06_c01 24 +JumpRope/v_JumpRope_g06_c04 47 +Lunges/v_Lunges_g03_c02 51 
+ThrowDiscus/v_ThrowDiscus_g03_c02 92 +Lunges/v_Lunges_g04_c01 51 +HandstandPushups/v_HandStandPushups_g02_c01 36 +JugglingBalls/v_JugglingBalls_g03_c07 45 +WallPushups/v_WallPushups_g06_c07 98 +Haircut/v_Haircut_g04_c05 33 +BaseballPitch/v_BaseballPitch_g03_c05 6 +HammerThrow/v_HammerThrow_g06_c04 35 +FrisbeeCatch/v_FrisbeeCatch_g03_c02 30 +SkateBoarding/v_SkateBoarding_g05_c02 79 +BalanceBeam/v_BalanceBeam_g03_c01 4 +Archery/v_Archery_g04_c05 2 +ShavingBeard/v_ShavingBeard_g02_c04 77 +SkateBoarding/v_SkateBoarding_g06_c03 79 +BaseballPitch/v_BaseballPitch_g03_c04 6 +HammerThrow/v_HammerThrow_g04_c06 35 +Typing/v_Typing_g01_c01 94 +BabyCrawling/v_BabyCrawling_g07_c03 3 +SumoWrestling/v_SumoWrestling_g02_c04 86 +ApplyEyeMakeup/v_ApplyEyeMakeup_g07_c04 0 +Swing/v_Swing_g03_c01 88 +Mixing/v_Mixing_g06_c01 53 +WallPushups/v_WallPushups_g06_c04 98 +HammerThrow/v_HammerThrow_g04_c04 35 +Biking/v_Biking_g07_c06 10 +Skijet/v_Skijet_g01_c03 81 +GolfSwing/v_GolfSwing_g05_c02 32 +Nunchucks/v_Nunchucks_g07_c03 55 +Nunchucks/v_Nunchucks_g03_c06 55 +ApplyLipstick/v_ApplyLipstick_g05_c02 1 +RockClimbingIndoor/v_RockClimbingIndoor_g07_c03 73 +ApplyEyeMakeup/v_ApplyEyeMakeup_g03_c02 0 +Swing/v_Swing_g05_c02 88 +Skijet/v_Skijet_g02_c02 81 +HorseRiding/v_HorseRiding_g02_c03 41 +BlowingCandles/v_BlowingCandles_g05_c03 13 +Skijet/v_Skijet_g01_c01 81 +Shotput/v_Shotput_g02_c07 78 +RockClimbingIndoor/v_RockClimbingIndoor_g02_c02 73 +CricketShot/v_CricketShot_g06_c05 23 +ApplyEyeMakeup/v_ApplyEyeMakeup_g03_c06 0 +Swing/v_Swing_g07_c05 88 +Skiing/v_Skiing_g02_c03 80 +BalanceBeam/v_BalanceBeam_g07_c03 4 +YoYo/v_YoYo_g05_c01 100 +Lunges/v_Lunges_g06_c03 51 +HandstandPushups/v_HandStandPushups_g06_c02 36 +CricketShot/v_CricketShot_g03_c03 23 +WallPushups/v_WallPushups_g03_c04 98 +JumpingJack/v_JumpingJack_g02_c03 46 +BoxingSpeedBag/v_BoxingSpeedBag_g04_c02 17 +TableTennisShot/v_TableTennisShot_g01_c02 89 +TaiChi/v_TaiChi_g05_c03 90 +HorseRiding/v_HorseRiding_g02_c07 41 
+HandstandWalking/v_HandstandWalking_g02_c03 37 +Typing/v_Typing_g04_c04 94 +JavelinThrow/v_JavelinThrow_g02_c03 44 +HorseRiding/v_HorseRiding_g07_c02 41 +Typing/v_Typing_g07_c06 94 +SoccerJuggling/v_SoccerJuggling_g01_c02 83 +WalkingWithDog/v_WalkingWithDog_g04_c01 97 +PlayingDhol/v_PlayingDhol_g01_c02 60 +Skiing/v_Skiing_g01_c04 80 +PlayingDhol/v_PlayingDhol_g07_c06 60 +Drumming/v_Drumming_g03_c02 26 +PlayingFlute/v_PlayingFlute_g02_c02 61 +SoccerPenalty/v_SoccerPenalty_g06_c03 84 +BaseballPitch/v_BaseballPitch_g03_c01 6 +PizzaTossing/v_PizzaTossing_g07_c01 57 +Archery/v_Archery_g07_c04 2 +SkateBoarding/v_SkateBoarding_g04_c05 79 +ShavingBeard/v_ShavingBeard_g04_c04 77 +JavelinThrow/v_JavelinThrow_g05_c02 44 +Archery/v_Archery_g02_c07 2 +Typing/v_Typing_g03_c01 94 +WalkingWithDog/v_WalkingWithDog_g07_c01 97 +Nunchucks/v_Nunchucks_g01_c01 55 +HorseRiding/v_HorseRiding_g03_c07 41 +Drumming/v_Drumming_g06_c04 26 +PushUps/v_PushUps_g01_c01 71 +FloorGymnastics/v_FloorGymnastics_g03_c04 29 +PlayingSitar/v_PlayingSitar_g03_c04 64 +PlayingDaf/v_PlayingDaf_g05_c05 59 +PlayingGuitar/v_PlayingGuitar_g01_c01 62 +PlayingDaf/v_PlayingDaf_g06_c04 59 +SalsaSpin/v_SalsaSpin_g03_c05 76 +SoccerPenalty/v_SoccerPenalty_g05_c06 84 +BreastStroke/v_BreastStroke_g02_c01 18 +ShavingBeard/v_ShavingBeard_g05_c01 77 +CleanAndJerk/v_CleanAndJerk_g01_c05 20 +BrushingTeeth/v_BrushingTeeth_g06_c01 19 +Nunchucks/v_Nunchucks_g06_c04 55 +TrampolineJumping/v_TrampolineJumping_g02_c04 93 +CricketBowling/v_CricketBowling_g03_c02 22 +Punch/v_Punch_g07_c06 70 +Biking/v_Biking_g04_c05 10 +SumoWrestling/v_SumoWrestling_g03_c01 86 +PullUps/v_PullUps_g01_c01 69 +GolfSwing/v_GolfSwing_g02_c02 32 +Skiing/v_Skiing_g07_c04 80 +PlayingFlute/v_PlayingFlute_g06_c04 61 +Drumming/v_Drumming_g02_c06 26 +PlayingGuitar/v_PlayingGuitar_g04_c02 62 +Drumming/v_Drumming_g06_c03 26 +BoxingPunchingBag/v_BoxingPunchingBag_g04_c03 16 +Billiards/v_Billiards_g03_c02 11 +FloorGymnastics/v_FloorGymnastics_g01_c02 29 
+BaseballPitch/v_BaseballPitch_g03_c02 6 +WallPushups/v_WallPushups_g05_c04 98 +CleanAndJerk/v_CleanAndJerk_g04_c04 20 +HammerThrow/v_HammerThrow_g04_c03 35 +WallPushups/v_WallPushups_g07_c04 98 +SoccerPenalty/v_SoccerPenalty_g04_c04 84 +RopeClimbing/v_RopeClimbing_g07_c03 74 +BaseballPitch/v_BaseballPitch_g06_c07 6 +MoppingFloor/v_MoppingFloor_g01_c02 54 +BodyWeightSquats/v_BodyWeightSquats_g02_c03 14 +Lunges/v_Lunges_g02_c02 51 +HighJump/v_HighJump_g01_c04 39 +Diving/v_Diving_g05_c02 25 +Skijet/v_Skijet_g05_c04 81 +SkyDiving/v_SkyDiving_g07_c04 82 +WritingOnBoard/v_WritingOnBoard_g03_c04 99 +Mixing/v_Mixing_g07_c01 53 +Archery/v_Archery_g02_c06 2 +WalkingWithDog/v_WalkingWithDog_g01_c01 97 +BandMarching/v_BandMarching_g03_c03 5 +TableTennisShot/v_TableTennisShot_g07_c02 89 +BodyWeightSquats/v_BodyWeightSquats_g03_c01 14 +BasketballDunk/v_BasketballDunk_g02_c02 8 +BaseballPitch/v_BaseballPitch_g07_c07 6 +HighJump/v_HighJump_g07_c05 39 +RockClimbingIndoor/v_RockClimbingIndoor_g05_c01 73 +HorseRace/v_HorseRace_g05_c03 40 +PlayingDaf/v_PlayingDaf_g06_c06 59 +JugglingBalls/v_JugglingBalls_g03_c03 45 +PlayingPiano/v_PlayingPiano_g01_c03 63 +BlowingCandles/v_BlowingCandles_g06_c06 13 +Skiing/v_Skiing_g03_c07 80 +BlowingCandles/v_BlowingCandles_g01_c02 13 +PommelHorse/v_PommelHorse_g04_c01 68 +HandstandPushups/v_HandStandPushups_g04_c04 36 +Rowing/v_Rowing_g07_c03 75 +Haircut/v_Haircut_g07_c01 33 +TennisSwing/v_TennisSwing_g02_c06 91 +PlayingGuitar/v_PlayingGuitar_g02_c04 62 +PlayingDaf/v_PlayingDaf_g01_c04 59 +JumpingJack/v_JumpingJack_g07_c02 46 +PizzaTossing/v_PizzaTossing_g02_c02 57 +BenchPress/v_BenchPress_g02_c05 9 +Kayaking/v_Kayaking_g03_c04 48 +SumoWrestling/v_SumoWrestling_g06_c07 86 +Knitting/v_Knitting_g07_c02 49 +PlayingSitar/v_PlayingSitar_g05_c06 64 +BabyCrawling/v_BabyCrawling_g03_c04 3 +Rafting/v_Rafting_g03_c04 72 +RockClimbingIndoor/v_RockClimbingIndoor_g06_c05 73 +Nunchucks/v_Nunchucks_g03_c07 55 +SkateBoarding/v_SkateBoarding_g03_c04 79 
+Surfing/v_Surfing_g02_c04 87 +Shotput/v_Shotput_g06_c02 78 +CliffDiving/v_CliffDiving_g05_c01 21 +PlayingTabla/v_PlayingTabla_g02_c04 65 +CleanAndJerk/v_CleanAndJerk_g03_c04 20 +MilitaryParade/v_MilitaryParade_g01_c01 52 +Kayaking/v_Kayaking_g04_c06 48 +PoleVault/v_PoleVault_g02_c03 67 +Fencing/v_Fencing_g04_c05 27 +Diving/v_Diving_g07_c01 25 +UnevenBars/v_UnevenBars_g02_c01 95 +Shotput/v_Shotput_g02_c05 78 +VolleyballSpiking/v_VolleyballSpiking_g06_c01 96 +Rowing/v_Rowing_g02_c06 75 +Billiards/v_Billiards_g01_c02 11 +FieldHockeyPenalty/v_FieldHockeyPenalty_g01_c05 28 +SoccerJuggling/v_SoccerJuggling_g07_c01 83 +BasketballDunk/v_BasketballDunk_g03_c06 8 +LongJump/v_LongJump_g03_c04 50 +Swing/v_Swing_g06_c03 88 +PullUps/v_PullUps_g06_c01 69 +Biking/v_Biking_g06_c01 10 +ApplyLipstick/v_ApplyLipstick_g05_c01 1 +SalsaSpin/v_SalsaSpin_g04_c06 76 +PizzaTossing/v_PizzaTossing_g02_c05 57 +SalsaSpin/v_SalsaSpin_g01_c02 76 +BandMarching/v_BandMarching_g03_c04 5 +BodyWeightSquats/v_BodyWeightSquats_g06_c01 14 +BoxingPunchingBag/v_BoxingPunchingBag_g01_c06 16 +Fencing/v_Fencing_g05_c04 27 +BlowDryHair/v_BlowDryHair_g04_c03 12 +ShavingBeard/v_ShavingBeard_g06_c04 77 +Skiing/v_Skiing_g01_c06 80 +Billiards/v_Billiards_g04_c03 11 +PlayingSitar/v_PlayingSitar_g04_c04 64 +FrisbeeCatch/v_FrisbeeCatch_g01_c06 30 +UnevenBars/v_UnevenBars_g01_c02 95 +BoxingSpeedBag/v_BoxingSpeedBag_g04_c01 17 +HorseRiding/v_HorseRiding_g07_c07 41 +TennisSwing/v_TennisSwing_g05_c02 91 +BalanceBeam/v_BalanceBeam_g07_c04 4 +Mixing/v_Mixing_g03_c06 53 +GolfSwing/v_GolfSwing_g05_c04 32 +TennisSwing/v_TennisSwing_g04_c02 91 +JavelinThrow/v_JavelinThrow_g06_c03 44 +BlowDryHair/v_BlowDryHair_g02_c03 12 +UnevenBars/v_UnevenBars_g05_c03 95 +CricketBowling/v_CricketBowling_g03_c03 22 +CleanAndJerk/v_CleanAndJerk_g02_c02 20 +Nunchucks/v_Nunchucks_g04_c06 55 +HandstandWalking/v_HandstandWalking_g01_c04 37 +RopeClimbing/v_RopeClimbing_g06_c04 74 +YoYo/v_YoYo_g02_c05 100 +Kayaking/v_Kayaking_g01_c06 48 
+Mixing/v_Mixing_g04_c06 53 +BenchPress/v_BenchPress_g05_c05 9 +Drumming/v_Drumming_g07_c03 26 +SoccerJuggling/v_SoccerJuggling_g04_c01 83 +PlayingCello/v_PlayingCello_g05_c05 58 +Archery/v_Archery_g06_c06 2 +BabyCrawling/v_BabyCrawling_g06_c05 3 +Typing/v_Typing_g01_c02 94 +PlayingCello/v_PlayingCello_g07_c03 58 +JumpRope/v_JumpRope_g02_c06 47 +ThrowDiscus/v_ThrowDiscus_g02_c01 92 +Typing/v_Typing_g01_c05 94 +ShavingBeard/v_ShavingBeard_g03_c06 77 +PlayingDaf/v_PlayingDaf_g07_c03 59 +HorseRiding/v_HorseRiding_g05_c07 41 +Mixing/v_Mixing_g04_c07 53 +PoleVault/v_PoleVault_g04_c04 67 +Basketball/v_Basketball_g02_c06 7 +LongJump/v_LongJump_g04_c06 50 +PoleVault/v_PoleVault_g04_c07 67 +Fencing/v_Fencing_g03_c02 27 +PlayingSitar/v_PlayingSitar_g05_c02 64 +WalkingWithDog/v_WalkingWithDog_g02_c06 97 +BandMarching/v_BandMarching_g05_c02 5 +PlayingDaf/v_PlayingDaf_g06_c01 59 +JumpingJack/v_JumpingJack_g06_c06 46 +JumpingJack/v_JumpingJack_g03_c03 46 +JugglingBalls/v_JugglingBalls_g06_c03 45 +Mixing/v_Mixing_g06_c04 53 +Typing/v_Typing_g07_c04 94 +RopeClimbing/v_RopeClimbing_g02_c01 74 +FrontCrawl/v_FrontCrawl_g03_c04 31 +PlayingSitar/v_PlayingSitar_g06_c05 64 +Diving/v_Diving_g01_c07 25 +ShavingBeard/v_ShavingBeard_g01_c04 77 +BasketballDunk/v_BasketballDunk_g02_c01 8 +PushUps/v_PushUps_g06_c02 71 +MilitaryParade/v_MilitaryParade_g02_c02 52 +Rowing/v_Rowing_g06_c01 75 +TrampolineJumping/v_TrampolineJumping_g02_c02 93 +HorseRace/v_HorseRace_g07_c06 40 +Basketball/v_Basketball_g05_c02 7 +Punch/v_Punch_g02_c01 70 +Hammering/v_Hammering_g02_c04 34 +SalsaSpin/v_SalsaSpin_g03_c01 76 +LongJump/v_LongJump_g07_c01 50 +PlayingFlute/v_PlayingFlute_g02_c07 61 +ThrowDiscus/v_ThrowDiscus_g06_c05 92 +PizzaTossing/v_PizzaTossing_g04_c03 57 +Bowling/v_Bowling_g03_c04 15 +SoccerPenalty/v_SoccerPenalty_g03_c01 84 +Mixing/v_Mixing_g01_c01 53 +CleanAndJerk/v_CleanAndJerk_g04_c02 20 +Rafting/v_Rafting_g06_c03 72 +PlayingGuitar/v_PlayingGuitar_g07_c03 62 +GolfSwing/v_GolfSwing_g06_c01 32 
+Punch/v_Punch_g07_c07 70 +JumpRope/v_JumpRope_g05_c01 47 +Rowing/v_Rowing_g02_c02 75 +LongJump/v_LongJump_g05_c03 50 +LongJump/v_LongJump_g05_c04 50 +Basketball/v_Basketball_g02_c02 7 +GolfSwing/v_GolfSwing_g06_c04 32 +GolfSwing/v_GolfSwing_g05_c07 32 +HorseRiding/v_HorseRiding_g03_c01 41 +JumpingJack/v_JumpingJack_g07_c03 46 +PoleVault/v_PoleVault_g03_c03 67 +CuttingInKitchen/v_CuttingInKitchen_g02_c02 24 +PlayingCello/v_PlayingCello_g05_c07 58 +WritingOnBoard/v_WritingOnBoard_g05_c02 99 +BenchPress/v_BenchPress_g05_c01 9 +JugglingBalls/v_JugglingBalls_g07_c06 45 +Drumming/v_Drumming_g01_c05 26 +PlayingDhol/v_PlayingDhol_g01_c01 60 +FieldHockeyPenalty/v_FieldHockeyPenalty_g03_c03 28 +CliffDiving/v_CliffDiving_g03_c01 21 +SoccerJuggling/v_SoccerJuggling_g01_c03 83 +SkateBoarding/v_SkateBoarding_g07_c03 79 +Rowing/v_Rowing_g03_c03 75 +PommelHorse/v_PommelHorse_g03_c01 68 +Hammering/v_Hammering_g07_c01 34 +SkyDiving/v_SkyDiving_g02_c01 82 +UnevenBars/v_UnevenBars_g01_c01 95 +Drumming/v_Drumming_g04_c03 26 +BodyWeightSquats/v_BodyWeightSquats_g01_c02 14 +PlayingCello/v_PlayingCello_g06_c01 58 +BenchPress/v_BenchPress_g01_c04 9 +BenchPress/v_BenchPress_g03_c05 9 +PlayingDaf/v_PlayingDaf_g01_c02 59 +BlowDryHair/v_BlowDryHair_g04_c02 12 +PizzaTossing/v_PizzaTossing_g05_c01 57 +SalsaSpin/v_SalsaSpin_g04_c04 76 +JumpingJack/v_JumpingJack_g05_c03 46 +Knitting/v_Knitting_g03_c04 49 +PlayingGuitar/v_PlayingGuitar_g01_c06 62 +Typing/v_Typing_g03_c05 94 +TennisSwing/v_TennisSwing_g07_c02 91 +TableTennisShot/v_TableTennisShot_g01_c01 89 +TableTennisShot/v_TableTennisShot_g05_c04 89 +CricketBowling/v_CricketBowling_g01_c01 22 +JumpRope/v_JumpRope_g03_c03 47 +RopeClimbing/v_RopeClimbing_g01_c01 74 +PlayingTabla/v_PlayingTabla_g01_c04 65 +PlayingViolin/v_PlayingViolin_g02_c02 66 +BasketballDunk/v_BasketballDunk_g02_c03 8 +PommelHorse/v_PommelHorse_g07_c04 68 +Fencing/v_Fencing_g02_c03 27 +BrushingTeeth/v_BrushingTeeth_g02_c01 19 +BoxingPunchingBag/v_BoxingPunchingBag_g06_c06 16 
+StillRings/v_StillRings_g04_c04 85 +BlowDryHair/v_BlowDryHair_g03_c02 12 +IceDancing/v_IceDancing_g04_c01 43 +PlayingTabla/v_PlayingTabla_g01_c03 65 +Lunges/v_Lunges_g01_c01 51 +PlayingGuitar/v_PlayingGuitar_g04_c04 62 +SalsaSpin/v_SalsaSpin_g05_c01 76 +SoccerPenalty/v_SoccerPenalty_g04_c01 84 +BoxingSpeedBag/v_BoxingSpeedBag_g02_c04 17 +HorseRiding/v_HorseRiding_g02_c01 41 +Knitting/v_Knitting_g06_c01 49 +BabyCrawling/v_BabyCrawling_g04_c01 3 +BasketballDunk/v_BasketballDunk_g06_c02 8 +ApplyEyeMakeup/v_ApplyEyeMakeup_g04_c07 0 +FieldHockeyPenalty/v_FieldHockeyPenalty_g03_c04 28 +Knitting/v_Knitting_g01_c02 49 +PlayingDaf/v_PlayingDaf_g02_c02 59 +ShavingBeard/v_ShavingBeard_g05_c05 77 +Diving/v_Diving_g06_c02 25 +Biking/v_Biking_g01_c04 10 +HulaHoop/v_HulaHoop_g06_c02 42 +HandstandWalking/v_HandstandWalking_g02_c01 37 +PlayingGuitar/v_PlayingGuitar_g07_c01 62 +BaseballPitch/v_BaseballPitch_g07_c03 6 +BoxingSpeedBag/v_BoxingSpeedBag_g01_c02 17 +HulaHoop/v_HulaHoop_g01_c04 42 +Haircut/v_Haircut_g03_c02 33 +Bowling/v_Bowling_g07_c03 15 +CricketBowling/v_CricketBowling_g01_c03 22 +PoleVault/v_PoleVault_g06_c05 67 +MoppingFloor/v_MoppingFloor_g06_c01 54 +Lunges/v_Lunges_g05_c02 51 +Kayaking/v_Kayaking_g04_c04 48 +FloorGymnastics/v_FloorGymnastics_g05_c03 29 +BoxingSpeedBag/v_BoxingSpeedBag_g02_c02 17 +ShavingBeard/v_ShavingBeard_g06_c03 77 +Rowing/v_Rowing_g05_c02 75 +PlayingDhol/v_PlayingDhol_g01_c04 60 +JumpingJack/v_JumpingJack_g01_c06 46 +Punch/v_Punch_g07_c03 70 +YoYo/v_YoYo_g01_c04 100 +Typing/v_Typing_g02_c03 94 +PlayingPiano/v_PlayingPiano_g07_c03 63 +CliffDiving/v_CliffDiving_g04_c02 21 +SkateBoarding/v_SkateBoarding_g07_c01 79 +RopeClimbing/v_RopeClimbing_g03_c02 74 +SkateBoarding/v_SkateBoarding_g07_c02 79 +JavelinThrow/v_JavelinThrow_g04_c03 44 +HammerThrow/v_HammerThrow_g02_c05 35 +HorseRiding/v_HorseRiding_g07_c05 41 +CleanAndJerk/v_CleanAndJerk_g07_c03 20 +PlayingGuitar/v_PlayingGuitar_g04_c03 62 +HammerThrow/v_HammerThrow_g05_c05 35 
+PlayingCello/v_PlayingCello_g04_c02 58 +SkyDiving/v_SkyDiving_g02_c02 82 +Swing/v_Swing_g04_c02 88 +Skiing/v_Skiing_g03_c02 80 +BoxingSpeedBag/v_BoxingSpeedBag_g04_c04 17 +Skiing/v_Skiing_g04_c05 80 +HeadMassage/v_HeadMassage_g07_c04 38 +YoYo/v_YoYo_g04_c01 100 +RockClimbingIndoor/v_RockClimbingIndoor_g01_c03 73 +Shotput/v_Shotput_g01_c07 78 +SumoWrestling/v_SumoWrestling_g07_c07 86 +ApplyLipstick/v_ApplyLipstick_g03_c03 1 +Diving/v_Diving_g03_c05 25 +Hammering/v_Hammering_g07_c04 34 +BenchPress/v_BenchPress_g06_c07 9 +SoccerPenalty/v_SoccerPenalty_g05_c07 84 +HammerThrow/v_HammerThrow_g06_c02 35 +HeadMassage/v_HeadMassage_g05_c01 38 +CricketBowling/v_CricketBowling_g04_c03 22 +BalanceBeam/v_BalanceBeam_g07_c01 4 +BasketballDunk/v_BasketballDunk_g06_c04 8 +JumpingJack/v_JumpingJack_g01_c01 46 +BlowingCandles/v_BlowingCandles_g07_c01 13 +Bowling/v_Bowling_g07_c01 15 +YoYo/v_YoYo_g05_c05 100 +JumpingJack/v_JumpingJack_g03_c02 46 +ApplyEyeMakeup/v_ApplyEyeMakeup_g06_c03 0 +Kayaking/v_Kayaking_g07_c02 48 +Billiards/v_Billiards_g03_c01 11 +BodyWeightSquats/v_BodyWeightSquats_g01_c04 14 +PlayingCello/v_PlayingCello_g01_c07 58 +CuttingInKitchen/v_CuttingInKitchen_g03_c01 24 +VolleyballSpiking/v_VolleyballSpiking_g07_c04 96 +Diving/v_Diving_g05_c06 25 +Drumming/v_Drumming_g07_c01 26 +Nunchucks/v_Nunchucks_g02_c01 55 +Shotput/v_Shotput_g05_c05 78 +CricketShot/v_CricketShot_g02_c04 23 +SkateBoarding/v_SkateBoarding_g04_c01 79 +ParallelBars/v_ParallelBars_g03_c04 56 +CricketBowling/v_CricketBowling_g01_c06 22 +SalsaSpin/v_SalsaSpin_g02_c04 76 +Fencing/v_Fencing_g06_c01 27 +BabyCrawling/v_BabyCrawling_g05_c02 3 +Diving/v_Diving_g04_c01 25 +CricketShot/v_CricketShot_g07_c05 23 +HighJump/v_HighJump_g02_c05 39 +HandstandPushups/v_HandStandPushups_g07_c03 36 +BreastStroke/v_BreastStroke_g05_c02 18 +BenchPress/v_BenchPress_g06_c01 9 +UnevenBars/v_UnevenBars_g07_c04 95 +ThrowDiscus/v_ThrowDiscus_g05_c02 92 +BlowingCandles/v_BlowingCandles_g01_c01 13 
+JumpingJack/v_JumpingJack_g01_c02 46 +BrushingTeeth/v_BrushingTeeth_g02_c05 19 +WritingOnBoard/v_WritingOnBoard_g05_c03 99 +TennisSwing/v_TennisSwing_g05_c06 91 +Haircut/v_Haircut_g04_c02 33 +BoxingPunchingBag/v_BoxingPunchingBag_g02_c05 16 +Kayaking/v_Kayaking_g06_c01 48 +ThrowDiscus/v_ThrowDiscus_g07_c05 92 +HandstandWalking/v_HandstandWalking_g03_c04 37 +Surfing/v_Surfing_g06_c01 87 +BenchPress/v_BenchPress_g06_c03 9 +ApplyLipstick/v_ApplyLipstick_g05_c05 1 +WalkingWithDog/v_WalkingWithDog_g05_c02 97 +ApplyLipstick/v_ApplyLipstick_g04_c02 1 +PlayingCello/v_PlayingCello_g07_c05 58 +HandstandWalking/v_HandstandWalking_g02_c04 37 +HighJump/v_HighJump_g03_c03 39 +PommelHorse/v_PommelHorse_g05_c02 68 +Punch/v_Punch_g05_c02 70 +PlayingDhol/v_PlayingDhol_g04_c01 60 +IceDancing/v_IceDancing_g06_c01 43 +SkyDiving/v_SkyDiving_g06_c04 82 +IceDancing/v_IceDancing_g01_c05 43 +TrampolineJumping/v_TrampolineJumping_g02_c05 93 +ParallelBars/v_ParallelBars_g06_c02 56 +CleanAndJerk/v_CleanAndJerk_g02_c03 20 +Fencing/v_Fencing_g03_c04 27 +Swing/v_Swing_g05_c04 88 +LongJump/v_LongJump_g03_c03 50 +Basketball/v_Basketball_g01_c07 7 +BalanceBeam/v_BalanceBeam_g06_c02 4 +IceDancing/v_IceDancing_g05_c05 43 +Swing/v_Swing_g05_c05 88 +SkyDiving/v_SkyDiving_g01_c01 82 +CliffDiving/v_CliffDiving_g02_c02 21 +IceDancing/v_IceDancing_g04_c04 43 +BandMarching/v_BandMarching_g05_c07 5 +Billiards/v_Billiards_g05_c04 11 +CricketBowling/v_CricketBowling_g07_c03 22 +BoxingSpeedBag/v_BoxingSpeedBag_g06_c03 17 +VolleyballSpiking/v_VolleyballSpiking_g01_c01 96 +WallPushups/v_WallPushups_g03_c01 98 +IceDancing/v_IceDancing_g06_c04 43 +CliffDiving/v_CliffDiving_g06_c05 21 +HammerThrow/v_HammerThrow_g01_c02 35 +Punch/v_Punch_g03_c03 70 +WritingOnBoard/v_WritingOnBoard_g04_c03 99 +RockClimbingIndoor/v_RockClimbingIndoor_g07_c02 73 +PommelHorse/v_PommelHorse_g04_c05 68 +Bowling/v_Bowling_g06_c02 15 +Drumming/v_Drumming_g03_c05 26 +HorseRace/v_HorseRace_g01_c04 40 +SumoWrestling/v_SumoWrestling_g01_c01 86 
+PlayingDhol/v_PlayingDhol_g03_c06 60 +BaseballPitch/v_BaseballPitch_g01_c06 6 +HeadMassage/v_HeadMassage_g06_c07 38 +TrampolineJumping/v_TrampolineJumping_g07_c04 93 +BlowDryHair/v_BlowDryHair_g02_c02 12 +WalkingWithDog/v_WalkingWithDog_g03_c04 97 +StillRings/v_StillRings_g05_c01 85 +SalsaSpin/v_SalsaSpin_g07_c01 76 +Billiards/v_Billiards_g03_c03 11 +ShavingBeard/v_ShavingBeard_g06_c01 77 +TennisSwing/v_TennisSwing_g04_c07 91 +Rowing/v_Rowing_g05_c03 75 +HammerThrow/v_HammerThrow_g02_c06 35 +WritingOnBoard/v_WritingOnBoard_g07_c06 99 +FrontCrawl/v_FrontCrawl_g01_c02 31 +Billiards/v_Billiards_g04_c07 11 +MoppingFloor/v_MoppingFloor_g01_c03 54 +SkateBoarding/v_SkateBoarding_g07_c04 79 +CuttingInKitchen/v_CuttingInKitchen_g01_c04 24 +StillRings/v_StillRings_g05_c02 85 +SoccerJuggling/v_SoccerJuggling_g03_c04 83 +Haircut/v_Haircut_g05_c03 33 +BreastStroke/v_BreastStroke_g01_c02 18 +BreastStroke/v_BreastStroke_g01_c01 18 +BoxingSpeedBag/v_BoxingSpeedBag_g01_c04 17 +SumoWrestling/v_SumoWrestling_g02_c02 86 +CliffDiving/v_CliffDiving_g07_c04 21 +VolleyballSpiking/v_VolleyballSpiking_g05_c05 96 +Typing/v_Typing_g01_c07 94 +Skiing/v_Skiing_g03_c05 80 +BabyCrawling/v_BabyCrawling_g01_c02 3 +Haircut/v_Haircut_g03_c05 33 +BasketballDunk/v_BasketballDunk_g04_c02 8 +PlayingDhol/v_PlayingDhol_g06_c01 60 +PlayingGuitar/v_PlayingGuitar_g02_c03 62 +TennisSwing/v_TennisSwing_g03_c04 91 +Kayaking/v_Kayaking_g02_c03 48 +JavelinThrow/v_JavelinThrow_g02_c04 44 +HorseRace/v_HorseRace_g01_c02 40 +BoxingPunchingBag/v_BoxingPunchingBag_g03_c04 16 +Basketball/v_Basketball_g06_c01 7 +BrushingTeeth/v_BrushingTeeth_g01_c04 19 +BoxingSpeedBag/v_BoxingSpeedBag_g05_c01 17 +PullUps/v_PullUps_g06_c04 69 +PullUps/v_PullUps_g02_c01 69 +Surfing/v_Surfing_g01_c06 87 +PullUps/v_PullUps_g01_c04 69 +BlowDryHair/v_BlowDryHair_g04_c01 12 +BasketballDunk/v_BasketballDunk_g03_c04 8 +HorseRace/v_HorseRace_g04_c04 40 +RopeClimbing/v_RopeClimbing_g02_c05 74 +Nunchucks/v_Nunchucks_g05_c04 55 
+SkyDiving/v_SkyDiving_g07_c03 82 +TennisSwing/v_TennisSwing_g02_c04 91 +LongJump/v_LongJump_g01_c07 50 +VolleyballSpiking/v_VolleyballSpiking_g03_c04 96 +TableTennisShot/v_TableTennisShot_g05_c06 89 +Basketball/v_Basketball_g06_c03 7 +PoleVault/v_PoleVault_g04_c02 67 +YoYo/v_YoYo_g05_c02 100 +Shotput/v_Shotput_g06_c01 78 +Skiing/v_Skiing_g06_c05 80 +Rowing/v_Rowing_g03_c01 75 +HammerThrow/v_HammerThrow_g07_c04 35 +Haircut/v_Haircut_g04_c03 33 +SoccerJuggling/v_SoccerJuggling_g02_c03 83 +BodyWeightSquats/v_BodyWeightSquats_g03_c02 14 +Skiing/v_Skiing_g03_c06 80 +LongJump/v_LongJump_g06_c02 50 +BreastStroke/v_BreastStroke_g06_c03 18 +PlayingGuitar/v_PlayingGuitar_g05_c03 62 +ThrowDiscus/v_ThrowDiscus_g02_c07 92 +Biking/v_Biking_g03_c04 10 +CricketBowling/v_CricketBowling_g04_c05 22 +CleanAndJerk/v_CleanAndJerk_g05_c01 20 +WritingOnBoard/v_WritingOnBoard_g07_c01 99 +VolleyballSpiking/v_VolleyballSpiking_g04_c03 96 +TennisSwing/v_TennisSwing_g01_c06 91 +BlowDryHair/v_BlowDryHair_g05_c02 12 +HandstandWalking/v_HandstandWalking_g04_c05 37 +FloorGymnastics/v_FloorGymnastics_g01_c05 29 +HorseRiding/v_HorseRiding_g05_c01 41 +PlayingPiano/v_PlayingPiano_g02_c01 63 +FrontCrawl/v_FrontCrawl_g05_c02 31 +BoxingPunchingBag/v_BoxingPunchingBag_g01_c01 16 +HeadMassage/v_HeadMassage_g01_c02 38 +WallPushups/v_WallPushups_g02_c02 98 +Typing/v_Typing_g07_c05 94 +FloorGymnastics/v_FloorGymnastics_g03_c03 29 +Kayaking/v_Kayaking_g05_c02 48 +SkateBoarding/v_SkateBoarding_g01_c04 79 +Swing/v_Swing_g05_c07 88 +PlayingSitar/v_PlayingSitar_g04_c06 64 +ParallelBars/v_ParallelBars_g07_c03 56 +HighJump/v_HighJump_g05_c03 39 +FrontCrawl/v_FrontCrawl_g05_c03 31 +TrampolineJumping/v_TrampolineJumping_g07_c02 93 +PushUps/v_PushUps_g06_c04 71 +CuttingInKitchen/v_CuttingInKitchen_g02_c03 24 +WalkingWithDog/v_WalkingWithDog_g05_c03 97 +FrontCrawl/v_FrontCrawl_g04_c07 31 +TennisSwing/v_TennisSwing_g01_c03 91 +BlowDryHair/v_BlowDryHair_g06_c05 12 +PlayingTabla/v_PlayingTabla_g06_c02 65 
+TennisSwing/v_TennisSwing_g05_c01 91 +PizzaTossing/v_PizzaTossing_g07_c04 57 +Typing/v_Typing_g02_c01 94 +FieldHockeyPenalty/v_FieldHockeyPenalty_g06_c02 28 +PlayingFlute/v_PlayingFlute_g02_c01 61 +HandstandWalking/v_HandstandWalking_g05_c03 37 +ApplyEyeMakeup/v_ApplyEyeMakeup_g04_c06 0 +CricketBowling/v_CricketBowling_g01_c05 22 +HeadMassage/v_HeadMassage_g03_c05 38 +MilitaryParade/v_MilitaryParade_g07_c05 52 +SalsaSpin/v_SalsaSpin_g05_c03 76 +StillRings/v_StillRings_g05_c03 85 +FrontCrawl/v_FrontCrawl_g02_c01 31 +PommelHorse/v_PommelHorse_g03_c03 68 +BenchPress/v_BenchPress_g02_c07 9 +BabyCrawling/v_BabyCrawling_g03_c03 3 +Typing/v_Typing_g06_c03 94 +Archery/v_Archery_g01_c02 2 +WritingOnBoard/v_WritingOnBoard_g05_c05 99 +BreastStroke/v_BreastStroke_g04_c02 18 +WalkingWithDog/v_WalkingWithDog_g07_c02 97 +JumpingJack/v_JumpingJack_g07_c01 46 +JumpRope/v_JumpRope_g06_c02 47 +WallPushups/v_WallPushups_g01_c01 98 +Billiards/v_Billiards_g02_c02 11 +Mixing/v_Mixing_g02_c04 53 +YoYo/v_YoYo_g04_c02 100 +LongJump/v_LongJump_g06_c04 50 +BabyCrawling/v_BabyCrawling_g04_c04 3 +GolfSwing/v_GolfSwing_g01_c03 32 +HorseRiding/v_HorseRiding_g01_c01 41 +SalsaSpin/v_SalsaSpin_g04_c03 76 +HeadMassage/v_HeadMassage_g04_c01 38 +CleanAndJerk/v_CleanAndJerk_g01_c01 20 +JavelinThrow/v_JavelinThrow_g07_c05 44 +WalkingWithDog/v_WalkingWithDog_g02_c05 97 +YoYo/v_YoYo_g07_c04 100 +ThrowDiscus/v_ThrowDiscus_g03_c04 92 +MilitaryParade/v_MilitaryParade_g06_c03 52 +Basketball/v_Basketball_g06_c04 7 +BlowDryHair/v_BlowDryHair_g06_c04 12 +PushUps/v_PushUps_g04_c01 71 +MilitaryParade/v_MilitaryParade_g05_c04 52 +ParallelBars/v_ParallelBars_g04_c05 56 +BalanceBeam/v_BalanceBeam_g03_c03 4 +Haircut/v_Haircut_g06_c01 33 +PizzaTossing/v_PizzaTossing_g03_c01 57 +Archery/v_Archery_g07_c01 2 +VolleyballSpiking/v_VolleyballSpiking_g06_c03 96 +FrisbeeCatch/v_FrisbeeCatch_g03_c05 30 +PlayingDhol/v_PlayingDhol_g02_c01 60 +Diving/v_Diving_g01_c06 25 +CuttingInKitchen/v_CuttingInKitchen_g01_c03 24 
+ShavingBeard/v_ShavingBeard_g02_c02 77 +CuttingInKitchen/v_CuttingInKitchen_g04_c01 24 +YoYo/v_YoYo_g07_c02 100 +Shotput/v_Shotput_g07_c01 78 +SalsaSpin/v_SalsaSpin_g02_c06 76 +HandstandWalking/v_HandstandWalking_g07_c04 37 +RockClimbingIndoor/v_RockClimbingIndoor_g02_c03 73 +PushUps/v_PushUps_g03_c02 71 +HeadMassage/v_HeadMassage_g04_c03 38 +PoleVault/v_PoleVault_g01_c02 67 +Typing/v_Typing_g01_c04 94 +PlayingSitar/v_PlayingSitar_g06_c04 64 +Knitting/v_Knitting_g02_c01 49 +PommelHorse/v_PommelHorse_g02_c02 68 +FrisbeeCatch/v_FrisbeeCatch_g01_c03 30 +IceDancing/v_IceDancing_g03_c02 43 +PushUps/v_PushUps_g07_c02 71 +BasketballDunk/v_BasketballDunk_g05_c04 8 +FloorGymnastics/v_FloorGymnastics_g04_c01 29 +Drumming/v_Drumming_g04_c02 26 +Typing/v_Typing_g06_c06 94 +Lunges/v_Lunges_g07_c01 51 +BreastStroke/v_BreastStroke_g06_c01 18 +LongJump/v_LongJump_g03_c01 50 +SalsaSpin/v_SalsaSpin_g07_c06 76 +Bowling/v_Bowling_g03_c01 15 +HammerThrow/v_HammerThrow_g06_c05 35 +Diving/v_Diving_g02_c03 25 +SumoWrestling/v_SumoWrestling_g07_c06 86 +GolfSwing/v_GolfSwing_g03_c03 32 +JumpRope/v_JumpRope_g06_c05 47 +WritingOnBoard/v_WritingOnBoard_g02_c03 99 +FrontCrawl/v_FrontCrawl_g04_c06 31 +ParallelBars/v_ParallelBars_g04_c01 56 +SoccerPenalty/v_SoccerPenalty_g01_c06 84 +BlowDryHair/v_BlowDryHair_g01_c04 12 +PlayingTabla/v_PlayingTabla_g07_c04 65 +ParallelBars/v_ParallelBars_g03_c01 56 +Skiing/v_Skiing_g03_c01 80 +RopeClimbing/v_RopeClimbing_g05_c03 74 +Haircut/v_Haircut_g02_c01 33 +TableTennisShot/v_TableTennisShot_g06_c03 89 +ThrowDiscus/v_ThrowDiscus_g07_c03 92 +BalanceBeam/v_BalanceBeam_g04_c03 4 +BenchPress/v_BenchPress_g03_c02 9 +ApplyEyeMakeup/v_ApplyEyeMakeup_g06_c01 0 +Biking/v_Biking_g05_c07 10 +BaseballPitch/v_BaseballPitch_g06_c03 6 +HulaHoop/v_HulaHoop_g02_c03 42 +TennisSwing/v_TennisSwing_g02_c01 91 +Rowing/v_Rowing_g07_c02 75 +PushUps/v_PushUps_g04_c02 71 +Knitting/v_Knitting_g03_c03 49 +HighJump/v_HighJump_g03_c04 39 +ParallelBars/v_ParallelBars_g06_c03 56 
+BlowDryHair/v_BlowDryHair_g05_c04 12 +BodyWeightSquats/v_BodyWeightSquats_g04_c03 14 +CricketShot/v_CricketShot_g07_c06 23 +PoleVault/v_PoleVault_g02_c01 67 +BalanceBeam/v_BalanceBeam_g02_c04 4 +PoleVault/v_PoleVault_g05_c04 67 +TennisSwing/v_TennisSwing_g05_c04 91 +CricketBowling/v_CricketBowling_g03_c04 22 +BaseballPitch/v_BaseballPitch_g03_c06 6 +StillRings/v_StillRings_g07_c02 85 +PoleVault/v_PoleVault_g02_c06 67 +WallPushups/v_WallPushups_g04_c01 98 +Archery/v_Archery_g03_c01 2 +Mixing/v_Mixing_g02_c05 53 +SkateBoarding/v_SkateBoarding_g06_c02 79 +MoppingFloor/v_MoppingFloor_g02_c01 54 +CleanAndJerk/v_CleanAndJerk_g01_c03 20 +Surfing/v_Surfing_g03_c02 87 +Archery/v_Archery_g07_c05 2 +FrontCrawl/v_FrontCrawl_g07_c06 31 +PlayingPiano/v_PlayingPiano_g02_c02 63 +BoxingPunchingBag/v_BoxingPunchingBag_g02_c04 16 +BasketballDunk/v_BasketballDunk_g07_c01 8 +Archery/v_Archery_g04_c04 2 +Hammering/v_Hammering_g05_c04 34 +HorseRiding/v_HorseRiding_g01_c03 41 +PullUps/v_PullUps_g06_c03 69 +Bowling/v_Bowling_g04_c02 15 +BalanceBeam/v_BalanceBeam_g01_c02 4 +Typing/v_Typing_g04_c01 94 +Knitting/v_Knitting_g05_c04 49 +HeadMassage/v_HeadMassage_g02_c06 38 +Fencing/v_Fencing_g03_c03 27 +SoccerJuggling/v_SoccerJuggling_g04_c02 83 +PommelHorse/v_PommelHorse_g06_c04 68 +CricketShot/v_CricketShot_g06_c06 23 +Billiards/v_Billiards_g07_c04 11 +Drumming/v_Drumming_g05_c02 26 +Lunges/v_Lunges_g01_c06 51 +PommelHorse/v_PommelHorse_g06_c03 68 +Billiards/v_Billiards_g02_c05 11 +Rafting/v_Rafting_g05_c01 72 +SumoWrestling/v_SumoWrestling_g06_c06 86 +SalsaSpin/v_SalsaSpin_g06_c01 76 +Swing/v_Swing_g04_c07 88 +SumoWrestling/v_SumoWrestling_g07_c05 86 +WallPushups/v_WallPushups_g01_c04 98 +TennisSwing/v_TennisSwing_g04_c04 91 +BoxingPunchingBag/v_BoxingPunchingBag_g02_c01 16 +PlayingSitar/v_PlayingSitar_g06_c02 64 +Nunchucks/v_Nunchucks_g02_c03 55 +PlayingTabla/v_PlayingTabla_g03_c01 65 +Lunges/v_Lunges_g04_c03 51 +TrampolineJumping/v_TrampolineJumping_g03_c03 93 
+ThrowDiscus/v_ThrowDiscus_g05_c03 92 +FieldHockeyPenalty/v_FieldHockeyPenalty_g04_c06 28 +Typing/v_Typing_g03_c04 94 +SoccerPenalty/v_SoccerPenalty_g01_c05 84 +PlayingViolin/v_PlayingViolin_g01_c04 66 +BabyCrawling/v_BabyCrawling_g05_c05 3 +GolfSwing/v_GolfSwing_g07_c02 32 +Diving/v_Diving_g04_c07 25 +Typing/v_Typing_g03_c07 94 +FrisbeeCatch/v_FrisbeeCatch_g04_c04 30 +CuttingInKitchen/v_CuttingInKitchen_g07_c04 24 +MoppingFloor/v_MoppingFloor_g07_c02 54 +ShavingBeard/v_ShavingBeard_g03_c07 77 +HandstandWalking/v_HandstandWalking_g01_c01 37 +Punch/v_Punch_g01_c05 70 +Skijet/v_Skijet_g03_c03 81 +BrushingTeeth/v_BrushingTeeth_g06_c03 19 +MilitaryParade/v_MilitaryParade_g04_c01 52 +Rafting/v_Rafting_g02_c01 72 +Basketball/v_Basketball_g07_c01 7 +FloorGymnastics/v_FloorGymnastics_g04_c03 29 +Drumming/v_Drumming_g01_c06 26 +SoccerPenalty/v_SoccerPenalty_g05_c02 84 +Mixing/v_Mixing_g05_c07 53 +HeadMassage/v_HeadMassage_g07_c03 38 +PlayingDaf/v_PlayingDaf_g06_c07 59 +BandMarching/v_BandMarching_g05_c04 5 +RockClimbingIndoor/v_RockClimbingIndoor_g01_c02 73 +ApplyEyeMakeup/v_ApplyEyeMakeup_g04_c04 0 +Archery/v_Archery_g02_c05 2 +SumoWrestling/v_SumoWrestling_g05_c03 86 +WalkingWithDog/v_WalkingWithDog_g04_c02 97 +PlayingDaf/v_PlayingDaf_g04_c01 59 +CricketShot/v_CricketShot_g07_c03 23 +JavelinThrow/v_JavelinThrow_g05_c01 44 +RockClimbingIndoor/v_RockClimbingIndoor_g07_c05 73 +Swing/v_Swing_g01_c03 88 +Bowling/v_Bowling_g02_c03 15 +BandMarching/v_BandMarching_g06_c01 5 +Billiards/v_Billiards_g02_c06 11 +PlayingFlute/v_PlayingFlute_g07_c02 61 +Lunges/v_Lunges_g04_c04 51 +PlayingCello/v_PlayingCello_g02_c05 58 +PlayingGuitar/v_PlayingGuitar_g07_c04 62 +Diving/v_Diving_g01_c05 25 +CricketShot/v_CricketShot_g04_c03 23 +WalkingWithDog/v_WalkingWithDog_g03_c02 97 +BandMarching/v_BandMarching_g06_c04 5 +PlayingDhol/v_PlayingDhol_g05_c04 60 +IceDancing/v_IceDancing_g02_c03 43 +ApplyLipstick/v_ApplyLipstick_g07_c03 1 +WritingOnBoard/v_WritingOnBoard_g07_c04 99 
+ShavingBeard/v_ShavingBeard_g05_c04 77 +TableTennisShot/v_TableTennisShot_g03_c01 89 +ApplyLipstick/v_ApplyLipstick_g04_c01 1 +Surfing/v_Surfing_g02_c05 87 +FrontCrawl/v_FrontCrawl_g01_c01 31 +SumoWrestling/v_SumoWrestling_g02_c03 86 +FieldHockeyPenalty/v_FieldHockeyPenalty_g06_c04 28 +Punch/v_Punch_g06_c05 70 +FieldHockeyPenalty/v_FieldHockeyPenalty_g01_c03 28 +Mixing/v_Mixing_g03_c01 53 +WallPushups/v_WallPushups_g01_c03 98 +PlayingCello/v_PlayingCello_g05_c02 58 +PlayingSitar/v_PlayingSitar_g04_c03 64 +Surfing/v_Surfing_g01_c03 87 +ThrowDiscus/v_ThrowDiscus_g01_c03 92 +Knitting/v_Knitting_g06_c04 49 +Swing/v_Swing_g06_c07 88 +Archery/v_Archery_g06_c05 2 +Biking/v_Biking_g02_c06 10 +FieldHockeyPenalty/v_FieldHockeyPenalty_g02_c03 28 +FrisbeeCatch/v_FrisbeeCatch_g01_c04 30 +WritingOnBoard/v_WritingOnBoard_g06_c04 99 +LongJump/v_LongJump_g03_c06 50 +Hammering/v_Hammering_g04_c04 34 +BandMarching/v_BandMarching_g02_c04 5 +WalkingWithDog/v_WalkingWithDog_g06_c03 97 +BreastStroke/v_BreastStroke_g02_c03 18 +ApplyLipstick/v_ApplyLipstick_g01_c01 1 +MoppingFloor/v_MoppingFloor_g02_c03 54 +TennisSwing/v_TennisSwing_g04_c06 91 +GolfSwing/v_GolfSwing_g04_c05 32 +PlayingTabla/v_PlayingTabla_g04_c05 65 +Surfing/v_Surfing_g04_c03 87 +IceDancing/v_IceDancing_g01_c06 43 +Diving/v_Diving_g01_c01 25 +WalkingWithDog/v_WalkingWithDog_g02_c03 97 +HeadMassage/v_HeadMassage_g05_c05 38 +ParallelBars/v_ParallelBars_g03_c03 56 +ParallelBars/v_ParallelBars_g07_c05 56 +JugglingBalls/v_JugglingBalls_g02_c05 45 +IceDancing/v_IceDancing_g02_c01 43 +Skiing/v_Skiing_g03_c03 80 +Fencing/v_Fencing_g03_c05 27 +HighJump/v_HighJump_g02_c04 39 +Haircut/v_Haircut_g06_c04 33 +PlayingPiano/v_PlayingPiano_g04_c04 63 +HorseRiding/v_HorseRiding_g04_c03 41 +ShavingBeard/v_ShavingBeard_g06_c05 77 +BoxingSpeedBag/v_BoxingSpeedBag_g05_c04 17 +PlayingSitar/v_PlayingSitar_g06_c06 64 +UnevenBars/v_UnevenBars_g05_c01 95 +Rowing/v_Rowing_g03_c02 75 +BenchPress/v_BenchPress_g03_c04 9 +LongJump/v_LongJump_g04_c04 50 
+RopeClimbing/v_RopeClimbing_g05_c04 74 +Kayaking/v_Kayaking_g05_c03 48 +GolfSwing/v_GolfSwing_g01_c05 32 +BoxingSpeedBag/v_BoxingSpeedBag_g06_c02 17 +Diving/v_Diving_g01_c04 25 +PlayingDhol/v_PlayingDhol_g06_c05 60 +BenchPress/v_BenchPress_g02_c04 9 +Surfing/v_Surfing_g03_c03 87 +PlayingGuitar/v_PlayingGuitar_g01_c04 62 +Nunchucks/v_Nunchucks_g01_c02 55 +PlayingFlute/v_PlayingFlute_g03_c02 61 +BandMarching/v_BandMarching_g07_c02 5 +Haircut/v_Haircut_g01_c01 33 +TaiChi/v_TaiChi_g07_c02 90 +Typing/v_Typing_g07_c02 94 +TaiChi/v_TaiChi_g01_c04 90 +Diving/v_Diving_g03_c06 25 +Biking/v_Biking_g07_c04 10 +BaseballPitch/v_BaseballPitch_g01_c03 6 +ParallelBars/v_ParallelBars_g06_c04 56 +VolleyballSpiking/v_VolleyballSpiking_g03_c02 96 +Archery/v_Archery_g01_c06 2 +PlayingCello/v_PlayingCello_g04_c04 58 +Punch/v_Punch_g05_c07 70 +YoYo/v_YoYo_g04_c03 100 +BoxingSpeedBag/v_BoxingSpeedBag_g04_c03 17 +CliffDiving/v_CliffDiving_g07_c02 21 +BodyWeightSquats/v_BodyWeightSquats_g03_c05 14 +Haircut/v_Haircut_g04_c04 33 +MoppingFloor/v_MoppingFloor_g01_c01 54 +UnevenBars/v_UnevenBars_g06_c01 95 +TableTennisShot/v_TableTennisShot_g06_c05 89 +Diving/v_Diving_g06_c04 25 +BoxingPunchingBag/v_BoxingPunchingBag_g03_c07 16 +BenchPress/v_BenchPress_g04_c03 9 +TaiChi/v_TaiChi_g03_c01 90 +HeadMassage/v_HeadMassage_g06_c01 38 +JumpRope/v_JumpRope_g02_c01 47 +PushUps/v_PushUps_g05_c02 71 +Archery/v_Archery_g01_c03 2 +CricketBowling/v_CricketBowling_g04_c01 22 +UnevenBars/v_UnevenBars_g04_c04 95 +JavelinThrow/v_JavelinThrow_g07_c01 44 +TableTennisShot/v_TableTennisShot_g04_c04 89 +PlayingPiano/v_PlayingPiano_g06_c01 63 +SoccerPenalty/v_SoccerPenalty_g03_c04 84 +Shotput/v_Shotput_g05_c04 78 +FloorGymnastics/v_FloorGymnastics_g07_c06 29 +BandMarching/v_BandMarching_g03_c01 5 +BreastStroke/v_BreastStroke_g05_c03 18 +BoxingSpeedBag/v_BoxingSpeedBag_g01_c03 17 +HandstandPushups/v_HandStandPushups_g05_c01 36 +Diving/v_Diving_g03_c01 25 +Surfing/v_Surfing_g06_c04 87 
+CuttingInKitchen/v_CuttingInKitchen_g03_c02 24 +TableTennisShot/v_TableTennisShot_g05_c01 89 +RopeClimbing/v_RopeClimbing_g07_c01 74 +YoYo/v_YoYo_g02_c04 100 +UnevenBars/v_UnevenBars_g07_c02 95 +Skiing/v_Skiing_g06_c01 80 +PoleVault/v_PoleVault_g04_c05 67 +WalkingWithDog/v_WalkingWithDog_g01_c02 97 +Biking/v_Biking_g01_c02 10 +HorseRace/v_HorseRace_g04_c05 40 +HandstandWalking/v_HandstandWalking_g04_c01 37 +PlayingPiano/v_PlayingPiano_g05_c04 63 +JugglingBalls/v_JugglingBalls_g04_c02 45 +ShavingBeard/v_ShavingBeard_g05_c06 77 +YoYo/v_YoYo_g01_c01 100 +BlowingCandles/v_BlowingCandles_g05_c05 13 +Archery/v_Archery_g05_c05 2 +Rowing/v_Rowing_g04_c05 75 +PlayingGuitar/v_PlayingGuitar_g05_c05 62 +SoccerJuggling/v_SoccerJuggling_g06_c04 83 +HeadMassage/v_HeadMassage_g05_c02 38 +Rafting/v_Rafting_g05_c02 72 +BoxingPunchingBag/v_BoxingPunchingBag_g02_c06 16 +MoppingFloor/v_MoppingFloor_g07_c05 54 +HulaHoop/v_HulaHoop_g03_c05 42 +RopeClimbing/v_RopeClimbing_g05_c05 74 +JugglingBalls/v_JugglingBalls_g07_c03 45 +PlayingGuitar/v_PlayingGuitar_g06_c03 62 +BenchPress/v_BenchPress_g01_c02 9 +ApplyEyeMakeup/v_ApplyEyeMakeup_g06_c02 0 +SoccerPenalty/v_SoccerPenalty_g02_c01 84 +TennisSwing/v_TennisSwing_g06_c06 91 +ThrowDiscus/v_ThrowDiscus_g03_c01 92 +WritingOnBoard/v_WritingOnBoard_g02_c04 99 +FrisbeeCatch/v_FrisbeeCatch_g02_c05 30 +Billiards/v_Billiards_g02_c01 11 +TennisSwing/v_TennisSwing_g01_c02 91 +Fencing/v_Fencing_g07_c04 27 +HandstandWalking/v_HandstandWalking_g05_c04 37 +ParallelBars/v_ParallelBars_g04_c06 56 +TennisSwing/v_TennisSwing_g04_c01 91 +JumpRope/v_JumpRope_g03_c04 47 +PlayingGuitar/v_PlayingGuitar_g06_c04 62 +BenchPress/v_BenchPress_g04_c05 9 +Mixing/v_Mixing_g03_c05 53 +FrontCrawl/v_FrontCrawl_g02_c02 31 +HorseRiding/v_HorseRiding_g02_c04 41 +ApplyEyeMakeup/v_ApplyEyeMakeup_g04_c02 0 +JugglingBalls/v_JugglingBalls_g02_c03 45 +BoxingSpeedBag/v_BoxingSpeedBag_g07_c07 17 +MoppingFloor/v_MoppingFloor_g04_c02 54 +LongJump/v_LongJump_g07_c04 50 
+BaseballPitch/v_BaseballPitch_g04_c02 6 +CuttingInKitchen/v_CuttingInKitchen_g02_c01 24 +FrisbeeCatch/v_FrisbeeCatch_g05_c01 30 +BrushingTeeth/v_BrushingTeeth_g01_c01 19 +JavelinThrow/v_JavelinThrow_g01_c03 44 +Archery/v_Archery_g05_c04 2 +BlowDryHair/v_BlowDryHair_g06_c07 12 +JavelinThrow/v_JavelinThrow_g03_c02 44 +LongJump/v_LongJump_g03_c02 50 +SoccerJuggling/v_SoccerJuggling_g04_c05 83 +CuttingInKitchen/v_CuttingInKitchen_g01_c01 24 +SoccerPenalty/v_SoccerPenalty_g05_c05 84 +JumpingJack/v_JumpingJack_g06_c05 46 +BlowingCandles/v_BlowingCandles_g03_c01 13 +PlayingDaf/v_PlayingDaf_g02_c07 59 +TableTennisShot/v_TableTennisShot_g07_c03 89 +CricketShot/v_CricketShot_g05_c06 23 +PizzaTossing/v_PizzaTossing_g06_c05 57 +Kayaking/v_Kayaking_g06_c04 48 +TennisSwing/v_TennisSwing_g06_c01 91 +PullUps/v_PullUps_g05_c04 69 +PlayingFlute/v_PlayingFlute_g04_c03 61 +Haircut/v_Haircut_g01_c02 33 +PlayingFlute/v_PlayingFlute_g04_c05 61 +JugglingBalls/v_JugglingBalls_g05_c02 45 +ShavingBeard/v_ShavingBeard_g06_c07 77 +Billiards/v_Billiards_g06_c04 11 +ThrowDiscus/v_ThrowDiscus_g07_c06 92 +PlayingDhol/v_PlayingDhol_g02_c06 60 +Swing/v_Swing_g06_c02 88 +ShavingBeard/v_ShavingBeard_g02_c05 77 +Lunges/v_Lunges_g07_c07 51 +SumoWrestling/v_SumoWrestling_g06_c01 86 +CliffDiving/v_CliffDiving_g01_c05 21 +CleanAndJerk/v_CleanAndJerk_g03_c06 20 +HorseRace/v_HorseRace_g06_c04 40 +HammerThrow/v_HammerThrow_g05_c04 35 +HulaHoop/v_HulaHoop_g05_c01 42 +CricketBowling/v_CricketBowling_g02_c07 22 +FrisbeeCatch/v_FrisbeeCatch_g07_c06 30 +FloorGymnastics/v_FloorGymnastics_g06_c04 29 +PlayingFlute/v_PlayingFlute_g03_c01 61 +HorseRiding/v_HorseRiding_g07_c04 41 +HandstandWalking/v_HandstandWalking_g03_c02 37 +CleanAndJerk/v_CleanAndJerk_g04_c05 20 +PlayingCello/v_PlayingCello_g06_c02 58 +PommelHorse/v_PommelHorse_g03_c02 68 +ApplyLipstick/v_ApplyLipstick_g05_c03 1 +ApplyEyeMakeup/v_ApplyEyeMakeup_g03_c03 0 +BabyCrawling/v_BabyCrawling_g02_c01 3 +Surfing/v_Surfing_g07_c02 87 
+MilitaryParade/v_MilitaryParade_g01_c05 52 +Punch/v_Punch_g07_c05 70 +Billiards/v_Billiards_g03_c05 11 +Skiing/v_Skiing_g04_c01 80 +PlayingSitar/v_PlayingSitar_g05_c01 64 +Punch/v_Punch_g01_c02 70 +JumpingJack/v_JumpingJack_g01_c07 46 +FloorGymnastics/v_FloorGymnastics_g05_c04 29 +BandMarching/v_BandMarching_g04_c03 5 +Billiards/v_Billiards_g07_c02 11 +Drumming/v_Drumming_g03_c04 26 +Punch/v_Punch_g06_c01 70 +CliffDiving/v_CliffDiving_g07_c05 21 +BaseballPitch/v_BaseballPitch_g01_c05 6 +BenchPress/v_BenchPress_g07_c05 9 +PlayingFlute/v_PlayingFlute_g06_c01 61 +ParallelBars/v_ParallelBars_g07_c02 56 +IceDancing/v_IceDancing_g03_c06 43 +PlayingDaf/v_PlayingDaf_g04_c03 59 +FrontCrawl/v_FrontCrawl_g02_c03 31 +BaseballPitch/v_BaseballPitch_g06_c06 6 +Typing/v_Typing_g03_c02 94 +PizzaTossing/v_PizzaTossing_g01_c02 57 +PlayingPiano/v_PlayingPiano_g05_c02 63 +BlowDryHair/v_BlowDryHair_g07_c03 12 +Kayaking/v_Kayaking_g02_c02 48 +HammerThrow/v_HammerThrow_g03_c02 35 +BaseballPitch/v_BaseballPitch_g07_c05 6 +HandstandPushups/v_HandStandPushups_g07_c01 36 +Knitting/v_Knitting_g06_c03 49 +Hammering/v_Hammering_g03_c05 34 +FieldHockeyPenalty/v_FieldHockeyPenalty_g04_c04 28 +MilitaryParade/v_MilitaryParade_g03_c01 52 +SoccerJuggling/v_SoccerJuggling_g04_c06 83 +ParallelBars/v_ParallelBars_g04_c02 56 +RockClimbingIndoor/v_RockClimbingIndoor_g01_c05 73 +BabyCrawling/v_BabyCrawling_g07_c05 3 +PommelHorse/v_PommelHorse_g07_c06 68 +ApplyEyeMakeup/v_ApplyEyeMakeup_g05_c02 0 +Fencing/v_Fencing_g01_c05 27 +ApplyEyeMakeup/v_ApplyEyeMakeup_g07_c07 0 +Basketball/v_Basketball_g04_c03 7 +FloorGymnastics/v_FloorGymnastics_g02_c01 29 +TrampolineJumping/v_TrampolineJumping_g07_c05 93 +Billiards/v_Billiards_g05_c03 11 +PizzaTossing/v_PizzaTossing_g03_c03 57 +Rafting/v_Rafting_g06_c04 72 +TableTennisShot/v_TableTennisShot_g02_c03 89 +PlayingViolin/v_PlayingViolin_g07_c04 66 +SoccerPenalty/v_SoccerPenalty_g01_c04 84 +Rafting/v_Rafting_g05_c04 72 +BlowingCandles/v_BlowingCandles_g04_c03 13 
+BoxingPunchingBag/v_BoxingPunchingBag_g05_c03 16 +Basketball/v_Basketball_g02_c01 7 +IceDancing/v_IceDancing_g06_c05 43 +CricketBowling/v_CricketBowling_g07_c01 22 +PlayingDhol/v_PlayingDhol_g05_c06 60 +PlayingSitar/v_PlayingSitar_g01_c03 64 +SkateBoarding/v_SkateBoarding_g01_c02 79 +SoccerPenalty/v_SoccerPenalty_g07_c04 84 +PullUps/v_PullUps_g03_c02 69 +SoccerPenalty/v_SoccerPenalty_g07_c06 84 +WallPushups/v_WallPushups_g07_c03 98 +Hammering/v_Hammering_g03_c03 34 +BodyWeightSquats/v_BodyWeightSquats_g06_c04 14 +Billiards/v_Billiards_g07_c03 11 +BreastStroke/v_BreastStroke_g01_c03 18 +TableTennisShot/v_TableTennisShot_g06_c02 89 +HulaHoop/v_HulaHoop_g03_c04 42 +BoxingPunchingBag/v_BoxingPunchingBag_g03_c06 16 +WritingOnBoard/v_WritingOnBoard_g05_c06 99 +Nunchucks/v_Nunchucks_g03_c02 55 +Skiing/v_Skiing_g07_c01 80 +VolleyballSpiking/v_VolleyballSpiking_g04_c04 96 +ParallelBars/v_ParallelBars_g05_c04 56 +Archery/v_Archery_g03_c02 2 +PlayingFlute/v_PlayingFlute_g05_c03 61 +BabyCrawling/v_BabyCrawling_g05_c01 3 +PullUps/v_PullUps_g03_c03 69 +BandMarching/v_BandMarching_g01_c07 5 +ApplyLipstick/v_ApplyLipstick_g01_c04 1 +TableTennisShot/v_TableTennisShot_g07_c01 89 +BrushingTeeth/v_BrushingTeeth_g02_c02 19 +ApplyLipstick/v_ApplyLipstick_g01_c02 1 +FloorGymnastics/v_FloorGymnastics_g03_c02 29 +TrampolineJumping/v_TrampolineJumping_g04_c04 93 +ThrowDiscus/v_ThrowDiscus_g06_c07 92 +BalanceBeam/v_BalanceBeam_g07_c02 4 +TrampolineJumping/v_TrampolineJumping_g02_c01 93 +HulaHoop/v_HulaHoop_g05_c03 42 +ApplyEyeMakeup/v_ApplyEyeMakeup_g01_c06 0 +WallPushups/v_WallPushups_g02_c04 98 +FloorGymnastics/v_FloorGymnastics_g01_c03 29 +Skiing/v_Skiing_g07_c03 80 +Punch/v_Punch_g05_c06 70 +PlayingDaf/v_PlayingDaf_g03_c02 59 +HorseRiding/v_HorseRiding_g01_c04 41 +Fencing/v_Fencing_g05_c03 27 +StillRings/v_StillRings_g05_c04 85 +BabyCrawling/v_BabyCrawling_g07_c02 3 +JugglingBalls/v_JugglingBalls_g06_c01 45 +HorseRiding/v_HorseRiding_g03_c04 41 +ShavingBeard/v_ShavingBeard_g01_c03 77 
+ParallelBars/v_ParallelBars_g01_c03 56 +SalsaSpin/v_SalsaSpin_g02_c05 76 +PlayingTabla/v_PlayingTabla_g03_c04 65 +RopeClimbing/v_RopeClimbing_g06_c03 74 +GolfSwing/v_GolfSwing_g07_c01 32 +WritingOnBoard/v_WritingOnBoard_g04_c01 99 +FrontCrawl/v_FrontCrawl_g06_c04 31 +TableTennisShot/v_TableTennisShot_g06_c04 89 +Nunchucks/v_Nunchucks_g02_c04 55 +Surfing/v_Surfing_g01_c05 87 +Haircut/v_Haircut_g02_c03 33 +Typing/v_Typing_g05_c03 94 +FrontCrawl/v_FrontCrawl_g06_c01 31 +BenchPress/v_BenchPress_g04_c06 9 +RockClimbingIndoor/v_RockClimbingIndoor_g05_c05 73 +BaseballPitch/v_BaseballPitch_g05_c05 6 +BaseballPitch/v_BaseballPitch_g05_c07 6 +Hammering/v_Hammering_g04_c05 34 +HulaHoop/v_HulaHoop_g02_c04 42 +SkyDiving/v_SkyDiving_g07_c01 82 +Nunchucks/v_Nunchucks_g02_c02 55 +BlowDryHair/v_BlowDryHair_g03_c03 12 +PlayingDaf/v_PlayingDaf_g05_c04 59 +PlayingDhol/v_PlayingDhol_g07_c07 60 +PoleVault/v_PoleVault_g06_c02 67 +HorseRace/v_HorseRace_g07_c01 40 +HorseRace/v_HorseRace_g01_c01 40 +PlayingViolin/v_PlayingViolin_g03_c02 66 +PlayingFlute/v_PlayingFlute_g05_c06 61 +Fencing/v_Fencing_g01_c02 27 +MilitaryParade/v_MilitaryParade_g04_c03 52 +Lunges/v_Lunges_g07_c04 51 +WallPushups/v_WallPushups_g06_c06 98 +CleanAndJerk/v_CleanAndJerk_g01_c04 20 +GolfSwing/v_GolfSwing_g04_c03 32 +ApplyLipstick/v_ApplyLipstick_g05_c04 1 +MoppingFloor/v_MoppingFloor_g06_c03 54 +Knitting/v_Knitting_g04_c05 49 +Skiing/v_Skiing_g02_c01 80 +TennisSwing/v_TennisSwing_g02_c07 91 +Drumming/v_Drumming_g04_c05 26 +JumpRope/v_JumpRope_g02_c02 47 +LongJump/v_LongJump_g05_c02 50 +PommelHorse/v_PommelHorse_g06_c01 68 +FieldHockeyPenalty/v_FieldHockeyPenalty_g04_c03 28 +BandMarching/v_BandMarching_g05_c05 5 +Bowling/v_Bowling_g01_c06 15 +PlayingDhol/v_PlayingDhol_g06_c06 60 +Shotput/v_Shotput_g05_c03 78 +MilitaryParade/v_MilitaryParade_g02_c04 52 +Shotput/v_Shotput_g03_c06 78 +ParallelBars/v_ParallelBars_g01_c01 56 +BodyWeightSquats/v_BodyWeightSquats_g05_c02 14 +Surfing/v_Surfing_g02_c03 87 
+ShavingBeard/v_ShavingBeard_g07_c05 77 +Hammering/v_Hammering_g04_c03 34 +Biking/v_Biking_g06_c02 10 +Skiing/v_Skiing_g02_c04 80 +HandstandPushups/v_HandStandPushups_g01_c04 36 +BenchPress/v_BenchPress_g06_c04 9 +BlowDryHair/v_BlowDryHair_g06_c06 12 +PullUps/v_PullUps_g02_c02 69 +TableTennisShot/v_TableTennisShot_g05_c02 89 +RockClimbingIndoor/v_RockClimbingIndoor_g06_c04 73 +WallPushups/v_WallPushups_g03_c03 98 +SalsaSpin/v_SalsaSpin_g07_c04 76 +BreastStroke/v_BreastStroke_g06_c04 18 +BlowingCandles/v_BlowingCandles_g06_c01 13 +CliffDiving/v_CliffDiving_g06_c02 21 +Swing/v_Swing_g06_c06 88 +BasketballDunk/v_BasketballDunk_g04_c04 8 +Kayaking/v_Kayaking_g06_c02 48 +IceDancing/v_IceDancing_g04_c03 43 +Biking/v_Biking_g02_c04 10 +BaseballPitch/v_BaseballPitch_g03_c07 6 +BaseballPitch/v_BaseballPitch_g03_c03 6 +CleanAndJerk/v_CleanAndJerk_g07_c01 20 +BoxingPunchingBag/v_BoxingPunchingBag_g01_c03 16 +Rowing/v_Rowing_g03_c07 75 +ApplyLipstick/v_ApplyLipstick_g07_c02 1 +Hammering/v_Hammering_g02_c01 34 +BoxingSpeedBag/v_BoxingSpeedBag_g03_c03 17 +SoccerPenalty/v_SoccerPenalty_g01_c03 84 +BoxingSpeedBag/v_BoxingSpeedBag_g03_c02 17 +HandstandWalking/v_HandstandWalking_g04_c04 37 +Drumming/v_Drumming_g01_c04 26 +SumoWrestling/v_SumoWrestling_g06_c03 86 +CricketShot/v_CricketShot_g06_c02 23 +SumoWrestling/v_SumoWrestling_g07_c02 86 +Drumming/v_Drumming_g02_c02 26 +VolleyballSpiking/v_VolleyballSpiking_g07_c07 96 +PoleVault/v_PoleVault_g03_c05 67 +BoxingSpeedBag/v_BoxingSpeedBag_g03_c04 17 +PlayingGuitar/v_PlayingGuitar_g03_c05 62 +PlayingDhol/v_PlayingDhol_g07_c03 60 +BoxingPunchingBag/v_BoxingPunchingBag_g04_c05 16 +MoppingFloor/v_MoppingFloor_g03_c02 54 +PlayingDaf/v_PlayingDaf_g02_c05 59 +UnevenBars/v_UnevenBars_g03_c03 95 +Rafting/v_Rafting_g03_c02 72 +WallPushups/v_WallPushups_g04_c02 98 +MoppingFloor/v_MoppingFloor_g07_c04 54 +CricketBowling/v_CricketBowling_g02_c05 22 +BandMarching/v_BandMarching_g02_c01 5 +PlayingSitar/v_PlayingSitar_g02_c04 64 
+WritingOnBoard/v_WritingOnBoard_g01_c06 99 +CleanAndJerk/v_CleanAndJerk_g01_c02 20 +PlayingFlute/v_PlayingFlute_g03_c03 61 +Kayaking/v_Kayaking_g06_c05 48 +CricketShot/v_CricketShot_g02_c03 23 +PushUps/v_PushUps_g01_c02 71 +TableTennisShot/v_TableTennisShot_g05_c07 89 +PlayingSitar/v_PlayingSitar_g06_c01 64 +Billiards/v_Billiards_g07_c01 11 +TableTennisShot/v_TableTennisShot_g04_c01 89 +VolleyballSpiking/v_VolleyballSpiking_g05_c02 96 +GolfSwing/v_GolfSwing_g07_c04 32 +PullUps/v_PullUps_g04_c02 69 +HammerThrow/v_HammerThrow_g03_c03 35 +StillRings/v_StillRings_g04_c02 85 +RopeClimbing/v_RopeClimbing_g05_c01 74 +TennisSwing/v_TennisSwing_g03_c02 91 +BandMarching/v_BandMarching_g05_c01 5 +SalsaSpin/v_SalsaSpin_g02_c02 76 +Bowling/v_Bowling_g04_c03 15 +BlowingCandles/v_BlowingCandles_g05_c01 13 +PlayingSitar/v_PlayingSitar_g02_c06 64 +BasketballDunk/v_BasketballDunk_g07_c05 8 +PlayingSitar/v_PlayingSitar_g05_c05 64 +BoxingSpeedBag/v_BoxingSpeedBag_g01_c01 17 +HorseRace/v_HorseRace_g06_c06 40 +PullUps/v_PullUps_g07_c02 69 +SoccerJuggling/v_SoccerJuggling_g07_c05 83 +BasketballDunk/v_BasketballDunk_g06_c03 8 +GolfSwing/v_GolfSwing_g05_c01 32 +PlayingSitar/v_PlayingSitar_g03_c06 64 +BaseballPitch/v_BaseballPitch_g05_c06 6 +Bowling/v_Bowling_g07_c05 15 +ParallelBars/v_ParallelBars_g07_c04 56 +Haircut/v_Haircut_g02_c02 33 +BlowingCandles/v_BlowingCandles_g06_c04 13 +BandMarching/v_BandMarching_g07_c07 5 +JumpRope/v_JumpRope_g04_c07 47 +HorseRace/v_HorseRace_g05_c02 40 +BoxingSpeedBag/v_BoxingSpeedBag_g07_c04 17 +Mixing/v_Mixing_g07_c05 53 +WallPushups/v_WallPushups_g03_c05 98 +Mixing/v_Mixing_g04_c04 53 +ParallelBars/v_ParallelBars_g02_c02 56 +Punch/v_Punch_g05_c03 70 +JugglingBalls/v_JugglingBalls_g02_c01 45 +CuttingInKitchen/v_CuttingInKitchen_g04_c02 24 +Skijet/v_Skijet_g04_c02 81 +BreastStroke/v_BreastStroke_g06_c02 18 +HeadMassage/v_HeadMassage_g04_c02 38 +SalsaSpin/v_SalsaSpin_g06_c04 76 +Archery/v_Archery_g06_c01 2 +SkyDiving/v_SkyDiving_g03_c01 82 
+FloorGymnastics/v_FloorGymnastics_g04_c04 29 +BandMarching/v_BandMarching_g07_c03 5 +IceDancing/v_IceDancing_g05_c06 43 +Punch/v_Punch_g05_c01 70 +Kayaking/v_Kayaking_g04_c03 48 +ApplyLipstick/v_ApplyLipstick_g02_c03 1 +Lunges/v_Lunges_g02_c03 51 +BaseballPitch/v_BaseballPitch_g07_c01 6 +SkateBoarding/v_SkateBoarding_g04_c04 79 +HandstandWalking/v_HandstandWalking_g07_c01 37 +RockClimbingIndoor/v_RockClimbingIndoor_g06_c07 73 +JumpRope/v_JumpRope_g05_c05 47 +IceDancing/v_IceDancing_g01_c01 43 +HulaHoop/v_HulaHoop_g04_c02 42 +TaiChi/v_TaiChi_g03_c04 90 +PommelHorse/v_PommelHorse_g07_c03 68 +PlayingGuitar/v_PlayingGuitar_g06_c07 62 +Skijet/v_Skijet_g02_c01 81 +HorseRace/v_HorseRace_g03_c01 40 +Biking/v_Biking_g02_c05 10 +MilitaryParade/v_MilitaryParade_g02_c01 52 +Basketball/v_Basketball_g05_c04 7 +Kayaking/v_Kayaking_g03_c02 48 +FrisbeeCatch/v_FrisbeeCatch_g06_c05 30 +PlayingDhol/v_PlayingDhol_g01_c07 60 +ShavingBeard/v_ShavingBeard_g04_c01 77 +HighJump/v_HighJump_g02_c07 39 +HammerThrow/v_HammerThrow_g07_c03 35 +HorseRace/v_HorseRace_g06_c03 40 +PlayingFlute/v_PlayingFlute_g06_c05 61 +RopeClimbing/v_RopeClimbing_g02_c04 74 +HeadMassage/v_HeadMassage_g05_c04 38 +Diving/v_Diving_g05_c01 25 +HammerThrow/v_HammerThrow_g03_c05 35 +VolleyballSpiking/v_VolleyballSpiking_g07_c05 96 +PlayingDhol/v_PlayingDhol_g03_c07 60 +PoleVault/v_PoleVault_g05_c01 67 +BlowDryHair/v_BlowDryHair_g05_c01 12 +BoxingPunchingBag/v_BoxingPunchingBag_g04_c06 16 +BodyWeightSquats/v_BodyWeightSquats_g05_c04 14 +Mixing/v_Mixing_g03_c03 53 +LongJump/v_LongJump_g01_c03 50 +HammerThrow/v_HammerThrow_g03_c04 35 +Billiards/v_Billiards_g01_c05 11 +Mixing/v_Mixing_g06_c06 53 +SoccerPenalty/v_SoccerPenalty_g07_c01 84 +Rowing/v_Rowing_g02_c01 75 +BrushingTeeth/v_BrushingTeeth_g05_c04 19 +FrisbeeCatch/v_FrisbeeCatch_g06_c03 30 +HandstandWalking/v_HandstandWalking_g04_c03 37 +BenchPress/v_BenchPress_g02_c02 9 +StillRings/v_StillRings_g03_c02 85 +StillRings/v_StillRings_g03_c01 85 
+CliffDiving/v_CliffDiving_g04_c01 21 +YoYo/v_YoYo_g06_c03 100 +HandstandPushups/v_HandStandPushups_g06_c01 36 +Surfing/v_Surfing_g05_c02 87 +PlayingDhol/v_PlayingDhol_g05_c05 60 +WalkingWithDog/v_WalkingWithDog_g06_c01 97 +JavelinThrow/v_JavelinThrow_g07_c04 44 +HulaHoop/v_HulaHoop_g07_c03 42 +JavelinThrow/v_JavelinThrow_g03_c04 44 +PullUps/v_PullUps_g07_c03 69 +BenchPress/v_BenchPress_g04_c01 9 +HorseRiding/v_HorseRiding_g05_c04 41 +Rowing/v_Rowing_g02_c04 75 +ApplyEyeMakeup/v_ApplyEyeMakeup_g05_c03 0 +BoxingSpeedBag/v_BoxingSpeedBag_g04_c06 17 +HulaHoop/v_HulaHoop_g04_c04 42 +PlayingPiano/v_PlayingPiano_g04_c02 63 +PlayingCello/v_PlayingCello_g06_c07 58 +BreastStroke/v_BreastStroke_g04_c01 18 +StillRings/v_StillRings_g03_c03 85 +BaseballPitch/v_BaseballPitch_g01_c01 6 +HammerThrow/v_HammerThrow_g06_c06 35 +CricketShot/v_CricketShot_g02_c02 23 +Drumming/v_Drumming_g02_c01 26 +BaseballPitch/v_BaseballPitch_g05_c02 6 +Rafting/v_Rafting_g07_c03 72 +SoccerJuggling/v_SoccerJuggling_g02_c04 83 +BalanceBeam/v_BalanceBeam_g04_c02 4 +MilitaryParade/v_MilitaryParade_g04_c02 52 +MilitaryParade/v_MilitaryParade_g07_c03 52 +PlayingDhol/v_PlayingDhol_g06_c03 60 +BasketballDunk/v_BasketballDunk_g05_c06 8 +Skijet/v_Skijet_g05_c03 81 +HandstandPushups/v_HandStandPushups_g05_c04 36 +Punch/v_Punch_g04_c01 70 +FloorGymnastics/v_FloorGymnastics_g03_c01 29 +HandstandWalking/v_HandstandWalking_g03_c03 37 +ParallelBars/v_ParallelBars_g04_c07 56 +YoYo/v_YoYo_g04_c05 100 +SoccerPenalty/v_SoccerPenalty_g07_c02 84 +BasketballDunk/v_BasketballDunk_g07_c02 8 +HandstandWalking/v_HandstandWalking_g04_c02 37 +PlayingViolin/v_PlayingViolin_g02_c04 66 +HammerThrow/v_HammerThrow_g01_c05 35 +Fencing/v_Fencing_g05_c01 27 +PommelHorse/v_PommelHorse_g02_c04 68 +ThrowDiscus/v_ThrowDiscus_g02_c06 92 +PlayingFlute/v_PlayingFlute_g04_c02 61 +PlayingCello/v_PlayingCello_g02_c03 58 +WalkingWithDog/v_WalkingWithDog_g01_c03 97 +Surfing/v_Surfing_g06_c02 87 +LongJump/v_LongJump_g04_c03 50 
+MilitaryParade/v_MilitaryParade_g01_c07 52 +Shotput/v_Shotput_g04_c05 78 +TaiChi/v_TaiChi_g02_c02 90 +Billiards/v_Billiards_g04_c05 11 +BoxingPunchingBag/v_BoxingPunchingBag_g06_c01 16 +Mixing/v_Mixing_g06_c02 53 +HighJump/v_HighJump_g04_c02 39 +FloorGymnastics/v_FloorGymnastics_g07_c07 29 +Swing/v_Swing_g05_c01 88 +Archery/v_Archery_g01_c05 2 +PlayingViolin/v_PlayingViolin_g07_c01 66 +TennisSwing/v_TennisSwing_g06_c04 91 +MoppingFloor/v_MoppingFloor_g07_c03 54 +PlayingSitar/v_PlayingSitar_g07_c07 64 +PommelHorse/v_PommelHorse_g05_c04 68 +BrushingTeeth/v_BrushingTeeth_g07_c04 19 +CuttingInKitchen/v_CuttingInKitchen_g05_c03 24 +SkateBoarding/v_SkateBoarding_g01_c03 79 +SkyDiving/v_SkyDiving_g01_c02 82 +YoYo/v_YoYo_g05_c03 100 +PlayingCello/v_PlayingCello_g02_c06 58 +CuttingInKitchen/v_CuttingInKitchen_g04_c03 24 +CliffDiving/v_CliffDiving_g02_c01 21 +BoxingSpeedBag/v_BoxingSpeedBag_g06_c01 17 +Rowing/v_Rowing_g04_c02 75 +BrushingTeeth/v_BrushingTeeth_g02_c07 19 +SumoWrestling/v_SumoWrestling_g03_c03 86 +HammerThrow/v_HammerThrow_g07_c01 35 +Bowling/v_Bowling_g07_c07 15 +PlayingFlute/v_PlayingFlute_g04_c04 61 +BalanceBeam/v_BalanceBeam_g04_c04 4 +HeadMassage/v_HeadMassage_g04_c04 38 +Shotput/v_Shotput_g04_c01 78 +Bowling/v_Bowling_g05_c04 15 +PlayingSitar/v_PlayingSitar_g07_c02 64 +SoccerJuggling/v_SoccerJuggling_g02_c01 83 +LongJump/v_LongJump_g02_c04 50 +SalsaSpin/v_SalsaSpin_g05_c06 76 +Drumming/v_Drumming_g05_c04 26 +BoxingPunchingBag/v_BoxingPunchingBag_g07_c06 16 +Shotput/v_Shotput_g05_c02 78 +RockClimbingIndoor/v_RockClimbingIndoor_g06_c06 73 +BoxingSpeedBag/v_BoxingSpeedBag_g07_c06 17 +ParallelBars/v_ParallelBars_g01_c02 56 +BaseballPitch/v_BaseballPitch_g02_c03 6 +SoccerJuggling/v_SoccerJuggling_g01_c04 83 +Billiards/v_Billiards_g05_c02 11 +Typing/v_Typing_g05_c02 94 +PlayingCello/v_PlayingCello_g05_c01 58 +Haircut/v_Haircut_g07_c05 33 +BodyWeightSquats/v_BodyWeightSquats_g01_c03 14 +WalkingWithDog/v_WalkingWithDog_g01_c04 97 +Knitting/v_Knitting_g06_c02 49 
+Typing/v_Typing_g07_c01 94 +SoccerJuggling/v_SoccerJuggling_g01_c01 83 +BenchPress/v_BenchPress_g05_c02 9 +PlayingFlute/v_PlayingFlute_g06_c06 61 +WritingOnBoard/v_WritingOnBoard_g01_c05 99 +TrampolineJumping/v_TrampolineJumping_g05_c01 93 +Knitting/v_Knitting_g01_c01 49 +HandstandWalking/v_HandstandWalking_g07_c05 37 +SoccerJuggling/v_SoccerJuggling_g07_c04 83 +BlowingCandles/v_BlowingCandles_g04_c02 13 +TaiChi/v_TaiChi_g02_c04 90 +Knitting/v_Knitting_g04_c01 49 +SalsaSpin/v_SalsaSpin_g02_c03 76 +BabyCrawling/v_BabyCrawling_g02_c06 3 +BandMarching/v_BandMarching_g07_c04 5 +Mixing/v_Mixing_g04_c03 53 +HammerThrow/v_HammerThrow_g04_c07 35 +FloorGymnastics/v_FloorGymnastics_g04_c05 29 +BandMarching/v_BandMarching_g01_c04 5 +Punch/v_Punch_g05_c04 70 +BoxingPunchingBag/v_BoxingPunchingBag_g07_c03 16 +JugglingBalls/v_JugglingBalls_g07_c04 45 +SoccerPenalty/v_SoccerPenalty_g02_c04 84 +PlayingCello/v_PlayingCello_g01_c06 58 +FrontCrawl/v_FrontCrawl_g04_c05 31 +PizzaTossing/v_PizzaTossing_g04_c05 57 +SalsaSpin/v_SalsaSpin_g06_c03 76 +PlayingViolin/v_PlayingViolin_g02_c01 66 +Rowing/v_Rowing_g03_c04 75 +MilitaryParade/v_MilitaryParade_g03_c03 52 +HeadMassage/v_HeadMassage_g02_c04 38 +Typing/v_Typing_g06_c07 94 +PlayingFlute/v_PlayingFlute_g03_c07 61 +PlayingPiano/v_PlayingPiano_g05_c01 63 +HandstandWalking/v_HandstandWalking_g07_c02 37 +PlayingFlute/v_PlayingFlute_g04_c06 61 +Archery/v_Archery_g07_c02 2 +Typing/v_Typing_g05_c05 94 +FieldHockeyPenalty/v_FieldHockeyPenalty_g07_c03 28 +BasketballDunk/v_BasketballDunk_g02_c04 8 +Kayaking/v_Kayaking_g01_c01 48 +IceDancing/v_IceDancing_g07_c06 43 +JavelinThrow/v_JavelinThrow_g04_c01 44 +SoccerJuggling/v_SoccerJuggling_g05_c02 83 +Drumming/v_Drumming_g02_c05 26 +ShavingBeard/v_ShavingBeard_g03_c04 77 +JumpingJack/v_JumpingJack_g06_c01 46 +Biking/v_Biking_g04_c02 10 +SalsaSpin/v_SalsaSpin_g07_c02 76 +Drumming/v_Drumming_g03_c03 26 +Hammering/v_Hammering_g06_c06 34 +SalsaSpin/v_SalsaSpin_g04_c01 76 
+PlayingPiano/v_PlayingPiano_g06_c03 63 +SalsaSpin/v_SalsaSpin_g06_c02 76 +SumoWrestling/v_SumoWrestling_g03_c04 86 +YoYo/v_YoYo_g03_c05 100 +PlayingTabla/v_PlayingTabla_g04_c02 65 +PlayingFlute/v_PlayingFlute_g01_c03 61 +WritingOnBoard/v_WritingOnBoard_g07_c07 99 +Biking/v_Biking_g04_c01 10 +PlayingFlute/v_PlayingFlute_g01_c05 61 +SumoWrestling/v_SumoWrestling_g01_c04 86 +Lunges/v_Lunges_g06_c05 51 +HammerThrow/v_HammerThrow_g06_c07 35 +SoccerPenalty/v_SoccerPenalty_g06_c02 84 +CricketShot/v_CricketShot_g07_c07 23 +PlayingCello/v_PlayingCello_g07_c01 58 +BoxingSpeedBag/v_BoxingSpeedBag_g04_c07 17 +TaiChi/v_TaiChi_g03_c02 90 +GolfSwing/v_GolfSwing_g04_c06 32 +BreastStroke/v_BreastStroke_g05_c01 18 +PlayingDhol/v_PlayingDhol_g04_c02 60 +BlowDryHair/v_BlowDryHair_g05_c05 12 +ApplyLipstick/v_ApplyLipstick_g07_c04 1 +Punch/v_Punch_g07_c04 70 +VolleyballSpiking/v_VolleyballSpiking_g02_c02 96 +FieldHockeyPenalty/v_FieldHockeyPenalty_g02_c05 28 +BenchPress/v_BenchPress_g04_c04 9 +Shotput/v_Shotput_g02_c01 78 +PlayingCello/v_PlayingCello_g03_c04 58 +HulaHoop/v_HulaHoop_g03_c01 42 +SalsaSpin/v_SalsaSpin_g02_c01 76 +Surfing/v_Surfing_g01_c01 87 +WritingOnBoard/v_WritingOnBoard_g01_c04 99 +RockClimbingIndoor/v_RockClimbingIndoor_g02_c04 73 +JugglingBalls/v_JugglingBalls_g05_c01 45 +PlayingCello/v_PlayingCello_g04_c07 58 +Hammering/v_Hammering_g07_c03 34 +JumpingJack/v_JumpingJack_g06_c02 46 +FrontCrawl/v_FrontCrawl_g05_c04 31 +BlowingCandles/v_BlowingCandles_g07_c02 13 +BoxingPunchingBag/v_BoxingPunchingBag_g07_c01 16 +Nunchucks/v_Nunchucks_g06_c03 55 +VolleyballSpiking/v_VolleyballSpiking_g06_c02 96 +Lunges/v_Lunges_g03_c04 51 +TennisSwing/v_TennisSwing_g05_c03 91 +WalkingWithDog/v_WalkingWithDog_g06_c02 97 +JumpingJack/v_JumpingJack_g06_c07 46 +CricketShot/v_CricketShot_g05_c01 23 +PlayingGuitar/v_PlayingGuitar_g05_c01 62 +TrampolineJumping/v_TrampolineJumping_g04_c02 93 +TrampolineJumping/v_TrampolineJumping_g04_c01 93 +Drumming/v_Drumming_g05_c01 26 
+MilitaryParade/v_MilitaryParade_g04_c04 52 +MoppingFloor/v_MoppingFloor_g05_c05 54 +CricketShot/v_CricketShot_g01_c01 23 +BenchPress/v_BenchPress_g07_c01 9 +CleanAndJerk/v_CleanAndJerk_g04_c01 20 +HulaHoop/v_HulaHoop_g05_c02 42 +ApplyLipstick/v_ApplyLipstick_g04_c05 1 +Diving/v_Diving_g01_c03 25 +FrontCrawl/v_FrontCrawl_g07_c02 31 +Billiards/v_Billiards_g01_c06 11 +BoxingPunchingBag/v_BoxingPunchingBag_g07_c07 16 +Rafting/v_Rafting_g01_c04 72 +BrushingTeeth/v_BrushingTeeth_g07_c03 19 +SkyDiving/v_SkyDiving_g03_c03 82 +PommelHorse/v_PommelHorse_g05_c03 68 +Bowling/v_Bowling_g02_c04 15 +SoccerJuggling/v_SoccerJuggling_g07_c07 83 +HorseRace/v_HorseRace_g03_c03 40 +PommelHorse/v_PommelHorse_g01_c01 68 +BenchPress/v_BenchPress_g06_c06 9 +Kayaking/v_Kayaking_g04_c05 48 +PlayingViolin/v_PlayingViolin_g04_c03 66 +PlayingSitar/v_PlayingSitar_g03_c02 64 +BrushingTeeth/v_BrushingTeeth_g07_c02 19 +TaiChi/v_TaiChi_g01_c01 90 +BodyWeightSquats/v_BodyWeightSquats_g03_c04 14 +BoxingPunchingBag/v_BoxingPunchingBag_g02_c07 16 +TaiChi/v_TaiChi_g01_c02 90 +CricketShot/v_CricketShot_g05_c05 23 +PlayingTabla/v_PlayingTabla_g05_c01 65 +WalkingWithDog/v_WalkingWithDog_g07_c06 97 +Haircut/v_Haircut_g03_c06 33 +Rafting/v_Rafting_g06_c02 72 +VolleyballSpiking/v_VolleyballSpiking_g07_c03 96 +Nunchucks/v_Nunchucks_g03_c01 55 +CuttingInKitchen/v_CuttingInKitchen_g06_c02 24 +SkateBoarding/v_SkateBoarding_g02_c06 79 +SalsaSpin/v_SalsaSpin_g04_c02 76 +JumpRope/v_JumpRope_g07_c02 47 +IceDancing/v_IceDancing_g02_c06 43 +Fencing/v_Fencing_g03_c01 27 +Punch/v_Punch_g03_c02 70 +PlayingDaf/v_PlayingDaf_g01_c01 59 +Kayaking/v_Kayaking_g05_c04 48 +BaseballPitch/v_BaseballPitch_g05_c03 6 +IceDancing/v_IceDancing_g06_c02 43 +PlayingViolin/v_PlayingViolin_g04_c04 66 +JavelinThrow/v_JavelinThrow_g06_c01 44 +IceDancing/v_IceDancing_g04_c07 43 +SkateBoarding/v_SkateBoarding_g04_c03 79 +BlowDryHair/v_BlowDryHair_g03_c01 12 +PlayingDhol/v_PlayingDhol_g06_c07 60 +Basketball/v_Basketball_g03_c04 7 
+ShavingBeard/v_ShavingBeard_g03_c01 77 +Lunges/v_Lunges_g02_c01 51 +BreastStroke/v_BreastStroke_g03_c04 18 +BrushingTeeth/v_BrushingTeeth_g06_c02 19 +PlayingGuitar/v_PlayingGuitar_g07_c02 62 +JumpingJack/v_JumpingJack_g05_c05 46 +BandMarching/v_BandMarching_g01_c05 5 +Basketball/v_Basketball_g03_c02 7 +BabyCrawling/v_BabyCrawling_g02_c02 3 +JugglingBalls/v_JugglingBalls_g07_c07 45 +Bowling/v_Bowling_g07_c06 15 +CricketShot/v_CricketShot_g07_c02 23 +Knitting/v_Knitting_g03_c05 49 +ParallelBars/v_ParallelBars_g06_c07 56 +Rowing/v_Rowing_g05_c04 75 +HulaHoop/v_HulaHoop_g06_c04 42 +HeadMassage/v_HeadMassage_g06_c06 38 +HighJump/v_HighJump_g04_c06 39 +HammerThrow/v_HammerThrow_g01_c01 35 +UnevenBars/v_UnevenBars_g01_c03 95 +LongJump/v_LongJump_g05_c01 50 +RopeClimbing/v_RopeClimbing_g05_c06 74 +JumpingJack/v_JumpingJack_g05_c02 46 +PlayingFlute/v_PlayingFlute_g05_c04 61 +Mixing/v_Mixing_g01_c07 53 +Mixing/v_Mixing_g05_c05 53 +PlayingViolin/v_PlayingViolin_g07_c03 66 +JumpRope/v_JumpRope_g05_c02 47 +HammerThrow/v_HammerThrow_g01_c03 35 +RockClimbingIndoor/v_RockClimbingIndoor_g07_c06 73 +PlayingGuitar/v_PlayingGuitar_g06_c01 62 +BalanceBeam/v_BalanceBeam_g06_c03 4 +HorseRiding/v_HorseRiding_g07_c03 41 +ShavingBeard/v_ShavingBeard_g03_c03 77 +Kayaking/v_Kayaking_g07_c01 48 +Skijet/v_Skijet_g07_c04 81 +HighJump/v_HighJump_g05_c02 39 +JugglingBalls/v_JugglingBalls_g07_c01 45 +PushUps/v_PushUps_g07_c03 71 +Nunchucks/v_Nunchucks_g02_c06 55 +CricketShot/v_CricketShot_g01_c03 23 +TennisSwing/v_TennisSwing_g04_c03 91 +PlayingSitar/v_PlayingSitar_g03_c05 64 +JumpingJack/v_JumpingJack_g01_c05 46 +BaseballPitch/v_BaseballPitch_g06_c02 6 +ApplyEyeMakeup/v_ApplyEyeMakeup_g06_c05 0 +JumpRope/v_JumpRope_g05_c03 47 +Nunchucks/v_Nunchucks_g04_c01 55 +Biking/v_Biking_g06_c05 10 +HulaHoop/v_HulaHoop_g03_c03 42 +BoxingPunchingBag/v_BoxingPunchingBag_g05_c07 16 +JumpRope/v_JumpRope_g02_c07 47 +Fencing/v_Fencing_g02_c01 27 +MilitaryParade/v_MilitaryParade_g07_c01 52 
+PlayingGuitar/v_PlayingGuitar_g04_c05 62 +ShavingBeard/v_ShavingBeard_g05_c03 77 +Skiing/v_Skiing_g04_c04 80 +FrontCrawl/v_FrontCrawl_g07_c01 31 +SkyDiving/v_SkyDiving_g05_c04 82 +WritingOnBoard/v_WritingOnBoard_g03_c02 99 +Diving/v_Diving_g06_c01 25 +FrisbeeCatch/v_FrisbeeCatch_g04_c05 30 +WalkingWithDog/v_WalkingWithDog_g04_c05 97 +CricketShot/v_CricketShot_g01_c05 23 +Diving/v_Diving_g07_c03 25 +Skijet/v_Skijet_g01_c02 81 +BoxingPunchingBag/v_BoxingPunchingBag_g06_c07 16 +JumpingJack/v_JumpingJack_g01_c04 46 +WritingOnBoard/v_WritingOnBoard_g06_c01 99 +Skiing/v_Skiing_g04_c06 80 +Drumming/v_Drumming_g06_c06 26 +IceDancing/v_IceDancing_g07_c04 43 +Haircut/v_Haircut_g07_c06 33 +PommelHorse/v_PommelHorse_g04_c03 68 +TennisSwing/v_TennisSwing_g02_c03 91 +CliffDiving/v_CliffDiving_g03_c03 21 +PlayingSitar/v_PlayingSitar_g05_c04 64 +BasketballDunk/v_BasketballDunk_g07_c06 8 +Haircut/v_Haircut_g06_c02 33 +CleanAndJerk/v_CleanAndJerk_g05_c02 20 +Archery/v_Archery_g01_c07 2 +HeadMassage/v_HeadMassage_g03_c02 38 +FloorGymnastics/v_FloorGymnastics_g01_c04 29 +Surfing/v_Surfing_g07_c01 87 +HighJump/v_HighJump_g06_c01 39 +GolfSwing/v_GolfSwing_g06_c03 32 +SkyDiving/v_SkyDiving_g01_c03 82 +Haircut/v_Haircut_g07_c04 33 +HandstandPushups/v_HandStandPushups_g02_c02 36 +JumpRope/v_JumpRope_g01_c03 47 +JavelinThrow/v_JavelinThrow_g05_c05 44 +RopeClimbing/v_RopeClimbing_g05_c07 74 +PushUps/v_PushUps_g06_c01 71 +FrontCrawl/v_FrontCrawl_g02_c04 31 +CricketBowling/v_CricketBowling_g01_c04 22 +HorseRiding/v_HorseRiding_g04_c01 41 +FieldHockeyPenalty/v_FieldHockeyPenalty_g03_c01 28 +CliffDiving/v_CliffDiving_g01_c03 21 +PlayingCello/v_PlayingCello_g05_c06 58 +TaiChi/v_TaiChi_g06_c02 90 +Lunges/v_Lunges_g06_c07 51 +ApplyEyeMakeup/v_ApplyEyeMakeup_g06_c06 0 +PullUps/v_PullUps_g05_c02 69 +TaiChi/v_TaiChi_g06_c03 90 +ParallelBars/v_ParallelBars_g07_c06 56 +WritingOnBoard/v_WritingOnBoard_g07_c03 99 +PushUps/v_PushUps_g03_c03 71 +Diving/v_Diving_g02_c07 25 
+CuttingInKitchen/v_CuttingInKitchen_g05_c06 24 +BasketballDunk/v_BasketballDunk_g01_c05 8 +Shotput/v_Shotput_g07_c02 78 +Knitting/v_Knitting_g05_c02 49 +Bowling/v_Bowling_g06_c03 15 +PlayingPiano/v_PlayingPiano_g01_c01 63 +BlowingCandles/v_BlowingCandles_g04_c01 13 +Diving/v_Diving_g02_c05 25 +BoxingPunchingBag/v_BoxingPunchingBag_g01_c05 16 +ApplyLipstick/v_ApplyLipstick_g02_c02 1 +CleanAndJerk/v_CleanAndJerk_g06_c02 20 +PlayingDaf/v_PlayingDaf_g05_c03 59 +PullUps/v_PullUps_g05_c03 69 +PlayingViolin/v_PlayingViolin_g04_c01 66 +HammerThrow/v_HammerThrow_g02_c03 35 +CleanAndJerk/v_CleanAndJerk_g05_c03 20 +PlayingDaf/v_PlayingDaf_g04_c06 59 +Rafting/v_Rafting_g07_c01 72 +YoYo/v_YoYo_g01_c06 100 +FieldHockeyPenalty/v_FieldHockeyPenalty_g01_c04 28 +Hammering/v_Hammering_g03_c02 34 +JugglingBalls/v_JugglingBalls_g02_c06 45 +SkateBoarding/v_SkateBoarding_g02_c03 79 +Fencing/v_Fencing_g02_c05 27 +RockClimbingIndoor/v_RockClimbingIndoor_g07_c01 73 +StillRings/v_StillRings_g02_c04 85 +SalsaSpin/v_SalsaSpin_g03_c06 76 +SkyDiving/v_SkyDiving_g03_c05 82 +CricketBowling/v_CricketBowling_g07_c04 22 +HeadMassage/v_HeadMassage_g03_c03 38 +SumoWrestling/v_SumoWrestling_g01_c03 86 +Rowing/v_Rowing_g06_c02 75 +Rowing/v_Rowing_g02_c05 75 +IceDancing/v_IceDancing_g01_c07 43 +ApplyEyeMakeup/v_ApplyEyeMakeup_g03_c01 0 +PlayingViolin/v_PlayingViolin_g06_c02 66 +VolleyballSpiking/v_VolleyballSpiking_g02_c03 96 +Basketball/v_Basketball_g03_c05 7 +Bowling/v_Bowling_g04_c01 15 +BasketballDunk/v_BasketballDunk_g04_c03 8 +SalsaSpin/v_SalsaSpin_g02_c07 76 +ApplyLipstick/v_ApplyLipstick_g02_c01 1 +Swing/v_Swing_g01_c01 88 +BrushingTeeth/v_BrushingTeeth_g03_c01 19 +Basketball/v_Basketball_g02_c03 7 +ApplyEyeMakeup/v_ApplyEyeMakeup_g05_c04 0 +YoYo/v_YoYo_g03_c02 100 +PlayingTabla/v_PlayingTabla_g01_c01 65 +BaseballPitch/v_BaseballPitch_g05_c04 6 +PoleVault/v_PoleVault_g01_c04 67 +BoxingPunchingBag/v_BoxingPunchingBag_g03_c01 16 +JumpingJack/v_JumpingJack_g02_c01 46 
+FieldHockeyPenalty/v_FieldHockeyPenalty_g06_c07 28 +JugglingBalls/v_JugglingBalls_g07_c05 45 +MilitaryParade/v_MilitaryParade_g03_c02 52 +YoYo/v_YoYo_g06_c01 100 +BoxingSpeedBag/v_BoxingSpeedBag_g07_c02 17 +Lunges/v_Lunges_g03_c03 51 +RockClimbingIndoor/v_RockClimbingIndoor_g06_c02 73 +PlayingCello/v_PlayingCello_g03_c02 58 +HeadMassage/v_HeadMassage_g05_c06 38 +PommelHorse/v_PommelHorse_g03_c04 68 +Swing/v_Swing_g03_c02 88 +IceDancing/v_IceDancing_g02_c02 43 +BoxingPunchingBag/v_BoxingPunchingBag_g04_c01 16 +Drumming/v_Drumming_g07_c07 26 +ApplyLipstick/v_ApplyLipstick_g06_c04 1 +Mixing/v_Mixing_g07_c03 53 +Surfing/v_Surfing_g05_c03 87 +PlayingDhol/v_PlayingDhol_g02_c04 60 +BandMarching/v_BandMarching_g04_c04 5 +WalkingWithDog/v_WalkingWithDog_g06_c05 97 +IceDancing/v_IceDancing_g02_c07 43 +BenchPress/v_BenchPress_g01_c06 9 +Punch/v_Punch_g04_c02 70 +Knitting/v_Knitting_g04_c03 49 +PlayingCello/v_PlayingCello_g02_c07 58 +TennisSwing/v_TennisSwing_g05_c07 91 +HeadMassage/v_HeadMassage_g03_c04 38 +HulaHoop/v_HulaHoop_g02_c02 42 +ApplyLipstick/v_ApplyLipstick_g06_c05 1 +Knitting/v_Knitting_g01_c03 49 +BoxingSpeedBag/v_BoxingSpeedBag_g03_c05 17 +VolleyballSpiking/v_VolleyballSpiking_g04_c07 96 +BoxingPunchingBag/v_BoxingPunchingBag_g07_c05 16 +Swing/v_Swing_g07_c03 88 +HorseRiding/v_HorseRiding_g06_c07 41 +Shotput/v_Shotput_g03_c05 78 +JumpingJack/v_JumpingJack_g04_c01 46 +BodyWeightSquats/v_BodyWeightSquats_g02_c02 14 +PlayingSitar/v_PlayingSitar_g04_c02 64 +CliffDiving/v_CliffDiving_g05_c03 21 +IceDancing/v_IceDancing_g07_c05 43 +GolfSwing/v_GolfSwing_g02_c04 32 +BlowDryHair/v_BlowDryHair_g06_c03 12 +ApplyEyeMakeup/v_ApplyEyeMakeup_g05_c07 0 +MilitaryParade/v_MilitaryParade_g01_c04 52 +Lunges/v_Lunges_g01_c02 51 +CliffDiving/v_CliffDiving_g03_c05 21 +BlowDryHair/v_BlowDryHair_g06_c01 12 +Biking/v_Biking_g02_c07 10 +BandMarching/v_BandMarching_g03_c05 5 +PlayingDhol/v_PlayingDhol_g06_c04 60 +Nunchucks/v_Nunchucks_g05_c03 55 +Typing/v_Typing_g02_c05 94 
+PlayingViolin/v_PlayingViolin_g01_c03 66 +FrontCrawl/v_FrontCrawl_g03_c05 31 +RockClimbingIndoor/v_RockClimbingIndoor_g02_c05 73 +Swing/v_Swing_g03_c04 88 +Drumming/v_Drumming_g04_c04 26 +LongJump/v_LongJump_g02_c01 50 +Skijet/v_Skijet_g05_c02 81 +HammerThrow/v_HammerThrow_g04_c05 35 +Punch/v_Punch_g02_c02 70 +BaseballPitch/v_BaseballPitch_g02_c04 6 +SoccerJuggling/v_SoccerJuggling_g06_c02 83 +Surfing/v_Surfing_g05_c04 87 +PlayingCello/v_PlayingCello_g01_c02 58 +PoleVault/v_PoleVault_g02_c05 67 +Knitting/v_Knitting_g01_c04 49 +BlowDryHair/v_BlowDryHair_g01_c03 12 +PlayingGuitar/v_PlayingGuitar_g03_c01 62 +JumpRope/v_JumpRope_g07_c03 47 +PlayingTabla/v_PlayingTabla_g02_c03 65 +Hammering/v_Hammering_g03_c01 34 +Basketball/v_Basketball_g07_c04 7 +CricketBowling/v_CricketBowling_g02_c03 22 +TennisSwing/v_TennisSwing_g06_c07 91 +HammerThrow/v_HammerThrow_g04_c02 35 +Biking/v_Biking_g02_c03 10 +SkateBoarding/v_SkateBoarding_g03_c03 79 +UnevenBars/v_UnevenBars_g02_c03 95 +HulaHoop/v_HulaHoop_g06_c01 42 +Swing/v_Swing_g06_c01 88 +GolfSwing/v_GolfSwing_g03_c07 32 +CricketShot/v_CricketShot_g03_c07 23 +JumpRope/v_JumpRope_g02_c03 47 +HighJump/v_HighJump_g02_c06 39 +Biking/v_Biking_g06_c03 10 +VolleyballSpiking/v_VolleyballSpiking_g01_c02 96 +ApplyLipstick/v_ApplyLipstick_g03_c02 1 +JumpingJack/v_JumpingJack_g05_c04 46 +PullUps/v_PullUps_g01_c03 69 +Typing/v_Typing_g06_c02 94 +GolfSwing/v_GolfSwing_g05_c06 32 +ApplyLipstick/v_ApplyLipstick_g03_c01 1 +FrisbeeCatch/v_FrisbeeCatch_g04_c02 30 +TennisSwing/v_TennisSwing_g03_c06 91 +CuttingInKitchen/v_CuttingInKitchen_g04_c04 24 +Mixing/v_Mixing_g06_c05 53 +MilitaryParade/v_MilitaryParade_g06_c01 52 +BenchPress/v_BenchPress_g07_c06 9 +BodyWeightSquats/v_BodyWeightSquats_g02_c01 14 +PlayingTabla/v_PlayingTabla_g04_c01 65 +FieldHockeyPenalty/v_FieldHockeyPenalty_g04_c01 28 +PlayingSitar/v_PlayingSitar_g02_c01 64 +TableTennisShot/v_TableTennisShot_g06_c06 89 +YoYo/v_YoYo_g02_c03 100 +Fencing/v_Fencing_g04_c02 27 
+TableTennisShot/v_TableTennisShot_g03_c04 89 +CuttingInKitchen/v_CuttingInKitchen_g04_c05 24 +SumoWrestling/v_SumoWrestling_g05_c02 86 +BabyCrawling/v_BabyCrawling_g02_c03 3 +Surfing/v_Surfing_g03_c04 87 +Mixing/v_Mixing_g02_c02 53 +Diving/v_Diving_g03_c02 25 +PizzaTossing/v_PizzaTossing_g01_c04 57 +PlayingTabla/v_PlayingTabla_g04_c04 65 +BandMarching/v_BandMarching_g01_c01 5 +PlayingDhol/v_PlayingDhol_g05_c07 60 +LongJump/v_LongJump_g06_c01 50 +PlayingCello/v_PlayingCello_g01_c03 58 +BandMarching/v_BandMarching_g06_c02 5 +LongJump/v_LongJump_g01_c02 50 +Biking/v_Biking_g07_c03 10 +PlayingDaf/v_PlayingDaf_g05_c02 59 +Bowling/v_Bowling_g01_c07 15 +Swing/v_Swing_g02_c05 88 +BreastStroke/v_BreastStroke_g05_c04 18 +Surfing/v_Surfing_g07_c03 87 +JumpRope/v_JumpRope_g04_c02 47 +Archery/v_Archery_g02_c02 2 +Billiards/v_Billiards_g04_c01 11 +Punch/v_Punch_g07_c01 70 +BenchPress/v_BenchPress_g07_c02 9 +Drumming/v_Drumming_g01_c01 26 +CricketBowling/v_CricketBowling_g05_c03 22 +Rafting/v_Rafting_g01_c01 72 +TrampolineJumping/v_TrampolineJumping_g04_c05 93 +HeadMassage/v_HeadMassage_g06_c02 38 +CleanAndJerk/v_CleanAndJerk_g06_c04 20 +MoppingFloor/v_MoppingFloor_g02_c02 54 +ThrowDiscus/v_ThrowDiscus_g07_c07 92 +GolfSwing/v_GolfSwing_g04_c02 32 +BoxingPunchingBag/v_BoxingPunchingBag_g03_c02 16 +HeadMassage/v_HeadMassage_g07_c05 38 +ParallelBars/v_ParallelBars_g03_c02 56 +CleanAndJerk/v_CleanAndJerk_g03_c02 20 +SkyDiving/v_SkyDiving_g06_c03 82 +BoxingSpeedBag/v_BoxingSpeedBag_g02_c01 17 +CricketBowling/v_CricketBowling_g05_c01 22 +ShavingBeard/v_ShavingBeard_g03_c02 77 +JugglingBalls/v_JugglingBalls_g03_c04 45 +SkyDiving/v_SkyDiving_g07_c05 82 +WallPushups/v_WallPushups_g06_c01 98 +ApplyEyeMakeup/v_ApplyEyeMakeup_g02_c01 0 +SoccerJuggling/v_SoccerJuggling_g02_c06 83 +Mixing/v_Mixing_g01_c05 53 +ApplyEyeMakeup/v_ApplyEyeMakeup_g05_c01 0 +ParallelBars/v_ParallelBars_g05_c01 56 +Basketball/v_Basketball_g02_c05 7 +GolfSwing/v_GolfSwing_g04_c04 32 +JumpRope/v_JumpRope_g01_c01 47 
+SoccerPenalty/v_SoccerPenalty_g03_c02 84 +LongJump/v_LongJump_g04_c07 50 +BreastStroke/v_BreastStroke_g04_c04 18 +BandMarching/v_BandMarching_g06_c03 5 +ApplyEyeMakeup/v_ApplyEyeMakeup_g04_c05 0 +BabyCrawling/v_BabyCrawling_g07_c06 3 +VolleyballSpiking/v_VolleyballSpiking_g05_c03 96 +CuttingInKitchen/v_CuttingInKitchen_g05_c02 24 +WritingOnBoard/v_WritingOnBoard_g02_c01 99 +Typing/v_Typing_g05_c01 94 +BenchPress/v_BenchPress_g07_c07 9 +SalsaSpin/v_SalsaSpin_g01_c07 76 +SoccerJuggling/v_SoccerJuggling_g05_c06 83 +ApplyLipstick/v_ApplyLipstick_g04_c04 1 +CricketBowling/v_CricketBowling_g03_c01 22 +BlowDryHair/v_BlowDryHair_g05_c03 12 +FrontCrawl/v_FrontCrawl_g04_c03 31 +Nunchucks/v_Nunchucks_g04_c03 55 +TaiChi/v_TaiChi_g06_c01 90 +JumpRope/v_JumpRope_g01_c04 47 +BlowDryHair/v_BlowDryHair_g03_c04 12 +Mixing/v_Mixing_g05_c03 53 +GolfSwing/v_GolfSwing_g01_c04 32 +RockClimbingIndoor/v_RockClimbingIndoor_g03_c06 73 +PlayingGuitar/v_PlayingGuitar_g04_c01 62 +Archery/v_Archery_g05_c03 2 +ThrowDiscus/v_ThrowDiscus_g04_c03 92 +ThrowDiscus/v_ThrowDiscus_g01_c02 92 +CricketShot/v_CricketShot_g02_c05 23 +CricketShot/v_CricketShot_g04_c02 23 +RopeClimbing/v_RopeClimbing_g06_c02 74 +Fencing/v_Fencing_g06_c02 27 +WritingOnBoard/v_WritingOnBoard_g05_c04 99 +WritingOnBoard/v_WritingOnBoard_g03_c01 99 +BandMarching/v_BandMarching_g04_c01 5 +Skijet/v_Skijet_g07_c02 81 +TableTennisShot/v_TableTennisShot_g03_c05 89 +TableTennisShot/v_TableTennisShot_g04_c02 89 +PlayingPiano/v_PlayingPiano_g01_c02 63 +Typing/v_Typing_g04_c03 94 +PlayingFlute/v_PlayingFlute_g07_c06 61 +Lunges/v_Lunges_g01_c07 51 +BenchPress/v_BenchPress_g05_c04 9 +Mixing/v_Mixing_g05_c01 53 +HammerThrow/v_HammerThrow_g02_c01 35 +Hammering/v_Hammering_g06_c02 34 +FrisbeeCatch/v_FrisbeeCatch_g07_c01 30 +Swing/v_Swing_g07_c01 88 +FloorGymnastics/v_FloorGymnastics_g02_c03 29 +BoxingPunchingBag/v_BoxingPunchingBag_g06_c05 16 +GolfSwing/v_GolfSwing_g03_c05 32 +BabyCrawling/v_BabyCrawling_g05_c03 3 
+TrampolineJumping/v_TrampolineJumping_g01_c02 93 +RopeClimbing/v_RopeClimbing_g07_c04 74 +Bowling/v_Bowling_g07_c02 15 +PushUps/v_PushUps_g02_c01 71 +PlayingGuitar/v_PlayingGuitar_g05_c02 62 +GolfSwing/v_GolfSwing_g04_c01 32 +Knitting/v_Knitting_g02_c03 49 +Kayaking/v_Kayaking_g06_c06 48 +Skijet/v_Skijet_g03_c04 81 +Nunchucks/v_Nunchucks_g05_c02 55 +Rafting/v_Rafting_g02_c02 72 +HorseRiding/v_HorseRiding_g01_c02 41 +CricketShot/v_CricketShot_g02_c06 23 +MilitaryParade/v_MilitaryParade_g01_c02 52 +SkyDiving/v_SkyDiving_g03_c02 82 +HandstandWalking/v_HandstandWalking_g05_c01 37 +HorseRace/v_HorseRace_g04_c06 40 +Biking/v_Biking_g05_c02 10 +HulaHoop/v_HulaHoop_g04_c05 42 +SoccerJuggling/v_SoccerJuggling_g06_c05 83 +WalkingWithDog/v_WalkingWithDog_g06_c04 97 +StillRings/v_StillRings_g04_c03 85 +HandstandPushups/v_HandStandPushups_g07_c04 36 +PommelHorse/v_PommelHorse_g07_c02 68 +Swing/v_Swing_g05_c03 88 +TaiChi/v_TaiChi_g02_c03 90 +PlayingViolin/v_PlayingViolin_g06_c04 66 +RockClimbingIndoor/v_RockClimbingIndoor_g04_c04 73 +Typing/v_Typing_g03_c06 94 +StillRings/v_StillRings_g03_c05 85 +Biking/v_Biking_g05_c05 10 +Shotput/v_Shotput_g04_c02 78 +YoYo/v_YoYo_g01_c03 100 +PlayingSitar/v_PlayingSitar_g01_c02 64 +JumpRope/v_JumpRope_g07_c05 47 +HandstandWalking/v_HandstandWalking_g06_c02 37 +ThrowDiscus/v_ThrowDiscus_g06_c06 92 +SkateBoarding/v_SkateBoarding_g04_c02 79 +Swing/v_Swing_g02_c04 88 +Lunges/v_Lunges_g06_c06 51 +PullUps/v_PullUps_g02_c04 69 +UnevenBars/v_UnevenBars_g04_c03 95 +FrisbeeCatch/v_FrisbeeCatch_g01_c01 30 +SkateBoarding/v_SkateBoarding_g03_c01 79 +GolfSwing/v_GolfSwing_g03_c01 32 +WritingOnBoard/v_WritingOnBoard_g07_c05 99 +Punch/v_Punch_g02_c04 70 +BrushingTeeth/v_BrushingTeeth_g07_c01 19 +PlayingFlute/v_PlayingFlute_g05_c01 61 +Skiing/v_Skiing_g06_c02 80 +YoYo/v_YoYo_g01_c02 100 +TennisSwing/v_TennisSwing_g03_c01 91 +RopeClimbing/v_RopeClimbing_g07_c05 74 +TennisSwing/v_TennisSwing_g03_c05 91 +Drumming/v_Drumming_g07_c04 26 
+TableTennisShot/v_TableTennisShot_g05_c03 89 +JavelinThrow/v_JavelinThrow_g05_c06 44 +Lunges/v_Lunges_g05_c03 51 +PlayingGuitar/v_PlayingGuitar_g07_c06 62 +TrampolineJumping/v_TrampolineJumping_g05_c02 93 +BlowDryHair/v_BlowDryHair_g04_c04 12 +JavelinThrow/v_JavelinThrow_g01_c02 44 +Kayaking/v_Kayaking_g03_c01 48 +BenchPress/v_BenchPress_g05_c07 9 +ApplyEyeMakeup/v_ApplyEyeMakeup_g07_c01 0 +Hammering/v_Hammering_g03_c04 34 +Biking/v_Biking_g05_c03 10 +Swing/v_Swing_g06_c04 88 +SkyDiving/v_SkyDiving_g03_c04 82 +RockClimbingIndoor/v_RockClimbingIndoor_g01_c01 73 +PlayingTabla/v_PlayingTabla_g06_c04 65 +ThrowDiscus/v_ThrowDiscus_g06_c04 92 +BlowingCandles/v_BlowingCandles_g03_c04 13 +CleanAndJerk/v_CleanAndJerk_g04_c03 20 +Bowling/v_Bowling_g04_c04 15 +PlayingTabla/v_PlayingTabla_g07_c03 65 +BandMarching/v_BandMarching_g02_c06 5 +WalkingWithDog/v_WalkingWithDog_g07_c03 97 +HighJump/v_HighJump_g06_c02 39 +CliffDiving/v_CliffDiving_g04_c03 21 +Archery/v_Archery_g02_c03 2 +BreastStroke/v_BreastStroke_g07_c03 18 +Shotput/v_Shotput_g06_c04 78 +ShavingBeard/v_ShavingBeard_g07_c06 77 +FrisbeeCatch/v_FrisbeeCatch_g01_c05 30 +PoleVault/v_PoleVault_g06_c01 67 +ApplyEyeMakeup/v_ApplyEyeMakeup_g02_c02 0 +PlayingTabla/v_PlayingTabla_g07_c01 65 +FrisbeeCatch/v_FrisbeeCatch_g06_c02 30 +HeadMassage/v_HeadMassage_g01_c05 38 +Haircut/v_Haircut_g05_c04 33 +GolfSwing/v_GolfSwing_g03_c02 32 +JumpRope/v_JumpRope_g04_c03 47 +HandstandWalking/v_HandstandWalking_g01_c02 37 +BenchPress/v_BenchPress_g05_c06 9 +PizzaTossing/v_PizzaTossing_g02_c03 57 +PlayingCello/v_PlayingCello_g01_c05 58 +FloorGymnastics/v_FloorGymnastics_g04_c02 29 +Basketball/v_Basketball_g07_c02 7 +BaseballPitch/v_BaseballPitch_g05_c01 6 +WalkingWithDog/v_WalkingWithDog_g03_c05 97 +Rowing/v_Rowing_g04_c04 75 +PlayingViolin/v_PlayingViolin_g02_c03 66 +HorseRiding/v_HorseRiding_g02_c02 41 +Diving/v_Diving_g02_c01 25 +RopeClimbing/v_RopeClimbing_g06_c01 74 +StillRings/v_StillRings_g01_c01 85 
+PlayingCello/v_PlayingCello_g03_c03 58 +ApplyLipstick/v_ApplyLipstick_g04_c03 1 +JumpingJack/v_JumpingJack_g04_c04 46 +Shotput/v_Shotput_g03_c04 78 +MilitaryParade/v_MilitaryParade_g07_c04 52 +BrushingTeeth/v_BrushingTeeth_g07_c06 19 +Drumming/v_Drumming_g02_c03 26 +ApplyEyeMakeup/v_ApplyEyeMakeup_g03_c05 0 +JumpingJack/v_JumpingJack_g07_c05 46 +HulaHoop/v_HulaHoop_g01_c02 42 +Swing/v_Swing_g01_c04 88 +SoccerJuggling/v_SoccerJuggling_g03_c02 83 +CliffDiving/v_CliffDiving_g05_c06 21 +Shotput/v_Shotput_g04_c04 78 +CricketShot/v_CricketShot_g06_c03 23 +JumpingJack/v_JumpingJack_g03_c04 46 +Rowing/v_Rowing_g01_c02 75 +Punch/v_Punch_g07_c02 70 +SkateBoarding/v_SkateBoarding_g02_c04 79 +PlayingPiano/v_PlayingPiano_g05_c03 63 +BasketballDunk/v_BasketballDunk_g04_c01 8 +SumoWrestling/v_SumoWrestling_g07_c01 86 +WalkingWithDog/v_WalkingWithDog_g03_c01 97 +Typing/v_Typing_g01_c03 94 +PlayingDaf/v_PlayingDaf_g05_c07 59 +BodyWeightSquats/v_BodyWeightSquats_g06_c03 14 +JugglingBalls/v_JugglingBalls_g02_c04 45 +UnevenBars/v_UnevenBars_g01_c04 95 +PlayingFlute/v_PlayingFlute_g07_c05 61 +Rowing/v_Rowing_g01_c04 75 +TrampolineJumping/v_TrampolineJumping_g01_c01 93 +SoccerPenalty/v_SoccerPenalty_g03_c03 84 +PizzaTossing/v_PizzaTossing_g04_c06 57 +WallPushups/v_WallPushups_g05_c02 98 +PlayingSitar/v_PlayingSitar_g06_c03 64 +MoppingFloor/v_MoppingFloor_g05_c03 54 +Nunchucks/v_Nunchucks_g07_c04 55 +Diving/v_Diving_g03_c04 25 +SalsaSpin/v_SalsaSpin_g07_c05 76 +HulaHoop/v_HulaHoop_g05_c04 42 +ApplyEyeMakeup/v_ApplyEyeMakeup_g07_c05 0 +Drumming/v_Drumming_g07_c02 26 +BabyCrawling/v_BabyCrawling_g01_c03 3 +BalanceBeam/v_BalanceBeam_g01_c01 4 +PlayingSitar/v_PlayingSitar_g07_c06 64 +PommelHorse/v_PommelHorse_g01_c04 68 +BreastStroke/v_BreastStroke_g02_c04 18 +BreastStroke/v_BreastStroke_g03_c03 18 +HighJump/v_HighJump_g03_c02 39 +LongJump/v_LongJump_g01_c06 50 +Skijet/v_Skijet_g02_c03 81 +BoxingSpeedBag/v_BoxingSpeedBag_g02_c03 17 +RopeClimbing/v_RopeClimbing_g04_c03 74 
+Bowling/v_Bowling_g03_c03 15 +CricketShot/v_CricketShot_g04_c04 23 +HeadMassage/v_HeadMassage_g07_c01 38 +PlayingTabla/v_PlayingTabla_g04_c03 65 +CuttingInKitchen/v_CuttingInKitchen_g03_c03 24 +BandMarching/v_BandMarching_g02_c05 5 +Hammering/v_Hammering_g06_c03 34 +BandMarching/v_BandMarching_g03_c07 5 +JumpRope/v_JumpRope_g02_c04 47 +MilitaryParade/v_MilitaryParade_g07_c02 52 +CleanAndJerk/v_CleanAndJerk_g02_c01 20 +Hammering/v_Hammering_g05_c01 34 +CliffDiving/v_CliffDiving_g07_c01 21 +Billiards/v_Billiards_g05_c06 11 +PlayingGuitar/v_PlayingGuitar_g01_c05 62 +BandMarching/v_BandMarching_g07_c01 5 +Punch/v_Punch_g01_c04 70 +PlayingDhol/v_PlayingDhol_g04_c03 60 +LongJump/v_LongJump_g02_c02 50 +Mixing/v_Mixing_g01_c04 53 +JavelinThrow/v_JavelinThrow_g04_c04 44 +BenchPress/v_BenchPress_g01_c01 9 +Skiing/v_Skiing_g04_c03 80 +WalkingWithDog/v_WalkingWithDog_g05_c04 97 +Kayaking/v_Kayaking_g07_c04 48 +Punch/v_Punch_g03_c04 70 +PlayingGuitar/v_PlayingGuitar_g03_c04 62 +HandstandPushups/v_HandStandPushups_g03_c01 36 +BaseballPitch/v_BaseballPitch_g04_c01 6 +BenchPress/v_BenchPress_g02_c01 9 +BabyCrawling/v_BabyCrawling_g06_c04 3 +MilitaryParade/v_MilitaryParade_g02_c03 52 +CliffDiving/v_CliffDiving_g06_c01 21 +CricketBowling/v_CricketBowling_g02_c04 22 +BalanceBeam/v_BalanceBeam_g05_c01 4 +PommelHorse/v_PommelHorse_g07_c07 68 +BasketballDunk/v_BasketballDunk_g07_c04 8 +Archery/v_Archery_g05_c02 2 +JugglingBalls/v_JugglingBalls_g07_c02 45 +Fencing/v_Fencing_g07_c02 27 +HammerThrow/v_HammerThrow_g05_c02 35 +PoleVault/v_PoleVault_g02_c07 67 +BandMarching/v_BandMarching_g04_c02 5 +Kayaking/v_Kayaking_g06_c03 48 +BaseballPitch/v_BaseballPitch_g04_c05 6 +Swing/v_Swing_g06_c05 88 +TennisSwing/v_TennisSwing_g07_c07 91 +IceDancing/v_IceDancing_g06_c03 43 +Biking/v_Biking_g07_c01 10 +PizzaTossing/v_PizzaTossing_g04_c02 57 +HeadMassage/v_HeadMassage_g05_c03 38 +GolfSwing/v_GolfSwing_g05_c05 32 +GolfSwing/v_GolfSwing_g05_c03 32 +PullUps/v_PullUps_g07_c04 69 
+UnevenBars/v_UnevenBars_g07_c03 95 +HandstandPushups/v_HandStandPushups_g07_c02 36 +PushUps/v_PushUps_g05_c03 71 +ThrowDiscus/v_ThrowDiscus_g06_c01 92 +Knitting/v_Knitting_g05_c03 49 +SoccerPenalty/v_SoccerPenalty_g04_c02 84 +Kayaking/v_Kayaking_g04_c02 48 +Rowing/v_Rowing_g03_c06 75 +ShavingBeard/v_ShavingBeard_g05_c02 77 +BlowingCandles/v_BlowingCandles_g03_c02 13 +Rafting/v_Rafting_g07_c04 72 +HeadMassage/v_HeadMassage_g06_c04 38 +JavelinThrow/v_JavelinThrow_g01_c01 44 +SalsaSpin/v_SalsaSpin_g01_c01 76 +PlayingCello/v_PlayingCello_g01_c04 58 +PlayingCello/v_PlayingCello_g04_c06 58 +PullUps/v_PullUps_g06_c02 69 +HighJump/v_HighJump_g04_c01 39 +FloorGymnastics/v_FloorGymnastics_g06_c03 29 +SoccerPenalty/v_SoccerPenalty_g02_c02 84 +Swing/v_Swing_g03_c03 88 +PoleVault/v_PoleVault_g01_c05 67 +BlowDryHair/v_BlowDryHair_g07_c04 12 +JumpRope/v_JumpRope_g06_c01 47 +HighJump/v_HighJump_g04_c04 39 +BabyCrawling/v_BabyCrawling_g02_c04 3 +Skiing/v_Skiing_g05_c01 80 +PlayingFlute/v_PlayingFlute_g02_c05 61 +FieldHockeyPenalty/v_FieldHockeyPenalty_g05_c07 28 +Skiing/v_Skiing_g04_c07 80 +MilitaryParade/v_MilitaryParade_g07_c06 52 +PlayingPiano/v_PlayingPiano_g07_c04 63 +HulaHoop/v_HulaHoop_g01_c06 42 +Typing/v_Typing_g06_c01 94 +HammerThrow/v_HammerThrow_g02_c07 35 +JavelinThrow/v_JavelinThrow_g04_c02 44 +SumoWrestling/v_SumoWrestling_g07_c03 86 +PushUps/v_PushUps_g01_c03 71 +BreastStroke/v_BreastStroke_g01_c04 18 +PoleVault/v_PoleVault_g03_c01 67 +Mixing/v_Mixing_g03_c04 53 +Surfing/v_Surfing_g02_c01 87 +HorseRiding/v_HorseRiding_g04_c04 41 +TrampolineJumping/v_TrampolineJumping_g06_c03 93 +ShavingBeard/v_ShavingBeard_g02_c06 77 +FieldHockeyPenalty/v_FieldHockeyPenalty_g05_c01 28 +Skijet/v_Skijet_g05_c01 81 +Archery/v_Archery_g07_c03 2 +Skiing/v_Skiing_g03_c04 80 +BandMarching/v_BandMarching_g01_c03 5 +BaseballPitch/v_BaseballPitch_g02_c02 6 +ThrowDiscus/v_ThrowDiscus_g02_c04 92 +FrontCrawl/v_FrontCrawl_g03_c01 31 +CricketShot/v_CricketShot_g04_c06 23 +Diving/v_Diving_g04_c03 
25 +BrushingTeeth/v_BrushingTeeth_g04_c03 19 +Bowling/v_Bowling_g01_c01 15 +ThrowDiscus/v_ThrowDiscus_g05_c05 92 +BlowingCandles/v_BlowingCandles_g07_c04 13 +SoccerJuggling/v_SoccerJuggling_g02_c05 83 +TennisSwing/v_TennisSwing_g02_c05 91 +BenchPress/v_BenchPress_g04_c02 9 +Skijet/v_Skijet_g03_c02 81 +PlayingSitar/v_PlayingSitar_g02_c03 64 +Rowing/v_Rowing_g02_c03 75 +ApplyEyeMakeup/v_ApplyEyeMakeup_g04_c01 0 +Hammering/v_Hammering_g01_c03 34 +Kayaking/v_Kayaking_g03_c03 48 +BoxingPunchingBag/v_BoxingPunchingBag_g04_c02 16 +HandstandPushups/v_HandStandPushups_g01_c01 36 +FrontCrawl/v_FrontCrawl_g07_c04 31 +ApplyEyeMakeup/v_ApplyEyeMakeup_g02_c03 0 +PlayingTabla/v_PlayingTabla_g03_c03 65 +Archery/v_Archery_g03_c04 2 +Archery/v_Archery_g01_c01 2 +FrontCrawl/v_FrontCrawl_g07_c03 31 +PlayingGuitar/v_PlayingGuitar_g03_c03 62 +HulaHoop/v_HulaHoop_g03_c02 42 +IceDancing/v_IceDancing_g04_c05 43 +FieldHockeyPenalty/v_FieldHockeyPenalty_g01_c02 28 +Lunges/v_Lunges_g05_c04 51 +JumpingJack/v_JumpingJack_g05_c06 46 +TennisSwing/v_TennisSwing_g05_c05 91 +PlayingCello/v_PlayingCello_g04_c03 58 +Drumming/v_Drumming_g03_c01 26 +JumpingJack/v_JumpingJack_g07_c04 46 +Skijet/v_Skijet_g04_c03 81 +JumpRope/v_JumpRope_g04_c06 47 +Nunchucks/v_Nunchucks_g04_c04 55 +HandstandWalking/v_HandstandWalking_g05_c07 37 +PlayingDhol/v_PlayingDhol_g01_c05 60 +PlayingSitar/v_PlayingSitar_g05_c03 64 +RockClimbingIndoor/v_RockClimbingIndoor_g03_c02 73 +Surfing/v_Surfing_g04_c01 87 +FrisbeeCatch/v_FrisbeeCatch_g02_c04 30 +BlowingCandles/v_BlowingCandles_g06_c05 13 +BandMarching/v_BandMarching_g01_c02 5 +JumpRope/v_JumpRope_g04_c05 47 +BoxingPunchingBag/v_BoxingPunchingBag_g06_c02 16 +PlayingSitar/v_PlayingSitar_g05_c07 64 +SumoWrestling/v_SumoWrestling_g04_c02 86 +WritingOnBoard/v_WritingOnBoard_g06_c07 99 +BalanceBeam/v_BalanceBeam_g03_c04 4 +Kayaking/v_Kayaking_g01_c03 48 +Archery/v_Archery_g03_c05 2 +JumpRope/v_JumpRope_g07_c04 47 +Bowling/v_Bowling_g06_c05 15 +Typing/v_Typing_g03_c03 94 
+RockClimbingIndoor/v_RockClimbingIndoor_g04_c01 73 +Lunges/v_Lunges_g06_c01 51 +TennisSwing/v_TennisSwing_g03_c03 91 +BlowDryHair/v_BlowDryHair_g04_c05 12 +Archery/v_Archery_g05_c01 2 +BoxingPunchingBag/v_BoxingPunchingBag_g06_c04 16 +PlayingDhol/v_PlayingDhol_g02_c02 60 +FrontCrawl/v_FrontCrawl_g03_c06 31 +PoleVault/v_PoleVault_g07_c02 67 +BrushingTeeth/v_BrushingTeeth_g04_c04 19 +WalkingWithDog/v_WalkingWithDog_g05_c05 97 +RockClimbingIndoor/v_RockClimbingIndoor_g03_c07 73 +PushUps/v_PushUps_g04_c03 71 +PushUps/v_PushUps_g05_c01 71 +PlayingCello/v_PlayingCello_g06_c03 58 +RockClimbingIndoor/v_RockClimbingIndoor_g05_c02 73 +BandMarching/v_BandMarching_g01_c06 5 +Typing/v_Typing_g06_c05 94 +Billiards/v_Billiards_g04_c06 11 +BaseballPitch/v_BaseballPitch_g06_c05 6 +BoxingSpeedBag/v_BoxingSpeedBag_g07_c03 17 +Haircut/v_Haircut_g02_c04 33 +RockClimbingIndoor/v_RockClimbingIndoor_g03_c04 73 +PlayingDhol/v_PlayingDhol_g03_c01 60 +VolleyballSpiking/v_VolleyballSpiking_g07_c06 96 +Bowling/v_Bowling_g02_c01 15 +Haircut/v_Haircut_g01_c04 33 +MoppingFloor/v_MoppingFloor_g05_c04 54 +Fencing/v_Fencing_g02_c04 27 +SoccerJuggling/v_SoccerJuggling_g05_c04 83 +BrushingTeeth/v_BrushingTeeth_g07_c05 19 +SkyDiving/v_SkyDiving_g06_c01 82 +Hammering/v_Hammering_g07_c05 34 +BalanceBeam/v_BalanceBeam_g02_c01 4 +JumpRope/v_JumpRope_g07_c06 47 +HighJump/v_HighJump_g01_c03 39 +Shotput/v_Shotput_g01_c02 78 +Drumming/v_Drumming_g02_c07 26 +HorseRace/v_HorseRace_g05_c04 40 +BrushingTeeth/v_BrushingTeeth_g05_c01 19 +Lunges/v_Lunges_g07_c03 51 +BodyWeightSquats/v_BodyWeightSquats_g05_c03 14 +WallPushups/v_WallPushups_g04_c03 98 +BoxingPunchingBag/v_BoxingPunchingBag_g05_c01 16 +Bowling/v_Bowling_g01_c03 15 +IceDancing/v_IceDancing_g01_c02 43 +Diving/v_Diving_g01_c02 25 +CuttingInKitchen/v_CuttingInKitchen_g07_c03 24 +SoccerJuggling/v_SoccerJuggling_g03_c03 83 +SkyDiving/v_SkyDiving_g01_c04 82 +CleanAndJerk/v_CleanAndJerk_g02_c04 20 +HeadMassage/v_HeadMassage_g02_c03 38 
+HandstandWalking/v_HandstandWalking_g05_c02 37 +VolleyballSpiking/v_VolleyballSpiking_g05_c01 96 +Knitting/v_Knitting_g07_c01 49 +FrisbeeCatch/v_FrisbeeCatch_g01_c02 30 +Lunges/v_Lunges_g01_c05 51 +BoxingSpeedBag/v_BoxingSpeedBag_g05_c03 17 +JugglingBalls/v_JugglingBalls_g06_c06 45 +PlayingDaf/v_PlayingDaf_g02_c03 59 +JugglingBalls/v_JugglingBalls_g04_c03 45 +YoYo/v_YoYo_g06_c04 100 +CuttingInKitchen/v_CuttingInKitchen_g06_c04 24 +ThrowDiscus/v_ThrowDiscus_g07_c04 92 +MoppingFloor/v_MoppingFloor_g02_c04 54 +PlayingGuitar/v_PlayingGuitar_g01_c02 62 +PoleVault/v_PoleVault_g05_c03 67 +PoleVault/v_PoleVault_g04_c03 67 +Bowling/v_Bowling_g05_c03 15 +VolleyballSpiking/v_VolleyballSpiking_g01_c03 96 +Haircut/v_Haircut_g04_c01 33 +ApplyEyeMakeup/v_ApplyEyeMakeup_g01_c04 0 +ShavingBeard/v_ShavingBeard_g06_c02 77 +SkateBoarding/v_SkateBoarding_g05_c03 79 +BrushingTeeth/v_BrushingTeeth_g05_c03 19 +ApplyLipstick/v_ApplyLipstick_g02_c04 1 +Drumming/v_Drumming_g01_c07 26 +Rowing/v_Rowing_g01_c01 75 +SumoWrestling/v_SumoWrestling_g05_c01 86 +TableTennisShot/v_TableTennisShot_g06_c01 89 +CleanAndJerk/v_CleanAndJerk_g03_c01 20 +Kayaking/v_Kayaking_g04_c01 48 +RockClimbingIndoor/v_RockClimbingIndoor_g02_c01 73 +Punch/v_Punch_g02_c03 70 +ParallelBars/v_ParallelBars_g05_c03 56 +Skiing/v_Skiing_g06_c04 80 +HorseRiding/v_HorseRiding_g06_c06 41 +PizzaTossing/v_PizzaTossing_g04_c07 57 +Billiards/v_Billiards_g06_c05 11 +HorseRiding/v_HorseRiding_g07_c06 41 +Bowling/v_Bowling_g03_c06 15 +PlayingFlute/v_PlayingFlute_g05_c05 61 +Skijet/v_Skijet_g07_c03 81 +Hammering/v_Hammering_g07_c02 34 +CleanAndJerk/v_CleanAndJerk_g03_c05 20 +StillRings/v_StillRings_g07_c01 85 +WritingOnBoard/v_WritingOnBoard_g03_c05 99 +HeadMassage/v_HeadMassage_g01_c03 38 +IceDancing/v_IceDancing_g01_c04 43 +StillRings/v_StillRings_g06_c02 85 +PlayingFlute/v_PlayingFlute_g02_c06 61 +SalsaSpin/v_SalsaSpin_g05_c02 76 +HighJump/v_HighJump_g07_c04 39 +ThrowDiscus/v_ThrowDiscus_g06_c02 92 
+BlowingCandles/v_BlowingCandles_g02_c02 13 +Diving/v_Diving_g05_c05 25 +MoppingFloor/v_MoppingFloor_g04_c01 54 +Biking/v_Biking_g04_c04 10 +RopeClimbing/v_RopeClimbing_g07_c02 74 +FrontCrawl/v_FrontCrawl_g04_c01 31 +CliffDiving/v_CliffDiving_g01_c04 21 +Billiards/v_Billiards_g04_c02 11 +RockClimbingIndoor/v_RockClimbingIndoor_g05_c04 73 +SkateBoarding/v_SkateBoarding_g03_c02 79 +HighJump/v_HighJump_g07_c02 39 +BaseballPitch/v_BaseballPitch_g04_c04 6 +HammerThrow/v_HammerThrow_g03_c01 35 +Mixing/v_Mixing_g01_c03 53 +PlayingCello/v_PlayingCello_g04_c05 58 +Drumming/v_Drumming_g05_c05 26 +PlayingCello/v_PlayingCello_g03_c01 58 +PlayingDaf/v_PlayingDaf_g05_c01 59 +BoxingSpeedBag/v_BoxingSpeedBag_g05_c02 17 +Lunges/v_Lunges_g01_c03 51 +Rowing/v_Rowing_g06_c04 75 +JavelinThrow/v_JavelinThrow_g07_c03 44 +Haircut/v_Haircut_g05_c02 33 +Drumming/v_Drumming_g07_c05 26 +SoccerJuggling/v_SoccerJuggling_g05_c03 83 +BrushingTeeth/v_BrushingTeeth_g06_c04 19 +HeadMassage/v_HeadMassage_g03_c01 38 +PlayingFlute/v_PlayingFlute_g03_c04 61 +Mixing/v_Mixing_g01_c02 53 +PlayingFlute/v_PlayingFlute_g07_c01 61 +HandstandPushups/v_HandStandPushups_g05_c02 36 +HorseRiding/v_HorseRiding_g04_c06 41 +VolleyballSpiking/v_VolleyballSpiking_g03_c01 96 +PlayingDaf/v_PlayingDaf_g06_c05 59 +Swing/v_Swing_g01_c02 88 +Rafting/v_Rafting_g03_c01 72 +ThrowDiscus/v_ThrowDiscus_g01_c04 92 +UnevenBars/v_UnevenBars_g03_c04 95 +PlayingCello/v_PlayingCello_g06_c05 58 +TennisSwing/v_TennisSwing_g06_c02 91 +HighJump/v_HighJump_g02_c03 39 +PlayingTabla/v_PlayingTabla_g03_c02 65 +TrampolineJumping/v_TrampolineJumping_g06_c01 93 +ApplyEyeMakeup/v_ApplyEyeMakeup_g04_c03 0 +HighJump/v_HighJump_g07_c06 39 +WalkingWithDog/v_WalkingWithDog_g04_c03 97 +PlayingSitar/v_PlayingSitar_g04_c05 64 +HandstandPushups/v_HandStandPushups_g03_c04 36 +CricketShot/v_CricketShot_g06_c07 23 +Swing/v_Swing_g05_c06 88 +TaiChi/v_TaiChi_g03_c03 90 +Basketball/v_Basketball_g01_c05 7 +ShavingBeard/v_ShavingBeard_g06_c06 77 
+BoxingPunchingBag/v_BoxingPunchingBag_g02_c03 16 +Basketball/v_Basketball_g02_c04 7 +ParallelBars/v_ParallelBars_g05_c02 56 +JavelinThrow/v_JavelinThrow_g02_c01 44 +WalkingWithDog/v_WalkingWithDog_g05_c01 97 +PlayingDhol/v_PlayingDhol_g03_c05 60 +PlayingPiano/v_PlayingPiano_g06_c04 63 +Shotput/v_Shotput_g04_c03 78 +CleanAndJerk/v_CleanAndJerk_g07_c04 20 +ApplyEyeMakeup/v_ApplyEyeMakeup_g01_c01 0 +MoppingFloor/v_MoppingFloor_g03_c01 54 +FrontCrawl/v_FrontCrawl_g01_c04 31 +WallPushups/v_WallPushups_g05_c03 98 +Archery/v_Archery_g03_c03 2 +HorseRiding/v_HorseRiding_g06_c03 41 +VolleyballSpiking/v_VolleyballSpiking_g02_c04 96 +WritingOnBoard/v_WritingOnBoard_g02_c07 99 +Mixing/v_Mixing_g04_c01 53 +PoleVault/v_PoleVault_g03_c06 67 +BoxingSpeedBag/v_BoxingSpeedBag_g04_c05 17 +Drumming/v_Drumming_g06_c02 26 +JavelinThrow/v_JavelinThrow_g03_c03 44 +BasketballDunk/v_BasketballDunk_g01_c03 8 +FieldHockeyPenalty/v_FieldHockeyPenalty_g02_c02 28 +TaiChi/v_TaiChi_g07_c01 90 +BrushingTeeth/v_BrushingTeeth_g03_c02 19 +Punch/v_Punch_g06_c06 70 +HulaHoop/v_HulaHoop_g07_c01 42 +CricketShot/v_CricketShot_g01_c07 23 +ShavingBeard/v_ShavingBeard_g01_c01 77 +Shotput/v_Shotput_g07_c07 78 +GolfSwing/v_GolfSwing_g03_c06 32 +HorseRace/v_HorseRace_g02_c02 40 +BalanceBeam/v_BalanceBeam_g05_c03 4 +CliffDiving/v_CliffDiving_g01_c01 21 +Basketball/v_Basketball_g06_c02 7 +HighJump/v_HighJump_g02_c01 39 +CuttingInKitchen/v_CuttingInKitchen_g06_c03 24 +Billiards/v_Billiards_g06_c03 11 +Surfing/v_Surfing_g01_c02 87 +TennisSwing/v_TennisSwing_g03_c07 91 +FieldHockeyPenalty/v_FieldHockeyPenalty_g07_c01 28 +CricketBowling/v_CricketBowling_g04_c04 22 +Shotput/v_Shotput_g03_c03 78 +CricketShot/v_CricketShot_g03_c06 23 +PommelHorse/v_PommelHorse_g02_c03 68 +HighJump/v_HighJump_g04_c05 39 +RopeClimbing/v_RopeClimbing_g01_c04 74 +PizzaTossing/v_PizzaTossing_g06_c04 57 +JumpRope/v_JumpRope_g01_c02 47 +YoYo/v_YoYo_g04_c04 100 +PlayingCello/v_PlayingCello_g07_c02 58 +HighJump/v_HighJump_g04_c03 39 
+Shotput/v_Shotput_g01_c06 78 +WalkingWithDog/v_WalkingWithDog_g02_c01 97 +PlayingDaf/v_PlayingDaf_g02_c04 59 +FrisbeeCatch/v_FrisbeeCatch_g05_c03 30 +PommelHorse/v_PommelHorse_g04_c02 68 +CricketShot/v_CricketShot_g05_c03 23 +Basketball/v_Basketball_g04_c01 7 +PullUps/v_PullUps_g04_c01 69 +WritingOnBoard/v_WritingOnBoard_g01_c01 99 +PlayingFlute/v_PlayingFlute_g01_c07 61 +PoleVault/v_PoleVault_g02_c02 67 +CricketShot/v_CricketShot_g03_c02 23 +Diving/v_Diving_g04_c04 25 +SumoWrestling/v_SumoWrestling_g06_c05 86 +SumoWrestling/v_SumoWrestling_g05_c04 86 +HorseRace/v_HorseRace_g03_c05 40 +ApplyLipstick/v_ApplyLipstick_g03_c04 1 +Billiards/v_Billiards_g02_c03 11 +SoccerPenalty/v_SoccerPenalty_g04_c03 84 +TennisSwing/v_TennisSwing_g07_c04 91 +TrampolineJumping/v_TrampolineJumping_g06_c04 93 +BasketballDunk/v_BasketballDunk_g05_c05 8 +Lunges/v_Lunges_g05_c01 51 +Billiards/v_Billiards_g03_c04 11 +CricketBowling/v_CricketBowling_g01_c02 22 +VolleyballSpiking/v_VolleyballSpiking_g06_c04 96 +Basketball/v_Basketball_g01_c06 7 +PlayingDhol/v_PlayingDhol_g02_c05 60 +TrampolineJumping/v_TrampolineJumping_g06_c02 93 +TrampolineJumping/v_TrampolineJumping_g03_c02 93 +PlayingPiano/v_PlayingPiano_g04_c01 63 +Rafting/v_Rafting_g01_c03 72 +Archery/v_Archery_g06_c02 2 +ShavingBeard/v_ShavingBeard_g07_c04 77 +BasketballDunk/v_BasketballDunk_g06_c01 8 +BlowDryHair/v_BlowDryHair_g07_c06 12 +Surfing/v_Surfing_g02_c02 87 +BasketballDunk/v_BasketballDunk_g01_c01 8 +IceDancing/v_IceDancing_g05_c04 43 +PlayingDhol/v_PlayingDhol_g01_c06 60 +Fencing/v_Fencing_g01_c04 27 +PizzaTossing/v_PizzaTossing_g06_c01 57 +UnevenBars/v_UnevenBars_g06_c03 95 +FrontCrawl/v_FrontCrawl_g05_c01 31 +TrampolineJumping/v_TrampolineJumping_g05_c03 93 +SkyDiving/v_SkyDiving_g02_c03 82 +PushUps/v_PushUps_g06_c03 71 +Hammering/v_Hammering_g06_c01 34 +ApplyEyeMakeup/v_ApplyEyeMakeup_g06_c07 0 +MilitaryParade/v_MilitaryParade_g01_c03 52 +HorseRiding/v_HorseRiding_g03_c03 41 +PlayingGuitar/v_PlayingGuitar_g03_c06 62 
+PizzaTossing/v_PizzaTossing_g03_c02 57 +SoccerJuggling/v_SoccerJuggling_g06_c03 83 +BlowingCandles/v_BlowingCandles_g05_c02 13 +ShavingBeard/v_ShavingBeard_g04_c02 77 +MoppingFloor/v_MoppingFloor_g02_c06 54 +PizzaTossing/v_PizzaTossing_g04_c04 57 +StillRings/v_StillRings_g07_c03 85 +SoccerPenalty/v_SoccerPenalty_g06_c01 84 +Haircut/v_Haircut_g06_c03 33 +BalanceBeam/v_BalanceBeam_g01_c04 4 +SkyDiving/v_SkyDiving_g05_c01 82 +Archery/v_Archery_g04_c01 2 +VolleyballSpiking/v_VolleyballSpiking_g04_c06 96 +RopeClimbing/v_RopeClimbing_g02_c06 74 +Bowling/v_Bowling_g07_c04 15 +MoppingFloor/v_MoppingFloor_g03_c03 54 +TennisSwing/v_TennisSwing_g01_c01 91 +Typing/v_Typing_g05_c06 94 +BasketballDunk/v_BasketballDunk_g03_c01 8 +FloorGymnastics/v_FloorGymnastics_g05_c01 29 +SumoWrestling/v_SumoWrestling_g07_c04 86 +Rowing/v_Rowing_g07_c04 75 +VolleyballSpiking/v_VolleyballSpiking_g07_c02 96 +MilitaryParade/v_MilitaryParade_g06_c02 52 +FieldHockeyPenalty/v_FieldHockeyPenalty_g06_c01 28 +ApplyEyeMakeup/v_ApplyEyeMakeup_g05_c06 0 +Fencing/v_Fencing_g04_c03 27 +PullUps/v_PullUps_g07_c01 69 +UnevenBars/v_UnevenBars_g05_c02 95 +BoxingSpeedBag/v_BoxingSpeedBag_g07_c01 17 +TennisSwing/v_TennisSwing_g07_c05 91 +ApplyLipstick/v_ApplyLipstick_g07_c01 1 +ApplyEyeMakeup/v_ApplyEyeMakeup_g07_c03 0 +JugglingBalls/v_JugglingBalls_g02_c02 45 +BrushingTeeth/v_BrushingTeeth_g02_c06 19 +HighJump/v_HighJump_g01_c02 39 +ApplyEyeMakeup/v_ApplyEyeMakeup_g07_c06 0 +Hammering/v_Hammering_g04_c02 34 +CleanAndJerk/v_CleanAndJerk_g05_c04 20 +WallPushups/v_WallPushups_g06_c03 98 +SkateBoarding/v_SkateBoarding_g05_c04 79 +BrushingTeeth/v_BrushingTeeth_g04_c02 19 +Archery/v_Archery_g06_c04 2 +SumoWrestling/v_SumoWrestling_g04_c01 86 +WritingOnBoard/v_WritingOnBoard_g06_c06 99 +BoxingSpeedBag/v_BoxingSpeedBag_g07_c05 17 +Drumming/v_Drumming_g04_c06 26 +HandstandPushups/v_HandStandPushups_g03_c03 36 +Skiing/v_Skiing_g01_c05 80 +Shotput/v_Shotput_g02_c03 78 +BasketballDunk/v_BasketballDunk_g07_c03 8 
+BreastStroke/v_BreastStroke_g04_c03 18 +Rafting/v_Rafting_g04_c01 72 +PlayingTabla/v_PlayingTabla_g06_c03 65 +MilitaryParade/v_MilitaryParade_g05_c01 52 +PizzaTossing/v_PizzaTossing_g05_c04 57 +Surfing/v_Surfing_g05_c01 87 +PushUps/v_PushUps_g04_c04 71 +Skiing/v_Skiing_g07_c02 80 +Skijet/v_Skijet_g06_c01 81 +PlayingCello/v_PlayingCello_g06_c04 58 +HeadMassage/v_HeadMassage_g07_c02 38 +ApplyEyeMakeup/v_ApplyEyeMakeup_g02_c04 0 +Skiing/v_Skiing_g02_c05 80 +CliffDiving/v_CliffDiving_g06_c06 21 +BalanceBeam/v_BalanceBeam_g05_c02 4 +Archery/v_Archery_g01_c04 2 +BalanceBeam/v_BalanceBeam_g06_c05 4 +PlayingDaf/v_PlayingDaf_g07_c02 59 +HighJump/v_HighJump_g01_c05 39 +SumoWrestling/v_SumoWrestling_g06_c04 86 +Haircut/v_Haircut_g03_c01 33 +Shotput/v_Shotput_g06_c05 78 +Archery/v_Archery_g02_c01 2 +Lunges/v_Lunges_g02_c04 51 +WritingOnBoard/v_WritingOnBoard_g01_c03 99 +HammerThrow/v_HammerThrow_g05_c01 35 +Diving/v_Diving_g05_c03 25 +BasketballDunk/v_BasketballDunk_g03_c05 8 +Drumming/v_Drumming_g06_c05 26 +Swing/v_Swing_g04_c04 88 +BabyCrawling/v_BabyCrawling_g06_c03 3 +IceDancing/v_IceDancing_g04_c06 43 +PommelHorse/v_PommelHorse_g02_c01 68 +JumpRope/v_JumpRope_g05_c04 47 +BodyWeightSquats/v_BodyWeightSquats_g04_c01 14 +TableTennisShot/v_TableTennisShot_g01_c05 89 +PlayingDaf/v_PlayingDaf_g04_c04 59 +SkyDiving/v_SkyDiving_g02_c04 82 +Diving/v_Diving_g03_c03 25 +BabyCrawling/v_BabyCrawling_g01_c04 3 +PlayingViolin/v_PlayingViolin_g03_c04 66 +Haircut/v_Haircut_g03_c04 33 +RopeClimbing/v_RopeClimbing_g05_c02 74 +BenchPress/v_BenchPress_g05_c03 9 +PushUps/v_PushUps_g07_c01 71 +CliffDiving/v_CliffDiving_g05_c04 21 +ShavingBeard/v_ShavingBeard_g02_c03 77 +Knitting/v_Knitting_g02_c05 49 +FloorGymnastics/v_FloorGymnastics_g07_c02 29 +HammerThrow/v_HammerThrow_g02_c04 35 +CuttingInKitchen/v_CuttingInKitchen_g07_c02 24 +FrisbeeCatch/v_FrisbeeCatch_g05_c02 30 +WritingOnBoard/v_WritingOnBoard_g03_c03 99 +BabyCrawling/v_BabyCrawling_g05_c04 3 +BenchPress/v_BenchPress_g03_c06 9 
+PlayingFlute/v_PlayingFlute_g02_c03 61 +Drumming/v_Drumming_g06_c01 26 +Bowling/v_Bowling_g05_c01 15 +YoYo/v_YoYo_g02_c01 100 +TennisSwing/v_TennisSwing_g06_c03 91 +HorseRiding/v_HorseRiding_g04_c05 41 +PommelHorse/v_PommelHorse_g07_c05 68 +PlayingFlute/v_PlayingFlute_g06_c02 61 +VolleyballSpiking/v_VolleyballSpiking_g04_c02 96 +BandMarching/v_BandMarching_g05_c03 5 +BenchPress/v_BenchPress_g01_c05 9 +TennisSwing/v_TennisSwing_g06_c05 91 +IceDancing/v_IceDancing_g02_c05 43 +Punch/v_Punch_g04_c05 70 +PushUps/v_PushUps_g02_c02 71 +JumpingJack/v_JumpingJack_g05_c01 46 +PullUps/v_PullUps_g01_c02 69 +GolfSwing/v_GolfSwing_g01_c02 32 +Surfing/v_Surfing_g01_c04 87 +PlayingFlute/v_PlayingFlute_g06_c03 61 +FieldHockeyPenalty/v_FieldHockeyPenalty_g05_c03 28 +PlayingCello/v_PlayingCello_g01_c01 58 +HorseRace/v_HorseRace_g02_c01 40 +BalanceBeam/v_BalanceBeam_g04_c01 4 +BasketballDunk/v_BasketballDunk_g01_c04 8 +HandstandWalking/v_HandstandWalking_g05_c05 37 +PlayingDhol/v_PlayingDhol_g07_c05 60 +BasketballDunk/v_BasketballDunk_g05_c02 8 +CricketBowling/v_CricketBowling_g06_c05 22 +Basketball/v_Basketball_g07_c03 7 +PullUps/v_PullUps_g03_c01 69 +Billiards/v_Billiards_g01_c03 11 +Knitting/v_Knitting_g02_c04 49 +IceDancing/v_IceDancing_g07_c01 43 +PushUps/v_PushUps_g02_c04 71 +Diving/v_Diving_g06_c05 25 +TaiChi/v_TaiChi_g05_c01 90 +MilitaryParade/v_MilitaryParade_g05_c02 52 +Rafting/v_Rafting_g04_c04 72 +BlowDryHair/v_BlowDryHair_g06_c02 12 +Basketball/v_Basketball_g01_c03 7 +Shotput/v_Shotput_g01_c03 78 +TaiChi/v_TaiChi_g06_c04 90 +Archery/v_Archery_g06_c03 2 +Kayaking/v_Kayaking_g01_c04 48 +ThrowDiscus/v_ThrowDiscus_g05_c04 92 +HulaHoop/v_HulaHoop_g01_c07 42 +Drumming/v_Drumming_g04_c07 26 +CuttingInKitchen/v_CuttingInKitchen_g07_c01 24 +Skiing/v_Skiing_g05_c04 80 +WritingOnBoard/v_WritingOnBoard_g01_c07 99 +Skijet/v_Skijet_g01_c04 81 +TableTennisShot/v_TableTennisShot_g02_c02 89 +Fencing/v_Fencing_g06_c03 27 +JumpingJack/v_JumpingJack_g01_c03 46 +SkyDiving/v_SkyDiving_g07_c02 
82 +FieldHockeyPenalty/v_FieldHockeyPenalty_g03_c02 28 +Surfing/v_Surfing_g02_c06 87 +Nunchucks/v_Nunchucks_g07_c01 55 +BalanceBeam/v_BalanceBeam_g05_c04 4 +Nunchucks/v_Nunchucks_g04_c02 55 +BalanceBeam/v_BalanceBeam_g06_c06 4 +Biking/v_Biking_g05_c06 10 +PommelHorse/v_PommelHorse_g06_c02 68 +Surfing/v_Surfing_g03_c01 87 +FloorGymnastics/v_FloorGymnastics_g07_c03 29 +ShavingBeard/v_ShavingBeard_g07_c02 77 +PlayingPiano/v_PlayingPiano_g07_c02 63 +Mixing/v_Mixing_g06_c03 53 +BoxingPunchingBag/v_BoxingPunchingBag_g01_c04 16 +FrisbeeCatch/v_FrisbeeCatch_g05_c04 30 +TennisSwing/v_TennisSwing_g01_c05 91 +PlayingCello/v_PlayingCello_g05_c03 58 +Lunges/v_Lunges_g06_c04 51 +Shotput/v_Shotput_g06_c06 78 +Bowling/v_Bowling_g01_c05 15 +Typing/v_Typing_g04_c02 94 +WallPushups/v_WallPushups_g06_c05 98 +BabyCrawling/v_BabyCrawling_g02_c05 3 +HorseRace/v_HorseRace_g07_c05 40 +Rowing/v_Rowing_g04_c06 75 +FrisbeeCatch/v_FrisbeeCatch_g06_c01 30 +LongJump/v_LongJump_g05_c05 50 +ParallelBars/v_ParallelBars_g06_c06 56 +PushUps/v_PushUps_g03_c01 71 +ThrowDiscus/v_ThrowDiscus_g05_c01 92 +WritingOnBoard/v_WritingOnBoard_g04_c02 99 +PushUps/v_PushUps_g01_c05 71 +CliffDiving/v_CliffDiving_g04_c04 21 +RopeClimbing/v_RopeClimbing_g04_c04 74 +Rowing/v_Rowing_g05_c01 75 +BaseballPitch/v_BaseballPitch_g06_c01 6 +BoxingSpeedBag/v_BoxingSpeedBag_g03_c01 17 +SoccerPenalty/v_SoccerPenalty_g02_c03 84 +SkyDiving/v_SkyDiving_g04_c02 82 +WallPushups/v_WallPushups_g05_c01 98 +JumpRope/v_JumpRope_g03_c01 47 +BoxingPunchingBag/v_BoxingPunchingBag_g03_c03 16 +ApplyEyeMakeup/v_ApplyEyeMakeup_g01_c05 0 +TaiChi/v_TaiChi_g04_c04 90 +ApplyLipstick/v_ApplyLipstick_g01_c05 1 +SalsaSpin/v_SalsaSpin_g03_c03 76 +ShavingBeard/v_ShavingBeard_g05_c07 77 +Swing/v_Swing_g01_c05 88 +Typing/v_Typing_g06_c04 94 +LongJump/v_LongJump_g04_c02 50 +Rowing/v_Rowing_g06_c03 75 +JugglingBalls/v_JugglingBalls_g01_c04 45 +JugglingBalls/v_JugglingBalls_g01_c03 45 +StillRings/v_StillRings_g06_c01 85 +PlayingSitar/v_PlayingSitar_g03_c07 
64 +Kayaking/v_Kayaking_g02_c01 48 +Swing/v_Swing_g04_c05 88 +RopeClimbing/v_RopeClimbing_g01_c03 74 +HandstandWalking/v_HandstandWalking_g02_c02 37 +Hammering/v_Hammering_g06_c05 34 +HorseRiding/v_HorseRiding_g05_c05 41 +HandstandPushups/v_HandStandPushups_g01_c03 36 +JugglingBalls/v_JugglingBalls_g03_c06 45 +BoxingPunchingBag/v_BoxingPunchingBag_g04_c07 16 +TableTennisShot/v_TableTennisShot_g03_c02 89 +Haircut/v_Haircut_g03_c03 33 +PlayingViolin/v_PlayingViolin_g05_c04 66 +TaiChi/v_TaiChi_g05_c02 90 +YoYo/v_YoYo_g03_c03 100 +SalsaSpin/v_SalsaSpin_g01_c05 76 +HammerThrow/v_HammerThrow_g06_c01 35 +JumpRope/v_JumpRope_g03_c02 47 +Bowling/v_Bowling_g06_c07 15 +CricketBowling/v_CricketBowling_g02_c02 22 +BaseballPitch/v_BaseballPitch_g07_c06 6 +WalkingWithDog/v_WalkingWithDog_g02_c04 97 +HandstandWalking/v_HandstandWalking_g06_c03 37 +HulaHoop/v_HulaHoop_g01_c05 42 +PlayingDhol/v_PlayingDhol_g04_c04 60 +PommelHorse/v_PommelHorse_g05_c01 68 +Lunges/v_Lunges_g07_c06 51 +Hammering/v_Hammering_g01_c02 34 +BreastStroke/v_BreastStroke_g07_c04 18 +BabyCrawling/v_BabyCrawling_g04_c02 3 +Kayaking/v_Kayaking_g01_c05 48 +PoleVault/v_PoleVault_g06_c04 67 +PlayingViolin/v_PlayingViolin_g06_c03 66 +CricketShot/v_CricketShot_g07_c04 23 +Diving/v_Diving_g02_c04 25 +ThrowDiscus/v_ThrowDiscus_g04_c04 92 +PoleVault/v_PoleVault_g01_c03 67 +JavelinThrow/v_JavelinThrow_g06_c04 44 +RockClimbingIndoor/v_RockClimbingIndoor_g04_c03 73 +WallPushups/v_WallPushups_g02_c01 98 +CuttingInKitchen/v_CuttingInKitchen_g01_c05 24 +Shotput/v_Shotput_g06_c07 78 +Surfing/v_Surfing_g04_c02 87 +TrampolineJumping/v_TrampolineJumping_g01_c03 93 +Haircut/v_Haircut_g07_c03 33 +HandstandPushups/v_HandStandPushups_g01_c02 36 +TaiChi/v_TaiChi_g07_c04 90 +RopeClimbing/v_RopeClimbing_g04_c01 74 +BalanceBeam/v_BalanceBeam_g01_c03 4 +Knitting/v_Knitting_g07_c04 49 +HammerThrow/v_HammerThrow_g01_c04 35 +UnevenBars/v_UnevenBars_g04_c01 95 +HorseRiding/v_HorseRiding_g06_c02 41 +Archery/v_Archery_g07_c06 2 
+MoppingFloor/v_MoppingFloor_g04_c03 54 +RockClimbingIndoor/v_RockClimbingIndoor_g05_c03 73 +Rowing/v_Rowing_g04_c01 75 +SumoWrestling/v_SumoWrestling_g03_c02 86 +Shotput/v_Shotput_g01_c05 78 +Basketball/v_Basketball_g05_c01 7 +SoccerPenalty/v_SoccerPenalty_g01_c01 84 +Haircut/v_Haircut_g01_c03 33 +LongJump/v_LongJump_g04_c01 50 +WritingOnBoard/v_WritingOnBoard_g02_c05 99 +TableTennisShot/v_TableTennisShot_g05_c05 89 +HighJump/v_HighJump_g06_c03 39 +PlayingDaf/v_PlayingDaf_g01_c03 59 +MoppingFloor/v_MoppingFloor_g07_c01 54 +BaseballPitch/v_BaseballPitch_g01_c02 6 +HandstandPushups/v_HandStandPushups_g03_c02 36 +Basketball/v_Basketball_g03_c01 7 +BlowDryHair/v_BlowDryHair_g01_c02 12 +PlayingDaf/v_PlayingDaf_g07_c04 59 +HandstandPushups/v_HandStandPushups_g06_c04 36 +Fencing/v_Fencing_g01_c06 27 +BaseballPitch/v_BaseballPitch_g07_c04 6 +UnevenBars/v_UnevenBars_g02_c04 95 +PizzaTossing/v_PizzaTossing_g06_c03 57 +HulaHoop/v_HulaHoop_g07_c05 42 +TableTennisShot/v_TableTennisShot_g04_c07 89 +CleanAndJerk/v_CleanAndJerk_g03_c03 20 +WritingOnBoard/v_WritingOnBoard_g04_c04 99 +PizzaTossing/v_PizzaTossing_g03_c04 57 +ParallelBars/v_ParallelBars_g02_c01 56 +IceDancing/v_IceDancing_g07_c03 43 +Punch/v_Punch_g06_c07 70 +Basketball/v_Basketball_g04_c02 7 +BandMarching/v_BandMarching_g07_c05 5 +FrisbeeCatch/v_FrisbeeCatch_g06_c04 30 +FrisbeeCatch/v_FrisbeeCatch_g02_c01 30 +JavelinThrow/v_JavelinThrow_g07_c02 44 +HandstandWalking/v_HandstandWalking_g06_c04 37 +SoccerJuggling/v_SoccerJuggling_g06_c01 83 +PushUps/v_PushUps_g04_c05 71 +PlayingViolin/v_PlayingViolin_g01_c01 66 +PlayingDhol/v_PlayingDhol_g07_c02 60 +PlayingSitar/v_PlayingSitar_g07_c01 64 +RockClimbingIndoor/v_RockClimbingIndoor_g04_c02 73 +BabyCrawling/v_BabyCrawling_g06_c06 3 +BoxingPunchingBag/v_BoxingPunchingBag_g04_c04 16 +PlayingCello/v_PlayingCello_g07_c04 58 +BoxingSpeedBag/v_BoxingSpeedBag_g06_c05 17 +Nunchucks/v_Nunchucks_g03_c05 55 +HandstandWalking/v_HandstandWalking_g01_c03 37 
+Basketball/v_Basketball_g05_c03 7 +TaiChi/v_TaiChi_g04_c03 90 +HorseRiding/v_HorseRiding_g06_c04 41 +VolleyballSpiking/v_VolleyballSpiking_g03_c03 96 +Rafting/v_Rafting_g01_c02 72 +BodyWeightSquats/v_BodyWeightSquats_g02_c04 14 +Mixing/v_Mixing_g02_c01 53 +SkateBoarding/v_SkateBoarding_g06_c01 79 +BodyWeightSquats/v_BodyWeightSquats_g06_c05 14 +PlayingFlute/v_PlayingFlute_g04_c07 61 +YoYo/v_YoYo_g01_c05 100 +PlayingFlute/v_PlayingFlute_g03_c06 61 +CricketBowling/v_CricketBowling_g05_c02 22 +Swing/v_Swing_g07_c06 88 +WalkingWithDog/v_WalkingWithDog_g04_c04 97 +Knitting/v_Knitting_g07_c05 49 +Hammering/v_Hammering_g05_c02 34 +ThrowDiscus/v_ThrowDiscus_g03_c03 92 +CricketBowling/v_CricketBowling_g04_c02 22 +RopeClimbing/v_RopeClimbing_g03_c03 74 +BoxingPunchingBag/v_BoxingPunchingBag_g01_c07 16 +PlayingDaf/v_PlayingDaf_g06_c03 59 +Biking/v_Biking_g05_c04 10 +JavelinThrow/v_JavelinThrow_g01_c04 44 +TrampolineJumping/v_TrampolineJumping_g03_c04 93 +HeadMassage/v_HeadMassage_g02_c02 38 +HorseRiding/v_HorseRiding_g06_c01 41 +CricketShot/v_CricketShot_g06_c04 23 +Surfing/v_Surfing_g07_c04 87 +BalanceBeam/v_BalanceBeam_g03_c02 4 +PlayingDaf/v_PlayingDaf_g04_c02 59 +ApplyEyeMakeup/v_ApplyEyeMakeup_g05_c05 0 +Archery/v_Archery_g04_c03 2 +LongJump/v_LongJump_g01_c01 50 +Bowling/v_Bowling_g06_c04 15 +BlowDryHair/v_BlowDryHair_g02_c01 12 +Knitting/v_Knitting_g07_c03 49 +JugglingBalls/v_JugglingBalls_g06_c04 45 +Shotput/v_Shotput_g02_c04 78 +FieldHockeyPenalty/v_FieldHockeyPenalty_g07_c04 28 +BrushingTeeth/v_BrushingTeeth_g04_c01 19 +FrontCrawl/v_FrontCrawl_g06_c05 31 +WritingOnBoard/v_WritingOnBoard_g03_c06 99 +CleanAndJerk/v_CleanAndJerk_g07_c02 20 +CleanAndJerk/v_CleanAndJerk_g06_c03 20 +BaseballPitch/v_BaseballPitch_g06_c04 6 +Haircut/v_Haircut_g05_c01 33 +PushUps/v_PushUps_g07_c04 71 +TaiChi/v_TaiChi_g04_c01 90 +Hammering/v_Hammering_g02_c03 34 +Drumming/v_Drumming_g07_c06 26 +TennisSwing/v_TennisSwing_g07_c01 91 +FieldHockeyPenalty/v_FieldHockeyPenalty_g07_c02 28 
+PlayingViolin/v_PlayingViolin_g06_c01 66 +FrontCrawl/v_FrontCrawl_g04_c04 31 +Knitting/v_Knitting_g03_c01 49 +HorseRace/v_HorseRace_g02_c04 40 +Nunchucks/v_Nunchucks_g02_c05 55 +Biking/v_Biking_g04_c03 10 +Skijet/v_Skijet_g02_c04 81 +PullUps/v_PullUps_g02_c03 69 +IceDancing/v_IceDancing_g03_c01 43 +BodyWeightSquats/v_BodyWeightSquats_g07_c01 14 +Knitting/v_Knitting_g02_c02 49 +TableTennisShot/v_TableTennisShot_g02_c01 89 +JugglingBalls/v_JugglingBalls_g04_c05 45 +ShavingBeard/v_ShavingBeard_g02_c01 77 +HorseRiding/v_HorseRiding_g03_c05 41 +PlayingFlute/v_PlayingFlute_g04_c01 61 +Shotput/v_Shotput_g05_c01 78 +BoxingPunchingBag/v_BoxingPunchingBag_g01_c02 16 +Hammering/v_Hammering_g02_c02 34 +Mixing/v_Mixing_g01_c06 53 +ParallelBars/v_ParallelBars_g04_c03 56 +BabyCrawling/v_BabyCrawling_g07_c04 3 +PommelHorse/v_PommelHorse_g01_c05 68 +BandMarching/v_BandMarching_g03_c02 5 +Bowling/v_Bowling_g03_c05 15 +CricketBowling/v_CricketBowling_g02_c01 22 +PlayingSitar/v_PlayingSitar_g02_c05 64 +ApplyEyeMakeup/v_ApplyEyeMakeup_g01_c03 0 +CricketShot/v_CricketShot_g02_c07 23 +Nunchucks/v_Nunchucks_g06_c01 55 +Skiing/v_Skiing_g06_c06 80 +SkateBoarding/v_SkateBoarding_g02_c05 79 +Punch/v_Punch_g06_c02 70 +JumpingJack/v_JumpingJack_g06_c04 46 +GolfSwing/v_GolfSwing_g02_c01 32 +StillRings/v_StillRings_g01_c05 85 +Billiards/v_Billiards_g05_c01 11 +PlayingDaf/v_PlayingDaf_g03_c04 59 +CricketShot/v_CricketShot_g03_c04 23 +Knitting/v_Knitting_g05_c05 49 +BenchPress/v_BenchPress_g03_c01 9 +HandstandWalking/v_HandstandWalking_g05_c06 37 +PlayingPiano/v_PlayingPiano_g03_c04 63 +ThrowDiscus/v_ThrowDiscus_g01_c01 92 +Kayaking/v_Kayaking_g04_c07 48 +TennisSwing/v_TennisSwing_g01_c04 91 +StillRings/v_StillRings_g06_c04 85 +ApplyLipstick/v_ApplyLipstick_g06_c01 1 +Swing/v_Swing_g04_c03 88 +FrisbeeCatch/v_FrisbeeCatch_g02_c02 30 +PlayingDhol/v_PlayingDhol_g07_c04 60 +PlayingFlute/v_PlayingFlute_g05_c02 61 +SoccerPenalty/v_SoccerPenalty_g03_c05 84 +SkyDiving/v_SkyDiving_g05_c03 82 
+TennisSwing/v_TennisSwing_g01_c07 91 +BlowingCandles/v_BlowingCandles_g02_c03 13 +ThrowDiscus/v_ThrowDiscus_g02_c02 92 +SoccerPenalty/v_SoccerPenalty_g05_c03 84 +CuttingInKitchen/v_CuttingInKitchen_g06_c05 24 +Rafting/v_Rafting_g06_c01 72 +ShavingBeard/v_ShavingBeard_g04_c03 77 +HammerThrow/v_HammerThrow_g04_c01 35 +BoxingPunchingBag/v_BoxingPunchingBag_g06_c03 16 +BodyWeightSquats/v_BodyWeightSquats_g03_c03 14 +HandstandPushups/v_HandStandPushups_g04_c01 36 +GolfSwing/v_GolfSwing_g07_c03 32 +Punch/v_Punch_g06_c04 70 +BandMarching/v_BandMarching_g02_c03 5 +HighJump/v_HighJump_g06_c04 39 +ApplyLipstick/v_ApplyLipstick_g06_c03 1 +RopeClimbing/v_RopeClimbing_g03_c04 74 +BrushingTeeth/v_BrushingTeeth_g06_c05 19 +JavelinThrow/v_JavelinThrow_g05_c03 44 +JavelinThrow/v_JavelinThrow_g02_c02 44 +BlowDryHair/v_BlowDryHair_g02_c05 12 +Lunges/v_Lunges_g07_c05 51 +PlayingDaf/v_PlayingDaf_g03_c03 59 +PullUps/v_PullUps_g05_c01 69 +WritingOnBoard/v_WritingOnBoard_g02_c06 99 +SoccerPenalty/v_SoccerPenalty_g07_c03 84 +RockClimbingIndoor/v_RockClimbingIndoor_g06_c03 73 +PlayingDhol/v_PlayingDhol_g04_c07 60 +PizzaTossing/v_PizzaTossing_g07_c02 57 +ThrowDiscus/v_ThrowDiscus_g02_c05 92 +HighJump/v_HighJump_g07_c03 39 +Drumming/v_Drumming_g01_c02 26 +BodyWeightSquats/v_BodyWeightSquats_g01_c01 14 +LongJump/v_LongJump_g03_c05 50 +PlayingGuitar/v_PlayingGuitar_g03_c07 62 +Drumming/v_Drumming_g05_c06 26 +TableTennisShot/v_TableTennisShot_g01_c03 89 +BalanceBeam/v_BalanceBeam_g06_c04 4 +PommelHorse/v_PommelHorse_g01_c07 68 +Mixing/v_Mixing_g03_c07 53 +FieldHockeyPenalty/v_FieldHockeyPenalty_g05_c05 28 +WallPushups/v_WallPushups_g07_c06 98 +IceDancing/v_IceDancing_g07_c07 43 +MilitaryParade/v_MilitaryParade_g01_c06 52 +RopeClimbing/v_RopeClimbing_g02_c02 74 +Billiards/v_Billiards_g01_c04 11 +FrisbeeCatch/v_FrisbeeCatch_g07_c05 30 +HorseRiding/v_HorseRiding_g06_c05 41 +BenchPress/v_BenchPress_g03_c03 9 +HammerThrow/v_HammerThrow_g07_c02 35 +SoccerPenalty/v_SoccerPenalty_g05_c04 84 
+PlayingViolin/v_PlayingViolin_g05_c01 66 +FieldHockeyPenalty/v_FieldHockeyPenalty_g05_c02 28 +YoYo/v_YoYo_g07_c03 100 +GolfSwing/v_GolfSwing_g01_c01 32 +Skiing/v_Skiing_g02_c02 80 +Swing/v_Swing_g04_c01 88 +Diving/v_Diving_g03_c07 25 +PlayingSitar/v_PlayingSitar_g02_c02 64 +Archery/v_Archery_g04_c02 2 +HorseRace/v_HorseRace_g07_c02 40 +Biking/v_Biking_g02_c01 10 +SalsaSpin/v_SalsaSpin_g01_c04 76 +Typing/v_Typing_g01_c06 94 +HorseRiding/v_HorseRiding_g04_c02 41 +BandMarching/v_BandMarching_g03_c06 5 +HeadMassage/v_HeadMassage_g06_c05 38 +PoleVault/v_PoleVault_g07_c04 67 +SkateBoarding/v_SkateBoarding_g06_c04 79 +PlayingPiano/v_PlayingPiano_g02_c03 63 +PlayingSitar/v_PlayingSitar_g07_c04 64 +CliffDiving/v_CliffDiving_g06_c03 21 +PlayingFlute/v_PlayingFlute_g05_c07 61 +WritingOnBoard/v_WritingOnBoard_g06_c05 99 +Kayaking/v_Kayaking_g07_c03 48 +BlowingCandles/v_BlowingCandles_g02_c01 13 +BlowingCandles/v_BlowingCandles_g02_c04 13 +PlayingSitar/v_PlayingSitar_g07_c05 64 +PlayingDaf/v_PlayingDaf_g06_c02 59 +WallPushups/v_WallPushups_g07_c01 98 +Bowling/v_Bowling_g05_c05 15 +PlayingTabla/v_PlayingTabla_g03_c05 65 +JavelinThrow/v_JavelinThrow_g05_c04 44 +JumpRope/v_JumpRope_g04_c01 47 +WallPushups/v_WallPushups_g05_c05 98 +PizzaTossing/v_PizzaTossing_g02_c01 57 +SkyDiving/v_SkyDiving_g04_c01 82 +Mixing/v_Mixing_g04_c02 53 +RopeClimbing/v_RopeClimbing_g01_c02 74 +PlayingGuitar/v_PlayingGuitar_g04_c06 62 +BreastStroke/v_BreastStroke_g03_c01 18 +SoccerJuggling/v_SoccerJuggling_g07_c02 83 +PoleVault/v_PoleVault_g05_c05 67 +HighJump/v_HighJump_g05_c05 39 +UnevenBars/v_UnevenBars_g07_c01 95 +HorseRace/v_HorseRace_g04_c01 40 +ThrowDiscus/v_ThrowDiscus_g04_c02 92 +PlayingSitar/v_PlayingSitar_g03_c01 64 +Shotput/v_Shotput_g02_c02 78 +Bowling/v_Bowling_g05_c06 15 +BoxingPunchingBag/v_BoxingPunchingBag_g07_c04 16 +Skiing/v_Skiing_g01_c03 80 +HorseRiding/v_HorseRiding_g01_c06 41 +Rowing/v_Rowing_g07_c05 75 +TrampolineJumping/v_TrampolineJumping_g07_c03 93 
+BoxingPunchingBag/v_BoxingPunchingBag_g05_c06 16 +SkyDiving/v_SkyDiving_g05_c05 82 +Billiards/v_Billiards_g06_c01 11 +TableTennisShot/v_TableTennisShot_g02_c04 89 +JugglingBalls/v_JugglingBalls_g05_c04 45 +YoYo/v_YoYo_g05_c04 100 +FrisbeeCatch/v_FrisbeeCatch_g03_c03 30 +Billiards/v_Billiards_g06_c02 11 +ShavingBeard/v_ShavingBeard_g07_c03 77 +SkateBoarding/v_SkateBoarding_g02_c02 79 +MoppingFloor/v_MoppingFloor_g03_c04 54 +YoYo/v_YoYo_g06_c02 100 +TennisSwing/v_TennisSwing_g07_c03 91 +Punch/v_Punch_g03_c01 70 +Skijet/v_Skijet_g06_c04 81 +JugglingBalls/v_JugglingBalls_g05_c03 45 +MoppingFloor/v_MoppingFloor_g06_c04 54 +JumpRope/v_JumpRope_g06_c03 47 +WritingOnBoard/v_WritingOnBoard_g06_c02 99 +Biking/v_Biking_g07_c02 10 +Nunchucks/v_Nunchucks_g07_c02 55 +HorseRace/v_HorseRace_g07_c04 40 +Nunchucks/v_Nunchucks_g04_c05 55 +WallPushups/v_WallPushups_g03_c02 98 +JumpingJack/v_JumpingJack_g06_c03 46 +Basketball/v_Basketball_g01_c01 7 +HulaHoop/v_HulaHoop_g01_c01 42 +BasketballDunk/v_BasketballDunk_g03_c03 8 +IceDancing/v_IceDancing_g03_c03 43 +SoccerJuggling/v_SoccerJuggling_g07_c03 83 +WritingOnBoard/v_WritingOnBoard_g06_c03 99 +PlayingCello/v_PlayingCello_g02_c01 58 +WallPushups/v_WallPushups_g07_c02 98 +FieldHockeyPenalty/v_FieldHockeyPenalty_g01_c01 28 +PlayingDhol/v_PlayingDhol_g03_c04 60 +FloorGymnastics/v_FloorGymnastics_g07_c05 29 +PlayingDaf/v_PlayingDaf_g04_c05 59 +PullUps/v_PullUps_g04_c03 69 +BandMarching/v_BandMarching_g07_c06 5 +BlowingCandles/v_BlowingCandles_g06_c07 13 +SoccerPenalty/v_SoccerPenalty_g06_c06 84 +YoYo/v_YoYo_g03_c04 100 +FrisbeeCatch/v_FrisbeeCatch_g07_c02 30 +PlayingGuitar/v_PlayingGuitar_g02_c01 62 +GolfSwing/v_GolfSwing_g01_c06 32 +HorseRace/v_HorseRace_g03_c02 40 +Skijet/v_Skijet_g07_c01 81 +Biking/v_Biking_g03_c03 10 +PlayingFlute/v_PlayingFlute_g01_c02 61 +PlayingPiano/v_PlayingPiano_g07_c01 63 +Typing/v_Typing_g05_c04 94 +BlowingCandles/v_BlowingCandles_g01_c03 13 +HorseRace/v_HorseRace_g06_c01 40 +FrontCrawl/v_FrontCrawl_g04_c02 31 
+CliffDiving/v_CliffDiving_g06_c04 21 +FieldHockeyPenalty/v_FieldHockeyPenalty_g06_c06 28 +PlayingGuitar/v_PlayingGuitar_g05_c04 62 +HeadMassage/v_HeadMassage_g03_c07 38 +Shotput/v_Shotput_g06_c03 78 +BlowingCandles/v_BlowingCandles_g04_c05 13 +Hammering/v_Hammering_g01_c04 34 +Rafting/v_Rafting_g02_c03 72 +GolfSwing/v_GolfSwing_g06_c02 32 +HeadMassage/v_HeadMassage_g01_c04 38 +HighJump/v_HighJump_g02_c02 39 +FrisbeeCatch/v_FrisbeeCatch_g04_c01 30 +JumpRope/v_JumpRope_g04_c04 47 +FrisbeeCatch/v_FrisbeeCatch_g02_c03 30 +Shotput/v_Shotput_g03_c02 78 +Punch/v_Punch_g06_c03 70 +TaiChi/v_TaiChi_g05_c04 90 +RockClimbingIndoor/v_RockClimbingIndoor_g03_c03 73 +FloorGymnastics/v_FloorGymnastics_g06_c06 29 +ApplyEyeMakeup/v_ApplyEyeMakeup_g03_c04 0 +HorseRiding/v_HorseRiding_g02_c06 41 +Fencing/v_Fencing_g01_c03 27 +StillRings/v_StillRings_g02_c03 85 +SoccerPenalty/v_SoccerPenalty_g06_c07 84 +PlayingDaf/v_PlayingDaf_g07_c01 59 +CliffDiving/v_CliffDiving_g01_c06 21 +CricketBowling/v_CricketBowling_g05_c04 22 +SkateBoarding/v_SkateBoarding_g01_c01 79 +JavelinThrow/v_JavelinThrow_g03_c01 44 +Swing/v_Swing_g07_c04 88 +Shotput/v_Shotput_g03_c01 78 +Billiards/v_Billiards_g01_c01 11 +Mixing/v_Mixing_g07_c04 53 +Mixing/v_Mixing_g05_c02 53 +PlayingDhol/v_PlayingDhol_g06_c02 60 +LongJump/v_LongJump_g04_c05 50 +CliffDiving/v_CliffDiving_g07_c03 21 +FloorGymnastics/v_FloorGymnastics_g07_c04 29 +MoppingFloor/v_MoppingFloor_g01_c04 54 +BoxingPunchingBag/v_BoxingPunchingBag_g07_c02 16 +Fencing/v_Fencing_g01_c01 27 +ApplyEyeMakeup/v_ApplyEyeMakeup_g06_c04 0 +FieldHockeyPenalty/v_FieldHockeyPenalty_g05_c04 28 +ThrowDiscus/v_ThrowDiscus_g07_c02 92 +BoxingPunchingBag/v_BoxingPunchingBag_g05_c04 16 +BenchPress/v_BenchPress_g06_c05 9 +ShavingBeard/v_ShavingBeard_g07_c07 77 +PlayingTabla/v_PlayingTabla_g07_c02 65 +CricketBowling/v_CricketBowling_g01_c07 22 +CuttingInKitchen/v_CuttingInKitchen_g01_c02 24 +SkyDiving/v_SkyDiving_g05_c02 82 +PlayingViolin/v_PlayingViolin_g01_c02 66 
+Lunges/v_Lunges_g07_c02 51 +Shotput/v_Shotput_g07_c06 78 +MoppingFloor/v_MoppingFloor_g04_c04 54 +Diving/v_Diving_g02_c06 25 +Lunges/v_Lunges_g03_c01 51 +RockClimbingIndoor/v_RockClimbingIndoor_g07_c04 73 +PushUps/v_PushUps_g01_c04 71 +BasketballDunk/v_BasketballDunk_g03_c02 8 +PlayingDaf/v_PlayingDaf_g02_c06 59 +HorseRace/v_HorseRace_g06_c05 40 +HighJump/v_HighJump_g07_c01 39 +StillRings/v_StillRings_g03_c04 85 +PizzaTossing/v_PizzaTossing_g01_c03 57 +JumpRope/v_JumpRope_g02_c05 47 +Shotput/v_Shotput_g07_c05 78 +JugglingBalls/v_JugglingBalls_g05_c05 45 +FloorGymnastics/v_FloorGymnastics_g05_c02 29 +Rafting/v_Rafting_g04_c02 72 +PoleVault/v_PoleVault_g06_c03 67 +BandMarching/v_BandMarching_g02_c02 5 +BodyWeightSquats/v_BodyWeightSquats_g07_c03 14 +PommelHorse/v_PommelHorse_g07_c01 68 +PlayingSitar/v_PlayingSitar_g07_c03 64 +Biking/v_Biking_g02_c02 10 +BabyCrawling/v_BabyCrawling_g04_c03 3 +PommelHorse/v_PommelHorse_g01_c03 68 +BoxingSpeedBag/v_BoxingSpeedBag_g05_c05 17 +GolfSwing/v_GolfSwing_g07_c05 32 +PoleVault/v_PoleVault_g03_c07 67 +PizzaTossing/v_PizzaTossing_g04_c01 57 +FrisbeeCatch/v_FrisbeeCatch_g05_c05 30 +HighJump/v_HighJump_g01_c01 39 +YoYo/v_YoYo_g03_c06 100 +CleanAndJerk/v_CleanAndJerk_g06_c01 20 +MoppingFloor/v_MoppingFloor_g02_c05 54 +TableTennisShot/v_TableTennisShot_g04_c05 89 +PlayingFlute/v_PlayingFlute_g07_c04 61 +TennisSwing/v_TennisSwing_g07_c06 91 +FieldHockeyPenalty/v_FieldHockeyPenalty_g02_c06 28 +PlayingSitar/v_PlayingSitar_g04_c07 64 +LongJump/v_LongJump_g02_c05 50 +BenchPress/v_BenchPress_g02_c06 9 +Fencing/v_Fencing_g06_c04 27 +PlayingFlute/v_PlayingFlute_g07_c07 61 +HulaHoop/v_HulaHoop_g07_c02 42 +Nunchucks/v_Nunchucks_g03_c03 55 +CuttingInKitchen/v_CuttingInKitchen_g05_c01 24 +HorseRiding/v_HorseRiding_g03_c06 41 +BlowingCandles/v_BlowingCandles_g06_c03 13 +PlayingDaf/v_PlayingDaf_g07_c05 59 +Kayaking/v_Kayaking_g02_c04 48 +BodyWeightSquats/v_BodyWeightSquats_g05_c01 14 +YoYo/v_YoYo_g03_c01 100 +PlayingViolin/v_PlayingViolin_g05_c02 
66 +BandMarching/v_BandMarching_g02_c07 5 +PizzaTossing/v_PizzaTossing_g05_c03 57 +BreastStroke/v_BreastStroke_g03_c02 18 +CliffDiving/v_CliffDiving_g07_c06 21 +BabyCrawling/v_BabyCrawling_g06_c01 3 +TaiChi/v_TaiChi_g02_c01 90 +JugglingBalls/v_JugglingBalls_g03_c05 45 +YoYo/v_YoYo_g07_c01 100 +PlayingCello/v_PlayingCello_g04_c01 58 +ParallelBars/v_ParallelBars_g07_c01 56 +Typing/v_Typing_g02_c04 94 +VolleyballSpiking/v_VolleyballSpiking_g07_c01 96 +BabyCrawling/v_BabyCrawling_g06_c02 3 +Nunchucks/v_Nunchucks_g03_c04 55 +PlayingGuitar/v_PlayingGuitar_g01_c03 62 +Diving/v_Diving_g05_c04 25 +ParallelBars/v_ParallelBars_g01_c04 56 +RockClimbingIndoor/v_RockClimbingIndoor_g01_c04 73 +Biking/v_Biking_g01_c01 10 +FloorGymnastics/v_FloorGymnastics_g06_c05 29 +Biking/v_Biking_g03_c02 10 +PoleVault/v_PoleVault_g05_c02 67 +IceDancing/v_IceDancing_g06_c06 43 +Surfing/v_Surfing_g01_c07 87 +HorseRace/v_HorseRace_g04_c02 40 +TableTennisShot/v_TableTennisShot_g01_c06 89 +CliffDiving/v_CliffDiving_g01_c02 21 +UnevenBars/v_UnevenBars_g06_c02 95 +SalsaSpin/v_SalsaSpin_g01_c06 76 +WalkingWithDog/v_WalkingWithDog_g07_c04 97 +MoppingFloor/v_MoppingFloor_g05_c02 54 +PommelHorse/v_PommelHorse_g04_c04 68 +BalanceBeam/v_BalanceBeam_g06_c07 4 +PlayingDaf/v_PlayingDaf_g04_c07 59 +BlowDryHair/v_BlowDryHair_g03_c05 12 +PlayingFlute/v_PlayingFlute_g01_c06 61 +Knitting/v_Knitting_g04_c04 49 +ApplyLipstick/v_ApplyLipstick_g06_c02 1 +SkateBoarding/v_SkateBoarding_g02_c01 79 +HeadMassage/v_HeadMassage_g02_c07 38 +RockClimbingIndoor/v_RockClimbingIndoor_g07_c07 73 +TableTennisShot/v_TableTennisShot_g04_c06 89 +Rowing/v_Rowing_g01_c03 75 +Bowling/v_Bowling_g02_c02 15 +UnevenBars/v_UnevenBars_g06_c04 95 +Punch/v_Punch_g04_c03 70 +Rafting/v_Rafting_g05_c03 72 +PlayingFlute/v_PlayingFlute_g01_c04 61 +Billiards/v_Billiards_g02_c07 11 +Punch/v_Punch_g01_c03 70 +JugglingBalls/v_JugglingBalls_g04_c01 45 +JumpingJack/v_JumpingJack_g02_c02 46 +TrampolineJumping/v_TrampolineJumping_g02_c03 93 
+PlayingTabla/v_PlayingTabla_g02_c01 65 +Skiing/v_Skiing_g01_c02 80 +GolfSwing/v_GolfSwing_g03_c04 32 +LongJump/v_LongJump_g01_c04 50 +HorseRiding/v_HorseRiding_g03_c02 41 +SoccerPenalty/v_SoccerPenalty_g07_c05 84 +ApplyLipstick/v_ApplyLipstick_g01_c03 1 +WritingOnBoard/v_WritingOnBoard_g03_c07 99 +Skijet/v_Skijet_g04_c04 81 +BlowingCandles/v_BlowingCandles_g03_c03 13 +PlayingPiano/v_PlayingPiano_g03_c03 63 +PlayingDaf/v_PlayingDaf_g03_c01 59 +RockClimbingIndoor/v_RockClimbingIndoor_g05_c06 73 +HorseRiding/v_HorseRiding_g05_c03 41 +CricketShot/v_CricketShot_g01_c06 23 +WritingOnBoard/v_WritingOnBoard_g02_c02 99 +BlowDryHair/v_BlowDryHair_g07_c01 12 +HulaHoop/v_HulaHoop_g06_c03 42 +BodyWeightSquats/v_BodyWeightSquats_g07_c02 14 +HandstandPushups/v_HandStandPushups_g02_c03 36 +HorseRace/v_HorseRace_g07_c03 40 +PlayingDhol/v_PlayingDhol_g03_c03 60 +Fencing/v_Fencing_g04_c01 27 +CricketBowling/v_CricketBowling_g06_c02 22 +PlayingDhol/v_PlayingDhol_g01_c03 60 +BasketballDunk/v_BasketballDunk_g05_c01 8 +SalsaSpin/v_SalsaSpin_g05_c04 76 +HorseRace/v_HorseRace_g01_c03 40 +ShavingBeard/v_ShavingBeard_g07_c01 77 +BoxingSpeedBag/v_BoxingSpeedBag_g06_c04 17 +VolleyballSpiking/v_VolleyballSpiking_g05_c04 96 +PlayingCello/v_PlayingCello_g02_c04 58 +PlayingDhol/v_PlayingDhol_g03_c02 60 +IceDancing/v_IceDancing_g05_c01 43 +BodyWeightSquats/v_BodyWeightSquats_g07_c04 14 +HeadMassage/v_HeadMassage_g01_c01 38 +HandstandWalking/v_HandstandWalking_g03_c01 37 +FloorGymnastics/v_FloorGymnastics_g01_c01 29 +Typing/v_Typing_g02_c02 94 +JavelinThrow/v_JavelinThrow_g06_c02 44 +PlayingGuitar/v_PlayingGuitar_g06_c02 62 +BlowingCandles/v_BlowingCandles_g07_c03 13 +JugglingBalls/v_JugglingBalls_g04_c04 45 +CliffDiving/v_CliffDiving_g02_c04 21 +TrampolineJumping/v_TrampolineJumping_g03_c01 93 +PlayingFlute/v_PlayingFlute_g07_c03 61 +BlowDryHair/v_BlowDryHair_g07_c07 12 +HorseRiding/v_HorseRiding_g02_c05 41 +FloorGymnastics/v_FloorGymnastics_g07_c01 29 +PlayingGuitar/v_PlayingGuitar_g02_c02 62 
+ApplyEyeMakeup/v_ApplyEyeMakeup_g07_c02 0 +CricketBowling/v_CricketBowling_g06_c03 22 +JugglingBalls/v_JugglingBalls_g01_c01 45 +WritingOnBoard/v_WritingOnBoard_g07_c02 99 +BreastStroke/v_BreastStroke_g07_c01 18 +PoleVault/v_PoleVault_g03_c04 67 +CricketShot/v_CricketShot_g05_c04 23 +BenchPress/v_BenchPress_g04_c07 9 +BrushingTeeth/v_BrushingTeeth_g05_c02 19 +CricketShot/v_CricketShot_g04_c05 23 +FieldHockeyPenalty/v_FieldHockeyPenalty_g02_c04 28 +TrampolineJumping/v_TrampolineJumping_g02_c06 93 +PullUps/v_PullUps_g03_c04 69 +Lunges/v_Lunges_g06_c02 51 +PlayingDhol/v_PlayingDhol_g02_c03 60 +WallPushups/v_WallPushups_g04_c04 98 +SkyDiving/v_SkyDiving_g06_c02 82 +ParallelBars/v_ParallelBars_g06_c01 56 +SumoWrestling/v_SumoWrestling_g02_c01 86 +BenchPress/v_BenchPress_g07_c03 9 +SoccerJuggling/v_SoccerJuggling_g04_c03 83 +BabyCrawling/v_BabyCrawling_g03_c02 3 +Bowling/v_Bowling_g01_c04 15 +CricketBowling/v_CricketBowling_g07_c02 22 +BoxingPunchingBag/v_BoxingPunchingBag_g05_c02 16 +BoxingPunchingBag/v_BoxingPunchingBag_g02_c02 16 +StillRings/v_StillRings_g06_c03 85 +PizzaTossing/v_PizzaTossing_g07_c03 57 +SoccerPenalty/v_SoccerPenalty_g05_c01 84 +CliffDiving/v_CliffDiving_g05_c02 21 +BaseballPitch/v_BaseballPitch_g01_c04 6 +PlayingTabla/v_PlayingTabla_g05_c03 65 +FrisbeeCatch/v_FrisbeeCatch_g03_c04 30 +TableTennisShot/v_TableTennisShot_g04_c03 89 +Rowing/v_Rowing_g07_c01 75 +BlowingCandles/v_BlowingCandles_g06_c02 13 +Diving/v_Diving_g04_c02 25 +StillRings/v_StillRings_g04_c01 85 +PushUps/v_PushUps_g02_c03 71 +Swing/v_Swing_g02_c02 88 +IceDancing/v_IceDancing_g04_c02 43 +PlayingDaf/v_PlayingDaf_g02_c01 59 +Fencing/v_Fencing_g05_c02 27 +SoccerJuggling/v_SoccerJuggling_g01_c05 83 +StillRings/v_StillRings_g02_c02 85 +PlayingGuitar/v_PlayingGuitar_g04_c07 62 +Hammering/v_Hammering_g04_c01 34 +PlayingCello/v_PlayingCello_g05_c04 58 +BodyWeightSquats/v_BodyWeightSquats_g04_c02 14 +Skiing/v_Skiing_g05_c02 80 +BenchPress/v_BenchPress_g01_c03 9 +Biking/v_Biking_g03_c01 10 
+BabyCrawling/v_BabyCrawling_g03_c01 3 +BenchPress/v_BenchPress_g07_c04 9 +JumpRope/v_JumpRope_g07_c01 47 +UnevenBars/v_UnevenBars_g03_c02 95 +HorseRiding/v_HorseRiding_g01_c05 41 +Biking/v_Biking_g01_c03 10 +StillRings/v_StillRings_g03_c07 85 +JugglingBalls/v_JugglingBalls_g06_c05 45 +WritingOnBoard/v_WritingOnBoard_g01_c02 99 +HulaHoop/v_HulaHoop_g04_c01 42 +Diving/v_Diving_g04_c05 25 +BlowingCandles/v_BlowingCandles_g05_c04 13 +PoleVault/v_PoleVault_g02_c04 67 +Kayaking/v_Kayaking_g06_c07 48 +FrontCrawl/v_FrontCrawl_g06_c03 31 +Bowling/v_Bowling_g06_c06 15 +Skijet/v_Skijet_g06_c03 81 +FieldHockeyPenalty/v_FieldHockeyPenalty_g04_c02 28 +FrontCrawl/v_FrontCrawl_g01_c03 31 +Surfing/v_Surfing_g04_c04 87 +HeadMassage/v_HeadMassage_g06_c03 38 +PlayingDaf/v_PlayingDaf_g05_c06 59 +Mixing/v_Mixing_g02_c06 53 +Drumming/v_Drumming_g04_c01 26 +UnevenBars/v_UnevenBars_g03_c01 95 +Shotput/v_Shotput_g05_c07 78 +Basketball/v_Basketball_g03_c03 7 +MilitaryParade/v_MilitaryParade_g06_c04 52 +HandstandPushups/v_HandStandPushups_g06_c03 36 +MilitaryParade/v_MilitaryParade_g03_c04 52 +HorseRace/v_HorseRace_g03_c04 40 +PlayingGuitar/v_PlayingGuitar_g06_c06 62 +PlayingDhol/v_PlayingDhol_g04_c05 60 +CricketBowling/v_CricketBowling_g06_c04 22 +HorseRiding/v_HorseRiding_g01_c07 41 +VolleyballSpiking/v_VolleyballSpiking_g02_c01 96 +Swing/v_Swing_g04_c06 88 +Bowling/v_Bowling_g05_c07 15 +PlayingViolin/v_PlayingViolin_g03_c03 66 +RockClimbingIndoor/v_RockClimbingIndoor_g06_c01 73 +SoccerPenalty/v_SoccerPenalty_g01_c02 84 +WallPushups/v_WallPushups_g07_c05 98 +PlayingCello/v_PlayingCello_g06_c06 58 +HorseRiding/v_HorseRiding_g05_c02 41 +StillRings/v_StillRings_g03_c06 85 +LongJump/v_LongJump_g02_c03 50 +BrushingTeeth/v_BrushingTeeth_g03_c03 19 +Fencing/v_Fencing_g04_c04 27 +Shotput/v_Shotput_g02_c06 78 +Mixing/v_Mixing_g05_c04 53 +Nunchucks/v_Nunchucks_g05_c01 55 +SalsaSpin/v_SalsaSpin_g04_c05 76 +YoYo/v_YoYo_g02_c02 100 +Mixing/v_Mixing_g07_c02 53 +PlayingSitar/v_PlayingSitar_g03_c03 64 
+FieldHockeyPenalty/v_FieldHockeyPenalty_g02_c01 28 +Skiing/v_Skiing_g06_c03 80 +JumpingJack/v_JumpingJack_g04_c02 46 +Shotput/v_Shotput_g01_c01 78 +TaiChi/v_TaiChi_g01_c03 90 +TableTennisShot/v_TableTennisShot_g03_c03 89 +CuttingInKitchen/v_CuttingInKitchen_g02_c04 24 +UnevenBars/v_UnevenBars_g02_c02 95 +SalsaSpin/v_SalsaSpin_g01_c03 76 +SalsaSpin/v_SalsaSpin_g05_c05 76 +IceDancing/v_IceDancing_g01_c03 43 +Kayaking/v_Kayaking_g05_c01 48 +SkyDiving/v_SkyDiving_g04_c04 82 +HammerThrow/v_HammerThrow_g06_c03 35 +Bowling/v_Bowling_g06_c01 15 +Diving/v_Diving_g06_c06 25 +BrushingTeeth/v_BrushingTeeth_g03_c04 19 +FrisbeeCatch/v_FrisbeeCatch_g03_c01 30 +BenchPress/v_BenchPress_g06_c02 9 +FrontCrawl/v_FrontCrawl_g03_c02 31 +Drumming/v_Drumming_g01_c03 26 +MoppingFloor/v_MoppingFloor_g04_c06 54 +BaseballPitch/v_BaseballPitch_g07_c02 6 +LongJump/v_LongJump_g06_c03 50 +CricketShot/v_CricketShot_g03_c05 23 +Rowing/v_Rowing_g03_c05 75 +Surfing/v_Surfing_g06_c03 87 +SumoWrestling/v_SumoWrestling_g06_c02 86 +HorseRace/v_HorseRace_g02_c03 40 +PushUps/v_PushUps_g05_c04 71 +HandstandWalking/v_HandstandWalking_g07_c06 37 +Basketball/v_Basketball_g04_c04 7 +PoleVault/v_PoleVault_g04_c01 67 +PlayingFlute/v_PlayingFlute_g03_c05 61 +WallPushups/v_WallPushups_g06_c02 98 +BlowDryHair/v_BlowDryHair_g02_c04 12 +BoxingPunchingBag/v_BoxingPunchingBag_g03_c05 16 +CricketShot/v_CricketShot_g03_c01 23 +Typing/v_Typing_g07_c03 94 +CuttingInKitchen/v_CuttingInKitchen_g03_c04 24 +SoccerJuggling/v_SoccerJuggling_g03_c01 83 +FieldHockeyPenalty/v_FieldHockeyPenalty_g04_c07 28 +ParallelBars/v_ParallelBars_g02_c03 56 +ApplyEyeMakeup/v_ApplyEyeMakeup_g01_c02 0 +JugglingBalls/v_JugglingBalls_g03_c01 45 +TaiChi/v_TaiChi_g04_c02 90 +SalsaSpin/v_SalsaSpin_g07_c03 76 +PlayingGuitar/v_PlayingGuitar_g07_c07 62 +JugglingBalls/v_JugglingBalls_g03_c02 45 +ParallelBars/v_ParallelBars_g05_c05 56 +Punch/v_Punch_g05_c05 70 +YoYo/v_YoYo_g01_c07 100 +JumpingJack/v_JumpingJack_g02_c04 46 +StillRings/v_StillRings_g02_c01 
85 +Fencing/v_Fencing_g02_c02 27 +Bowling/v_Bowling_g01_c02 15 +Lunges/v_Lunges_g04_c02 51 +Basketball/v_Basketball_g01_c04 7 +FrontCrawl/v_FrontCrawl_g06_c02 31 +PizzaTossing/v_PizzaTossing_g06_c02 57 +Hammering/v_Hammering_g01_c01 34 +PlayingGuitar/v_PlayingGuitar_g03_c02 62 +Shotput/v_Shotput_g07_c03 78 +WalkingWithDog/v_WalkingWithDog_g07_c05 97 +SkyDiving/v_SkyDiving_g04_c03 82 +Shotput/v_Shotput_g05_c06 78 +SalsaSpin/v_SalsaSpin_g06_c05 76 +FieldHockeyPenalty/v_FieldHockeyPenalty_g05_c06 28 +TableTennisShot/v_TableTennisShot_g07_c04 89 +PlayingPiano/v_PlayingPiano_g01_c04 63 +PommelHorse/v_PommelHorse_g01_c02 68 +PlayingDhol/v_PlayingDhol_g02_c07 60 +Skiing/v_Skiing_g01_c01 80 +SoccerJuggling/v_SoccerJuggling_g05_c05 83 +PlayingDhol/v_PlayingDhol_g07_c01 60 +Swing/v_Swing_g02_c01 88 +PlayingDhol/v_PlayingDhol_g05_c02 60 +ThrowDiscus/v_ThrowDiscus_g07_c01 92 +HorseRace/v_HorseRace_g05_c01 40 +Skijet/v_Skijet_g03_c01 81 +JumpingJack/v_JumpingJack_g03_c01 46 +SoccerJuggling/v_SoccerJuggling_g02_c02 83 +HeadMassage/v_HeadMassage_g03_c06 38 +PommelHorse/v_PommelHorse_g01_c06 68 +CliffDiving/v_CliffDiving_g03_c02 21 +BasketballDunk/v_BasketballDunk_g05_c03 8 +BasketballDunk/v_BasketballDunk_g01_c07 8 +TaiChi/v_TaiChi_g07_c03 90 +LongJump/v_LongJump_g01_c05 50 +HandstandPushups/v_HandStandPushups_g02_c04 36 +HulaHoop/v_HulaHoop_g04_c03 42 +HammerThrow/v_HammerThrow_g03_c07 35 +PlayingPiano/v_PlayingPiano_g06_c02 63 +FrisbeeCatch/v_FrisbeeCatch_g07_c03 30 +Hammering/v_Hammering_g05_c03 34 +SumoWrestling/v_SumoWrestling_g04_c04 86 +Basketball/v_Basketball_g03_c06 7 +MoppingFloor/v_MoppingFloor_g05_c01 54 +PlayingSitar/v_PlayingSitar_g01_c04 64 +LongJump/v_LongJump_g07_c03 50 +PoleVault/v_PoleVault_g01_c01 67 +PlayingGuitar/v_PlayingGuitar_g06_c05 62 +Swing/v_Swing_g07_c02 88 +Biking/v_Biking_g06_c04 10 +Rowing/v_Rowing_g04_c03 75 +PlayingPiano/v_PlayingPiano_g03_c01 63 +PlayingViolin/v_PlayingViolin_g07_c02 66 +BaseballPitch/v_BaseballPitch_g02_c01 6 
+GolfSwing/v_GolfSwing_g02_c03 32 +Billiards/v_Billiards_g04_c04 11 +Knitting/v_Knitting_g03_c02 49 +RopeClimbing/v_RopeClimbing_g04_c02 74 +Diving/v_Diving_g02_c02 25 +Mixing/v_Mixing_g02_c03 53 +IceDancing/v_IceDancing_g03_c05 43 +RockClimbingIndoor/v_RockClimbingIndoor_g03_c05 73 +CliffDiving/v_CliffDiving_g03_c04 21 +PizzaTossing/v_PizzaTossing_g01_c01 57 +Drumming/v_Drumming_g02_c04 26 +HandstandPushups/v_HandStandPushups_g05_c03 36 +UnevenBars/v_UnevenBars_g04_c02 95 +CricketShot/v_CricketShot_g05_c07 23 +BlowDryHair/v_BlowDryHair_g07_c05 12 +TennisSwing/v_TennisSwing_g04_c05 91 +ParallelBars/v_ParallelBars_g04_c04 56 +Bowling/v_Bowling_g03_c02 15 +StillRings/v_StillRings_g07_c04 85 +CliffDiving/v_CliffDiving_g05_c05 21 +Nunchucks/v_Nunchucks_g01_c03 55 +IceDancing/v_IceDancing_g05_c02 43 +MoppingFloor/v_MoppingFloor_g06_c02 54 +Rafting/v_Rafting_g07_c02 72 +Knitting/v_Knitting_g04_c06 49 +Billiards/v_Billiards_g02_c04 11 +IceDancing/v_IceDancing_g07_c02 43 +StillRings/v_StillRings_g01_c04 85 +Hammering/v_Hammering_g06_c04 34 +Archery/v_Archery_g02_c04 2 +FloorGymnastics/v_FloorGymnastics_g06_c02 29 +CricketBowling/v_CricketBowling_g06_c01 22 +FloorGymnastics/v_FloorGymnastics_g06_c01 29 +CricketBowling/v_CricketBowling_g02_c06 22 +FieldHockeyPenalty/v_FieldHockeyPenalty_g06_c03 28 +ParallelBars/v_ParallelBars_g02_c04 56 +ShavingBeard/v_ShavingBeard_g03_c05 77 +BalanceBeam/v_BalanceBeam_g02_c02 4 +ShavingBeard/v_ShavingBeard_g01_c02 77 +PlayingDhol/v_PlayingDhol_g05_c01 60 +CricketShot/v_CricketShot_g04_c07 23 +JugglingBalls/v_JugglingBalls_g06_c02 45 +BlowingCandles/v_BlowingCandles_g04_c04 13 +Biking/v_Biking_g07_c05 10 +VolleyballSpiking/v_VolleyballSpiking_g04_c05 96 +PlayingPiano/v_PlayingPiano_g04_c03 63 +PoleVault/v_PoleVault_g03_c02 67 +BrushingTeeth/v_BrushingTeeth_g01_c02 19 +BabyCrawling/v_BabyCrawling_g07_c01 3 +BasketballDunk/v_BasketballDunk_g01_c02 8 +BrushingTeeth/v_BrushingTeeth_g02_c03 19 +PlayingViolin/v_PlayingViolin_g04_c02 66 
+PlayingViolin/v_PlayingViolin_g05_c03 66 +PlayingFlute/v_PlayingFlute_g02_c04 61 +VolleyballSpiking/v_VolleyballSpiking_g04_c01 96 +PizzaTossing/v_PizzaTossing_g05_c02 57 +PlayingViolin/v_PlayingViolin_g03_c01 66 +SoccerPenalty/v_SoccerPenalty_g06_c04 84 +BlowingCandles/v_BlowingCandles_g01_c04 13 +Diving/v_Diving_g07_c04 25 +Mixing/v_Mixing_g04_c05 53 +HammerThrow/v_HammerThrow_g01_c06 35 +BalanceBeam/v_BalanceBeam_g02_c03 4 +ShavingBeard/v_ShavingBeard_g02_c07 77 +TableTennisShot/v_TableTennisShot_g01_c04 89 +ThrowDiscus/v_ThrowDiscus_g02_c03 92 +TrampolineJumping/v_TrampolineJumping_g07_c01 93 +PizzaTossing/v_PizzaTossing_g02_c04 57 +SoccerPenalty/v_SoccerPenalty_g06_c05 84 +PushUps/v_PushUps_g03_c04 71 +HighJump/v_HighJump_g05_c04 39 +PoleVault/v_PoleVault_g07_c01 67 +Billiards/v_Billiards_g05_c05 11 +SkateBoarding/v_SkateBoarding_g05_c01 79 +Nunchucks/v_Nunchucks_g06_c02 55 +WritingOnBoard/v_WritingOnBoard_g05_c01 99 +BasketballDunk/v_BasketballDunk_g01_c06 8 +SumoWrestling/v_SumoWrestling_g04_c03 86 +BrushingTeeth/v_BrushingTeeth_g02_c04 19 +TrampolineJumping/v_TrampolineJumping_g04_c03 93 +PlayingSitar/v_PlayingSitar_g01_c01 64 +PoleVault/v_PoleVault_g04_c06 67 +SalsaSpin/v_SalsaSpin_g03_c02 76 +BandMarching/v_BandMarching_g05_c06 5 +BabyCrawling/v_BabyCrawling_g01_c01 3 +CricketShot/v_CricketShot_g04_c01 23 +CuttingInKitchen/v_CuttingInKitchen_g05_c05 24 +CuttingInKitchen/v_CuttingInKitchen_g05_c04 24 +TrampolineJumping/v_TrampolineJumping_g05_c04 93 +HammerThrow/v_HammerThrow_g07_c05 35 +BlowDryHair/v_BlowDryHair_g07_c02 12 +BaseballPitch/v_BaseballPitch_g04_c03 6 +CricketShot/v_CricketShot_g05_c02 23 +PlayingTabla/v_PlayingTabla_g01_c02 65 +PlayingFlute/v_PlayingFlute_g01_c01 61 +Mixing/v_Mixing_g03_c02 53 +PlayingPiano/v_PlayingPiano_g03_c02 63 +PlayingPiano/v_PlayingPiano_g02_c04 63 +MoppingFloor/v_MoppingFloor_g04_c05 54 +Swing/v_Swing_g07_c07 88 +BodyWeightSquats/v_BodyWeightSquats_g06_c02 14 +WallPushups/v_WallPushups_g01_c02 98 
+RopeClimbing/v_RopeClimbing_g02_c03 74 +SoccerPenalty/v_SoccerPenalty_g02_c05 84 +WalkingWithDog/v_WalkingWithDog_g03_c03 97 +HammerThrow/v_HammerThrow_g02_c02 35 +HammerThrow/v_HammerThrow_g03_c06 35 +CliffDiving/v_CliffDiving_g02_c03 21 +Kayaking/v_Kayaking_g01_c02 48 +Bowling/v_Bowling_g05_c02 15 +Shotput/v_Shotput_g07_c04 78 +CleanAndJerk/v_CleanAndJerk_g07_c05 20 +IceDancing/v_IceDancing_g02_c04 43 +Punch/v_Punch_g04_c04 70 +BenchPress/v_BenchPress_g03_c07 9 +VolleyballSpiking/v_VolleyballSpiking_g01_c04 96 +BrushingTeeth/v_BrushingTeeth_g03_c05 19 +ThrowDiscus/v_ThrowDiscus_g04_c01 92 +JumpingJack/v_JumpingJack_g04_c03 46 +HandstandPushups/v_HandStandPushups_g04_c02 36 +PlayingGuitar/v_PlayingGuitar_g07_c05 62 +CricketShot/v_CricketShot_g01_c02 23 +HighJump/v_HighJump_g03_c01 39 +HighJump/v_HighJump_g05_c01 39 +FrontCrawl/v_FrontCrawl_g07_c05 31 +RopeClimbing/v_RopeClimbing_g03_c01 74 +LongJump/v_LongJump_g07_c05 50 +HammerThrow/v_HammerThrow_g05_c06 35 +HandstandPushups/v_HandStandPushups_g04_c03 36 +Punch/v_Punch_g01_c01 70 +TrampolineJumping/v_TrampolineJumping_g01_c04 93 +HorseRace/v_HorseRace_g04_c03 40 +HorseRiding/v_HorseRiding_g05_c06 41 +Skiing/v_Skiing_g04_c02 80 +SoccerJuggling/v_SoccerJuggling_g05_c01 83 +Rafting/v_Rafting_g03_c03 72 +Lunges/v_Lunges_g01_c04 51 +SoccerPenalty/v_SoccerPenalty_g04_c05 84 +StillRings/v_StillRings_g01_c03 85 +Swing/v_Swing_g02_c03 88 +BlowDryHair/v_BlowDryHair_g01_c01 12 +PlayingTabla/v_PlayingTabla_g06_c01 65 +FieldHockeyPenalty/v_FieldHockeyPenalty_g06_c05 28 +PlayingTabla/v_PlayingTabla_g05_c02 65 +Rafting/v_Rafting_g04_c03 72 +PullUps/v_PullUps_g04_c04 69 +FieldHockeyPenalty/v_FieldHockeyPenalty_g04_c05 28 +HandstandWalking/v_HandstandWalking_g06_c01 37 +BreastStroke/v_BreastStroke_g02_c02 18 +Rafting/v_Rafting_g02_c04 72 +PoleVault/v_PoleVault_g07_c03 67 +FloorGymnastics/v_FloorGymnastics_g02_c04 29 +Typing/v_Typing_g02_c06 94 +UnevenBars/v_UnevenBars_g05_c04 95 +Biking/v_Biking_g05_c01 10 
+BenchPress/v_BenchPress_g02_c03 9 +BrushingTeeth/v_BrushingTeeth_g01_c03 19 +HulaHoop/v_HulaHoop_g01_c03 42 +Diving/v_Diving_g04_c06 25 +TennisSwing/v_TennisSwing_g02_c02 91 +SalsaSpin/v_SalsaSpin_g03_c04 76 +WallPushups/v_WallPushups_g02_c03 98 +HeadMassage/v_HeadMassage_g02_c01 38 +FloorGymnastics/v_FloorGymnastics_g06_c07 29 +Mixing/v_Mixing_g05_c06 53 +FrontCrawl/v_FrontCrawl_g07_c07 31 +HorseRace/v_HorseRace_g06_c02 40 +IceDancing/v_IceDancing_g03_c04 43 +FrisbeeCatch/v_FrisbeeCatch_g04_c03 30 +BodyWeightSquats/v_BodyWeightSquats_g04_c04 14 +ThrowDiscus/v_ThrowDiscus_g06_c03 92 +FrontCrawl/v_FrontCrawl_g03_c03 31 +Fencing/v_Fencing_g07_c03 27 +ParallelBars/v_ParallelBars_g06_c05 56 +Skijet/v_Skijet_g06_c02 81 +PlayingCello/v_PlayingCello_g02_c02 58 +CricketShot/v_CricketShot_g02_c01 23 +Basketball/v_Basketball_g01_c02 7 +FrisbeeCatch/v_FrisbeeCatch_g07_c04 30 +PlayingDhol/v_PlayingDhol_g05_c03 60 +Bowling/v_Bowling_g03_c07 15 +SoccerJuggling/v_SoccerJuggling_g07_c06 83 +HorseRiding/v_HorseRiding_g07_c01 41 +BreastStroke/v_BreastStroke_g07_c02 18 +StillRings/v_StillRings_g01_c02 85 +SoccerJuggling/v_SoccerJuggling_g04_c04 83 +IceDancing/v_IceDancing_g05_c03 43 +Diving/v_Diving_g07_c02 25 +HandstandWalking/v_HandstandWalking_g07_c03 37 +CricketShot/v_CricketShot_g01_c04 23 +Fencing/v_Fencing_g05_c05 27 +CliffDiving/v_CliffDiving_g06_c07 21 +HeadMassage/v_HeadMassage_g02_c05 38 +Knitting/v_Knitting_g05_c01 49 +HulaHoop/v_HulaHoop_g02_c01 42 +Skiing/v_Skiing_g06_c07 80 +Drumming/v_Drumming_g05_c03 26 +Knitting/v_Knitting_g04_c02 49 +CricketShot/v_CricketShot_g06_c01 23 +PlayingTabla/v_PlayingTabla_g02_c02 65 +PlayingSitar/v_PlayingSitar_g04_c01 64 +Diving/v_Diving_g06_c07 25 +Fencing/v_Fencing_g07_c01 27 +SkateBoarding/v_SkateBoarding_g07_c05 79 +Shotput/v_Shotput_g01_c04 78 +BalanceBeam/v_BalanceBeam_g06_c01 4 +RockClimbingIndoor/v_RockClimbingIndoor_g03_c01 73 +WalkingWithDog/v_WalkingWithDog_g02_c02 97 +Nunchucks/v_Nunchucks_g01_c04 55 
+CricketShot/v_CricketShot_g07_c01 23 +Haircut/v_Haircut_g07_c02 33 +HulaHoop/v_HulaHoop_g07_c04 42 +HammerThrow/v_HammerThrow_g05_c03 35 +PlayingTabla/v_PlayingTabla_g05_c04 65 +HorseRiding/v_HorseRiding_g04_c07 41 +LongJump/v_LongJump_g07_c02 50 +PlayingTabla/v_PlayingTabla_g04_c06 65 +FloorGymnastics/v_FloorGymnastics_g02_c02 29 +SumoWrestling/v_SumoWrestling_g01_c02 86 +Skijet/v_Skijet_g04_c01 81 +BoxingPunchingBag/v_BoxingPunchingBag_g05_c05 16 +CliffDiving/v_CliffDiving_g05_c07 21 diff --git a/examples/LRCN_activity_recognition/ucf101_split1_trainVideos.txt b/examples/LRCN_activity_recognition/ucf101_split1_trainVideos.txt new file mode 100644 index 00000000000..9158885e5b5 --- /dev/null +++ b/examples/LRCN_activity_recognition/ucf101_split1_trainVideos.txt @@ -0,0 +1,9537 @@ +TableTennisShot/v_TableTennisShot_g19_c03 89 +MilitaryParade/v_MilitaryParade_g09_c06 52 +RopeClimbing/v_RopeClimbing_g16_c01 74 +HandstandPushups/v_HandStandPushups_g09_c01 36 +CliffDiving/v_CliffDiving_g21_c02 21 +Knitting/v_Knitting_g17_c05 49 +ShavingBeard/v_ShavingBeard_g08_c03 77 +BoxingSpeedBag/v_BoxingSpeedBag_g23_c04 17 +BabyCrawling/v_BabyCrawling_g20_c01 3 +Knitting/v_Knitting_g25_c07 49 +PlayingDhol/v_PlayingDhol_g16_c03 60 +HeadMassage/v_HeadMassage_g24_c01 38 +PlayingDaf/v_PlayingDaf_g12_c02 59 +JugglingBalls/v_JugglingBalls_g12_c03 45 +ParallelBars/v_ParallelBars_g24_c02 56 +Nunchucks/v_Nunchucks_g24_c05 55 +WallPushups/v_WallPushups_g21_c06 98 +ShavingBeard/v_ShavingBeard_g21_c05 77 +SalsaSpin/v_SalsaSpin_g19_c02 76 +HandstandWalking/v_HandstandWalking_g10_c03 37 +Skiing/v_Skiing_g18_c01 80 +PlayingDhol/v_PlayingDhol_g15_c04 60 +TrampolineJumping/v_TrampolineJumping_g21_c03 93 +Skiing/v_Skiing_g13_c05 80 +FrontCrawl/v_FrontCrawl_g17_c04 31 +LongJump/v_LongJump_g08_c02 50 +RockClimbingIndoor/v_RockClimbingIndoor_g25_c03 73 +BrushingTeeth/v_BrushingTeeth_g23_c02 19 +Knitting/v_Knitting_g15_c07 49 +Swing/v_Swing_g23_c05 88 +BasketballDunk/v_BasketballDunk_g10_c04 8 
+Skiing/v_Skiing_g09_c05 80 +PullUps/v_PullUps_g09_c03 69 +BrushingTeeth/v_BrushingTeeth_g14_c01 19 +BandMarching/v_BandMarching_g08_c02 5 +Kayaking/v_Kayaking_g25_c01 48 +Nunchucks/v_Nunchucks_g21_c02 55 +JumpRope/v_JumpRope_g16_c03 47 +BaseballPitch/v_BaseballPitch_g18_c06 6 +PizzaTossing/v_PizzaTossing_g24_c02 57 +ThrowDiscus/v_ThrowDiscus_g17_c02 92 +JugglingBalls/v_JugglingBalls_g09_c03 45 +HandstandWalking/v_HandstandWalking_g11_c01 37 +IceDancing/v_IceDancing_g24_c05 43 +Bowling/v_Bowling_g11_c05 15 +HorseRiding/v_HorseRiding_g11_c03 41 +HandstandPushups/v_HandStandPushups_g20_c01 36 +TableTennisShot/v_TableTennisShot_g10_c03 89 +CliffDiving/v_CliffDiving_g22_c04 21 +HammerThrow/v_HammerThrow_g09_c07 35 +Rowing/v_Rowing_g22_c03 75 +Haircut/v_Haircut_g08_c05 33 +PlayingFlute/v_PlayingFlute_g13_c04 61 +HeadMassage/v_HeadMassage_g17_c03 38 +CricketShot/v_CricketShot_g19_c02 23 +Biking/v_Biking_g10_c04 10 +ParallelBars/v_ParallelBars_g08_c02 56 +PushUps/v_PushUps_g21_c03 71 +PlayingCello/v_PlayingCello_g24_c04 58 +ApplyEyeMakeup/v_ApplyEyeMakeup_g13_c01 0 +WalkingWithDog/v_WalkingWithDog_g22_c01 97 +TennisSwing/v_TennisSwing_g12_c02 91 +PizzaTossing/v_PizzaTossing_g23_c02 57 +SkyDiving/v_SkyDiving_g14_c04 82 +ApplyLipstick/v_ApplyLipstick_g15_c02 1 +ParallelBars/v_ParallelBars_g11_c06 56 +SumoWrestling/v_SumoWrestling_g18_c03 86 +JumpRope/v_JumpRope_g25_c03 47 +ThrowDiscus/v_ThrowDiscus_g25_c01 92 +HeadMassage/v_HeadMassage_g24_c02 38 +UnevenBars/v_UnevenBars_g13_c04 95 +Rowing/v_Rowing_g09_c01 75 +ApplyEyeMakeup/v_ApplyEyeMakeup_g11_c02 0 +Punch/v_Punch_g23_c03 70 +FrontCrawl/v_FrontCrawl_g17_c02 31 +VolleyballSpiking/v_VolleyballSpiking_g10_c02 96 +WritingOnBoard/v_WritingOnBoard_g21_c01 99 +PizzaTossing/v_PizzaTossing_g19_c04 57 +PlayingSitar/v_PlayingSitar_g08_c07 64 +FrisbeeCatch/v_FrisbeeCatch_g19_c04 30 +BoxingSpeedBag/v_BoxingSpeedBag_g19_c02 17 +CricketShot/v_CricketShot_g09_c02 23 +Lunges/v_Lunges_g15_c02 51 +Typing/v_Typing_g14_c03 94 
+ApplyEyeMakeup/v_ApplyEyeMakeup_g11_c04 0 +YoYo/v_YoYo_g09_c04 100 +BoxingSpeedBag/v_BoxingSpeedBag_g12_c02 17 +PullUps/v_PullUps_g22_c01 69 +Hammering/v_Hammering_g19_c03 34 +WritingOnBoard/v_WritingOnBoard_g11_c06 99 +HandstandPushups/v_HandStandPushups_g09_c04 36 +FrontCrawl/v_FrontCrawl_g25_c04 31 +FloorGymnastics/v_FloorGymnastics_g08_c02 29 +Nunchucks/v_Nunchucks_g20_c04 55 +SoccerPenalty/v_SoccerPenalty_g22_c02 84 +ApplyEyeMakeup/v_ApplyEyeMakeup_g14_c01 0 +IceDancing/v_IceDancing_g12_c03 43 +Punch/v_Punch_g24_c07 70 +PlayingGuitar/v_PlayingGuitar_g14_c02 62 +StillRings/v_StillRings_g09_c02 85 +CricketShot/v_CricketShot_g11_c04 23 +Kayaking/v_Kayaking_g12_c01 48 +Basketball/v_Basketball_g22_c07 7 +HorseRace/v_HorseRace_g11_c03 40 +Rafting/v_Rafting_g13_c05 72 +CuttingInKitchen/v_CuttingInKitchen_g08_c02 24 +RockClimbingIndoor/v_RockClimbingIndoor_g20_c05 73 +Knitting/v_Knitting_g09_c05 49 +Knitting/v_Knitting_g25_c05 49 +ApplyEyeMakeup/v_ApplyEyeMakeup_g23_c01 0 +ApplyLipstick/v_ApplyLipstick_g08_c04 1 +JumpRope/v_JumpRope_g13_c03 47 +CuttingInKitchen/v_CuttingInKitchen_g21_c04 24 +StillRings/v_StillRings_g08_c04 85 +HammerThrow/v_HammerThrow_g19_c05 35 +BoxingPunchingBag/v_BoxingPunchingBag_g13_c01 16 +ShavingBeard/v_ShavingBeard_g10_c01 77 +Basketball/v_Basketball_g08_c02 7 +ApplyEyeMakeup/v_ApplyEyeMakeup_g12_c01 0 +CricketBowling/v_CricketBowling_g25_c05 22 +TaiChi/v_TaiChi_g09_c01 90 +YoYo/v_YoYo_g10_c03 100 +PlayingSitar/v_PlayingSitar_g22_c05 64 +LongJump/v_LongJump_g19_c01 50 +ApplyEyeMakeup/v_ApplyEyeMakeup_g18_c03 0 +WallPushups/v_WallPushups_g09_c04 98 +PizzaTossing/v_PizzaTossing_g24_c03 57 +BreastStroke/v_BreastStroke_g22_c01 18 +BabyCrawling/v_BabyCrawling_g16_c03 3 +WalkingWithDog/v_WalkingWithDog_g08_c03 97 +FrisbeeCatch/v_FrisbeeCatch_g20_c02 30 +BalanceBeam/v_BalanceBeam_g13_c01 4 +Knitting/v_Knitting_g12_c02 49 +ParallelBars/v_ParallelBars_g13_c03 56 +PlayingSitar/v_PlayingSitar_g13_c03 64 +Mixing/v_Mixing_g14_c07 53 
+BandMarching/v_BandMarching_g09_c01 5 +HeadMassage/v_HeadMassage_g10_c02 38 +PlayingFlute/v_PlayingFlute_g21_c05 61 +PlayingSitar/v_PlayingSitar_g23_c05 64 +PommelHorse/v_PommelHorse_g17_c03 68 +BoxingPunchingBag/v_BoxingPunchingBag_g25_c05 16 +IceDancing/v_IceDancing_g10_c03 43 +SkateBoarding/v_SkateBoarding_g10_c03 79 +Mixing/v_Mixing_g23_c03 53 +PullUps/v_PullUps_g20_c04 69 +BaseballPitch/v_BaseballPitch_g13_c03 6 +Bowling/v_Bowling_g08_c01 15 +JavelinThrow/v_JavelinThrow_g21_c04 44 +ShavingBeard/v_ShavingBeard_g18_c06 77 +BalanceBeam/v_BalanceBeam_g13_c02 4 +BalanceBeam/v_BalanceBeam_g08_c01 4 +JugglingBalls/v_JugglingBalls_g23_c03 45 +GolfSwing/v_GolfSwing_g19_c03 32 +Haircut/v_Haircut_g20_c03 33 +YoYo/v_YoYo_g10_c05 100 +BasketballDunk/v_BasketballDunk_g15_c02 8 +PommelHorse/v_PommelHorse_g14_c03 68 +HorseRiding/v_HorseRiding_g16_c05 41 +BandMarching/v_BandMarching_g21_c03 5 +BlowDryHair/v_BlowDryHair_g22_c01 12 +BrushingTeeth/v_BrushingTeeth_g21_c01 19 +TennisSwing/v_TennisSwing_g11_c04 91 +Drumming/v_Drumming_g09_c02 26 +BodyWeightSquats/v_BodyWeightSquats_g09_c03 14 +Archery/v_Archery_g18_c03 2 +Skijet/v_Skijet_g20_c01 81 +MoppingFloor/v_MoppingFloor_g14_c03 54 +PlayingDaf/v_PlayingDaf_g21_c05 59 +PlayingDhol/v_PlayingDhol_g25_c03 60 +StillRings/v_StillRings_g16_c04 85 +Shotput/v_Shotput_g11_c05 78 +Haircut/v_Haircut_g08_c04 33 +Lunges/v_Lunges_g08_c01 51 +PlayingDhol/v_PlayingDhol_g17_c05 60 +Fencing/v_Fencing_g12_c02 27 +Surfing/v_Surfing_g12_c05 87 +RockClimbingIndoor/v_RockClimbingIndoor_g12_c01 73 +SkyDiving/v_SkyDiving_g22_c02 82 +Archery/v_Archery_g16_c03 2 +Swing/v_Swing_g16_c05 88 +CricketBowling/v_CricketBowling_g10_c01 22 +PlayingDaf/v_PlayingDaf_g20_c03 59 +CricketShot/v_CricketShot_g24_c04 23 +SumoWrestling/v_SumoWrestling_g16_c02 86 +SumoWrestling/v_SumoWrestling_g21_c03 86 +BaseballPitch/v_BaseballPitch_g15_c01 6 +CricketShot/v_CricketShot_g10_c05 23 +Skijet/v_Skijet_g23_c02 81 +TennisSwing/v_TennisSwing_g15_c04 91 +Punch/v_Punch_g25_c06 70 
+TableTennisShot/v_TableTennisShot_g16_c03 89 +StillRings/v_StillRings_g12_c02 85 +CleanAndJerk/v_CleanAndJerk_g23_c02 20 +Kayaking/v_Kayaking_g12_c03 48 +PullUps/v_PullUps_g18_c04 69 +CricketShot/v_CricketShot_g17_c04 23 +Nunchucks/v_Nunchucks_g08_c03 55 +BlowDryHair/v_BlowDryHair_g25_c02 12 +HorseRace/v_HorseRace_g21_c02 40 +Surfing/v_Surfing_g13_c03 87 +HeadMassage/v_HeadMassage_g13_c03 38 +Typing/v_Typing_g24_c04 94 +BoxingSpeedBag/v_BoxingSpeedBag_g24_c02 17 +CliffDiving/v_CliffDiving_g16_c04 21 +ThrowDiscus/v_ThrowDiscus_g22_c04 92 +CricketBowling/v_CricketBowling_g17_c01 22 +BlowDryHair/v_BlowDryHair_g14_c04 12 +BenchPress/v_BenchPress_g20_c06 9 +SkyDiving/v_SkyDiving_g20_c03 82 +TaiChi/v_TaiChi_g20_c04 90 +PlayingGuitar/v_PlayingGuitar_g14_c05 62 +BreastStroke/v_BreastStroke_g24_c03 18 +TableTennisShot/v_TableTennisShot_g22_c01 89 +JavelinThrow/v_JavelinThrow_g09_c02 44 +Haircut/v_Haircut_g21_c03 33 +FloorGymnastics/v_FloorGymnastics_g09_c04 29 +HighJump/v_HighJump_g25_c04 39 +ThrowDiscus/v_ThrowDiscus_g20_c01 92 +ThrowDiscus/v_ThrowDiscus_g15_c03 92 +SoccerPenalty/v_SoccerPenalty_g24_c03 84 +SoccerPenalty/v_SoccerPenalty_g24_c05 84 +TableTennisShot/v_TableTennisShot_g15_c07 89 +BoxingPunchingBag/v_BoxingPunchingBag_g22_c01 16 +Bowling/v_Bowling_g14_c02 15 +TrampolineJumping/v_TrampolineJumping_g11_c02 93 +Billiards/v_Billiards_g12_c01 11 +JavelinThrow/v_JavelinThrow_g08_c04 44 +HorseRiding/v_HorseRiding_g18_c05 41 +PullUps/v_PullUps_g16_c04 69 +ApplyLipstick/v_ApplyLipstick_g08_c01 1 +PlayingViolin/v_PlayingViolin_g14_c03 66 +ThrowDiscus/v_ThrowDiscus_g16_c03 92 +JumpRope/v_JumpRope_g21_c04 47 +MilitaryParade/v_MilitaryParade_g20_c04 52 +HandstandWalking/v_HandstandWalking_g20_c04 37 +Archery/v_Archery_g24_c01 2 +FrontCrawl/v_FrontCrawl_g14_c04 31 +Skijet/v_Skijet_g19_c02 81 +GolfSwing/v_GolfSwing_g08_c04 32 +HulaHoop/v_HulaHoop_g23_c01 42 +TableTennisShot/v_TableTennisShot_g14_c05 89 +RockClimbingIndoor/v_RockClimbingIndoor_g13_c05 73 
+BoxingPunchingBag/v_BoxingPunchingBag_g21_c02 16 +ShavingBeard/v_ShavingBeard_g23_c01 77 +Haircut/v_Haircut_g23_c06 33 +RockClimbingIndoor/v_RockClimbingIndoor_g13_c02 73 +FrontCrawl/v_FrontCrawl_g24_c06 31 +WritingOnBoard/v_WritingOnBoard_g15_c04 99 +HorseRiding/v_HorseRiding_g09_c05 41 +PizzaTossing/v_PizzaTossing_g12_c04 57 +Rafting/v_Rafting_g17_c02 72 +Drumming/v_Drumming_g14_c02 26 +HorseRiding/v_HorseRiding_g25_c02 41 +Basketball/v_Basketball_g11_c03 7 +PlayingGuitar/v_PlayingGuitar_g19_c01 62 +RockClimbingIndoor/v_RockClimbingIndoor_g21_c03 73 +PlayingViolin/v_PlayingViolin_g20_c03 66 +RopeClimbing/v_RopeClimbing_g19_c07 74 +PoleVault/v_PoleVault_g11_c04 67 +HulaHoop/v_HulaHoop_g17_c05 42 +Bowling/v_Bowling_g24_c06 15 +PlayingGuitar/v_PlayingGuitar_g16_c01 62 +ShavingBeard/v_ShavingBeard_g08_c07 77 +PlayingGuitar/v_PlayingGuitar_g23_c06 62 +Hammering/v_Hammering_g20_c04 34 +HulaHoop/v_HulaHoop_g17_c02 42 +FrontCrawl/v_FrontCrawl_g08_c03 31 +Fencing/v_Fencing_g12_c04 27 +BandMarching/v_BandMarching_g13_c05 5 +SumoWrestling/v_SumoWrestling_g10_c01 86 +PlayingDaf/v_PlayingDaf_g22_c07 59 +BandMarching/v_BandMarching_g19_c02 5 +HammerThrow/v_HammerThrow_g12_c05 35 +Fencing/v_Fencing_g10_c02 27 +MoppingFloor/v_MoppingFloor_g20_c04 54 +ThrowDiscus/v_ThrowDiscus_g10_c04 92 +Drumming/v_Drumming_g24_c07 26 +BasketballDunk/v_BasketballDunk_g09_c03 8 +CricketShot/v_CricketShot_g12_c05 23 +Typing/v_Typing_g16_c03 94 +PlayingSitar/v_PlayingSitar_g08_c06 64 +Bowling/v_Bowling_g19_c01 15 +Fencing/v_Fencing_g09_c01 27 +Hammering/v_Hammering_g17_c06 34 +Rowing/v_Rowing_g25_c04 75 +Nunchucks/v_Nunchucks_g23_c01 55 +GolfSwing/v_GolfSwing_g25_c03 32 +RopeClimbing/v_RopeClimbing_g15_c05 74 +BenchPress/v_BenchPress_g18_c02 9 +Lunges/v_Lunges_g10_c03 51 +PommelHorse/v_PommelHorse_g18_c01 68 +PlayingDhol/v_PlayingDhol_g16_c05 60 +IceDancing/v_IceDancing_g15_c02 43 +PushUps/v_PushUps_g16_c03 71 +BreastStroke/v_BreastStroke_g25_c04 18 +HulaHoop/v_HulaHoop_g16_c05 42 
+SoccerJuggling/v_SoccerJuggling_g17_c03 83 +Billiards/v_Billiards_g10_c01 11 +Bowling/v_Bowling_g14_c01 15 +BabyCrawling/v_BabyCrawling_g19_c05 3 +HandstandWalking/v_HandstandWalking_g17_c04 37 +RopeClimbing/v_RopeClimbing_g12_c04 74 +Bowling/v_Bowling_g11_c06 15 +PlayingSitar/v_PlayingSitar_g18_c01 64 +PlayingDhol/v_PlayingDhol_g08_c02 60 +Billiards/v_Billiards_g19_c01 11 +Skijet/v_Skijet_g17_c04 81 +WritingOnBoard/v_WritingOnBoard_g15_c02 99 +SoccerJuggling/v_SoccerJuggling_g13_c04 83 +BoxingSpeedBag/v_BoxingSpeedBag_g15_c04 17 +HorseRiding/v_HorseRiding_g23_c01 41 +PlayingSitar/v_PlayingSitar_g18_c07 64 +FrisbeeCatch/v_FrisbeeCatch_g22_c06 30 +IceDancing/v_IceDancing_g14_c05 43 +Billiards/v_Billiards_g08_c02 11 +WritingOnBoard/v_WritingOnBoard_g13_c02 99 +FloorGymnastics/v_FloorGymnastics_g17_c04 29 +HandstandPushups/v_HandStandPushups_g25_c05 36 +Billiards/v_Billiards_g25_c07 11 +BenchPress/v_BenchPress_g20_c05 9 +PlayingGuitar/v_PlayingGuitar_g21_c03 62 +BoxingPunchingBag/v_BoxingPunchingBag_g23_c03 16 +StillRings/v_StillRings_g22_c04 85 +UnevenBars/v_UnevenBars_g17_c01 95 +SkateBoarding/v_SkateBoarding_g22_c03 79 +Bowling/v_Bowling_g10_c04 15 +Rowing/v_Rowing_g14_c04 75 +Rowing/v_Rowing_g13_c04 75 +Rowing/v_Rowing_g18_c01 75 +HorseRace/v_HorseRace_g14_c03 40 +SkyDiving/v_SkyDiving_g09_c01 82 +PoleVault/v_PoleVault_g25_c02 67 +Kayaking/v_Kayaking_g22_c03 48 +SalsaSpin/v_SalsaSpin_g10_c04 76 +PlayingViolin/v_PlayingViolin_g17_c01 66 +Bowling/v_Bowling_g08_c03 15 +Knitting/v_Knitting_g24_c04 49 +RockClimbingIndoor/v_RockClimbingIndoor_g21_c07 73 +Skiing/v_Skiing_g08_c05 80 +JumpRope/v_JumpRope_g25_c05 47 +Punch/v_Punch_g25_c03 70 +Rowing/v_Rowing_g10_c04 75 +HandstandWalking/v_HandstandWalking_g25_c01 37 +Billiards/v_Billiards_g19_c05 11 +CricketBowling/v_CricketBowling_g25_c06 22 +SkyDiving/v_SkyDiving_g17_c01 82 +Swing/v_Swing_g15_c07 88 +Punch/v_Punch_g12_c06 70 +Typing/v_Typing_g10_c02 94 +UnevenBars/v_UnevenBars_g10_c01 95 
+MilitaryParade/v_MilitaryParade_g19_c02 52 +BasketballDunk/v_BasketballDunk_g13_c04 8 +FloorGymnastics/v_FloorGymnastics_g20_c02 29 +BalanceBeam/v_BalanceBeam_g22_c04 4 +CricketShot/v_CricketShot_g10_c07 23 +WallPushups/v_WallPushups_g25_c02 98 +WalkingWithDog/v_WalkingWithDog_g15_c04 97 +JumpingJack/v_JumpingJack_g25_c01 46 +BabyCrawling/v_BabyCrawling_g15_c05 3 +UnevenBars/v_UnevenBars_g20_c01 95 +TaiChi/v_TaiChi_g11_c01 90 +PlayingDaf/v_PlayingDaf_g25_c04 59 +CliffDiving/v_CliffDiving_g16_c01 21 +FloorGymnastics/v_FloorGymnastics_g12_c03 29 +RockClimbingIndoor/v_RockClimbingIndoor_g18_c02 73 +PlayingDhol/v_PlayingDhol_g20_c04 60 +Shotput/v_Shotput_g09_c03 78 +BodyWeightSquats/v_BodyWeightSquats_g14_c04 14 +Typing/v_Typing_g18_c04 94 +ThrowDiscus/v_ThrowDiscus_g25_c05 92 +BenchPress/v_BenchPress_g09_c07 9 +Surfing/v_Surfing_g12_c06 87 +LongJump/v_LongJump_g12_c01 50 +Swing/v_Swing_g11_c06 88 +Nunchucks/v_Nunchucks_g24_c01 55 +TaiChi/v_TaiChi_g15_c03 90 +Kayaking/v_Kayaking_g22_c01 48 +Surfing/v_Surfing_g18_c02 87 +Billiards/v_Billiards_g09_c05 11 +PoleVault/v_PoleVault_g24_c03 67 +FrontCrawl/v_FrontCrawl_g09_c06 31 +TableTennisShot/v_TableTennisShot_g19_c02 89 +ShavingBeard/v_ShavingBeard_g08_c04 77 +BlowDryHair/v_BlowDryHair_g20_c04 12 +WallPushups/v_WallPushups_g22_c04 98 +Typing/v_Typing_g14_c02 94 +BandMarching/v_BandMarching_g15_c02 5 +BreastStroke/v_BreastStroke_g20_c01 18 +BaseballPitch/v_BaseballPitch_g18_c02 6 +JugglingBalls/v_JugglingBalls_g08_c01 45 +HulaHoop/v_HulaHoop_g15_c05 42 +PlayingPiano/v_PlayingPiano_g12_c01 63 +Billiards/v_Billiards_g09_c04 11 +BandMarching/v_BandMarching_g14_c02 5 +Surfing/v_Surfing_g15_c06 87 +BandMarching/v_BandMarching_g19_c07 5 +BrushingTeeth/v_BrushingTeeth_g12_c05 19 +Kayaking/v_Kayaking_g16_c01 48 +ShavingBeard/v_ShavingBeard_g13_c05 77 +Skiing/v_Skiing_g12_c02 80 +Skijet/v_Skijet_g23_c04 81 +PlayingTabla/v_PlayingTabla_g12_c01 65 +PlayingTabla/v_PlayingTabla_g09_c03 65 +Lunges/v_Lunges_g09_c03 51 
+Bowling/v_Bowling_g17_c01 15 +PlayingPiano/v_PlayingPiano_g25_c04 63 +PizzaTossing/v_PizzaTossing_g13_c04 57 +Shotput/v_Shotput_g17_c05 78 +WallPushups/v_WallPushups_g21_c02 98 +BaseballPitch/v_BaseballPitch_g18_c04 6 +Billiards/v_Billiards_g16_c03 11 +PullUps/v_PullUps_g21_c04 69 +BenchPress/v_BenchPress_g22_c04 9 +Haircut/v_Haircut_g15_c03 33 +Skiing/v_Skiing_g25_c03 80 +JumpRope/v_JumpRope_g09_c01 47 +PlayingSitar/v_PlayingSitar_g17_c02 64 +PommelHorse/v_PommelHorse_g14_c02 68 +TableTennisShot/v_TableTennisShot_g23_c04 89 +SoccerPenalty/v_SoccerPenalty_g21_c03 84 +Surfing/v_Surfing_g11_c03 87 +FrisbeeCatch/v_FrisbeeCatch_g14_c01 30 +HorseRiding/v_HorseRiding_g17_c05 41 +Biking/v_Biking_g14_c01 10 +HandstandPushups/v_HandStandPushups_g13_c03 36 +BabyCrawling/v_BabyCrawling_g18_c02 3 +Shotput/v_Shotput_g21_c03 78 +TennisSwing/v_TennisSwing_g24_c04 91 +PullUps/v_PullUps_g19_c01 69 +BreastStroke/v_BreastStroke_g16_c02 18 +WallPushups/v_WallPushups_g23_c01 98 +BandMarching/v_BandMarching_g24_c01 5 +SalsaSpin/v_SalsaSpin_g16_c03 76 +Typing/v_Typing_g12_c03 94 +CuttingInKitchen/v_CuttingInKitchen_g19_c01 24 +PlayingTabla/v_PlayingTabla_g13_c02 65 +Haircut/v_Haircut_g12_c01 33 +Surfing/v_Surfing_g20_c04 87 +PlayingCello/v_PlayingCello_g09_c02 58 +SoccerJuggling/v_SoccerJuggling_g12_c05 83 +PlayingPiano/v_PlayingPiano_g21_c02 63 +HandstandWalking/v_HandstandWalking_g21_c03 37 +UnevenBars/v_UnevenBars_g16_c02 95 +Fencing/v_Fencing_g12_c03 27 +Archery/v_Archery_g19_c03 2 +MilitaryParade/v_MilitaryParade_g15_c07 52 +CliffDiving/v_CliffDiving_g09_c04 21 +FloorGymnastics/v_FloorGymnastics_g10_c02 29 +UnevenBars/v_UnevenBars_g22_c04 95 +CuttingInKitchen/v_CuttingInKitchen_g13_c04 24 +PlayingTabla/v_PlayingTabla_g23_c02 65 +Lunges/v_Lunges_g10_c01 51 +Typing/v_Typing_g08_c01 94 +BoxingPunchingBag/v_BoxingPunchingBag_g24_c03 16 +JugglingBalls/v_JugglingBalls_g22_c05 45 +BandMarching/v_BandMarching_g09_c03 5 +PlayingFlute/v_PlayingFlute_g10_c01 61 +Kayaking/v_Kayaking_g20_c02 48 
+Surfing/v_Surfing_g13_c01 87 +PlayingDaf/v_PlayingDaf_g19_c02 59 +ApplyLipstick/v_ApplyLipstick_g16_c01 1 +PoleVault/v_PoleVault_g19_c04 67 +Punch/v_Punch_g15_c05 70 +PlayingPiano/v_PlayingPiano_g22_c03 63 +BrushingTeeth/v_BrushingTeeth_g08_c01 19 +PlayingViolin/v_PlayingViolin_g17_c04 66 +Rowing/v_Rowing_g16_c02 75 +Haircut/v_Haircut_g10_c07 33 +SumoWrestling/v_SumoWrestling_g16_c03 86 +JumpRope/v_JumpRope_g17_c01 47 +UnevenBars/v_UnevenBars_g16_c04 95 +WritingOnBoard/v_WritingOnBoard_g17_c04 99 +JumpRope/v_JumpRope_g09_c07 47 +UnevenBars/v_UnevenBars_g12_c02 95 +FloorGymnastics/v_FloorGymnastics_g24_c01 29 +HorseRiding/v_HorseRiding_g24_c07 41 +Billiards/v_Billiards_g19_c03 11 +TableTennisShot/v_TableTennisShot_g17_c03 89 +PlayingFlute/v_PlayingFlute_g11_c04 61 +Bowling/v_Bowling_g23_c02 15 +JugglingBalls/v_JugglingBalls_g25_c02 45 +BenchPress/v_BenchPress_g17_c01 9 +BoxingSpeedBag/v_BoxingSpeedBag_g25_c03 17 +Diving/v_Diving_g15_c02 25 +HandstandPushups/v_HandStandPushups_g20_c06 36 +HulaHoop/v_HulaHoop_g16_c02 42 +Lunges/v_Lunges_g12_c03 51 +GolfSwing/v_GolfSwing_g18_c03 32 +PommelHorse/v_PommelHorse_g22_c02 68 +Drumming/v_Drumming_g13_c04 26 +IceDancing/v_IceDancing_g10_c01 43 +PoleVault/v_PoleVault_g14_c06 67 +Archery/v_Archery_g09_c03 2 +Surfing/v_Surfing_g23_c03 87 +CleanAndJerk/v_CleanAndJerk_g11_c04 20 +ShavingBeard/v_ShavingBeard_g12_c03 77 +FrisbeeCatch/v_FrisbeeCatch_g13_c03 30 +VolleyballSpiking/v_VolleyballSpiking_g25_c02 96 +BabyCrawling/v_BabyCrawling_g20_c05 3 +BaseballPitch/v_BaseballPitch_g22_c07 6 +ThrowDiscus/v_ThrowDiscus_g17_c05 92 +Fencing/v_Fencing_g10_c03 27 +HorseRiding/v_HorseRiding_g21_c05 41 +Rafting/v_Rafting_g23_c04 72 +SkyDiving/v_SkyDiving_g16_c02 82 +Archery/v_Archery_g15_c07 2 +GolfSwing/v_GolfSwing_g11_c01 32 +LongJump/v_LongJump_g09_c06 50 +PoleVault/v_PoleVault_g23_c07 67 +WalkingWithDog/v_WalkingWithDog_g09_c04 97 +Mixing/v_Mixing_g18_c03 53 +MoppingFloor/v_MoppingFloor_g23_c03 54 +PlayingSitar/v_PlayingSitar_g10_c07 64 
+HandstandWalking/v_HandstandWalking_g16_c03 37 +GolfSwing/v_GolfSwing_g21_c04 32 +BenchPress/v_BenchPress_g24_c02 9 +SkyDiving/v_SkyDiving_g15_c05 82 +JavelinThrow/v_JavelinThrow_g16_c01 44 +PullUps/v_PullUps_g08_c02 69 +SumoWrestling/v_SumoWrestling_g24_c01 86 +PlayingCello/v_PlayingCello_g10_c07 58 +JumpRope/v_JumpRope_g25_c06 47 +HandstandWalking/v_HandstandWalking_g13_c03 37 +JumpRope/v_JumpRope_g20_c04 47 +JumpRope/v_JumpRope_g25_c02 47 +StillRings/v_StillRings_g25_c04 85 +WallPushups/v_WallPushups_g14_c02 98 +PizzaTossing/v_PizzaTossing_g21_c03 57 +GolfSwing/v_GolfSwing_g23_c05 32 +FieldHockeyPenalty/v_FieldHockeyPenalty_g15_c02 28 +BalanceBeam/v_BalanceBeam_g15_c03 4 +Drumming/v_Drumming_g13_c02 26 +CuttingInKitchen/v_CuttingInKitchen_g09_c02 24 +Skijet/v_Skijet_g21_c03 81 +Nunchucks/v_Nunchucks_g17_c02 55 +Punch/v_Punch_g14_c07 70 +BabyCrawling/v_BabyCrawling_g08_c03 3 +LongJump/v_LongJump_g11_c02 50 +IceDancing/v_IceDancing_g09_c04 43 +CliffDiving/v_CliffDiving_g15_c03 21 +TableTennisShot/v_TableTennisShot_g14_c04 89 +YoYo/v_YoYo_g14_c02 100 +Nunchucks/v_Nunchucks_g13_c05 55 +PlayingDhol/v_PlayingDhol_g09_c01 60 +Billiards/v_Billiards_g15_c06 11 +PlayingCello/v_PlayingCello_g13_c07 58 +HulaHoop/v_HulaHoop_g25_c05 42 +Haircut/v_Haircut_g15_c01 33 +CleanAndJerk/v_CleanAndJerk_g23_c04 20 +ApplyEyeMakeup/v_ApplyEyeMakeup_g12_c02 0 +PommelHorse/v_PommelHorse_g12_c07 68 +TennisSwing/v_TennisSwing_g15_c05 91 +PlayingFlute/v_PlayingFlute_g12_c01 61 +LongJump/v_LongJump_g08_c05 50 +MilitaryParade/v_MilitaryParade_g23_c04 52 +BlowingCandles/v_BlowingCandles_g10_c04 13 +LongJump/v_LongJump_g19_c03 50 +CuttingInKitchen/v_CuttingInKitchen_g17_c01 24 +PlayingPiano/v_PlayingPiano_g14_c02 63 +PlayingPiano/v_PlayingPiano_g22_c02 63 +SoccerJuggling/v_SoccerJuggling_g23_c04 83 +SkateBoarding/v_SkateBoarding_g21_c03 79 +HorseRiding/v_HorseRiding_g25_c04 41 +SalsaSpin/v_SalsaSpin_g12_c04 76 +SumoWrestling/v_SumoWrestling_g13_c02 86 +CricketBowling/v_CricketBowling_g25_c04 22 
+SalsaSpin/v_SalsaSpin_g09_c06 76 +IceDancing/v_IceDancing_g25_c04 43 +Surfing/v_Surfing_g17_c01 87 +Punch/v_Punch_g23_c07 70 +WallPushups/v_WallPushups_g09_c01 98 +LongJump/v_LongJump_g16_c05 50 +JavelinThrow/v_JavelinThrow_g13_c02 44 +BasketballDunk/v_BasketballDunk_g24_c01 8 +YoYo/v_YoYo_g14_c03 100 +PlayingTabla/v_PlayingTabla_g18_c03 65 +YoYo/v_YoYo_g15_c03 100 +TableTennisShot/v_TableTennisShot_g21_c01 89 +MoppingFloor/v_MoppingFloor_g24_c03 54 +BalanceBeam/v_BalanceBeam_g14_c01 4 +ApplyEyeMakeup/v_ApplyEyeMakeup_g15_c07 0 +PlayingPiano/v_PlayingPiano_g08_c02 63 +Diving/v_Diving_g12_c02 25 +CricketShot/v_CricketShot_g13_c07 23 +ShavingBeard/v_ShavingBeard_g10_c07 77 +PlayingGuitar/v_PlayingGuitar_g22_c02 62 +CricketBowling/v_CricketBowling_g12_c04 22 +CricketBowling/v_CricketBowling_g25_c01 22 +PlayingCello/v_PlayingCello_g10_c02 58 +JumpRope/v_JumpRope_g20_c02 47 +FrontCrawl/v_FrontCrawl_g23_c06 31 +HighJump/v_HighJump_g15_c02 39 +PommelHorse/v_PommelHorse_g13_c07 68 +CricketShot/v_CricketShot_g23_c01 23 +TrampolineJumping/v_TrampolineJumping_g16_c05 93 +HorseRace/v_HorseRace_g11_c02 40 +SoccerJuggling/v_SoccerJuggling_g11_c02 83 +Drumming/v_Drumming_g13_c07 26 +PoleVault/v_PoleVault_g24_c05 67 +CleanAndJerk/v_CleanAndJerk_g17_c01 20 +FieldHockeyPenalty/v_FieldHockeyPenalty_g17_c02 28 +BlowDryHair/v_BlowDryHair_g23_c01 12 +HulaHoop/v_HulaHoop_g21_c04 42 +Haircut/v_Haircut_g10_c06 33 +Kayaking/v_Kayaking_g16_c02 48 +BandMarching/v_BandMarching_g15_c01 5 +Basketball/v_Basketball_g10_c02 7 +FrontCrawl/v_FrontCrawl_g24_c02 31 +HulaHoop/v_HulaHoop_g24_c01 42 +HammerThrow/v_HammerThrow_g22_c02 35 +Nunchucks/v_Nunchucks_g25_c01 55 +Mixing/v_Mixing_g19_c01 53 +JugglingBalls/v_JugglingBalls_g11_c05 45 +UnevenBars/v_UnevenBars_g23_c04 95 +BlowingCandles/v_BlowingCandles_g21_c02 13 +IceDancing/v_IceDancing_g16_c05 43 +Kayaking/v_Kayaking_g13_c02 48 +WallPushups/v_WallPushups_g13_c04 98 +ApplyLipstick/v_ApplyLipstick_g17_c04 1 +Skiing/v_Skiing_g15_c04 80 
+PlayingGuitar/v_PlayingGuitar_g14_c04 62 +BoxingPunchingBag/v_BoxingPunchingBag_g13_c02 16 +Basketball/v_Basketball_g17_c02 7 +Bowling/v_Bowling_g16_c04 15 +PlayingFlute/v_PlayingFlute_g20_c02 61 +HandstandPushups/v_HandStandPushups_g15_c06 36 +CliffDiving/v_CliffDiving_g11_c03 21 +Nunchucks/v_Nunchucks_g12_c05 55 +PlayingSitar/v_PlayingSitar_g20_c03 64 +Hammering/v_Hammering_g15_c04 34 +TableTennisShot/v_TableTennisShot_g24_c04 89 +TennisSwing/v_TennisSwing_g13_c01 91 +FrontCrawl/v_FrontCrawl_g17_c07 31 +TaiChi/v_TaiChi_g13_c02 90 +MilitaryParade/v_MilitaryParade_g13_c02 52 +PlayingDaf/v_PlayingDaf_g23_c05 59 +PlayingCello/v_PlayingCello_g16_c02 58 +Knitting/v_Knitting_g11_c03 49 +BasketballDunk/v_BasketballDunk_g13_c01 8 +Skiing/v_Skiing_g15_c05 80 +BenchPress/v_BenchPress_g14_c07 9 +PullUps/v_PullUps_g11_c03 69 +FloorGymnastics/v_FloorGymnastics_g14_c04 29 +HandstandPushups/v_HandStandPushups_g08_c01 36 +VolleyballSpiking/v_VolleyballSpiking_g16_c02 96 +VolleyballSpiking/v_VolleyballSpiking_g16_c03 96 +Billiards/v_Billiards_g22_c04 11 +BodyWeightSquats/v_BodyWeightSquats_g15_c03 14 +IceDancing/v_IceDancing_g08_c03 43 +BodyWeightSquats/v_BodyWeightSquats_g12_c01 14 +Diving/v_Diving_g21_c02 25 +PushUps/v_PushUps_g17_c02 71 +JumpingJack/v_JumpingJack_g11_c04 46 +PlayingTabla/v_PlayingTabla_g10_c04 65 +CricketShot/v_CricketShot_g25_c03 23 +MilitaryParade/v_MilitaryParade_g09_c02 52 +HulaHoop/v_HulaHoop_g18_c02 42 +HammerThrow/v_HammerThrow_g11_c05 35 +PoleVault/v_PoleVault_g21_c07 67 +Billiards/v_Billiards_g22_c03 11 +SalsaSpin/v_SalsaSpin_g12_c01 76 +CricketBowling/v_CricketBowling_g20_c04 22 +IceDancing/v_IceDancing_g20_c05 43 +Typing/v_Typing_g11_c06 94 +SkateBoarding/v_SkateBoarding_g23_c02 79 +PlayingGuitar/v_PlayingGuitar_g13_c07 62 +BenchPress/v_BenchPress_g20_c01 9 +SumoWrestling/v_SumoWrestling_g14_c01 86 +HammerThrow/v_HammerThrow_g21_c05 35 +PoleVault/v_PoleVault_g10_c07 67 +FrontCrawl/v_FrontCrawl_g24_c05 31 +BaseballPitch/v_BaseballPitch_g25_c07 6 
+Nunchucks/v_Nunchucks_g18_c05 55 +PlayingFlute/v_PlayingFlute_g22_c01 61 +PlayingDaf/v_PlayingDaf_g08_c05 59 +Shotput/v_Shotput_g22_c02 78 +Shotput/v_Shotput_g25_c05 78 +Bowling/v_Bowling_g15_c05 15 +BenchPress/v_BenchPress_g16_c04 9 +Knitting/v_Knitting_g11_c05 49 +LongJump/v_LongJump_g16_c01 50 +JumpingJack/v_JumpingJack_g09_c06 46 +ApplyLipstick/v_ApplyLipstick_g19_c04 1 +YoYo/v_YoYo_g08_c05 100 +TaiChi/v_TaiChi_g08_c01 90 +HorseRace/v_HorseRace_g20_c04 40 +PullUps/v_PullUps_g11_c02 69 +BabyCrawling/v_BabyCrawling_g21_c04 3 +FrontCrawl/v_FrontCrawl_g19_c03 31 +ApplyEyeMakeup/v_ApplyEyeMakeup_g13_c04 0 +BoxingSpeedBag/v_BoxingSpeedBag_g14_c02 17 +CricketShot/v_CricketShot_g15_c01 23 +PlayingTabla/v_PlayingTabla_g12_c04 65 +TableTennisShot/v_TableTennisShot_g16_c07 89 +SalsaSpin/v_SalsaSpin_g11_c04 76 +Shotput/v_Shotput_g14_c02 78 +PlayingDhol/v_PlayingDhol_g10_c05 60 +BreastStroke/v_BreastStroke_g18_c04 18 +HulaHoop/v_HulaHoop_g12_c01 42 +Surfing/v_Surfing_g23_c04 87 +BoxingSpeedBag/v_BoxingSpeedBag_g20_c05 17 +Punch/v_Punch_g10_c04 70 +HandstandWalking/v_HandstandWalking_g23_c01 37 +CricketBowling/v_CricketBowling_g09_c01 22 +ApplyEyeMakeup/v_ApplyEyeMakeup_g24_c01 0 +Bowling/v_Bowling_g24_c03 15 +BenchPress/v_BenchPress_g23_c05 9 +JumpRope/v_JumpRope_g18_c06 47 +CricketShot/v_CricketShot_g22_c04 23 +FrontCrawl/v_FrontCrawl_g12_c04 31 +PlayingSitar/v_PlayingSitar_g24_c06 64 +Diving/v_Diving_g11_c04 25 +StillRings/v_StillRings_g09_c03 85 +CliffDiving/v_CliffDiving_g23_c01 21 +HammerThrow/v_HammerThrow_g14_c06 35 +PlayingDaf/v_PlayingDaf_g15_c02 59 +HighJump/v_HighJump_g19_c02 39 +PoleVault/v_PoleVault_g13_c02 67 +PlayingPiano/v_PlayingPiano_g25_c03 63 +SalsaSpin/v_SalsaSpin_g11_c01 76 +Bowling/v_Bowling_g23_c04 15 +Diving/v_Diving_g18_c02 25 +ThrowDiscus/v_ThrowDiscus_g10_c03 92 +Skiing/v_Skiing_g24_c06 80 +UnevenBars/v_UnevenBars_g21_c02 95 +BaseballPitch/v_BaseballPitch_g25_c04 6 +VolleyballSpiking/v_VolleyballSpiking_g14_c02 96 
+ThrowDiscus/v_ThrowDiscus_g12_c05 92 +Biking/v_Biking_g12_c02 10 +Nunchucks/v_Nunchucks_g11_c03 55 +FieldHockeyPenalty/v_FieldHockeyPenalty_g23_c03 28 +SoccerPenalty/v_SoccerPenalty_g09_c01 84 +WalkingWithDog/v_WalkingWithDog_g19_c02 97 +ApplyEyeMakeup/v_ApplyEyeMakeup_g19_c02 0 +BoxingPunchingBag/v_BoxingPunchingBag_g10_c02 16 +RopeClimbing/v_RopeClimbing_g20_c04 74 +Haircut/v_Haircut_g24_c04 33 +PlayingSitar/v_PlayingSitar_g15_c02 64 +PlayingGuitar/v_PlayingGuitar_g22_c04 62 +BrushingTeeth/v_BrushingTeeth_g12_c03 19 +PlayingTabla/v_PlayingTabla_g11_c02 65 +TrampolineJumping/v_TrampolineJumping_g17_c01 93 +WalkingWithDog/v_WalkingWithDog_g10_c02 97 +Biking/v_Biking_g17_c04 10 +BaseballPitch/v_BaseballPitch_g15_c02 6 +StillRings/v_StillRings_g22_c01 85 +PlayingTabla/v_PlayingTabla_g10_c02 65 +Biking/v_Biking_g08_c03 10 +FrisbeeCatch/v_FrisbeeCatch_g17_c05 30 +BoxingPunchingBag/v_BoxingPunchingBag_g14_c04 16 +Bowling/v_Bowling_g22_c06 15 +BreastStroke/v_BreastStroke_g09_c04 18 +FloorGymnastics/v_FloorGymnastics_g19_c03 29 +PlayingPiano/v_PlayingPiano_g12_c04 63 +JumpRope/v_JumpRope_g18_c04 47 +RockClimbingIndoor/v_RockClimbingIndoor_g18_c03 73 +FrontCrawl/v_FrontCrawl_g14_c01 31 +UnevenBars/v_UnevenBars_g18_c03 95 +BaseballPitch/v_BaseballPitch_g09_c04 6 +Archery/v_Archery_g20_c03 2 +HorseRiding/v_HorseRiding_g21_c01 41 +BoxingPunchingBag/v_BoxingPunchingBag_g08_c03 16 +HeadMassage/v_HeadMassage_g19_c01 38 +BlowDryHair/v_BlowDryHair_g08_c03 12 +HorseRiding/v_HorseRiding_g19_c07 41 +PlayingCello/v_PlayingCello_g22_c06 58 +BlowDryHair/v_BlowDryHair_g12_c05 12 +GolfSwing/v_GolfSwing_g10_c01 32 +Surfing/v_Surfing_g12_c02 87 +BlowDryHair/v_BlowDryHair_g09_c02 12 +YoYo/v_YoYo_g22_c02 100 +StillRings/v_StillRings_g21_c03 85 +ShavingBeard/v_ShavingBeard_g23_c06 77 +PullUps/v_PullUps_g20_c01 69 +PlayingTabla/v_PlayingTabla_g14_c02 65 +SumoWrestling/v_SumoWrestling_g18_c07 86 +BlowingCandles/v_BlowingCandles_g12_c01 13 +SkateBoarding/v_SkateBoarding_g08_c04 79 
+CleanAndJerk/v_CleanAndJerk_g24_c03 20 +ShavingBeard/v_ShavingBeard_g16_c06 77 +PushUps/v_PushUps_g10_c01 71 +TableTennisShot/v_TableTennisShot_g20_c05 89 +Knitting/v_Knitting_g16_c04 49 +Knitting/v_Knitting_g23_c04 49 +MilitaryParade/v_MilitaryParade_g08_c01 52 +BlowingCandles/v_BlowingCandles_g21_c03 13 +WallPushups/v_WallPushups_g21_c07 98 +PommelHorse/v_PommelHorse_g20_c06 68 +PlayingGuitar/v_PlayingGuitar_g12_c05 62 +JumpRope/v_JumpRope_g22_c04 47 +VolleyballSpiking/v_VolleyballSpiking_g11_c01 96 +HandstandWalking/v_HandstandWalking_g19_c02 37 +Knitting/v_Knitting_g12_c04 49 +Biking/v_Biking_g16_c05 10 +TennisSwing/v_TennisSwing_g12_c05 91 +TrampolineJumping/v_TrampolineJumping_g08_c03 93 +TennisSwing/v_TennisSwing_g21_c02 91 +YoYo/v_YoYo_g17_c05 100 +SkyDiving/v_SkyDiving_g08_c03 82 +LongJump/v_LongJump_g08_c01 50 +Drumming/v_Drumming_g17_c04 26 +HighJump/v_HighJump_g20_c02 39 +WalkingWithDog/v_WalkingWithDog_g08_c01 97 +ApplyLipstick/v_ApplyLipstick_g25_c04 1 +TaiChi/v_TaiChi_g10_c03 90 +TrampolineJumping/v_TrampolineJumping_g09_c01 93 +JugglingBalls/v_JugglingBalls_g15_c04 45 +JumpingJack/v_JumpingJack_g24_c04 46 +TaiChi/v_TaiChi_g19_c03 90 +BodyWeightSquats/v_BodyWeightSquats_g22_c04 14 +SumoWrestling/v_SumoWrestling_g18_c02 86 +FieldHockeyPenalty/v_FieldHockeyPenalty_g24_c04 28 +BabyCrawling/v_BabyCrawling_g20_c07 3 +Surfing/v_Surfing_g25_c03 87 +Fencing/v_Fencing_g24_c01 27 +PlayingDhol/v_PlayingDhol_g09_c05 60 +BandMarching/v_BandMarching_g19_c04 5 +RopeClimbing/v_RopeClimbing_g15_c02 74 +PlayingSitar/v_PlayingSitar_g09_c06 64 +Shotput/v_Shotput_g09_c07 78 +RockClimbingIndoor/v_RockClimbingIndoor_g15_c04 73 +PlayingDaf/v_PlayingDaf_g13_c01 59 +HighJump/v_HighJump_g16_c04 39 +PlayingSitar/v_PlayingSitar_g18_c04 64 +Skijet/v_Skijet_g20_c02 81 +HandstandWalking/v_HandstandWalking_g18_c06 37 +FrisbeeCatch/v_FrisbeeCatch_g14_c05 30 +ApplyLipstick/v_ApplyLipstick_g14_c03 1 +Billiards/v_Billiards_g15_c04 11 +ThrowDiscus/v_ThrowDiscus_g17_c03 92 
+UnevenBars/v_UnevenBars_g13_c02 95 +BlowingCandles/v_BlowingCandles_g24_c01 13 +LongJump/v_LongJump_g10_c05 50 +CricketBowling/v_CricketBowling_g21_c01 22 +HammerThrow/v_HammerThrow_g15_c02 35 +Basketball/v_Basketball_g25_c03 7 +Mixing/v_Mixing_g09_c07 53 +PlayingSitar/v_PlayingSitar_g20_c02 64 +SoccerJuggling/v_SoccerJuggling_g18_c04 83 +JavelinThrow/v_JavelinThrow_g11_c02 44 +HorseRace/v_HorseRace_g16_c01 40 +CricketShot/v_CricketShot_g13_c04 23 +Diving/v_Diving_g16_c05 25 +BaseballPitch/v_BaseballPitch_g19_c02 6 +CliffDiving/v_CliffDiving_g10_c06 21 +PommelHorse/v_PommelHorse_g08_c04 68 +PullUps/v_PullUps_g23_c01 69 +BandMarching/v_BandMarching_g25_c02 5 +PlayingCello/v_PlayingCello_g24_c05 58 +VolleyballSpiking/v_VolleyballSpiking_g12_c03 96 +PlayingSitar/v_PlayingSitar_g10_c02 64 +WritingOnBoard/v_WritingOnBoard_g10_c06 99 +ApplyLipstick/v_ApplyLipstick_g24_c02 1 +Diving/v_Diving_g15_c04 25 +FloorGymnastics/v_FloorGymnastics_g24_c03 29 +BrushingTeeth/v_BrushingTeeth_g16_c04 19 +PoleVault/v_PoleVault_g11_c01 67 +IceDancing/v_IceDancing_g15_c01 43 +RopeClimbing/v_RopeClimbing_g24_c04 74 +BoxingPunchingBag/v_BoxingPunchingBag_g17_c04 16 +WallPushups/v_WallPushups_g23_c03 98 +CliffDiving/v_CliffDiving_g10_c05 21 +BalanceBeam/v_BalanceBeam_g12_c01 4 +Hammering/v_Hammering_g14_c07 34 +BaseballPitch/v_BaseballPitch_g09_c03 6 +FloorGymnastics/v_FloorGymnastics_g09_c07 29 +ApplyLipstick/v_ApplyLipstick_g21_c03 1 +HeadMassage/v_HeadMassage_g16_c01 38 +Kayaking/v_Kayaking_g19_c03 48 +PlayingFlute/v_PlayingFlute_g14_c02 61 +CricketBowling/v_CricketBowling_g16_c07 22 +GolfSwing/v_GolfSwing_g22_c04 32 +RockClimbingIndoor/v_RockClimbingIndoor_g09_c02 73 +Archery/v_Archery_g19_c04 2 +BoxingSpeedBag/v_BoxingSpeedBag_g14_c04 17 +PlayingPiano/v_PlayingPiano_g25_c01 63 +MoppingFloor/v_MoppingFloor_g21_c01 54 +Kayaking/v_Kayaking_g22_c05 48 +FrisbeeCatch/v_FrisbeeCatch_g10_c06 30 +Kayaking/v_Kayaking_g17_c03 48 +VolleyballSpiking/v_VolleyballSpiking_g10_c04 96 
+PlayingTabla/v_PlayingTabla_g20_c01 65 +FrisbeeCatch/v_FrisbeeCatch_g15_c04 30 +PlayingFlute/v_PlayingFlute_g25_c03 61 +JumpingJack/v_JumpingJack_g20_c01 46 +LongJump/v_LongJump_g20_c02 50 +RockClimbingIndoor/v_RockClimbingIndoor_g14_c04 73 +CuttingInKitchen/v_CuttingInKitchen_g19_c02 24 +YoYo/v_YoYo_g13_c03 100 +Lunges/v_Lunges_g13_c01 51 +Diving/v_Diving_g12_c06 25 +BabyCrawling/v_BabyCrawling_g24_c01 3 +TableTennisShot/v_TableTennisShot_g25_c04 89 +ParallelBars/v_ParallelBars_g18_c01 56 +SkateBoarding/v_SkateBoarding_g25_c04 79 +WallPushups/v_WallPushups_g15_c02 98 +BaseballPitch/v_BaseballPitch_g08_c02 6 +Swing/v_Swing_g24_c03 88 +PoleVault/v_PoleVault_g17_c07 67 +BlowingCandles/v_BlowingCandles_g20_c02 13 +Rowing/v_Rowing_g25_c03 75 +PommelHorse/v_PommelHorse_g13_c01 68 +Rowing/v_Rowing_g16_c03 75 +RopeClimbing/v_RopeClimbing_g22_c02 74 +BaseballPitch/v_BaseballPitch_g08_c03 6 +BasketballDunk/v_BasketballDunk_g14_c03 8 +CuttingInKitchen/v_CuttingInKitchen_g18_c04 24 +Lunges/v_Lunges_g22_c03 51 +IceDancing/v_IceDancing_g17_c05 43 +Shotput/v_Shotput_g12_c06 78 +RopeClimbing/v_RopeClimbing_g10_c02 74 +Surfing/v_Surfing_g23_c02 87 +ThrowDiscus/v_ThrowDiscus_g19_c01 92 +HandstandPushups/v_HandStandPushups_g22_c02 36 +HighJump/v_HighJump_g23_c02 39 +PlayingFlute/v_PlayingFlute_g19_c01 61 +WritingOnBoard/v_WritingOnBoard_g10_c01 99 +PlayingCello/v_PlayingCello_g20_c05 58 +ThrowDiscus/v_ThrowDiscus_g11_c02 92 +SoccerJuggling/v_SoccerJuggling_g10_c04 83 +WritingOnBoard/v_WritingOnBoard_g23_c05 99 +Biking/v_Biking_g09_c01 10 +ParallelBars/v_ParallelBars_g09_c03 56 +PushUps/v_PushUps_g11_c04 71 +PlayingDhol/v_PlayingDhol_g17_c03 60 +Bowling/v_Bowling_g19_c07 15 +SkyDiving/v_SkyDiving_g09_c03 82 +PlayingSitar/v_PlayingSitar_g11_c06 64 +Hammering/v_Hammering_g10_c07 34 +WritingOnBoard/v_WritingOnBoard_g19_c04 99 +PlayingSitar/v_PlayingSitar_g16_c06 64 +ApplyEyeMakeup/v_ApplyEyeMakeup_g22_c01 0 +BasketballDunk/v_BasketballDunk_g20_c04 8 
+TableTennisShot/v_TableTennisShot_g15_c04 89 +Fencing/v_Fencing_g14_c03 27 +Basketball/v_Basketball_g21_c06 7 +BaseballPitch/v_BaseballPitch_g13_c02 6 +MoppingFloor/v_MoppingFloor_g22_c02 54 +ShavingBeard/v_ShavingBeard_g08_c02 77 +Shotput/v_Shotput_g09_c05 78 +Kayaking/v_Kayaking_g17_c06 48 +PlayingGuitar/v_PlayingGuitar_g15_c06 62 +BlowingCandles/v_BlowingCandles_g08_c04 13 +CuttingInKitchen/v_CuttingInKitchen_g14_c03 24 +Punch/v_Punch_g16_c04 70 +PlayingViolin/v_PlayingViolin_g11_c04 66 +PlayingDhol/v_PlayingDhol_g12_c04 60 +CuttingInKitchen/v_CuttingInKitchen_g25_c02 24 +CleanAndJerk/v_CleanAndJerk_g20_c04 20 +CleanAndJerk/v_CleanAndJerk_g08_c03 20 +BaseballPitch/v_BaseballPitch_g22_c06 6 +JugglingBalls/v_JugglingBalls_g08_c05 45 +FrontCrawl/v_FrontCrawl_g18_c03 31 +HorseRace/v_HorseRace_g17_c02 40 +Bowling/v_Bowling_g15_c02 15 +Billiards/v_Billiards_g17_c05 11 +WritingOnBoard/v_WritingOnBoard_g20_c06 99 +Basketball/v_Basketball_g18_c02 7 +PommelHorse/v_PommelHorse_g13_c04 68 +GolfSwing/v_GolfSwing_g17_c02 32 +Biking/v_Biking_g11_c01 10 +BaseballPitch/v_BaseballPitch_g23_c06 6 +Diving/v_Diving_g13_c06 25 +WallPushups/v_WallPushups_g25_c04 98 +BasketballDunk/v_BasketballDunk_g18_c06 8 +FrisbeeCatch/v_FrisbeeCatch_g22_c01 30 +CricketShot/v_CricketShot_g11_c02 23 +CleanAndJerk/v_CleanAndJerk_g08_c02 20 +Rafting/v_Rafting_g16_c07 72 +RopeClimbing/v_RopeClimbing_g08_c03 74 +SalsaSpin/v_SalsaSpin_g17_c04 76 +Basketball/v_Basketball_g09_c02 7 +TennisSwing/v_TennisSwing_g13_c05 91 +BlowDryHair/v_BlowDryHair_g12_c04 12 +JumpRope/v_JumpRope_g23_c04 47 +Punch/v_Punch_g18_c07 70 +BasketballDunk/v_BasketballDunk_g13_c03 8 +PlayingDhol/v_PlayingDhol_g18_c06 60 +RopeClimbing/v_RopeClimbing_g16_c02 74 +HorseRiding/v_HorseRiding_g24_c03 41 +JavelinThrow/v_JavelinThrow_g09_c01 44 +PlayingPiano/v_PlayingPiano_g14_c04 63 +Billiards/v_Billiards_g25_c05 11 +SalsaSpin/v_SalsaSpin_g21_c04 76 +Basketball/v_Basketball_g21_c03 7 +Haircut/v_Haircut_g13_c02 33 
+RockClimbingIndoor/v_RockClimbingIndoor_g22_c02 73 +SumoWrestling/v_SumoWrestling_g09_c01 86 +ShavingBeard/v_ShavingBeard_g25_c05 77 +JumpRope/v_JumpRope_g14_c02 47 +UnevenBars/v_UnevenBars_g21_c03 95 +HeadMassage/v_HeadMassage_g19_c05 38 +ApplyLipstick/v_ApplyLipstick_g23_c03 1 +HighJump/v_HighJump_g13_c01 39 +JugglingBalls/v_JugglingBalls_g20_c01 45 +BoxingSpeedBag/v_BoxingSpeedBag_g19_c01 17 +SalsaSpin/v_SalsaSpin_g15_c01 76 +Billiards/v_Billiards_g15_c07 11 +Skiing/v_Skiing_g19_c03 80 +GolfSwing/v_GolfSwing_g22_c03 32 +Diving/v_Diving_g21_c04 25 +StillRings/v_StillRings_g17_c03 85 +PlayingGuitar/v_PlayingGuitar_g17_c06 62 +PlayingCello/v_PlayingCello_g12_c03 58 +WallPushups/v_WallPushups_g15_c03 98 +HorseRiding/v_HorseRiding_g14_c01 41 +FrisbeeCatch/v_FrisbeeCatch_g18_c04 30 +Basketball/v_Basketball_g18_c04 7 +Swing/v_Swing_g17_c01 88 +WritingOnBoard/v_WritingOnBoard_g11_c03 99 +Diving/v_Diving_g17_c01 25 +Shotput/v_Shotput_g10_c05 78 +Billiards/v_Billiards_g12_c03 11 +PlayingTabla/v_PlayingTabla_g22_c04 65 +TableTennisShot/v_TableTennisShot_g22_c05 89 +Skijet/v_Skijet_g10_c03 81 +LongJump/v_LongJump_g14_c01 50 +YoYo/v_YoYo_g15_c04 100 +HandstandWalking/v_HandstandWalking_g08_c04 37 +MilitaryParade/v_MilitaryParade_g15_c04 52 +ParallelBars/v_ParallelBars_g20_c01 56 +PlayingCello/v_PlayingCello_g14_c01 58 +FloorGymnastics/v_FloorGymnastics_g23_c04 29 +WallPushups/v_WallPushups_g11_c05 98 +StillRings/v_StillRings_g12_c04 85 +HorseRiding/v_HorseRiding_g24_c05 41 +RopeClimbing/v_RopeClimbing_g14_c05 74 +IceDancing/v_IceDancing_g24_c06 43 +ApplyLipstick/v_ApplyLipstick_g14_c04 1 +Mixing/v_Mixing_g13_c02 53 +Mixing/v_Mixing_g09_c02 53 +Mixing/v_Mixing_g15_c03 53 +TennisSwing/v_TennisSwing_g23_c01 91 +BaseballPitch/v_BaseballPitch_g10_c03 6 +Hammering/v_Hammering_g21_c05 34 +Lunges/v_Lunges_g19_c02 51 +HorseRace/v_HorseRace_g09_c01 40 +Knitting/v_Knitting_g18_c03 49 +PlayingTabla/v_PlayingTabla_g13_c03 65 +Rowing/v_Rowing_g11_c04 75 +Lunges/v_Lunges_g20_c04 51 
+FrontCrawl/v_FrontCrawl_g10_c05 31 +Archery/v_Archery_g09_c07 2 +BalanceBeam/v_BalanceBeam_g11_c02 4 +BabyCrawling/v_BabyCrawling_g10_c02 3 +HighJump/v_HighJump_g08_c03 39 +CliffDiving/v_CliffDiving_g09_c01 21 +Punch/v_Punch_g17_c06 70 +Bowling/v_Bowling_g08_c06 15 +ThrowDiscus/v_ThrowDiscus_g23_c04 92 +Shotput/v_Shotput_g22_c04 78 +Punch/v_Punch_g09_c06 70 +CliffDiving/v_CliffDiving_g18_c07 21 +CliffDiving/v_CliffDiving_g11_c05 21 +SkyDiving/v_SkyDiving_g18_c06 82 +FieldHockeyPenalty/v_FieldHockeyPenalty_g10_c02 28 +JavelinThrow/v_JavelinThrow_g09_c04 44 +Kayaking/v_Kayaking_g20_c05 48 +Kayaking/v_Kayaking_g15_c01 48 +MilitaryParade/v_MilitaryParade_g25_c07 52 +PoleVault/v_PoleVault_g24_c06 67 +JugglingBalls/v_JugglingBalls_g20_c04 45 +Typing/v_Typing_g11_c07 94 +SoccerPenalty/v_SoccerPenalty_g13_c05 84 +Drumming/v_Drumming_g08_c05 26 +BlowDryHair/v_BlowDryHair_g19_c03 12 +MilitaryParade/v_MilitaryParade_g24_c02 52 +BodyWeightSquats/v_BodyWeightSquats_g11_c04 14 +ThrowDiscus/v_ThrowDiscus_g24_c05 92 +PlayingSitar/v_PlayingSitar_g14_c02 64 +Biking/v_Biking_g13_c04 10 +HorseRiding/v_HorseRiding_g08_c04 41 +IceDancing/v_IceDancing_g19_c04 43 +MoppingFloor/v_MoppingFloor_g17_c05 54 +PommelHorse/v_PommelHorse_g11_c01 68 +PommelHorse/v_PommelHorse_g24_c04 68 +SkateBoarding/v_SkateBoarding_g14_c01 79 +Shotput/v_Shotput_g19_c01 78 +HighJump/v_HighJump_g12_c01 39 +FieldHockeyPenalty/v_FieldHockeyPenalty_g09_c04 28 +Skiing/v_Skiing_g16_c02 80 +HandstandPushups/v_HandStandPushups_g12_c03 36 +BabyCrawling/v_BabyCrawling_g16_c01 3 +BasketballDunk/v_BasketballDunk_g22_c03 8 +Rowing/v_Rowing_g12_c04 75 +PlayingSitar/v_PlayingSitar_g11_c03 64 +Punch/v_Punch_g18_c06 70 +PlayingPiano/v_PlayingPiano_g11_c04 63 +BrushingTeeth/v_BrushingTeeth_g25_c04 19 +Nunchucks/v_Nunchucks_g15_c04 55 +ShavingBeard/v_ShavingBeard_g24_c05 77 +WalkingWithDog/v_WalkingWithDog_g12_c02 97 +JugglingBalls/v_JugglingBalls_g13_c01 45 +PlayingDaf/v_PlayingDaf_g23_c03 59 
+BodyWeightSquats/v_BodyWeightSquats_g13_c04 14 +JumpingJack/v_JumpingJack_g21_c03 46 +Skiing/v_Skiing_g15_c02 80 +HighJump/v_HighJump_g16_c02 39 +HandstandWalking/v_HandstandWalking_g11_c02 37 +PlayingGuitar/v_PlayingGuitar_g13_c05 62 +JumpRope/v_JumpRope_g15_c03 47 +WritingOnBoard/v_WritingOnBoard_g24_c07 99 +TableTennisShot/v_TableTennisShot_g08_c01 89 +Shotput/v_Shotput_g18_c04 78 +WallPushups/v_WallPushups_g20_c05 98 +BaseballPitch/v_BaseballPitch_g24_c03 6 +Nunchucks/v_Nunchucks_g11_c04 55 +PlayingDhol/v_PlayingDhol_g08_c01 60 +CleanAndJerk/v_CleanAndJerk_g12_c01 20 +PommelHorse/v_PommelHorse_g17_c06 68 +GolfSwing/v_GolfSwing_g11_c06 32 +SoccerJuggling/v_SoccerJuggling_g20_c02 83 +Skijet/v_Skijet_g25_c01 81 +CuttingInKitchen/v_CuttingInKitchen_g08_c04 24 +HorseRiding/v_HorseRiding_g22_c03 41 +Biking/v_Biking_g10_c05 10 +HighJump/v_HighJump_g25_c05 39 +SalsaSpin/v_SalsaSpin_g08_c02 76 +PlayingDaf/v_PlayingDaf_g25_c06 59 +BodyWeightSquats/v_BodyWeightSquats_g20_c05 14 +PushUps/v_PushUps_g12_c03 71 +TaiChi/v_TaiChi_g08_c03 90 +PlayingDhol/v_PlayingDhol_g10_c04 60 +Punch/v_Punch_g11_c07 70 +Diving/v_Diving_g24_c03 25 +RopeClimbing/v_RopeClimbing_g13_c06 74 +JumpRope/v_JumpRope_g21_c07 47 +Lunges/v_Lunges_g09_c01 51 +JumpRope/v_JumpRope_g08_c03 47 +Swing/v_Swing_g20_c06 88 +VolleyballSpiking/v_VolleyballSpiking_g24_c04 96 +Bowling/v_Bowling_g17_c04 15 +Swing/v_Swing_g11_c03 88 +RopeClimbing/v_RopeClimbing_g10_c01 74 +PizzaTossing/v_PizzaTossing_g23_c07 57 +PlayingDhol/v_PlayingDhol_g09_c06 60 +Rowing/v_Rowing_g17_c04 75 +CuttingInKitchen/v_CuttingInKitchen_g16_c03 24 +Kayaking/v_Kayaking_g15_c04 48 +SoccerJuggling/v_SoccerJuggling_g22_c01 83 +Basketball/v_Basketball_g20_c02 7 +Skiing/v_Skiing_g16_c03 80 +HighJump/v_HighJump_g09_c04 39 +Biking/v_Biking_g14_c02 10 +FrontCrawl/v_FrontCrawl_g20_c02 31 +SoccerJuggling/v_SoccerJuggling_g19_c05 83 +SoccerPenalty/v_SoccerPenalty_g23_c01 84 +WallPushups/v_WallPushups_g11_c04 98 +BabyCrawling/v_BabyCrawling_g10_c03 3 
+BrushingTeeth/v_BrushingTeeth_g09_c04 19 +WritingOnBoard/v_WritingOnBoard_g10_c05 99 +FieldHockeyPenalty/v_FieldHockeyPenalty_g11_c03 28 +ParallelBars/v_ParallelBars_g16_c04 56 +PlayingSitar/v_PlayingSitar_g12_c02 64 +JugglingBalls/v_JugglingBalls_g10_c02 45 +MilitaryParade/v_MilitaryParade_g19_c01 52 +Basketball/v_Basketball_g14_c02 7 +ApplyEyeMakeup/v_ApplyEyeMakeup_g16_c03 0 +ApplyLipstick/v_ApplyLipstick_g19_c01 1 +JumpingJack/v_JumpingJack_g23_c04 46 +JumpRope/v_JumpRope_g11_c02 47 +CricketBowling/v_CricketBowling_g22_c07 22 +Knitting/v_Knitting_g09_c03 49 +FloorGymnastics/v_FloorGymnastics_g22_c04 29 +BoxingPunchingBag/v_BoxingPunchingBag_g12_c03 16 +TaiChi/v_TaiChi_g23_c04 90 +SkyDiving/v_SkyDiving_g25_c01 82 +ShavingBeard/v_ShavingBeard_g13_c06 77 +BoxingPunchingBag/v_BoxingPunchingBag_g09_c07 16 +CleanAndJerk/v_CleanAndJerk_g24_c01 20 +HorseRace/v_HorseRace_g10_c02 40 +IceDancing/v_IceDancing_g14_c01 43 +FrontCrawl/v_FrontCrawl_g17_c03 31 +WallPushups/v_WallPushups_g14_c05 98 +SkyDiving/v_SkyDiving_g23_c04 82 +Hammering/v_Hammering_g15_c06 34 +WritingOnBoard/v_WritingOnBoard_g21_c04 99 +PlayingDhol/v_PlayingDhol_g17_c02 60 +JumpRope/v_JumpRope_g11_c07 47 +JumpRope/v_JumpRope_g10_c02 47 +Billiards/v_Billiards_g24_c01 11 +PushUps/v_PushUps_g23_c01 71 +JavelinThrow/v_JavelinThrow_g13_c01 44 +SoccerJuggling/v_SoccerJuggling_g21_c03 83 +PushUps/v_PushUps_g09_c02 71 +RopeClimbing/v_RopeClimbing_g24_c01 74 +BandMarching/v_BandMarching_g14_c04 5 +SoccerJuggling/v_SoccerJuggling_g20_c03 83 +BenchPress/v_BenchPress_g19_c03 9 +CricketBowling/v_CricketBowling_g19_c02 22 +BasketballDunk/v_BasketballDunk_g20_c01 8 +Knitting/v_Knitting_g20_c02 49 +TrampolineJumping/v_TrampolineJumping_g14_c02 93 +MoppingFloor/v_MoppingFloor_g23_c02 54 +HeadMassage/v_HeadMassage_g22_c03 38 +BlowingCandles/v_BlowingCandles_g17_c03 13 +JumpingJack/v_JumpingJack_g08_c04 46 +BoxingSpeedBag/v_BoxingSpeedBag_g08_c02 17 +PlayingDaf/v_PlayingDaf_g21_c04 59 +ApplyLipstick/v_ApplyLipstick_g16_c04 
1 +BodyWeightSquats/v_BodyWeightSquats_g17_c01 14 +SkateBoarding/v_SkateBoarding_g09_c01 79 +SoccerPenalty/v_SoccerPenalty_g11_c02 84 +Rowing/v_Rowing_g12_c02 75 +PizzaTossing/v_PizzaTossing_g11_c05 57 +PlayingDhol/v_PlayingDhol_g19_c06 60 +PoleVault/v_PoleVault_g23_c01 67 +YoYo/v_YoYo_g20_c04 100 +HighJump/v_HighJump_g24_c02 39 +PlayingViolin/v_PlayingViolin_g22_c03 66 +Archery/v_Archery_g22_c03 2 +BabyCrawling/v_BabyCrawling_g11_c04 3 +MoppingFloor/v_MoppingFloor_g18_c04 54 +PlayingPiano/v_PlayingPiano_g20_c04 63 +Lunges/v_Lunges_g25_c01 51 +BenchPress/v_BenchPress_g12_c06 9 +Drumming/v_Drumming_g13_c01 26 +RockClimbingIndoor/v_RockClimbingIndoor_g24_c02 73 +CliffDiving/v_CliffDiving_g18_c03 21 +HandstandWalking/v_HandstandWalking_g11_c04 37 +LongJump/v_LongJump_g16_c04 50 +BaseballPitch/v_BaseballPitch_g16_c04 6 +BlowDryHair/v_BlowDryHair_g13_c01 12 +BoxingSpeedBag/v_BoxingSpeedBag_g10_c01 17 +PlayingFlute/v_PlayingFlute_g12_c04 61 +ShavingBeard/v_ShavingBeard_g18_c02 77 +Kayaking/v_Kayaking_g22_c06 48 +JumpingJack/v_JumpingJack_g22_c02 46 +HandstandPushups/v_HandStandPushups_g13_c02 36 +SumoWrestling/v_SumoWrestling_g22_c04 86 +RockClimbingIndoor/v_RockClimbingIndoor_g20_c04 73 +SoccerJuggling/v_SoccerJuggling_g21_c01 83 +Hammering/v_Hammering_g22_c02 34 +FrontCrawl/v_FrontCrawl_g18_c02 31 +RockClimbingIndoor/v_RockClimbingIndoor_g10_c07 73 +RockClimbingIndoor/v_RockClimbingIndoor_g25_c07 73 +Biking/v_Biking_g20_c01 10 +ApplyEyeMakeup/v_ApplyEyeMakeup_g11_c05 0 +SalsaSpin/v_SalsaSpin_g08_c01 76 +GolfSwing/v_GolfSwing_g24_c05 32 +Skijet/v_Skijet_g25_c03 81 +PlayingCello/v_PlayingCello_g21_c02 58 +ApplyEyeMakeup/v_ApplyEyeMakeup_g13_c06 0 +PlayingSitar/v_PlayingSitar_g24_c07 64 +StillRings/v_StillRings_g24_c01 85 +Punch/v_Punch_g23_c06 70 +SalsaSpin/v_SalsaSpin_g10_c05 76 +RopeClimbing/v_RopeClimbing_g20_c02 74 +SoccerPenalty/v_SoccerPenalty_g14_c06 84 +FrisbeeCatch/v_FrisbeeCatch_g16_c06 30 +BalanceBeam/v_BalanceBeam_g14_c04 4 
+WalkingWithDog/v_WalkingWithDog_g09_c02 97 +TennisSwing/v_TennisSwing_g14_c01 91 +Swing/v_Swing_g23_c03 88 +TennisSwing/v_TennisSwing_g14_c07 91 +Drumming/v_Drumming_g10_c04 26 +Punch/v_Punch_g21_c01 70 +PlayingDhol/v_PlayingDhol_g10_c02 60 +Bowling/v_Bowling_g18_c05 15 +LongJump/v_LongJump_g21_c03 50 +PlayingDhol/v_PlayingDhol_g19_c07 60 +SumoWrestling/v_SumoWrestling_g19_c03 86 +RopeClimbing/v_RopeClimbing_g15_c03 74 +ApplyEyeMakeup/v_ApplyEyeMakeup_g13_c03 0 +FieldHockeyPenalty/v_FieldHockeyPenalty_g22_c03 28 +PizzaTossing/v_PizzaTossing_g15_c03 57 +RopeClimbing/v_RopeClimbing_g13_c03 74 +SoccerJuggling/v_SoccerJuggling_g16_c03 83 +CricketShot/v_CricketShot_g11_c06 23 +JumpRope/v_JumpRope_g21_c03 47 +PlayingCello/v_PlayingCello_g08_c04 58 +PlayingDaf/v_PlayingDaf_g11_c06 59 +BrushingTeeth/v_BrushingTeeth_g16_c02 19 +SalsaSpin/v_SalsaSpin_g15_c05 76 +Hammering/v_Hammering_g12_c03 34 +HandstandPushups/v_HandStandPushups_g17_c05 36 +Punch/v_Punch_g10_c02 70 +PushUps/v_PushUps_g15_c04 71 +TableTennisShot/v_TableTennisShot_g14_c02 89 +TableTennisShot/v_TableTennisShot_g25_c03 89 +HorseRiding/v_HorseRiding_g25_c05 41 +SalsaSpin/v_SalsaSpin_g10_c02 76 +FloorGymnastics/v_FloorGymnastics_g15_c03 29 +SkateBoarding/v_SkateBoarding_g22_c02 79 +VolleyballSpiking/v_VolleyballSpiking_g22_c03 96 +Diving/v_Diving_g09_c01 25 +Punch/v_Punch_g20_c02 70 +SoccerJuggling/v_SoccerJuggling_g15_c04 83 +Hammering/v_Hammering_g22_c05 34 +JumpRope/v_JumpRope_g21_c05 47 +Skijet/v_Skijet_g19_c04 81 +SumoWrestling/v_SumoWrestling_g14_c04 86 +YoYo/v_YoYo_g25_c01 100 +Fencing/v_Fencing_g13_c03 27 +BalanceBeam/v_BalanceBeam_g12_c04 4 +FieldHockeyPenalty/v_FieldHockeyPenalty_g16_c02 28 +RopeClimbing/v_RopeClimbing_g09_c03 74 +Hammering/v_Hammering_g22_c01 34 +Basketball/v_Basketball_g24_c04 7 +WritingOnBoard/v_WritingOnBoard_g11_c05 99 +CliffDiving/v_CliffDiving_g22_c01 21 +Basketball/v_Basketball_g23_c06 7 +BabyCrawling/v_BabyCrawling_g20_c04 3 +CliffDiving/v_CliffDiving_g15_c02 21 
+Fencing/v_Fencing_g22_c03 27 +JumpingJack/v_JumpingJack_g16_c01 46 +BoxingPunchingBag/v_BoxingPunchingBag_g14_c01 16 +Drumming/v_Drumming_g19_c04 26 +RockClimbingIndoor/v_RockClimbingIndoor_g15_c01 73 +TableTennisShot/v_TableTennisShot_g09_c03 89 +PlayingViolin/v_PlayingViolin_g24_c03 66 +YoYo/v_YoYo_g11_c01 100 +Shotput/v_Shotput_g17_c04 78 +SoccerPenalty/v_SoccerPenalty_g15_c06 84 +UnevenBars/v_UnevenBars_g20_c03 95 +PushUps/v_PushUps_g15_c02 71 +PlayingViolin/v_PlayingViolin_g22_c04 66 +JumpingJack/v_JumpingJack_g14_c01 46 +GolfSwing/v_GolfSwing_g12_c02 32 +Basketball/v_Basketball_g17_c03 7 +FrontCrawl/v_FrontCrawl_g16_c04 31 +BodyWeightSquats/v_BodyWeightSquats_g25_c04 14 +HorseRiding/v_HorseRiding_g20_c01 41 +BoxingSpeedBag/v_BoxingSpeedBag_g19_c04 17 +PoleVault/v_PoleVault_g20_c01 67 +PushUps/v_PushUps_g11_c01 71 +BrushingTeeth/v_BrushingTeeth_g24_c04 19 +CuttingInKitchen/v_CuttingInKitchen_g10_c01 24 +BenchPress/v_BenchPress_g24_c07 9 +PlayingFlute/v_PlayingFlute_g18_c03 61 +HorseRace/v_HorseRace_g16_c05 40 +BasketballDunk/v_BasketballDunk_g21_c02 8 +ApplyEyeMakeup/v_ApplyEyeMakeup_g15_c05 0 +PlayingDhol/v_PlayingDhol_g25_c01 60 +TableTennisShot/v_TableTennisShot_g22_c07 89 +FieldHockeyPenalty/v_FieldHockeyPenalty_g15_c01 28 +Swing/v_Swing_g20_c03 88 +PullUps/v_PullUps_g10_c02 69 +WritingOnBoard/v_WritingOnBoard_g16_c01 99 +Punch/v_Punch_g13_c01 70 +Surfing/v_Surfing_g16_c04 87 +PlayingSitar/v_PlayingSitar_g16_c05 64 +Diving/v_Diving_g20_c05 25 +BoxingSpeedBag/v_BoxingSpeedBag_g09_c02 17 +HandstandPushups/v_HandStandPushups_g20_c05 36 +Lunges/v_Lunges_g14_c03 51 +PlayingPiano/v_PlayingPiano_g15_c03 63 +WalkingWithDog/v_WalkingWithDog_g10_c03 97 +Knitting/v_Knitting_g25_c01 49 +Lunges/v_Lunges_g23_c01 51 +CleanAndJerk/v_CleanAndJerk_g17_c03 20 +Basketball/v_Basketball_g15_c07 7 +LongJump/v_LongJump_g10_c02 50 +CliffDiving/v_CliffDiving_g23_c03 21 +Punch/v_Punch_g13_c06 70 +SoccerJuggling/v_SoccerJuggling_g20_c04 83 +Fencing/v_Fencing_g19_c01 27 
+ShavingBeard/v_ShavingBeard_g23_c07 77 +WalkingWithDog/v_WalkingWithDog_g17_c01 97 +Typing/v_Typing_g17_c01 94 +Shotput/v_Shotput_g24_c01 78 +BodyWeightSquats/v_BodyWeightSquats_g21_c04 14 +Skiing/v_Skiing_g11_c04 80 +Drumming/v_Drumming_g19_c02 26 +BalanceBeam/v_BalanceBeam_g19_c04 4 +FieldHockeyPenalty/v_FieldHockeyPenalty_g13_c02 28 +Punch/v_Punch_g11_c04 70 +MoppingFloor/v_MoppingFloor_g10_c02 54 +CleanAndJerk/v_CleanAndJerk_g12_c06 20 +Mixing/v_Mixing_g11_c02 53 +BabyCrawling/v_BabyCrawling_g23_c04 3 +PoleVault/v_PoleVault_g21_c04 67 +Swing/v_Swing_g11_c02 88 +Rafting/v_Rafting_g13_c02 72 +PlayingFlute/v_PlayingFlute_g08_c05 61 +JugglingBalls/v_JugglingBalls_g14_c01 45 +BabyCrawling/v_BabyCrawling_g18_c06 3 +ThrowDiscus/v_ThrowDiscus_g09_c02 92 +BenchPress/v_BenchPress_g08_c01 9 +StillRings/v_StillRings_g18_c01 85 +ParallelBars/v_ParallelBars_g09_c06 56 +TennisSwing/v_TennisSwing_g12_c04 91 +FieldHockeyPenalty/v_FieldHockeyPenalty_g10_c04 28 +CricketShot/v_CricketShot_g24_c02 23 +HorseRace/v_HorseRace_g10_c04 40 +ShavingBeard/v_ShavingBeard_g10_c03 77 +IceDancing/v_IceDancing_g23_c02 43 +FrisbeeCatch/v_FrisbeeCatch_g22_c03 30 +Billiards/v_Billiards_g15_c02 11 +BoxingPunchingBag/v_BoxingPunchingBag_g12_c05 16 +PlayingDaf/v_PlayingDaf_g13_c03 59 +SkateBoarding/v_SkateBoarding_g10_c02 79 +BlowDryHair/v_BlowDryHair_g10_c05 12 +Billiards/v_Billiards_g13_c01 11 +HammerThrow/v_HammerThrow_g15_c01 35 +FrisbeeCatch/v_FrisbeeCatch_g20_c05 30 +PlayingSitar/v_PlayingSitar_g20_c06 64 +ApplyEyeMakeup/v_ApplyEyeMakeup_g25_c05 0 +Knitting/v_Knitting_g12_c03 49 +Diving/v_Diving_g10_c05 25 +IceDancing/v_IceDancing_g22_c01 43 +BlowDryHair/v_BlowDryHair_g08_c01 12 +FrisbeeCatch/v_FrisbeeCatch_g14_c03 30 +Rafting/v_Rafting_g24_c02 72 +BenchPress/v_BenchPress_g17_c06 9 +Skiing/v_Skiing_g20_c03 80 +HorseRiding/v_HorseRiding_g22_c01 41 +HandstandPushups/v_HandStandPushups_g17_c04 36 +PoleVault/v_PoleVault_g18_c07 67 +BabyCrawling/v_BabyCrawling_g25_c06 3 
+JugglingBalls/v_JugglingBalls_g11_c02 45 +PlayingDhol/v_PlayingDhol_g18_c01 60 +PullUps/v_PullUps_g25_c03 69 +FloorGymnastics/v_FloorGymnastics_g19_c01 29 +Bowling/v_Bowling_g20_c05 15 +Biking/v_Biking_g21_c04 10 +Billiards/v_Billiards_g08_c05 11 +BrushingTeeth/v_BrushingTeeth_g14_c05 19 +Rowing/v_Rowing_g13_c03 75 +Rowing/v_Rowing_g22_c02 75 +FloorGymnastics/v_FloorGymnastics_g13_c01 29 +RopeClimbing/v_RopeClimbing_g17_c01 74 +SkyDiving/v_SkyDiving_g19_c01 82 +LongJump/v_LongJump_g09_c01 50 +Knitting/v_Knitting_g19_c03 49 +BalanceBeam/v_BalanceBeam_g17_c04 4 +PlayingFlute/v_PlayingFlute_g08_c04 61 +PommelHorse/v_PommelHorse_g17_c05 68 +BlowingCandles/v_BlowingCandles_g14_c03 13 +FrontCrawl/v_FrontCrawl_g24_c03 31 +HammerThrow/v_HammerThrow_g21_c01 35 +LongJump/v_LongJump_g18_c04 50 +ApplyEyeMakeup/v_ApplyEyeMakeup_g20_c05 0 +BandMarching/v_BandMarching_g16_c03 5 +BabyCrawling/v_BabyCrawling_g12_c05 3 +PlayingGuitar/v_PlayingGuitar_g15_c05 62 +YoYo/v_YoYo_g19_c05 100 +Bowling/v_Bowling_g21_c06 15 +WallPushups/v_WallPushups_g09_c02 98 +TennisSwing/v_TennisSwing_g12_c06 91 +JavelinThrow/v_JavelinThrow_g25_c01 44 +Lunges/v_Lunges_g19_c04 51 +BlowDryHair/v_BlowDryHair_g20_c02 12 +CliffDiving/v_CliffDiving_g10_c01 21 +Diving/v_Diving_g10_c03 25 +MoppingFloor/v_MoppingFloor_g24_c04 54 +CuttingInKitchen/v_CuttingInKitchen_g08_c01 24 +FrontCrawl/v_FrontCrawl_g20_c07 31 +BabyCrawling/v_BabyCrawling_g14_c01 3 +ShavingBeard/v_ShavingBeard_g10_c04 77 +SkyDiving/v_SkyDiving_g24_c03 82 +TennisSwing/v_TennisSwing_g16_c06 91 +Nunchucks/v_Nunchucks_g13_c07 55 +PlayingCello/v_PlayingCello_g17_c05 58 +BreastStroke/v_BreastStroke_g14_c01 18 +Rafting/v_Rafting_g19_c04 72 +BlowingCandles/v_BlowingCandles_g15_c03 13 +PlayingCello/v_PlayingCello_g20_c06 58 +BlowDryHair/v_BlowDryHair_g23_c05 12 +PlayingCello/v_PlayingCello_g08_c05 58 +PlayingTabla/v_PlayingTabla_g14_c01 65 +HammerThrow/v_HammerThrow_g25_c03 35 +CricketShot/v_CricketShot_g09_c04 23 +ApplyEyeMakeup/v_ApplyEyeMakeup_g14_c02 
0 +FrontCrawl/v_FrontCrawl_g25_c01 31 +PlayingSitar/v_PlayingSitar_g15_c01 64 +Rafting/v_Rafting_g22_c04 72 +BasketballDunk/v_BasketballDunk_g25_c02 8 +PullUps/v_PullUps_g15_c01 69 +PlayingGuitar/v_PlayingGuitar_g10_c07 62 +PlayingViolin/v_PlayingViolin_g13_c01 66 +PullUps/v_PullUps_g22_c04 69 +Fencing/v_Fencing_g13_c02 27 +PommelHorse/v_PommelHorse_g17_c04 68 +MoppingFloor/v_MoppingFloor_g10_c03 54 +BasketballDunk/v_BasketballDunk_g12_c03 8 +Haircut/v_Haircut_g09_c03 33 +PizzaTossing/v_PizzaTossing_g20_c01 57 +Billiards/v_Billiards_g21_c01 11 +PlayingDaf/v_PlayingDaf_g18_c05 59 +BabyCrawling/v_BabyCrawling_g16_c04 3 +PlayingDaf/v_PlayingDaf_g24_c02 59 +SoccerJuggling/v_SoccerJuggling_g22_c03 83 +Biking/v_Biking_g17_c02 10 +Shotput/v_Shotput_g18_c03 78 +JavelinThrow/v_JavelinThrow_g11_c03 44 +Diving/v_Diving_g11_c02 25 +Rowing/v_Rowing_g22_c04 75 +ApplyEyeMakeup/v_ApplyEyeMakeup_g15_c01 0 +WritingOnBoard/v_WritingOnBoard_g15_c05 99 +Basketball/v_Basketball_g15_c05 7 +PushUps/v_PushUps_g24_c03 71 +PlayingDaf/v_PlayingDaf_g18_c03 59 +Kayaking/v_Kayaking_g23_c03 48 +Swing/v_Swing_g09_c05 88 +PlayingTabla/v_PlayingTabla_g23_c04 65 +BalanceBeam/v_BalanceBeam_g20_c01 4 +TableTennisShot/v_TableTennisShot_g20_c07 89 +Kayaking/v_Kayaking_g21_c03 48 +PlayingCello/v_PlayingCello_g18_c01 58 +Rafting/v_Rafting_g11_c01 72 +HeadMassage/v_HeadMassage_g24_c03 38 +Shotput/v_Shotput_g17_c03 78 +StillRings/v_StillRings_g16_c02 85 +FloorGymnastics/v_FloorGymnastics_g13_c02 29 +BlowingCandles/v_BlowingCandles_g13_c04 13 +Kayaking/v_Kayaking_g23_c02 48 +PlayingDaf/v_PlayingDaf_g12_c05 59 +Drumming/v_Drumming_g16_c07 26 +ApplyEyeMakeup/v_ApplyEyeMakeup_g09_c07 0 +ShavingBeard/v_ShavingBeard_g19_c05 77 +Bowling/v_Bowling_g22_c05 15 +FrontCrawl/v_FrontCrawl_g11_c02 31 +Shotput/v_Shotput_g17_c01 78 +BoxingPunchingBag/v_BoxingPunchingBag_g19_c01 16 +GolfSwing/v_GolfSwing_g25_c05 32 +RockClimbingIndoor/v_RockClimbingIndoor_g15_c03 73 +FrisbeeCatch/v_FrisbeeCatch_g08_c04 30 
+SkyDiving/v_SkyDiving_g12_c03 82 +HammerThrow/v_HammerThrow_g22_c06 35 +CleanAndJerk/v_CleanAndJerk_g10_c03 20 +PoleVault/v_PoleVault_g10_c01 67 +BoxingSpeedBag/v_BoxingSpeedBag_g13_c06 17 +CleanAndJerk/v_CleanAndJerk_g21_c01 20 +WallPushups/v_WallPushups_g09_c03 98 +JavelinThrow/v_JavelinThrow_g13_c03 44 +HeadMassage/v_HeadMassage_g18_c07 38 +SkateBoarding/v_SkateBoarding_g18_c01 79 +HandstandWalking/v_HandstandWalking_g09_c01 37 +HorseRace/v_HorseRace_g21_c03 40 +CliffDiving/v_CliffDiving_g16_c02 21 +JumpingJack/v_JumpingJack_g22_c03 46 +PizzaTossing/v_PizzaTossing_g17_c04 57 +ThrowDiscus/v_ThrowDiscus_g08_c01 92 +PullUps/v_PullUps_g17_c01 69 +BaseballPitch/v_BaseballPitch_g19_c04 6 +TennisSwing/v_TennisSwing_g16_c03 91 +ParallelBars/v_ParallelBars_g23_c01 56 +CricketShot/v_CricketShot_g14_c02 23 +BlowDryHair/v_BlowDryHair_g11_c07 12 +Rafting/v_Rafting_g13_c03 72 +CricketShot/v_CricketShot_g12_c04 23 +HorseRace/v_HorseRace_g08_c05 40 +Skijet/v_Skijet_g16_c04 81 +CricketBowling/v_CricketBowling_g20_c01 22 +RockClimbingIndoor/v_RockClimbingIndoor_g17_c07 73 +Drumming/v_Drumming_g23_c01 26 +BoxingPunchingBag/v_BoxingPunchingBag_g12_c02 16 +CuttingInKitchen/v_CuttingInKitchen_g22_c02 24 +LongJump/v_LongJump_g17_c03 50 +CricketShot/v_CricketShot_g20_c06 23 +RopeClimbing/v_RopeClimbing_g25_c02 74 +CleanAndJerk/v_CleanAndJerk_g16_c03 20 +Skiing/v_Skiing_g14_c02 80 +FrisbeeCatch/v_FrisbeeCatch_g20_c03 30 +BandMarching/v_BandMarching_g24_c03 5 +RockClimbingIndoor/v_RockClimbingIndoor_g08_c07 73 +HandstandPushups/v_HandStandPushups_g18_c07 36 +ApplyLipstick/v_ApplyLipstick_g19_c02 1 +FrisbeeCatch/v_FrisbeeCatch_g17_c02 30 +BenchPress/v_BenchPress_g13_c06 9 +MilitaryParade/v_MilitaryParade_g20_c02 52 +FieldHockeyPenalty/v_FieldHockeyPenalty_g17_c04 28 +HammerThrow/v_HammerThrow_g11_c03 35 +BrushingTeeth/v_BrushingTeeth_g11_c01 19 +FrontCrawl/v_FrontCrawl_g22_c04 31 +Kayaking/v_Kayaking_g10_c05 48 +HammerThrow/v_HammerThrow_g22_c04 35 +BenchPress/v_BenchPress_g17_c03 9 
+PoleVault/v_PoleVault_g12_c05 67 +SkateBoarding/v_SkateBoarding_g17_c03 79 +MoppingFloor/v_MoppingFloor_g18_c02 54 +Haircut/v_Haircut_g25_c02 33 +Billiards/v_Billiards_g19_c04 11 +BabyCrawling/v_BabyCrawling_g18_c04 3 +Shotput/v_Shotput_g22_c01 78 +TableTennisShot/v_TableTennisShot_g22_c06 89 +Rafting/v_Rafting_g17_c05 72 +Diving/v_Diving_g12_c01 25 +Nunchucks/v_Nunchucks_g09_c05 55 +Archery/v_Archery_g11_c02 2 +BasketballDunk/v_BasketballDunk_g19_c04 8 +BalanceBeam/v_BalanceBeam_g14_c03 4 +Drumming/v_Drumming_g18_c04 26 +BasketballDunk/v_BasketballDunk_g14_c02 8 +JumpingJack/v_JumpingJack_g10_c05 46 +UnevenBars/v_UnevenBars_g13_c01 95 +Lunges/v_Lunges_g16_c04 51 +Hammering/v_Hammering_g10_c02 34 +RopeClimbing/v_RopeClimbing_g11_c04 74 +BaseballPitch/v_BaseballPitch_g21_c02 6 +ApplyEyeMakeup/v_ApplyEyeMakeup_g20_c04 0 +BreastStroke/v_BreastStroke_g10_c02 18 +PizzaTossing/v_PizzaTossing_g17_c01 57 +Kayaking/v_Kayaking_g20_c01 48 +PlayingCello/v_PlayingCello_g19_c03 58 +ShavingBeard/v_ShavingBeard_g17_c01 77 +CleanAndJerk/v_CleanAndJerk_g09_c04 20 +TableTennisShot/v_TableTennisShot_g15_c05 89 +PlayingDhol/v_PlayingDhol_g10_c03 60 +PoleVault/v_PoleVault_g14_c02 67 +Skiing/v_Skiing_g24_c02 80 +BoxingPunchingBag/v_BoxingPunchingBag_g11_c05 16 +Mixing/v_Mixing_g13_c03 53 +StillRings/v_StillRings_g22_c03 85 +Nunchucks/v_Nunchucks_g23_c05 55 +SoccerJuggling/v_SoccerJuggling_g23_c02 83 +Archery/v_Archery_g10_c01 2 +SoccerPenalty/v_SoccerPenalty_g20_c04 84 +PlayingGuitar/v_PlayingGuitar_g13_c02 62 +ThrowDiscus/v_ThrowDiscus_g15_c05 92 +PlayingFlute/v_PlayingFlute_g25_c04 61 +CliffDiving/v_CliffDiving_g12_c07 21 +HulaHoop/v_HulaHoop_g13_c05 42 +HandstandWalking/v_HandstandWalking_g13_c01 37 +Swing/v_Swing_g09_c01 88 +FrisbeeCatch/v_FrisbeeCatch_g25_c01 30 +RopeClimbing/v_RopeClimbing_g13_c07 74 +Rafting/v_Rafting_g09_c02 72 +FieldHockeyPenalty/v_FieldHockeyPenalty_g08_c02 28 +PoleVault/v_PoleVault_g17_c03 67 +ThrowDiscus/v_ThrowDiscus_g12_c01 92 
+BreastStroke/v_BreastStroke_g12_c01 18 +PlayingTabla/v_PlayingTabla_g22_c01 65 +HandstandPushups/v_HandStandPushups_g22_c05 36 +FrontCrawl/v_FrontCrawl_g19_c01 31 +Archery/v_Archery_g10_c07 2 +HeadMassage/v_HeadMassage_g21_c02 38 +PoleVault/v_PoleVault_g14_c03 67 +Nunchucks/v_Nunchucks_g22_c02 55 +Basketball/v_Basketball_g10_c03 7 +BlowingCandles/v_BlowingCandles_g22_c01 13 +BaseballPitch/v_BaseballPitch_g20_c02 6 +PlayingTabla/v_PlayingTabla_g20_c03 65 +BoxingSpeedBag/v_BoxingSpeedBag_g19_c03 17 +SkateBoarding/v_SkateBoarding_g19_c05 79 +ThrowDiscus/v_ThrowDiscus_g17_c01 92 +BreastStroke/v_BreastStroke_g12_c03 18 +ApplyLipstick/v_ApplyLipstick_g11_c01 1 +Kayaking/v_Kayaking_g11_c07 48 +RopeClimbing/v_RopeClimbing_g13_c01 74 +PlayingDaf/v_PlayingDaf_g11_c04 59 +Bowling/v_Bowling_g19_c02 15 +Kayaking/v_Kayaking_g21_c01 48 +PlayingSitar/v_PlayingSitar_g24_c04 64 +TennisSwing/v_TennisSwing_g08_c02 91 +ShavingBeard/v_ShavingBeard_g22_c02 77 +BrushingTeeth/v_BrushingTeeth_g19_c05 19 +CricketBowling/v_CricketBowling_g14_c04 22 +PlayingCello/v_PlayingCello_g12_c06 58 +BenchPress/v_BenchPress_g13_c05 9 +BodyWeightSquats/v_BodyWeightSquats_g25_c02 14 +MilitaryParade/v_MilitaryParade_g22_c02 52 +PlayingDhol/v_PlayingDhol_g13_c02 60 +BrushingTeeth/v_BrushingTeeth_g23_c03 19 +Archery/v_Archery_g19_c02 2 +SkyDiving/v_SkyDiving_g12_c04 82 +BreastStroke/v_BreastStroke_g15_c01 18 +Punch/v_Punch_g21_c04 70 +HeadMassage/v_HeadMassage_g12_c01 38 +BlowDryHair/v_BlowDryHair_g23_c07 12 +LongJump/v_LongJump_g08_c04 50 +JugglingBalls/v_JugglingBalls_g18_c04 45 +BenchPress/v_BenchPress_g24_c01 9 +WritingOnBoard/v_WritingOnBoard_g10_c03 99 +Surfing/v_Surfing_g21_c01 87 +Billiards/v_Billiards_g18_c05 11 +BabyCrawling/v_BabyCrawling_g22_c05 3 +Mixing/v_Mixing_g09_c03 53 +GolfSwing/v_GolfSwing_g13_c02 32 +PlayingDaf/v_PlayingDaf_g22_c01 59 +BlowDryHair/v_BlowDryHair_g23_c04 12 +YoYo/v_YoYo_g23_c05 100 +SoccerJuggling/v_SoccerJuggling_g14_c04 83 +Haircut/v_Haircut_g11_c01 33 
+WallPushups/v_WallPushups_g12_c01 98 +ApplyLipstick/v_ApplyLipstick_g18_c03 1 +BoxingPunchingBag/v_BoxingPunchingBag_g17_c03 16 +BenchPress/v_BenchPress_g14_c04 9 +JugglingBalls/v_JugglingBalls_g08_c03 45 +Diving/v_Diving_g09_c07 25 +ParallelBars/v_ParallelBars_g12_c04 56 +TennisSwing/v_TennisSwing_g11_c06 91 +Swing/v_Swing_g15_c03 88 +StillRings/v_StillRings_g13_c01 85 +IceDancing/v_IceDancing_g21_c03 43 +Surfing/v_Surfing_g22_c03 87 +TrampolineJumping/v_TrampolineJumping_g09_c04 93 +MoppingFloor/v_MoppingFloor_g12_c03 54 +Archery/v_Archery_g25_c07 2 +JumpingJack/v_JumpingJack_g23_c03 46 +GolfSwing/v_GolfSwing_g14_c04 32 +Biking/v_Biking_g20_c03 10 +FieldHockeyPenalty/v_FieldHockeyPenalty_g15_c05 28 +PlayingCello/v_PlayingCello_g21_c05 58 +BlowingCandles/v_BlowingCandles_g12_c04 13 +Hammering/v_Hammering_g14_c05 34 +SkateBoarding/v_SkateBoarding_g22_c04 79 +HorseRiding/v_HorseRiding_g25_c01 41 +Diving/v_Diving_g23_c02 25 +Skiing/v_Skiing_g20_c06 80 +JumpingJack/v_JumpingJack_g10_c01 46 +CricketShot/v_CricketShot_g23_c03 23 +BasketballDunk/v_BasketballDunk_g10_c02 8 +BoxingPunchingBag/v_BoxingPunchingBag_g21_c03 16 +MilitaryParade/v_MilitaryParade_g10_c01 52 +Nunchucks/v_Nunchucks_g25_c02 55 +SalsaSpin/v_SalsaSpin_g09_c05 76 +Fencing/v_Fencing_g11_c05 27 +PlayingFlute/v_PlayingFlute_g19_c04 61 +FrisbeeCatch/v_FrisbeeCatch_g18_c01 30 +SoccerJuggling/v_SoccerJuggling_g20_c05 83 +HorseRiding/v_HorseRiding_g17_c02 41 +HighJump/v_HighJump_g22_c05 39 +Haircut/v_Haircut_g18_c05 33 +MilitaryParade/v_MilitaryParade_g25_c01 52 +Rowing/v_Rowing_g14_c02 75 +BoxingPunchingBag/v_BoxingPunchingBag_g12_c06 16 +BabyCrawling/v_BabyCrawling_g13_c05 3 +Biking/v_Biking_g17_c06 10 +Archery/v_Archery_g14_c03 2 +CleanAndJerk/v_CleanAndJerk_g22_c04 20 +SoccerPenalty/v_SoccerPenalty_g25_c03 84 +BoxingPunchingBag/v_BoxingPunchingBag_g25_c04 16 +Bowling/v_Bowling_g25_c05 15 +ShavingBeard/v_ShavingBeard_g24_c02 77 +Rowing/v_Rowing_g12_c06 75 +GolfSwing/v_GolfSwing_g16_c03 32 
+Punch/v_Punch_g08_c02 70 +GolfSwing/v_GolfSwing_g23_c01 32 +VolleyballSpiking/v_VolleyballSpiking_g23_c03 96 +IceDancing/v_IceDancing_g21_c05 43 +JavelinThrow/v_JavelinThrow_g08_c02 44 +LongJump/v_LongJump_g10_c06 50 +Haircut/v_Haircut_g09_c02 33 +Archery/v_Archery_g24_c06 2 +WritingOnBoard/v_WritingOnBoard_g12_c02 99 +TennisSwing/v_TennisSwing_g10_c03 91 +BodyWeightSquats/v_BodyWeightSquats_g23_c03 14 +FrontCrawl/v_FrontCrawl_g09_c04 31 +CleanAndJerk/v_CleanAndJerk_g25_c02 20 +PushUps/v_PushUps_g17_c03 71 +RockClimbingIndoor/v_RockClimbingIndoor_g13_c03 73 +Lunges/v_Lunges_g22_c01 51 +IceDancing/v_IceDancing_g21_c04 43 +HammerThrow/v_HammerThrow_g23_c07 35 +PlayingSitar/v_PlayingSitar_g19_c02 64 +PlayingSitar/v_PlayingSitar_g17_c03 64 +SoccerPenalty/v_SoccerPenalty_g12_c05 84 +FieldHockeyPenalty/v_FieldHockeyPenalty_g14_c05 28 +Punch/v_Punch_g17_c04 70 +HandstandPushups/v_HandStandPushups_g18_c05 36 +Rowing/v_Rowing_g23_c01 75 +Shotput/v_Shotput_g11_c01 78 +JumpingJack/v_JumpingJack_g15_c02 46 +SalsaSpin/v_SalsaSpin_g21_c02 76 +Hammering/v_Hammering_g17_c02 34 +PlayingCello/v_PlayingCello_g09_c05 58 +ApplyLipstick/v_ApplyLipstick_g23_c01 1 +Billiards/v_Billiards_g12_c02 11 +BrushingTeeth/v_BrushingTeeth_g11_c02 19 +Shotput/v_Shotput_g19_c03 78 +JumpingJack/v_JumpingJack_g19_c02 46 +Typing/v_Typing_g14_c04 94 +Drumming/v_Drumming_g21_c01 26 +SumoWrestling/v_SumoWrestling_g16_c01 86 +Punch/v_Punch_g13_c05 70 +RockClimbingIndoor/v_RockClimbingIndoor_g18_c01 73 +HandstandPushups/v_HandStandPushups_g16_c04 36 +TrampolineJumping/v_TrampolineJumping_g18_c03 93 +SkyDiving/v_SkyDiving_g17_c02 82 +Hammering/v_Hammering_g23_c02 34 +TableTennisShot/v_TableTennisShot_g20_c04 89 +MilitaryParade/v_MilitaryParade_g16_c03 52 +HorseRace/v_HorseRace_g08_c03 40 +Bowling/v_Bowling_g24_c01 15 +RopeClimbing/v_RopeClimbing_g23_c02 74 +Skijet/v_Skijet_g23_c03 81 +UnevenBars/v_UnevenBars_g18_c04 95 +Swing/v_Swing_g19_c04 88 +BabyCrawling/v_BabyCrawling_g19_c03 3 
+ShavingBeard/v_ShavingBeard_g25_c04 77 +HeadMassage/v_HeadMassage_g14_c04 38 +SkyDiving/v_SkyDiving_g20_c05 82 +SoccerJuggling/v_SoccerJuggling_g17_c02 83 +FrisbeeCatch/v_FrisbeeCatch_g12_c03 30 +PoleVault/v_PoleVault_g19_c05 67 +HorseRace/v_HorseRace_g13_c05 40 +PlayingFlute/v_PlayingFlute_g15_c02 61 +BodyWeightSquats/v_BodyWeightSquats_g20_c06 14 +Fencing/v_Fencing_g22_c02 27 +BabyCrawling/v_BabyCrawling_g25_c04 3 +BaseballPitch/v_BaseballPitch_g24_c05 6 +RopeClimbing/v_RopeClimbing_g08_c01 74 +BoxingSpeedBag/v_BoxingSpeedBag_g11_c04 17 +PommelHorse/v_PommelHorse_g19_c05 68 +BasketballDunk/v_BasketballDunk_g15_c04 8 +Rafting/v_Rafting_g19_c01 72 +FrisbeeCatch/v_FrisbeeCatch_g09_c02 30 +ThrowDiscus/v_ThrowDiscus_g25_c02 92 +CricketShot/v_CricketShot_g14_c07 23 +SumoWrestling/v_SumoWrestling_g24_c02 86 +Surfing/v_Surfing_g19_c03 87 +PlayingGuitar/v_PlayingGuitar_g17_c03 62 +PlayingSitar/v_PlayingSitar_g21_c04 64 +JugglingBalls/v_JugglingBalls_g09_c04 45 +PommelHorse/v_PommelHorse_g08_c02 68 +HandstandWalking/v_HandstandWalking_g17_c01 37 +SoccerPenalty/v_SoccerPenalty_g14_c01 84 +BandMarching/v_BandMarching_g13_c03 5 +BrushingTeeth/v_BrushingTeeth_g21_c04 19 +FrontCrawl/v_FrontCrawl_g13_c02 31 +BoxingPunchingBag/v_BoxingPunchingBag_g15_c07 16 +ShavingBeard/v_ShavingBeard_g13_c02 77 +BlowDryHair/v_BlowDryHair_g17_c03 12 +PoleVault/v_PoleVault_g15_c03 67 +GolfSwing/v_GolfSwing_g21_c06 32 +ParallelBars/v_ParallelBars_g11_c01 56 +Basketball/v_Basketball_g22_c02 7 +Bowling/v_Bowling_g21_c05 15 +PoleVault/v_PoleVault_g12_c01 67 +PommelHorse/v_PommelHorse_g09_c01 68 +Mixing/v_Mixing_g17_c01 53 +HighJump/v_HighJump_g17_c02 39 +CliffDiving/v_CliffDiving_g18_c05 21 +BabyCrawling/v_BabyCrawling_g24_c04 3 +YoYo/v_YoYo_g09_c05 100 +SumoWrestling/v_SumoWrestling_g12_c02 86 +PlayingCello/v_PlayingCello_g25_c04 58 +UnevenBars/v_UnevenBars_g16_c01 95 +Mixing/v_Mixing_g12_c03 53 +RockClimbingIndoor/v_RockClimbingIndoor_g10_c02 73 +PlayingGuitar/v_PlayingGuitar_g18_c07 62 
+BaseballPitch/v_BaseballPitch_g09_c06 6 +Rowing/v_Rowing_g12_c07 75 +JugglingBalls/v_JugglingBalls_g14_c03 45 +CleanAndJerk/v_CleanAndJerk_g24_c04 20 +SoccerJuggling/v_SoccerJuggling_g14_c06 83 +BreastStroke/v_BreastStroke_g19_c03 18 +FrisbeeCatch/v_FrisbeeCatch_g12_c01 30 +Swing/v_Swing_g24_c01 88 +HeadMassage/v_HeadMassage_g12_c04 38 +TrampolineJumping/v_TrampolineJumping_g18_c02 93 +Kayaking/v_Kayaking_g11_c02 48 +Nunchucks/v_Nunchucks_g13_c01 55 +FieldHockeyPenalty/v_FieldHockeyPenalty_g21_c01 28 +Bowling/v_Bowling_g15_c06 15 +Hammering/v_Hammering_g09_c06 34 +FrisbeeCatch/v_FrisbeeCatch_g15_c03 30 +Nunchucks/v_Nunchucks_g20_c03 55 +CliffDiving/v_CliffDiving_g16_c03 21 +Biking/v_Biking_g20_c06 10 +Bowling/v_Bowling_g17_c02 15 +JavelinThrow/v_JavelinThrow_g15_c03 44 +PlayingDaf/v_PlayingDaf_g19_c06 59 +PullUps/v_PullUps_g12_c03 69 +SkateBoarding/v_SkateBoarding_g25_c05 79 +PullUps/v_PullUps_g25_c02 69 +HighJump/v_HighJump_g21_c03 39 +Biking/v_Biking_g22_c05 10 +CricketShot/v_CricketShot_g17_c01 23 +Haircut/v_Haircut_g08_c01 33 +FrontCrawl/v_FrontCrawl_g21_c02 31 +StillRings/v_StillRings_g10_c01 85 +TrampolineJumping/v_TrampolineJumping_g12_c01 93 +CricketBowling/v_CricketBowling_g17_c03 22 +BandMarching/v_BandMarching_g16_c04 5 +BaseballPitch/v_BaseballPitch_g22_c04 6 +JumpRope/v_JumpRope_g23_c05 47 +JavelinThrow/v_JavelinThrow_g23_c04 44 +HandstandPushups/v_HandStandPushups_g24_c03 36 +BodyWeightSquats/v_BodyWeightSquats_g15_c04 14 +Surfing/v_Surfing_g24_c02 87 +PizzaTossing/v_PizzaTossing_g16_c03 57 +PommelHorse/v_PommelHorse_g10_c04 68 +HammerThrow/v_HammerThrow_g25_c05 35 +PommelHorse/v_PommelHorse_g24_c01 68 +Rafting/v_Rafting_g14_c04 72 +PlayingPiano/v_PlayingPiano_g19_c01 63 +CricketShot/v_CricketShot_g24_c06 23 +HighJump/v_HighJump_g22_c03 39 +FieldHockeyPenalty/v_FieldHockeyPenalty_g23_c04 28 +StillRings/v_StillRings_g18_c02 85 +TennisSwing/v_TennisSwing_g21_c03 91 +HammerThrow/v_HammerThrow_g20_c04 35 +WritingOnBoard/v_WritingOnBoard_g18_c03 99 
+BenchPress/v_BenchPress_g17_c05 9 +BrushingTeeth/v_BrushingTeeth_g10_c03 19 +MoppingFloor/v_MoppingFloor_g15_c04 54 +JumpingJack/v_JumpingJack_g21_c04 46 +TableTennisShot/v_TableTennisShot_g15_c06 89 +ShavingBeard/v_ShavingBeard_g18_c01 77 +BlowDryHair/v_BlowDryHair_g10_c02 12 +PoleVault/v_PoleVault_g22_c02 67 +Mixing/v_Mixing_g09_c04 53 +SoccerJuggling/v_SoccerJuggling_g14_c01 83 +VolleyballSpiking/v_VolleyballSpiking_g13_c05 96 +CricketBowling/v_CricketBowling_g23_c03 22 +BabyCrawling/v_BabyCrawling_g10_c01 3 +PlayingGuitar/v_PlayingGuitar_g10_c03 62 +PushUps/v_PushUps_g19_c03 71 +CricketBowling/v_CricketBowling_g09_c04 22 +Kayaking/v_Kayaking_g15_c03 48 +JavelinThrow/v_JavelinThrow_g08_c01 44 +BodyWeightSquats/v_BodyWeightSquats_g21_c01 14 +Swing/v_Swing_g08_c05 88 +Mixing/v_Mixing_g19_c02 53 +Lunges/v_Lunges_g20_c02 51 +JumpingJack/v_JumpingJack_g25_c07 46 +HandstandPushups/v_HandStandPushups_g18_c01 36 +BlowingCandles/v_BlowingCandles_g12_c02 13 +PlayingGuitar/v_PlayingGuitar_g14_c01 62 +BandMarching/v_BandMarching_g11_c06 5 +BandMarching/v_BandMarching_g17_c04 5 +PlayingDhol/v_PlayingDhol_g21_c04 60 +Skijet/v_Skijet_g10_c02 81 +BalanceBeam/v_BalanceBeam_g21_c03 4 +FrontCrawl/v_FrontCrawl_g23_c07 31 +ShavingBeard/v_ShavingBeard_g23_c03 77 +PlayingDhol/v_PlayingDhol_g23_c06 60 +FloorGymnastics/v_FloorGymnastics_g13_c04 29 +Kayaking/v_Kayaking_g24_c05 48 +Mixing/v_Mixing_g17_c03 53 +Lunges/v_Lunges_g19_c01 51 +RockClimbingIndoor/v_RockClimbingIndoor_g21_c06 73 +TaiChi/v_TaiChi_g25_c03 90 +SkateBoarding/v_SkateBoarding_g15_c02 79 +HammerThrow/v_HammerThrow_g17_c05 35 +Haircut/v_Haircut_g19_c01 33 +BaseballPitch/v_BaseballPitch_g12_c04 6 +HandstandWalking/v_HandstandWalking_g24_c04 37 +Surfing/v_Surfing_g13_c02 87 +CuttingInKitchen/v_CuttingInKitchen_g16_c02 24 +FloorGymnastics/v_FloorGymnastics_g24_c04 29 +Punch/v_Punch_g12_c01 70 +PommelHorse/v_PommelHorse_g24_c03 68 +BrushingTeeth/v_BrushingTeeth_g25_c03 19 +BabyCrawling/v_BabyCrawling_g22_c04 3 
+BoxingSpeedBag/v_BoxingSpeedBag_g16_c02 17 +Biking/v_Biking_g18_c01 10 +PlayingDhol/v_PlayingDhol_g19_c04 60 +JugglingBalls/v_JugglingBalls_g24_c04 45 +GolfSwing/v_GolfSwing_g14_c03 32 +WalkingWithDog/v_WalkingWithDog_g14_c03 97 +BodyWeightSquats/v_BodyWeightSquats_g15_c01 14 +PlayingCello/v_PlayingCello_g22_c01 58 +JumpRope/v_JumpRope_g09_c04 47 +CricketShot/v_CricketShot_g16_c01 23 +Basketball/v_Basketball_g13_c03 7 +HighJump/v_HighJump_g14_c02 39 +Bowling/v_Bowling_g20_c01 15 +WallPushups/v_WallPushups_g17_c05 98 +BlowingCandles/v_BlowingCandles_g22_c02 13 +Nunchucks/v_Nunchucks_g09_c04 55 +Shotput/v_Shotput_g18_c02 78 +LongJump/v_LongJump_g14_c02 50 +MoppingFloor/v_MoppingFloor_g22_c01 54 +BasketballDunk/v_BasketballDunk_g16_c05 8 +SoccerJuggling/v_SoccerJuggling_g16_c05 83 +BaseballPitch/v_BaseballPitch_g14_c02 6 +JumpRope/v_JumpRope_g24_c05 47 +CuttingInKitchen/v_CuttingInKitchen_g12_c03 24 +BalanceBeam/v_BalanceBeam_g08_c04 4 +BabyCrawling/v_BabyCrawling_g13_c02 3 +CliffDiving/v_CliffDiving_g09_c02 21 +Archery/v_Archery_g22_c02 2 +Nunchucks/v_Nunchucks_g19_c04 55 +PlayingGuitar/v_PlayingGuitar_g15_c02 62 +BoxingPunchingBag/v_BoxingPunchingBag_g14_c07 16 +Rafting/v_Rafting_g18_c01 72 +Drumming/v_Drumming_g11_c06 26 +BlowingCandles/v_BlowingCandles_g12_c03 13 +PlayingFlute/v_PlayingFlute_g24_c05 61 +RockClimbingIndoor/v_RockClimbingIndoor_g25_c01 73 +BaseballPitch/v_BaseballPitch_g17_c04 6 +Diving/v_Diving_g15_c01 25 +TaiChi/v_TaiChi_g21_c04 90 +HorseRiding/v_HorseRiding_g25_c06 41 +Knitting/v_Knitting_g17_c06 49 +HandstandPushups/v_HandStandPushups_g22_c04 36 +Rowing/v_Rowing_g15_c02 75 +MoppingFloor/v_MoppingFloor_g22_c04 54 +FrisbeeCatch/v_FrisbeeCatch_g20_c04 30 +Shotput/v_Shotput_g11_c04 78 +BaseballPitch/v_BaseballPitch_g11_c01 6 +Swing/v_Swing_g21_c01 88 +RockClimbingIndoor/v_RockClimbingIndoor_g12_c04 73 +Knitting/v_Knitting_g25_c06 49 +BlowingCandles/v_BlowingCandles_g21_c04 13 +SoccerPenalty/v_SoccerPenalty_g19_c01 84 
+PlayingViolin/v_PlayingViolin_g24_c04 66 +BrushingTeeth/v_BrushingTeeth_g11_c03 19 +BoxingSpeedBag/v_BoxingSpeedBag_g17_c04 17 +PlayingDaf/v_PlayingDaf_g15_c04 59 +TaiChi/v_TaiChi_g09_c02 90 +WalkingWithDog/v_WalkingWithDog_g09_c01 97 +Hammering/v_Hammering_g14_c04 34 +Lunges/v_Lunges_g10_c04 51 +Rowing/v_Rowing_g17_c03 75 +SalsaSpin/v_SalsaSpin_g15_c04 76 +BoxingPunchingBag/v_BoxingPunchingBag_g09_c01 16 +UnevenBars/v_UnevenBars_g11_c03 95 +Haircut/v_Haircut_g09_c06 33 +ApplyLipstick/v_ApplyLipstick_g20_c02 1 +HighJump/v_HighJump_g24_c04 39 +HeadMassage/v_HeadMassage_g20_c04 38 +BrushingTeeth/v_BrushingTeeth_g23_c05 19 +Typing/v_Typing_g11_c02 94 +PlayingTabla/v_PlayingTabla_g17_c01 65 +JavelinThrow/v_JavelinThrow_g12_c02 44 +HeadMassage/v_HeadMassage_g12_c05 38 +LongJump/v_LongJump_g11_c05 50 +YoYo/v_YoYo_g16_c04 100 +SkyDiving/v_SkyDiving_g15_c04 82 +BabyCrawling/v_BabyCrawling_g20_c06 3 +SoccerJuggling/v_SoccerJuggling_g19_c01 83 +PullUps/v_PullUps_g16_c01 69 +PlayingSitar/v_PlayingSitar_g12_c04 64 +HeadMassage/v_HeadMassage_g08_c05 38 +Haircut/v_Haircut_g15_c02 33 +CuttingInKitchen/v_CuttingInKitchen_g25_c06 24 +Lunges/v_Lunges_g21_c02 51 +BrushingTeeth/v_BrushingTeeth_g15_c01 19 +WritingOnBoard/v_WritingOnBoard_g21_c06 99 +PommelHorse/v_PommelHorse_g24_c02 68 +Typing/v_Typing_g08_c04 94 +BabyCrawling/v_BabyCrawling_g09_c04 3 +Billiards/v_Billiards_g17_c03 11 +Punch/v_Punch_g12_c02 70 +TableTennisShot/v_TableTennisShot_g13_c03 89 +TableTennisShot/v_TableTennisShot_g24_c06 89 +Mixing/v_Mixing_g20_c02 53 +FieldHockeyPenalty/v_FieldHockeyPenalty_g22_c04 28 +BandMarching/v_BandMarching_g16_c05 5 +LongJump/v_LongJump_g15_c05 50 +HulaHoop/v_HulaHoop_g09_c03 42 +Skijet/v_Skijet_g08_c02 81 +ApplyLipstick/v_ApplyLipstick_g08_c02 1 +ShavingBeard/v_ShavingBeard_g20_c07 77 +SoccerPenalty/v_SoccerPenalty_g09_c03 84 +PlayingDhol/v_PlayingDhol_g14_c01 60 +Typing/v_Typing_g14_c01 94 +CuttingInKitchen/v_CuttingInKitchen_g09_c04 24 +PlayingGuitar/v_PlayingGuitar_g10_c01 62 
+StillRings/v_StillRings_g20_c04 85 +PlayingDaf/v_PlayingDaf_g10_c03 59 +HandstandPushups/v_HandStandPushups_g11_c01 36 +SoccerJuggling/v_SoccerJuggling_g25_c02 83 +UnevenBars/v_UnevenBars_g08_c04 95 +SoccerPenalty/v_SoccerPenalty_g21_c04 84 +TennisSwing/v_TennisSwing_g16_c07 91 +YoYo/v_YoYo_g21_c06 100 +Fencing/v_Fencing_g18_c01 27 +CricketShot/v_CricketShot_g18_c03 23 +CuttingInKitchen/v_CuttingInKitchen_g25_c04 24 +Typing/v_Typing_g22_c02 94 +Biking/v_Biking_g13_c03 10 +TennisSwing/v_TennisSwing_g09_c06 91 +HandstandWalking/v_HandstandWalking_g19_c04 37 +JumpRope/v_JumpRope_g19_c02 47 +Drumming/v_Drumming_g09_c04 26 +HorseRiding/v_HorseRiding_g22_c02 41 +RockClimbingIndoor/v_RockClimbingIndoor_g24_c04 73 +Biking/v_Biking_g13_c02 10 +BenchPress/v_BenchPress_g25_c01 9 +Basketball/v_Basketball_g14_c04 7 +Punch/v_Punch_g25_c04 70 +Rowing/v_Rowing_g15_c07 75 +Nunchucks/v_Nunchucks_g17_c03 55 +Haircut/v_Haircut_g21_c04 33 +PullUps/v_PullUps_g11_c01 69 +IceDancing/v_IceDancing_g25_c01 43 +WalkingWithDog/v_WalkingWithDog_g25_c03 97 +WallPushups/v_WallPushups_g13_c05 98 +HorseRiding/v_HorseRiding_g09_c02 41 +WalkingWithDog/v_WalkingWithDog_g09_c05 97 +WallPushups/v_WallPushups_g16_c06 98 +FrontCrawl/v_FrontCrawl_g15_c04 31 +BabyCrawling/v_BabyCrawling_g19_c04 3 +SumoWrestling/v_SumoWrestling_g12_c04 86 +CricketBowling/v_CricketBowling_g22_c01 22 +Shotput/v_Shotput_g16_c03 78 +SkyDiving/v_SkyDiving_g18_c05 82 +RopeClimbing/v_RopeClimbing_g08_c02 74 +BodyWeightSquats/v_BodyWeightSquats_g19_c02 14 +UnevenBars/v_UnevenBars_g19_c04 95 +BlowingCandles/v_BlowingCandles_g11_c01 13 +GolfSwing/v_GolfSwing_g15_c02 32 +SalsaSpin/v_SalsaSpin_g20_c05 76 +ThrowDiscus/v_ThrowDiscus_g20_c04 92 +Basketball/v_Basketball_g11_c02 7 +ShavingBeard/v_ShavingBeard_g12_c05 77 +BoxingSpeedBag/v_BoxingSpeedBag_g22_c03 17 +PoleVault/v_PoleVault_g18_c03 67 +Diving/v_Diving_g08_c04 25 +HorseRiding/v_HorseRiding_g10_c02 41 +Biking/v_Biking_g22_c04 10 +JumpRope/v_JumpRope_g17_c05 47 
+Fencing/v_Fencing_g17_c03 27 +BodyWeightSquats/v_BodyWeightSquats_g10_c05 14 +WallPushups/v_WallPushups_g10_c02 98 +PlayingViolin/v_PlayingViolin_g17_c03 66 +SoccerJuggling/v_SoccerJuggling_g15_c02 83 +BaseballPitch/v_BaseballPitch_g12_c01 6 +ShavingBeard/v_ShavingBeard_g15_c05 77 +CuttingInKitchen/v_CuttingInKitchen_g23_c02 24 +BreastStroke/v_BreastStroke_g24_c04 18 +IceDancing/v_IceDancing_g08_c05 43 +HeadMassage/v_HeadMassage_g19_c02 38 +Bowling/v_Bowling_g15_c04 15 +CricketBowling/v_CricketBowling_g11_c01 22 +BreastStroke/v_BreastStroke_g09_c01 18 +Fencing/v_Fencing_g21_c02 27 +LongJump/v_LongJump_g10_c07 50 +HorseRace/v_HorseRace_g25_c02 40 +Nunchucks/v_Nunchucks_g22_c03 55 +Drumming/v_Drumming_g24_c03 26 +PizzaTossing/v_PizzaTossing_g23_c04 57 +WallPushups/v_WallPushups_g24_c01 98 +JugglingBalls/v_JugglingBalls_g20_c03 45 +CleanAndJerk/v_CleanAndJerk_g15_c01 20 +SoccerJuggling/v_SoccerJuggling_g13_c05 83 +BoxingSpeedBag/v_BoxingSpeedBag_g13_c01 17 +Punch/v_Punch_g12_c03 70 +BasketballDunk/v_BasketballDunk_g08_c04 8 +Drumming/v_Drumming_g23_c06 26 +LongJump/v_LongJump_g13_c01 50 +Haircut/v_Haircut_g25_c01 33 +SalsaSpin/v_SalsaSpin_g24_c03 76 +Archery/v_Archery_g16_c02 2 +UnevenBars/v_UnevenBars_g15_c04 95 +Billiards/v_Billiards_g12_c05 11 +Haircut/v_Haircut_g20_c06 33 +JumpRope/v_JumpRope_g14_c04 47 +PlayingDhol/v_PlayingDhol_g25_c06 60 +RopeClimbing/v_RopeClimbing_g22_c04 74 +HeadMassage/v_HeadMassage_g11_c01 38 +BalanceBeam/v_BalanceBeam_g20_c03 4 +PizzaTossing/v_PizzaTossing_g24_c06 57 +StillRings/v_StillRings_g25_c05 85 +LongJump/v_LongJump_g19_c06 50 +Shotput/v_Shotput_g17_c02 78 +ApplyEyeMakeup/v_ApplyEyeMakeup_g25_c01 0 +PlayingViolin/v_PlayingViolin_g25_c03 66 +UnevenBars/v_UnevenBars_g20_c04 95 +HorseRiding/v_HorseRiding_g15_c04 41 +BrushingTeeth/v_BrushingTeeth_g12_c06 19 +Fencing/v_Fencing_g24_c02 27 +Basketball/v_Basketball_g15_c01 7 +Lunges/v_Lunges_g19_c06 51 +BrushingTeeth/v_BrushingTeeth_g22_c05 19 
+FieldHockeyPenalty/v_FieldHockeyPenalty_g19_c04 28 +CricketShot/v_CricketShot_g08_c06 23 +BreastStroke/v_BreastStroke_g20_c03 18 +CleanAndJerk/v_CleanAndJerk_g21_c03 20 +PlayingViolin/v_PlayingViolin_g12_c02 66 +Rowing/v_Rowing_g23_c05 75 +CricketShot/v_CricketShot_g10_c04 23 +HorseRiding/v_HorseRiding_g16_c01 41 +BandMarching/v_BandMarching_g11_c03 5 +Billiards/v_Billiards_g14_c02 11 +IceDancing/v_IceDancing_g11_c02 43 +TrampolineJumping/v_TrampolineJumping_g10_c03 93 +Hammering/v_Hammering_g25_c01 34 +PlayingCello/v_PlayingCello_g21_c01 58 +WallPushups/v_WallPushups_g19_c03 98 +TrampolineJumping/v_TrampolineJumping_g13_c04 93 +TrampolineJumping/v_TrampolineJumping_g15_c01 93 +SalsaSpin/v_SalsaSpin_g16_c05 76 +CleanAndJerk/v_CleanAndJerk_g20_c03 20 +JavelinThrow/v_JavelinThrow_g22_c03 44 +PizzaTossing/v_PizzaTossing_g18_c03 57 +JumpingJack/v_JumpingJack_g17_c02 46 +HighJump/v_HighJump_g10_c03 39 +RopeClimbing/v_RopeClimbing_g22_c03 74 +PlayingDhol/v_PlayingDhol_g14_c06 60 +Typing/v_Typing_g19_c03 94 +BodyWeightSquats/v_BodyWeightSquats_g09_c02 14 +ParallelBars/v_ParallelBars_g16_c02 56 +ThrowDiscus/v_ThrowDiscus_g23_c01 92 +RockClimbingIndoor/v_RockClimbingIndoor_g25_c04 73 +PlayingCello/v_PlayingCello_g08_c01 58 +BasketballDunk/v_BasketballDunk_g12_c01 8 +BreastStroke/v_BreastStroke_g18_c02 18 +BrushingTeeth/v_BrushingTeeth_g11_c04 19 +GolfSwing/v_GolfSwing_g14_c02 32 +GolfSwing/v_GolfSwing_g16_c02 32 +PlayingFlute/v_PlayingFlute_g24_c01 61 +Kayaking/v_Kayaking_g13_c03 48 +GolfSwing/v_GolfSwing_g10_c02 32 +MilitaryParade/v_MilitaryParade_g08_c02 52 +HammerThrow/v_HammerThrow_g22_c05 35 +Haircut/v_Haircut_g08_c06 33 +BlowDryHair/v_BlowDryHair_g12_c02 12 +TaiChi/v_TaiChi_g08_c02 90 +Drumming/v_Drumming_g17_c06 26 +BlowingCandles/v_BlowingCandles_g20_c04 13 +Kayaking/v_Kayaking_g25_c03 48 +HandstandPushups/v_HandStandPushups_g11_c06 36 +PlayingGuitar/v_PlayingGuitar_g20_c04 62 +PlayingPiano/v_PlayingPiano_g24_c04 63 +HighJump/v_HighJump_g10_c04 39 
+TableTennisShot/v_TableTennisShot_g23_c02 89 +CricketShot/v_CricketShot_g09_c03 23 +Kayaking/v_Kayaking_g15_c06 48 +ShavingBeard/v_ShavingBeard_g20_c05 77 +FrontCrawl/v_FrontCrawl_g20_c01 31 +BasketballDunk/v_BasketballDunk_g11_c01 8 +CuttingInKitchen/v_CuttingInKitchen_g23_c01 24 +FrontCrawl/v_FrontCrawl_g21_c04 31 +WritingOnBoard/v_WritingOnBoard_g24_c01 99 +PlayingTabla/v_PlayingTabla_g22_c02 65 +PlayingDhol/v_PlayingDhol_g12_c05 60 +ApplyLipstick/v_ApplyLipstick_g21_c01 1 +WritingOnBoard/v_WritingOnBoard_g16_c07 99 +JumpingJack/v_JumpingJack_g19_c06 46 +WalkingWithDog/v_WalkingWithDog_g25_c02 97 +YoYo/v_YoYo_g25_c05 100 +CliffDiving/v_CliffDiving_g09_c05 21 +BrushingTeeth/v_BrushingTeeth_g14_c03 19 +HeadMassage/v_HeadMassage_g23_c01 38 +PlayingDhol/v_PlayingDhol_g19_c03 60 +YoYo/v_YoYo_g12_c04 100 +PlayingPiano/v_PlayingPiano_g14_c03 63 +Shotput/v_Shotput_g15_c03 78 +VolleyballSpiking/v_VolleyballSpiking_g09_c05 96 +SumoWrestling/v_SumoWrestling_g11_c04 86 +Skiing/v_Skiing_g11_c03 80 +GolfSwing/v_GolfSwing_g17_c04 32 +BoxingSpeedBag/v_BoxingSpeedBag_g18_c04 17 +TrampolineJumping/v_TrampolineJumping_g22_c02 93 +BreastStroke/v_BreastStroke_g16_c03 18 +TrampolineJumping/v_TrampolineJumping_g22_c01 93 +PlayingDhol/v_PlayingDhol_g09_c03 60 +PommelHorse/v_PommelHorse_g12_c05 68 +HorseRiding/v_HorseRiding_g20_c03 41 +HighJump/v_HighJump_g12_c04 39 +HandstandPushups/v_HandStandPushups_g12_c06 36 +PlayingFlute/v_PlayingFlute_g23_c06 61 +PlayingSitar/v_PlayingSitar_g19_c05 64 +Surfing/v_Surfing_g15_c02 87 +FieldHockeyPenalty/v_FieldHockeyPenalty_g14_c07 28 +WalkingWithDog/v_WalkingWithDog_g17_c03 97 +BaseballPitch/v_BaseballPitch_g16_c02 6 +JavelinThrow/v_JavelinThrow_g20_c03 44 +Skiing/v_Skiing_g20_c05 80 +TableTennisShot/v_TableTennisShot_g18_c04 89 +CleanAndJerk/v_CleanAndJerk_g25_c04 20 +WallPushups/v_WallPushups_g11_c01 98 +Punch/v_Punch_g08_c06 70 +BreastStroke/v_BreastStroke_g22_c03 18 +YoYo/v_YoYo_g09_c06 100 +Drumming/v_Drumming_g22_c02 26 
+PlayingDhol/v_PlayingDhol_g20_c01 60 +ApplyLipstick/v_ApplyLipstick_g13_c04 1 +BandMarching/v_BandMarching_g22_c01 5 +Knitting/v_Knitting_g15_c01 49 +Kayaking/v_Kayaking_g25_c02 48 +Diving/v_Diving_g23_c01 25 +TennisSwing/v_TennisSwing_g09_c07 91 +ApplyLipstick/v_ApplyLipstick_g15_c01 1 +TrampolineJumping/v_TrampolineJumping_g19_c02 93 +Punch/v_Punch_g12_c07 70 +FloorGymnastics/v_FloorGymnastics_g20_c04 29 +Billiards/v_Billiards_g23_c02 11 +Archery/v_Archery_g12_c01 2 +HorseRace/v_HorseRace_g09_c03 40 +PlayingCello/v_PlayingCello_g20_c04 58 +BoxingSpeedBag/v_BoxingSpeedBag_g21_c02 17 +Bowling/v_Bowling_g11_c02 15 +TaiChi/v_TaiChi_g17_c01 90 +WallPushups/v_WallPushups_g18_c02 98 +SkyDiving/v_SkyDiving_g23_c02 82 +BabyCrawling/v_BabyCrawling_g09_c05 3 +JavelinThrow/v_JavelinThrow_g20_c04 44 +JavelinThrow/v_JavelinThrow_g11_c05 44 +HandstandWalking/v_HandstandWalking_g10_c02 37 +PlayingFlute/v_PlayingFlute_g24_c03 61 +HorseRace/v_HorseRace_g18_c05 40 +Punch/v_Punch_g10_c01 70 +ApplyEyeMakeup/v_ApplyEyeMakeup_g10_c02 0 +Billiards/v_Billiards_g14_c01 11 +Knitting/v_Knitting_g15_c04 49 +BoxingPunchingBag/v_BoxingPunchingBag_g16_c07 16 +GolfSwing/v_GolfSwing_g23_c04 32 +RockClimbingIndoor/v_RockClimbingIndoor_g11_c01 73 +PlayingCello/v_PlayingCello_g10_c04 58 +BoxingPunchingBag/v_BoxingPunchingBag_g12_c01 16 +HammerThrow/v_HammerThrow_g15_c04 35 +Bowling/v_Bowling_g22_c03 15 +PlayingGuitar/v_PlayingGuitar_g15_c01 62 +Swing/v_Swing_g16_c02 88 +HandstandWalking/v_HandstandWalking_g21_c02 37 +BoxingSpeedBag/v_BoxingSpeedBag_g16_c01 17 +SkateBoarding/v_SkateBoarding_g10_c01 79 +BlowDryHair/v_BlowDryHair_g08_c07 12 +HammerThrow/v_HammerThrow_g23_c03 35 +Rowing/v_Rowing_g12_c05 75 +Lunges/v_Lunges_g21_c05 51 +PlayingDaf/v_PlayingDaf_g16_c02 59 +BasketballDunk/v_BasketballDunk_g08_c03 8 +PullUps/v_PullUps_g14_c01 69 +CricketBowling/v_CricketBowling_g15_c04 22 +SoccerPenalty/v_SoccerPenalty_g15_c07 84 +Billiards/v_Billiards_g13_c02 11 +Drumming/v_Drumming_g19_c05 26 
+CricketBowling/v_CricketBowling_g12_c02 22 +Rowing/v_Rowing_g08_c04 75 +HulaHoop/v_HulaHoop_g17_c04 42 +PullUps/v_PullUps_g15_c02 69 +Mixing/v_Mixing_g15_c04 53 +Hammering/v_Hammering_g24_c07 34 +Drumming/v_Drumming_g21_c02 26 +JumpRope/v_JumpRope_g13_c04 47 +YoYo/v_YoYo_g12_c03 100 +Swing/v_Swing_g10_c02 88 +UnevenBars/v_UnevenBars_g23_c02 95 +VolleyballSpiking/v_VolleyballSpiking_g21_c03 96 +GolfSwing/v_GolfSwing_g20_c07 32 +PlayingSitar/v_PlayingSitar_g17_c07 64 +BasketballDunk/v_BasketballDunk_g12_c02 8 +Nunchucks/v_Nunchucks_g24_c04 55 +Kayaking/v_Kayaking_g24_c04 48 +TrampolineJumping/v_TrampolineJumping_g12_c03 93 +PlayingDhol/v_PlayingDhol_g10_c01 60 +HighJump/v_HighJump_g22_c04 39 +HammerThrow/v_HammerThrow_g18_c04 35 +GolfSwing/v_GolfSwing_g25_c01 32 +CricketShot/v_CricketShot_g22_c05 23 +CricketBowling/v_CricketBowling_g21_c06 22 +HulaHoop/v_HulaHoop_g13_c03 42 +SoccerPenalty/v_SoccerPenalty_g12_c02 84 +Punch/v_Punch_g24_c02 70 +CuttingInKitchen/v_CuttingInKitchen_g24_c02 24 +WallPushups/v_WallPushups_g24_c03 98 +PizzaTossing/v_PizzaTossing_g24_c05 57 +PlayingPiano/v_PlayingPiano_g10_c04 63 +WritingOnBoard/v_WritingOnBoard_g15_c01 99 +Nunchucks/v_Nunchucks_g17_c06 55 +Lunges/v_Lunges_g24_c03 51 +SoccerPenalty/v_SoccerPenalty_g10_c01 84 +HorseRace/v_HorseRace_g20_c03 40 +BasketballDunk/v_BasketballDunk_g09_c05 8 +PlayingFlute/v_PlayingFlute_g11_c03 61 +JugglingBalls/v_JugglingBalls_g16_c02 45 +PommelHorse/v_PommelHorse_g22_c07 68 +SkateBoarding/v_SkateBoarding_g15_c05 79 +Rowing/v_Rowing_g10_c07 75 +HandstandWalking/v_HandstandWalking_g15_c01 37 +WallPushups/v_WallPushups_g20_c01 98 +CuttingInKitchen/v_CuttingInKitchen_g23_c03 24 +UnevenBars/v_UnevenBars_g09_c04 95 +SalsaSpin/v_SalsaSpin_g15_c02 76 +HorseRiding/v_HorseRiding_g23_c02 41 +WalkingWithDog/v_WalkingWithDog_g08_c02 97 +SkyDiving/v_SkyDiving_g10_c03 82 +TaiChi/v_TaiChi_g12_c01 90 +CleanAndJerk/v_CleanAndJerk_g13_c02 20 +CricketShot/v_CricketShot_g14_c05 23 +Skijet/v_Skijet_g18_c01 81 
+SkyDiving/v_SkyDiving_g10_c01 82 +SkyDiving/v_SkyDiving_g13_c04 82 +JavelinThrow/v_JavelinThrow_g18_c01 44 +Billiards/v_Billiards_g11_c01 11 +YoYo/v_YoYo_g24_c04 100 +Billiards/v_Billiards_g09_c03 11 +FrisbeeCatch/v_FrisbeeCatch_g23_c04 30 +BasketballDunk/v_BasketballDunk_g16_c06 8 +PlayingDaf/v_PlayingDaf_g12_c01 59 +CliffDiving/v_CliffDiving_g09_c03 21 +HorseRace/v_HorseRace_g13_c02 40 +TennisSwing/v_TennisSwing_g16_c01 91 +CleanAndJerk/v_CleanAndJerk_g19_c01 20 +ThrowDiscus/v_ThrowDiscus_g18_c06 92 +HorseRace/v_HorseRace_g22_c02 40 +FloorGymnastics/v_FloorGymnastics_g19_c02 29 +JumpRope/v_JumpRope_g14_c01 47 +RopeClimbing/v_RopeClimbing_g23_c03 74 +BreastStroke/v_BreastStroke_g08_c04 18 +PlayingCello/v_PlayingCello_g20_c02 58 +PlayingViolin/v_PlayingViolin_g14_c01 66 +HulaHoop/v_HulaHoop_g09_c05 42 +PizzaTossing/v_PizzaTossing_g17_c02 57 +Rafting/v_Rafting_g23_c01 72 +ApplyEyeMakeup/v_ApplyEyeMakeup_g19_c01 0 +PlayingFlute/v_PlayingFlute_g16_c01 61 +BrushingTeeth/v_BrushingTeeth_g19_c04 19 +Nunchucks/v_Nunchucks_g17_c05 55 +FieldHockeyPenalty/v_FieldHockeyPenalty_g16_c04 28 +Surfing/v_Surfing_g09_c01 87 +Haircut/v_Haircut_g19_c04 33 +RockClimbingIndoor/v_RockClimbingIndoor_g23_c01 73 +HandstandPushups/v_HandStandPushups_g21_c06 36 +SumoWrestling/v_SumoWrestling_g12_c01 86 +LongJump/v_LongJump_g09_c03 50 +BlowDryHair/v_BlowDryHair_g19_c04 12 +FloorGymnastics/v_FloorGymnastics_g11_c02 29 +Knitting/v_Knitting_g09_c04 49 +FrontCrawl/v_FrontCrawl_g17_c06 31 +PlayingPiano/v_PlayingPiano_g09_c01 63 +JumpRope/v_JumpRope_g22_c02 47 +Billiards/v_Billiards_g22_c02 11 +Punch/v_Punch_g16_c07 70 +ShavingBeard/v_ShavingBeard_g21_c04 77 +HammerThrow/v_HammerThrow_g11_c02 35 +Billiards/v_Billiards_g20_c04 11 +RockClimbingIndoor/v_RockClimbingIndoor_g10_c03 73 +JavelinThrow/v_JavelinThrow_g14_c02 44 +Archery/v_Archery_g24_c02 2 +BlowingCandles/v_BlowingCandles_g11_c02 13 +Hammering/v_Hammering_g22_c03 34 +FrontCrawl/v_FrontCrawl_g13_c04 31 +YoYo/v_YoYo_g21_c03 100 
+MoppingFloor/v_MoppingFloor_g25_c01 54 +PoleVault/v_PoleVault_g16_c01 67 +BlowingCandles/v_BlowingCandles_g10_c03 13 +ApplyLipstick/v_ApplyLipstick_g11_c04 1 +StillRings/v_StillRings_g10_c04 85 +CricketShot/v_CricketShot_g24_c03 23 +PommelHorse/v_PommelHorse_g16_c01 68 +TableTennisShot/v_TableTennisShot_g22_c02 89 +ThrowDiscus/v_ThrowDiscus_g22_c03 92 +MilitaryParade/v_MilitaryParade_g17_c03 52 +Mixing/v_Mixing_g21_c03 53 +PizzaTossing/v_PizzaTossing_g11_c04 57 +Diving/v_Diving_g16_c02 25 +SoccerPenalty/v_SoccerPenalty_g16_c02 84 +Haircut/v_Haircut_g22_c06 33 +YoYo/v_YoYo_g11_c03 100 +Archery/v_Archery_g17_c01 2 +PlayingDhol/v_PlayingDhol_g10_c07 60 +HighJump/v_HighJump_g14_c03 39 +JavelinThrow/v_JavelinThrow_g15_c06 44 +SoccerJuggling/v_SoccerJuggling_g14_c05 83 +IceDancing/v_IceDancing_g24_c07 43 +Drumming/v_Drumming_g19_c03 26 +Kayaking/v_Kayaking_g10_c03 48 +MilitaryParade/v_MilitaryParade_g25_c06 52 +ApplyLipstick/v_ApplyLipstick_g18_c02 1 +CricketBowling/v_CricketBowling_g09_c05 22 +RockClimbingIndoor/v_RockClimbingIndoor_g13_c06 73 +CricketBowling/v_CricketBowling_g10_c02 22 +CuttingInKitchen/v_CuttingInKitchen_g22_c04 24 +PlayingCello/v_PlayingCello_g24_c07 58 +JavelinThrow/v_JavelinThrow_g11_c04 44 +BoxingSpeedBag/v_BoxingSpeedBag_g08_c03 17 +PlayingGuitar/v_PlayingGuitar_g12_c02 62 +SkateBoarding/v_SkateBoarding_g19_c04 79 +JumpRope/v_JumpRope_g22_c06 47 +RockClimbingIndoor/v_RockClimbingIndoor_g11_c04 73 +PlayingSitar/v_PlayingSitar_g20_c05 64 +WalkingWithDog/v_WalkingWithDog_g23_c04 97 +BalanceBeam/v_BalanceBeam_g16_c04 4 +PlayingDaf/v_PlayingDaf_g21_c02 59 +Surfing/v_Surfing_g19_c01 87 +Surfing/v_Surfing_g08_c05 87 +HandstandPushups/v_HandStandPushups_g10_c03 36 +Billiards/v_Billiards_g11_c07 11 +FieldHockeyPenalty/v_FieldHockeyPenalty_g25_c06 28 +JumpingJack/v_JumpingJack_g13_c01 46 +PoleVault/v_PoleVault_g09_c04 67 +TennisSwing/v_TennisSwing_g10_c07 91 +Knitting/v_Knitting_g18_c01 49 +BrushingTeeth/v_BrushingTeeth_g18_c02 19 
+Shotput/v_Shotput_g19_c02 78 +PoleVault/v_PoleVault_g08_c01 67 +Nunchucks/v_Nunchucks_g10_c01 55 +Bowling/v_Bowling_g22_c07 15 +PlayingFlute/v_PlayingFlute_g22_c02 61 +JumpRope/v_JumpRope_g24_c06 47 +Billiards/v_Billiards_g19_c07 11 +Punch/v_Punch_g21_c07 70 +HammerThrow/v_HammerThrow_g19_c01 35 +IceDancing/v_IceDancing_g08_c06 43 +ThrowDiscus/v_ThrowDiscus_g08_c04 92 +Knitting/v_Knitting_g09_c01 49 +PlayingGuitar/v_PlayingGuitar_g13_c03 62 +BlowDryHair/v_BlowDryHair_g10_c01 12 +PlayingSitar/v_PlayingSitar_g11_c01 64 +PlayingViolin/v_PlayingViolin_g14_c02 66 +PoleVault/v_PoleVault_g22_c03 67 +Fencing/v_Fencing_g08_c03 27 +Diving/v_Diving_g14_c01 25 +Diving/v_Diving_g11_c03 25 +BabyCrawling/v_BabyCrawling_g25_c02 3 +PommelHorse/v_PommelHorse_g19_c06 68 +Basketball/v_Basketball_g12_c01 7 +JumpingJack/v_JumpingJack_g21_c02 46 +Lunges/v_Lunges_g11_c05 51 +VolleyballSpiking/v_VolleyballSpiking_g11_c03 96 +PoleVault/v_PoleVault_g17_c01 67 +StillRings/v_StillRings_g25_c07 85 +Diving/v_Diving_g20_c04 25 +PlayingPiano/v_PlayingPiano_g19_c02 63 +FrontCrawl/v_FrontCrawl_g08_c05 31 +HorseRiding/v_HorseRiding_g22_c05 41 +TaiChi/v_TaiChi_g16_c01 90 +UnevenBars/v_UnevenBars_g15_c02 95 +BlowingCandles/v_BlowingCandles_g20_c01 13 +Nunchucks/v_Nunchucks_g21_c05 55 +CricketBowling/v_CricketBowling_g21_c03 22 +Punch/v_Punch_g22_c07 70 +BandMarching/v_BandMarching_g17_c01 5 +BodyWeightSquats/v_BodyWeightSquats_g22_c02 14 +HeadMassage/v_HeadMassage_g14_c03 38 +SkateBoarding/v_SkateBoarding_g21_c01 79 +Lunges/v_Lunges_g14_c07 51 +PlayingViolin/v_PlayingViolin_g23_c01 66 +TennisSwing/v_TennisSwing_g14_c04 91 +Drumming/v_Drumming_g14_c06 26 +BodyWeightSquats/v_BodyWeightSquats_g16_c02 14 +GolfSwing/v_GolfSwing_g10_c03 32 +Surfing/v_Surfing_g21_c03 87 +IceDancing/v_IceDancing_g09_c03 43 +MoppingFloor/v_MoppingFloor_g17_c02 54 +PlayingPiano/v_PlayingPiano_g12_c03 63 +HandstandWalking/v_HandstandWalking_g24_c02 37 +BodyWeightSquats/v_BodyWeightSquats_g10_c01 14 
+PlayingDhol/v_PlayingDhol_g17_c07 60 +ApplyLipstick/v_ApplyLipstick_g16_c02 1 +Archery/v_Archery_g09_c01 2 +PommelHorse/v_PommelHorse_g16_c04 68 +LongJump/v_LongJump_g20_c03 50 +CricketBowling/v_CricketBowling_g22_c03 22 +Basketball/v_Basketball_g09_c04 7 +Haircut/v_Haircut_g22_c04 33 +PullUps/v_PullUps_g17_c03 69 +HeadMassage/v_HeadMassage_g10_c01 38 +JumpRope/v_JumpRope_g19_c04 47 +Skiing/v_Skiing_g24_c01 80 +ApplyEyeMakeup/v_ApplyEyeMakeup_g15_c04 0 +TennisSwing/v_TennisSwing_g19_c03 91 +Punch/v_Punch_g25_c07 70 +GolfSwing/v_GolfSwing_g23_c02 32 +BalanceBeam/v_BalanceBeam_g09_c02 4 +SoccerPenalty/v_SoccerPenalty_g22_c05 84 +PlayingDaf/v_PlayingDaf_g19_c05 59 +PlayingTabla/v_PlayingTabla_g25_c01 65 +Punch/v_Punch_g14_c01 70 +JumpRope/v_JumpRope_g09_c03 47 +StillRings/v_StillRings_g23_c04 85 +SkateBoarding/v_SkateBoarding_g24_c04 79 +PullUps/v_PullUps_g12_c04 69 +SoccerJuggling/v_SoccerJuggling_g16_c04 83 +CleanAndJerk/v_CleanAndJerk_g14_c05 20 +HorseRiding/v_HorseRiding_g14_c05 41 +PlayingDaf/v_PlayingDaf_g16_c01 59 +BoxingPunchingBag/v_BoxingPunchingBag_g20_c04 16 +Billiards/v_Billiards_g13_c03 11 +PlayingTabla/v_PlayingTabla_g13_c01 65 +ApplyEyeMakeup/v_ApplyEyeMakeup_g17_c02 0 +JumpRope/v_JumpRope_g18_c03 47 +PlayingDhol/v_PlayingDhol_g14_c04 60 +SoccerJuggling/v_SoccerJuggling_g14_c03 83 +Rowing/v_Rowing_g15_c03 75 +ApplyEyeMakeup/v_ApplyEyeMakeup_g19_c03 0 +ShavingBeard/v_ShavingBeard_g24_c01 77 +TrampolineJumping/v_TrampolineJumping_g11_c03 93 +BalanceBeam/v_BalanceBeam_g15_c02 4 +Skijet/v_Skijet_g22_c02 81 +JumpRope/v_JumpRope_g09_c02 47 +GolfSwing/v_GolfSwing_g17_c01 32 +ShavingBeard/v_ShavingBeard_g18_c07 77 +CleanAndJerk/v_CleanAndJerk_g20_c01 20 +FrontCrawl/v_FrontCrawl_g21_c01 31 +BenchPress/v_BenchPress_g17_c07 9 +IceDancing/v_IceDancing_g20_c03 43 +TrampolineJumping/v_TrampolineJumping_g16_c02 93 +PlayingDhol/v_PlayingDhol_g23_c04 60 +WritingOnBoard/v_WritingOnBoard_g09_c04 99 +SalsaSpin/v_SalsaSpin_g23_c04 76 +PoleVault/v_PoleVault_g10_c04 67 
+BrushingTeeth/v_BrushingTeeth_g20_c01 19 +StillRings/v_StillRings_g22_c02 85 +Hammering/v_Hammering_g18_c04 34 +TrampolineJumping/v_TrampolineJumping_g14_c01 93 +CricketBowling/v_CricketBowling_g15_c05 22 +HandstandWalking/v_HandstandWalking_g24_c05 37 +Diving/v_Diving_g22_c06 25 +PlayingCello/v_PlayingCello_g15_c02 58 +HammerThrow/v_HammerThrow_g08_c06 35 +CricketShot/v_CricketShot_g24_c07 23 +RockClimbingIndoor/v_RockClimbingIndoor_g18_c04 73 +ApplyLipstick/v_ApplyLipstick_g17_c01 1 +PlayingCello/v_PlayingCello_g20_c03 58 +JavelinThrow/v_JavelinThrow_g16_c06 44 +Lunges/v_Lunges_g23_c05 51 +BlowDryHair/v_BlowDryHair_g14_c02 12 +Skijet/v_Skijet_g11_c03 81 +TrampolineJumping/v_TrampolineJumping_g21_c02 93 +Basketball/v_Basketball_g11_c04 7 +ParallelBars/v_ParallelBars_g12_c02 56 +IceDancing/v_IceDancing_g21_c06 43 +CricketBowling/v_CricketBowling_g09_c06 22 +WalkingWithDog/v_WalkingWithDog_g20_c01 97 +CliffDiving/v_CliffDiving_g14_c06 21 +BoxingPunchingBag/v_BoxingPunchingBag_g24_c07 16 +BaseballPitch/v_BaseballPitch_g25_c01 6 +Lunges/v_Lunges_g17_c02 51 +BoxingSpeedBag/v_BoxingSpeedBag_g15_c01 17 +Billiards/v_Billiards_g11_c02 11 +Mixing/v_Mixing_g25_c04 53 +GolfSwing/v_GolfSwing_g22_c02 32 +GolfSwing/v_GolfSwing_g21_c05 32 +PlayingDhol/v_PlayingDhol_g21_c05 60 +BaseballPitch/v_BaseballPitch_g13_c07 6 +HandstandPushups/v_HandStandPushups_g25_c07 36 +Billiards/v_Billiards_g11_c03 11 +TennisSwing/v_TennisSwing_g23_c03 91 +Knitting/v_Knitting_g11_c04 49 +Skijet/v_Skijet_g16_c01 81 +Rowing/v_Rowing_g20_c01 75 +WritingOnBoard/v_WritingOnBoard_g10_c07 99 +HulaHoop/v_HulaHoop_g09_c02 42 +IceDancing/v_IceDancing_g22_c03 43 +BandMarching/v_BandMarching_g21_c01 5 +BoxingPunchingBag/v_BoxingPunchingBag_g08_c06 16 +Rowing/v_Rowing_g20_c04 75 +SkateBoarding/v_SkateBoarding_g12_c01 79 +SkyDiving/v_SkyDiving_g14_c01 82 +Diving/v_Diving_g22_c03 25 +CuttingInKitchen/v_CuttingInKitchen_g18_c02 24 +Rafting/v_Rafting_g24_c04 72 +MoppingFloor/v_MoppingFloor_g15_c01 54 
+ThrowDiscus/v_ThrowDiscus_g18_c02 92 +PlayingCello/v_PlayingCello_g18_c05 58 +TrampolineJumping/v_TrampolineJumping_g13_c01 93 +HighJump/v_HighJump_g18_c01 39 +TaiChi/v_TaiChi_g10_c04 90 +Nunchucks/v_Nunchucks_g11_c01 55 +Biking/v_Biking_g15_c03 10 +BasketballDunk/v_BasketballDunk_g08_c05 8 +ShavingBeard/v_ShavingBeard_g14_c01 77 +BenchPress/v_BenchPress_g20_c04 9 +HorseRiding/v_HorseRiding_g22_c06 41 +TennisSwing/v_TennisSwing_g09_c05 91 +SoccerJuggling/v_SoccerJuggling_g18_c06 83 +BoxingPunchingBag/v_BoxingPunchingBag_g09_c05 16 +PizzaTossing/v_PizzaTossing_g25_c05 57 +Fencing/v_Fencing_g18_c02 27 +TableTennisShot/v_TableTennisShot_g23_c01 89 +Shotput/v_Shotput_g16_c07 78 +MilitaryParade/v_MilitaryParade_g09_c07 52 +HammerThrow/v_HammerThrow_g09_c05 35 +FloorGymnastics/v_FloorGymnastics_g18_c07 29 +PoleVault/v_PoleVault_g23_c03 67 +HorseRace/v_HorseRace_g11_c06 40 +IceDancing/v_IceDancing_g24_c01 43 +WritingOnBoard/v_WritingOnBoard_g15_c03 99 +BalanceBeam/v_BalanceBeam_g18_c03 4 +WallPushups/v_WallPushups_g14_c04 98 +FrontCrawl/v_FrontCrawl_g10_c03 31 +ApplyEyeMakeup/v_ApplyEyeMakeup_g18_c02 0 +RopeClimbing/v_RopeClimbing_g21_c02 74 +BlowingCandles/v_BlowingCandles_g18_c01 13 +HighJump/v_HighJump_g16_c05 39 +MilitaryParade/v_MilitaryParade_g09_c04 52 +Nunchucks/v_Nunchucks_g14_c02 55 +VolleyballSpiking/v_VolleyballSpiking_g13_c03 96 +TaiChi/v_TaiChi_g13_c03 90 +BaseballPitch/v_BaseballPitch_g13_c01 6 +PlayingDaf/v_PlayingDaf_g14_c06 59 +SkyDiving/v_SkyDiving_g21_c01 82 +PlayingCello/v_PlayingCello_g17_c03 58 +HandstandPushups/v_HandStandPushups_g11_c03 36 +BreastStroke/v_BreastStroke_g08_c02 18 +HulaHoop/v_HulaHoop_g09_c06 42 +PlayingFlute/v_PlayingFlute_g17_c03 61 +Knitting/v_Knitting_g22_c02 49 +Lunges/v_Lunges_g17_c04 51 +Drumming/v_Drumming_g15_c03 26 +PizzaTossing/v_PizzaTossing_g22_c02 57 +Skiing/v_Skiing_g13_c03 80 +HulaHoop/v_HulaHoop_g23_c06 42 +JumpRope/v_JumpRope_g18_c05 47 +PlayingDhol/v_PlayingDhol_g12_c06 60 +Bowling/v_Bowling_g09_c03 15 
+PlayingSitar/v_PlayingSitar_g23_c02 64 +SoccerJuggling/v_SoccerJuggling_g22_c04 83 +BalanceBeam/v_BalanceBeam_g12_c02 4 +ApplyLipstick/v_ApplyLipstick_g10_c04 1 +PushUps/v_PushUps_g16_c04 71 +HorseRace/v_HorseRace_g19_c03 40 +SoccerJuggling/v_SoccerJuggling_g12_c01 83 +BoxingPunchingBag/v_BoxingPunchingBag_g08_c04 16 +Rowing/v_Rowing_g15_c01 75 +PizzaTossing/v_PizzaTossing_g20_c03 57 +BasketballDunk/v_BasketballDunk_g11_c03 8 +RopeClimbing/v_RopeClimbing_g25_c01 74 +PlayingViolin/v_PlayingViolin_g21_c03 66 +ThrowDiscus/v_ThrowDiscus_g08_c02 92 +Bowling/v_Bowling_g12_c02 15 +Hammering/v_Hammering_g24_c01 34 +SkateBoarding/v_SkateBoarding_g16_c01 79 +TennisSwing/v_TennisSwing_g08_c01 91 +RockClimbingIndoor/v_RockClimbingIndoor_g24_c06 73 +BalanceBeam/v_BalanceBeam_g23_c04 4 +BoxingSpeedBag/v_BoxingSpeedBag_g16_c03 17 +Skijet/v_Skijet_g24_c03 81 +UnevenBars/v_UnevenBars_g17_c02 95 +JavelinThrow/v_JavelinThrow_g17_c05 44 +BodyWeightSquats/v_BodyWeightSquats_g16_c04 14 +PlayingGuitar/v_PlayingGuitar_g18_c04 62 +Rowing/v_Rowing_g13_c01 75 +ShavingBeard/v_ShavingBeard_g24_c04 77 +Kayaking/v_Kayaking_g09_c03 48 +BenchPress/v_BenchPress_g09_c01 9 +HeadMassage/v_HeadMassage_g17_c07 38 +Kayaking/v_Kayaking_g14_c02 48 +SkyDiving/v_SkyDiving_g18_c02 82 +JugglingBalls/v_JugglingBalls_g16_c04 45 +PlayingCello/v_PlayingCello_g19_c07 58 +CuttingInKitchen/v_CuttingInKitchen_g18_c01 24 +WallPushups/v_WallPushups_g21_c03 98 +HeadMassage/v_HeadMassage_g08_c07 38 +BabyCrawling/v_BabyCrawling_g15_c04 3 +HulaHoop/v_HulaHoop_g21_c03 42 +BalanceBeam/v_BalanceBeam_g21_c02 4 +HulaHoop/v_HulaHoop_g12_c05 42 +Basketball/v_Basketball_g12_c05 7 +BasketballDunk/v_BasketballDunk_g17_c03 8 +WallPushups/v_WallPushups_g11_c03 98 +Shotput/v_Shotput_g20_c02 78 +Skijet/v_Skijet_g21_c02 81 +BodyWeightSquats/v_BodyWeightSquats_g16_c01 14 +UnevenBars/v_UnevenBars_g14_c02 95 +Skiing/v_Skiing_g10_c01 80 +SoccerPenalty/v_SoccerPenalty_g10_c02 84 +ParallelBars/v_ParallelBars_g09_c02 56 
+SalsaSpin/v_SalsaSpin_g09_c03 76 +PommelHorse/v_PommelHorse_g12_c02 68 +Archery/v_Archery_g09_c06 2 +PoleVault/v_PoleVault_g19_c06 67 +ThrowDiscus/v_ThrowDiscus_g13_c01 92 +Typing/v_Typing_g23_c04 94 +GolfSwing/v_GolfSwing_g24_c01 32 +SkateBoarding/v_SkateBoarding_g18_c03 79 +PommelHorse/v_PommelHorse_g10_c02 68 +PlayingGuitar/v_PlayingGuitar_g21_c02 62 +ThrowDiscus/v_ThrowDiscus_g18_c05 92 +BoxingPunchingBag/v_BoxingPunchingBag_g16_c02 16 +PlayingDaf/v_PlayingDaf_g08_c06 59 +CleanAndJerk/v_CleanAndJerk_g12_c02 20 +Billiards/v_Billiards_g11_c04 11 +BlowingCandles/v_BlowingCandles_g20_c03 13 +BoxingPunchingBag/v_BoxingPunchingBag_g23_c02 16 +ApplyLipstick/v_ApplyLipstick_g21_c05 1 +Skiing/v_Skiing_g10_c04 80 +BalanceBeam/v_BalanceBeam_g08_c02 4 +Typing/v_Typing_g12_c05 94 +WalkingWithDog/v_WalkingWithDog_g13_c03 97 +CliffDiving/v_CliffDiving_g11_c01 21 +JavelinThrow/v_JavelinThrow_g14_c04 44 +SoccerPenalty/v_SoccerPenalty_g12_c07 84 +Archery/v_Archery_g13_c02 2 +ApplyEyeMakeup/v_ApplyEyeMakeup_g20_c03 0 +Biking/v_Biking_g16_c03 10 +Typing/v_Typing_g15_c04 94 +Mixing/v_Mixing_g11_c06 53 +BlowingCandles/v_BlowingCandles_g25_c04 13 +SoccerJuggling/v_SoccerJuggling_g25_c03 83 +WritingOnBoard/v_WritingOnBoard_g23_c03 99 +PushUps/v_PushUps_g24_c02 71 +TableTennisShot/v_TableTennisShot_g14_c03 89 +BaseballPitch/v_BaseballPitch_g21_c01 6 +Skijet/v_Skijet_g11_c02 81 +SkateBoarding/v_SkateBoarding_g12_c02 79 +Swing/v_Swing_g22_c01 88 +Knitting/v_Knitting_g17_c04 49 +Kayaking/v_Kayaking_g18_c02 48 +HulaHoop/v_HulaHoop_g20_c02 42 +IceDancing/v_IceDancing_g12_c07 43 +Punch/v_Punch_g21_c06 70 +Lunges/v_Lunges_g18_c03 51 +CricketShot/v_CricketShot_g17_c02 23 +HulaHoop/v_HulaHoop_g25_c02 42 +Mixing/v_Mixing_g12_c07 53 +BlowDryHair/v_BlowDryHair_g08_c02 12 +ThrowDiscus/v_ThrowDiscus_g15_c04 92 +HulaHoop/v_HulaHoop_g20_c04 42 +SalsaSpin/v_SalsaSpin_g24_c05 76 +PlayingGuitar/v_PlayingGuitar_g12_c06 62 +JugglingBalls/v_JugglingBalls_g12_c02 45 
+BoxingPunchingBag/v_BoxingPunchingBag_g25_c02 16 +Punch/v_Punch_g16_c02 70 +CricketShot/v_CricketShot_g09_c01 23 +Bowling/v_Bowling_g10_c05 15 +SalsaSpin/v_SalsaSpin_g09_c01 76 +SalsaSpin/v_SalsaSpin_g08_c04 76 +HeadMassage/v_HeadMassage_g14_c07 38 +ThrowDiscus/v_ThrowDiscus_g09_c05 92 +HorseRiding/v_HorseRiding_g19_c06 41 +HulaHoop/v_HulaHoop_g12_c06 42 +TennisSwing/v_TennisSwing_g25_c05 91 +ParallelBars/v_ParallelBars_g11_c04 56 +Fencing/v_Fencing_g19_c04 27 +MilitaryParade/v_MilitaryParade_g17_c05 52 +Drumming/v_Drumming_g25_c01 26 +CleanAndJerk/v_CleanAndJerk_g20_c02 20 +PommelHorse/v_PommelHorse_g18_c03 68 +SkyDiving/v_SkyDiving_g22_c04 82 +SkyDiving/v_SkyDiving_g15_c03 82 +BlowDryHair/v_BlowDryHair_g10_c03 12 +TrampolineJumping/v_TrampolineJumping_g10_c01 93 +Archery/v_Archery_g11_c07 2 +TrampolineJumping/v_TrampolineJumping_g08_c04 93 +HandstandWalking/v_HandstandWalking_g18_c01 37 +TennisSwing/v_TennisSwing_g12_c01 91 +PommelHorse/v_PommelHorse_g17_c02 68 +JugglingBalls/v_JugglingBalls_g21_c04 45 +CliffDiving/v_CliffDiving_g19_c06 21 +Archery/v_Archery_g13_c06 2 +BrushingTeeth/v_BrushingTeeth_g18_c07 19 +Skijet/v_Skijet_g15_c03 81 +TableTennisShot/v_TableTennisShot_g18_c01 89 +Mixing/v_Mixing_g16_c04 53 +PullUps/v_PullUps_g18_c01 69 +BenchPress/v_BenchPress_g10_c04 9 +BoxingSpeedBag/v_BoxingSpeedBag_g11_c02 17 +FrontCrawl/v_FrontCrawl_g19_c04 31 +WritingOnBoard/v_WritingOnBoard_g11_c04 99 +GolfSwing/v_GolfSwing_g15_c04 32 +PlayingGuitar/v_PlayingGuitar_g15_c07 62 +FieldHockeyPenalty/v_FieldHockeyPenalty_g25_c02 28 +CliffDiving/v_CliffDiving_g20_c06 21 +JugglingBalls/v_JugglingBalls_g25_c01 45 +FrontCrawl/v_FrontCrawl_g19_c02 31 +VolleyballSpiking/v_VolleyballSpiking_g08_c01 96 +Mixing/v_Mixing_g10_c05 53 +ThrowDiscus/v_ThrowDiscus_g18_c07 92 +HulaHoop/v_HulaHoop_g23_c04 42 +PoleVault/v_PoleVault_g09_c01 67 +Archery/v_Archery_g09_c04 2 +FrisbeeCatch/v_FrisbeeCatch_g10_c05 30 +ParallelBars/v_ParallelBars_g24_c04 56 +BlowDryHair/v_BlowDryHair_g24_c04 12 
+PlayingViolin/v_PlayingViolin_g08_c03 66 +PlayingFlute/v_PlayingFlute_g21_c04 61 +HeadMassage/v_HeadMassage_g12_c06 38 +ApplyLipstick/v_ApplyLipstick_g15_c03 1 +CricketShot/v_CricketShot_g15_c02 23 +BreastStroke/v_BreastStroke_g21_c03 18 +HulaHoop/v_HulaHoop_g16_c01 42 +Swing/v_Swing_g20_c05 88 +Knitting/v_Knitting_g10_c03 49 +PizzaTossing/v_PizzaTossing_g25_c01 57 +PlayingViolin/v_PlayingViolin_g13_c03 66 +PushUps/v_PushUps_g10_c04 71 +LongJump/v_LongJump_g11_c06 50 +JavelinThrow/v_JavelinThrow_g15_c01 44 +SkateBoarding/v_SkateBoarding_g24_c01 79 +TennisSwing/v_TennisSwing_g25_c04 91 +HorseRiding/v_HorseRiding_g23_c05 41 +YoYo/v_YoYo_g19_c02 100 +Skijet/v_Skijet_g10_c01 81 +PlayingSitar/v_PlayingSitar_g12_c03 64 +CricketBowling/v_CricketBowling_g11_c04 22 +BenchPress/v_BenchPress_g20_c02 9 +FrisbeeCatch/v_FrisbeeCatch_g14_c02 30 +LongJump/v_LongJump_g20_c04 50 +RopeClimbing/v_RopeClimbing_g19_c02 74 +Rowing/v_Rowing_g13_c06 75 +ShavingBeard/v_ShavingBeard_g11_c03 77 +ApplyLipstick/v_ApplyLipstick_g16_c05 1 +BabyCrawling/v_BabyCrawling_g23_c01 3 +PlayingGuitar/v_PlayingGuitar_g20_c06 62 +HandstandPushups/v_HandStandPushups_g24_c02 36 +Swing/v_Swing_g16_c04 88 +Swing/v_Swing_g18_c02 88 +Lunges/v_Lunges_g15_c04 51 +BenchPress/v_BenchPress_g24_c06 9 +PlayingSitar/v_PlayingSitar_g15_c03 64 +FrontCrawl/v_FrontCrawl_g11_c07 31 +BandMarching/v_BandMarching_g08_c07 5 +Mixing/v_Mixing_g15_c06 53 +Rafting/v_Rafting_g12_c01 72 +FloorGymnastics/v_FloorGymnastics_g17_c05 29 +SoccerPenalty/v_SoccerPenalty_g08_c02 84 +CliffDiving/v_CliffDiving_g20_c04 21 +PommelHorse/v_PommelHorse_g22_c03 68 +BalanceBeam/v_BalanceBeam_g24_c02 4 +RopeClimbing/v_RopeClimbing_g17_c03 74 +SumoWrestling/v_SumoWrestling_g08_c06 86 +CleanAndJerk/v_CleanAndJerk_g11_c03 20 +ParallelBars/v_ParallelBars_g08_c03 56 +PlayingDhol/v_PlayingDhol_g11_c01 60 +Typing/v_Typing_g20_c04 94 +ApplyEyeMakeup/v_ApplyEyeMakeup_g17_c04 0 +Shotput/v_Shotput_g12_c03 78 +Basketball/v_Basketball_g22_c04 7 
+UnevenBars/v_UnevenBars_g23_c03 95 +Billiards/v_Billiards_g17_c04 11 +Skiing/v_Skiing_g10_c03 80 +YoYo/v_YoYo_g21_c04 100 +Drumming/v_Drumming_g24_c05 26 +BenchPress/v_BenchPress_g14_c01 9 +SoccerPenalty/v_SoccerPenalty_g18_c05 84 +ShavingBeard/v_ShavingBeard_g09_c07 77 +Hammering/v_Hammering_g15_c02 34 +TennisSwing/v_TennisSwing_g22_c06 91 +PommelHorse/v_PommelHorse_g22_c05 68 +Haircut/v_Haircut_g16_c04 33 +Archery/v_Archery_g13_c03 2 +BalanceBeam/v_BalanceBeam_g13_c05 4 +TrampolineJumping/v_TrampolineJumping_g21_c04 93 +PushUps/v_PushUps_g08_c02 71 +CricketShot/v_CricketShot_g16_c05 23 +BoxingPunchingBag/v_BoxingPunchingBag_g14_c05 16 +JugglingBalls/v_JugglingBalls_g09_c05 45 +SkateBoarding/v_SkateBoarding_g10_c04 79 +BaseballPitch/v_BaseballPitch_g13_c06 6 +Typing/v_Typing_g23_c02 94 +VolleyballSpiking/v_VolleyballSpiking_g18_c04 96 +FieldHockeyPenalty/v_FieldHockeyPenalty_g18_c04 28 +CliffDiving/v_CliffDiving_g22_c05 21 +JavelinThrow/v_JavelinThrow_g14_c03 44 +Knitting/v_Knitting_g20_c01 49 +CuttingInKitchen/v_CuttingInKitchen_g15_c01 24 +Kayaking/v_Kayaking_g17_c01 48 +Rowing/v_Rowing_g10_c03 75 +TrampolineJumping/v_TrampolineJumping_g09_c06 93 +BoxingSpeedBag/v_BoxingSpeedBag_g17_c05 17 +Basketball/v_Basketball_g16_c04 7 +WalkingWithDog/v_WalkingWithDog_g11_c02 97 +PoleVault/v_PoleVault_g14_c05 67 +PlayingGuitar/v_PlayingGuitar_g08_c01 62 +TennisSwing/v_TennisSwing_g17_c01 91 +BalanceBeam/v_BalanceBeam_g10_c04 4 +HandstandPushups/v_HandStandPushups_g16_c01 36 +Punch/v_Punch_g23_c04 70 +PlayingDhol/v_PlayingDhol_g22_c02 60 +WallPushups/v_WallPushups_g23_c02 98 +SoccerPenalty/v_SoccerPenalty_g11_c01 84 +Swing/v_Swing_g19_c03 88 +CuttingInKitchen/v_CuttingInKitchen_g10_c06 24 +HorseRiding/v_HorseRiding_g19_c01 41 +Knitting/v_Knitting_g24_c02 49 +PlayingViolin/v_PlayingViolin_g25_c02 66 +PlayingTabla/v_PlayingTabla_g18_c02 65 +PlayingFlute/v_PlayingFlute_g14_c03 61 +SkateBoarding/v_SkateBoarding_g21_c04 79 +FloorGymnastics/v_FloorGymnastics_g08_c03 29 
+ShavingBeard/v_ShavingBeard_g11_c04 77 +ApplyLipstick/v_ApplyLipstick_g10_c01 1 +Typing/v_Typing_g08_c07 94 +BoxingSpeedBag/v_BoxingSpeedBag_g18_c03 17 +BrushingTeeth/v_BrushingTeeth_g17_c03 19 +WalkingWithDog/v_WalkingWithDog_g14_c04 97 +Kayaking/v_Kayaking_g22_c07 48 +IceDancing/v_IceDancing_g10_c05 43 +Diving/v_Diving_g24_c04 25 +PoleVault/v_PoleVault_g08_c02 67 +ApplyLipstick/v_ApplyLipstick_g12_c02 1 +Kayaking/v_Kayaking_g21_c07 48 +BlowDryHair/v_BlowDryHair_g20_c03 12 +PlayingCello/v_PlayingCello_g18_c07 58 +WallPushups/v_WallPushups_g10_c04 98 +BasketballDunk/v_BasketballDunk_g11_c04 8 +JavelinThrow/v_JavelinThrow_g21_c02 44 +PlayingTabla/v_PlayingTabla_g21_c04 65 +TaiChi/v_TaiChi_g15_c01 90 +Skiing/v_Skiing_g09_c02 80 +PlayingPiano/v_PlayingPiano_g18_c04 63 +CricketBowling/v_CricketBowling_g09_c03 22 +SkateBoarding/v_SkateBoarding_g10_c07 79 +PlayingFlute/v_PlayingFlute_g17_c01 61 +WalkingWithDog/v_WalkingWithDog_g21_c02 97 +BreastStroke/v_BreastStroke_g13_c01 18 +MoppingFloor/v_MoppingFloor_g13_c04 54 +FrontCrawl/v_FrontCrawl_g09_c02 31 +PlayingCello/v_PlayingCello_g24_c06 58 +WalkingWithDog/v_WalkingWithDog_g17_c05 97 +Swing/v_Swing_g13_c04 88 +BlowDryHair/v_BlowDryHair_g17_c01 12 +Billiards/v_Billiards_g12_c07 11 +PlayingViolin/v_PlayingViolin_g18_c02 66 +PommelHorse/v_PommelHorse_g19_c02 68 +StillRings/v_StillRings_g17_c04 85 +Archery/v_Archery_g18_c01 2 +PommelHorse/v_PommelHorse_g25_c04 68 +FieldHockeyPenalty/v_FieldHockeyPenalty_g15_c03 28 +SoccerPenalty/v_SoccerPenalty_g21_c02 84 +TennisSwing/v_TennisSwing_g18_c01 91 +BlowDryHair/v_BlowDryHair_g11_c01 12 +VolleyballSpiking/v_VolleyballSpiking_g19_c04 96 +HorseRiding/v_HorseRiding_g18_c02 41 +Basketball/v_Basketball_g24_c02 7 +RockClimbingIndoor/v_RockClimbingIndoor_g12_c02 73 +BoxingSpeedBag/v_BoxingSpeedBag_g13_c05 17 +RockClimbingIndoor/v_RockClimbingIndoor_g08_c02 73 +BrushingTeeth/v_BrushingTeeth_g24_c06 19 +Kayaking/v_Kayaking_g22_c02 48 +PlayingFlute/v_PlayingFlute_g13_c05 61 
+HorseRiding/v_HorseRiding_g08_c02 41 +PlayingCello/v_PlayingCello_g15_c07 58 +RopeClimbing/v_RopeClimbing_g17_c05 74 +HandstandWalking/v_HandstandWalking_g16_c01 37 +Kayaking/v_Kayaking_g15_c05 48 +LongJump/v_LongJump_g21_c04 50 +Shotput/v_Shotput_g16_c05 78 +ShavingBeard/v_ShavingBeard_g17_c02 77 +Knitting/v_Knitting_g14_c04 49 +VolleyballSpiking/v_VolleyballSpiking_g15_c04 96 +SumoWrestling/v_SumoWrestling_g24_c03 86 +SoccerPenalty/v_SoccerPenalty_g08_c06 84 +SumoWrestling/v_SumoWrestling_g25_c03 86 +CleanAndJerk/v_CleanAndJerk_g23_c03 20 +HeadMassage/v_HeadMassage_g23_c04 38 +PlayingDaf/v_PlayingDaf_g14_c02 59 +FloorGymnastics/v_FloorGymnastics_g23_c03 29 +Skiing/v_Skiing_g17_c04 80 +BreastStroke/v_BreastStroke_g23_c04 18 +Surfing/v_Surfing_g25_c04 87 +BasketballDunk/v_BasketballDunk_g18_c05 8 +Archery/v_Archery_g11_c03 2 +ApplyLipstick/v_ApplyLipstick_g17_c02 1 +BodyWeightSquats/v_BodyWeightSquats_g21_c03 14 +TennisSwing/v_TennisSwing_g10_c04 91 +HeadMassage/v_HeadMassage_g13_c01 38 +BoxingSpeedBag/v_BoxingSpeedBag_g22_c04 17 +SkateBoarding/v_SkateBoarding_g19_c01 79 +JumpRope/v_JumpRope_g18_c01 47 +CleanAndJerk/v_CleanAndJerk_g16_c02 20 +Haircut/v_Haircut_g12_c02 33 +JavelinThrow/v_JavelinThrow_g20_c01 44 +Bowling/v_Bowling_g23_c05 15 +PoleVault/v_PoleVault_g24_c07 67 +Swing/v_Swing_g18_c03 88 +TableTennisShot/v_TableTennisShot_g18_c03 89 +StillRings/v_StillRings_g14_c04 85 +PlayingCello/v_PlayingCello_g10_c06 58 +SoccerJuggling/v_SoccerJuggling_g16_c07 83 +RopeClimbing/v_RopeClimbing_g25_c04 74 +SoccerJuggling/v_SoccerJuggling_g16_c01 83 +MilitaryParade/v_MilitaryParade_g12_c03 52 +BlowDryHair/v_BlowDryHair_g22_c02 12 +VolleyballSpiking/v_VolleyballSpiking_g18_c01 96 +Skiing/v_Skiing_g12_c03 80 +ShavingBeard/v_ShavingBeard_g20_c02 77 +HighJump/v_HighJump_g20_c04 39 +PlayingSitar/v_PlayingSitar_g21_c02 64 +CuttingInKitchen/v_CuttingInKitchen_g12_c02 24 +CliffDiving/v_CliffDiving_g12_c03 21 +FrontCrawl/v_FrontCrawl_g17_c05 31 
+ShavingBeard/v_ShavingBeard_g14_c02 77 +TaiChi/v_TaiChi_g25_c02 90 +Haircut/v_Haircut_g08_c03 33 +TennisSwing/v_TennisSwing_g09_c04 91 +ApplyEyeMakeup/v_ApplyEyeMakeup_g09_c03 0 +BalanceBeam/v_BalanceBeam_g19_c03 4 +Hammering/v_Hammering_g25_c03 34 +SkyDiving/v_SkyDiving_g20_c01 82 +SkyDiving/v_SkyDiving_g13_c02 82 +IceDancing/v_IceDancing_g22_c07 43 +TennisSwing/v_TennisSwing_g14_c06 91 +Diving/v_Diving_g18_c04 25 +Surfing/v_Surfing_g20_c02 87 +PlayingViolin/v_PlayingViolin_g18_c04 66 +PlayingSitar/v_PlayingSitar_g16_c07 64 +FrontCrawl/v_FrontCrawl_g15_c01 31 +WritingOnBoard/v_WritingOnBoard_g22_c02 99 +WallPushups/v_WallPushups_g18_c03 98 +ApplyEyeMakeup/v_ApplyEyeMakeup_g25_c04 0 +JugglingBalls/v_JugglingBalls_g19_c02 45 +SumoWrestling/v_SumoWrestling_g23_c02 86 +BabyCrawling/v_BabyCrawling_g08_c02 3 +Basketball/v_Basketball_g22_c01 7 +HorseRace/v_HorseRace_g14_c04 40 +Knitting/v_Knitting_g20_c04 49 +BaseballPitch/v_BaseballPitch_g15_c04 6 +Surfing/v_Surfing_g11_c01 87 +BoxingPunchingBag/v_BoxingPunchingBag_g10_c01 16 +PlayingFlute/v_PlayingFlute_g16_c02 61 +PlayingTabla/v_PlayingTabla_g17_c04 65 +Archery/v_Archery_g16_c05 2 +CuttingInKitchen/v_CuttingInKitchen_g13_c02 24 +ShavingBeard/v_ShavingBeard_g25_c02 77 +PlayingDaf/v_PlayingDaf_g23_c02 59 +Hammering/v_Hammering_g16_c04 34 +GolfSwing/v_GolfSwing_g17_c03 32 +UnevenBars/v_UnevenBars_g24_c04 95 +Kayaking/v_Kayaking_g16_c07 48 +JugglingBalls/v_JugglingBalls_g10_c01 45 +HeadMassage/v_HeadMassage_g25_c07 38 +PoleVault/v_PoleVault_g20_c03 67 +PlayingSitar/v_PlayingSitar_g09_c01 64 +WalkingWithDog/v_WalkingWithDog_g17_c02 97 +PushUps/v_PushUps_g16_c01 71 +PlayingGuitar/v_PlayingGuitar_g21_c05 62 +GolfSwing/v_GolfSwing_g13_c01 32 +ShavingBeard/v_ShavingBeard_g22_c07 77 +BaseballPitch/v_BaseballPitch_g12_c02 6 +Knitting/v_Knitting_g25_c02 49 +ApplyLipstick/v_ApplyLipstick_g22_c01 1 +MilitaryParade/v_MilitaryParade_g20_c03 52 +MoppingFloor/v_MoppingFloor_g08_c04 54 +TableTennisShot/v_TableTennisShot_g20_c01 89 
+SoccerJuggling/v_SoccerJuggling_g09_c02 83 +Hammering/v_Hammering_g21_c06 34 +WritingOnBoard/v_WritingOnBoard_g13_c03 99 +BalanceBeam/v_BalanceBeam_g10_c02 4 +Surfing/v_Surfing_g15_c04 87 +SoccerPenalty/v_SoccerPenalty_g14_c03 84 +BabyCrawling/v_BabyCrawling_g14_c03 3 +HeadMassage/v_HeadMassage_g20_c05 38 +BrushingTeeth/v_BrushingTeeth_g14_c02 19 +HighJump/v_HighJump_g18_c04 39 +PlayingFlute/v_PlayingFlute_g19_c06 61 +BoxingPunchingBag/v_BoxingPunchingBag_g13_c04 16 +HandstandWalking/v_HandstandWalking_g08_c03 37 +Rowing/v_Rowing_g21_c05 75 +Mixing/v_Mixing_g08_c05 53 +FrisbeeCatch/v_FrisbeeCatch_g19_c03 30 +Nunchucks/v_Nunchucks_g11_c05 55 +CuttingInKitchen/v_CuttingInKitchen_g15_c03 24 +SoccerPenalty/v_SoccerPenalty_g13_c03 84 +PlayingFlute/v_PlayingFlute_g09_c06 61 +ApplyLipstick/v_ApplyLipstick_g19_c03 1 +FrontCrawl/v_FrontCrawl_g20_c03 31 +PlayingPiano/v_PlayingPiano_g25_c02 63 +Typing/v_Typing_g20_c01 94 +Knitting/v_Knitting_g20_c03 49 +Bowling/v_Bowling_g13_c04 15 +WalkingWithDog/v_WalkingWithDog_g24_c01 97 +LongJump/v_LongJump_g25_c02 50 +SoccerJuggling/v_SoccerJuggling_g24_c01 83 +PlayingDhol/v_PlayingDhol_g15_c02 60 +BlowDryHair/v_BlowDryHair_g09_c04 12 +Mixing/v_Mixing_g21_c02 53 +Skiing/v_Skiing_g18_c04 80 +Haircut/v_Haircut_g12_c04 33 +Basketball/v_Basketball_g14_c03 7 +IceDancing/v_IceDancing_g11_c03 43 +HulaHoop/v_HulaHoop_g20_c05 42 +BandMarching/v_BandMarching_g14_c03 5 +WallPushups/v_WallPushups_g11_c06 98 +FieldHockeyPenalty/v_FieldHockeyPenalty_g08_c03 28 +VolleyballSpiking/v_VolleyballSpiking_g21_c04 96 +Biking/v_Biking_g24_c06 10 +Typing/v_Typing_g25_c01 94 +HandstandWalking/v_HandstandWalking_g21_c01 37 +Lunges/v_Lunges_g09_c04 51 +RopeClimbing/v_RopeClimbing_g12_c02 74 +VolleyballSpiking/v_VolleyballSpiking_g14_c03 96 +JumpRope/v_JumpRope_g08_c05 47 +SkyDiving/v_SkyDiving_g10_c04 82 +PlayingSitar/v_PlayingSitar_g08_c02 64 +Mixing/v_Mixing_g10_c04 53 +GolfSwing/v_GolfSwing_g23_c03 32 +FieldHockeyPenalty/v_FieldHockeyPenalty_g24_c05 28 
+Biking/v_Biking_g18_c03 10 +BreastStroke/v_BreastStroke_g16_c01 18 +TableTennisShot/v_TableTennisShot_g16_c06 89 +Hammering/v_Hammering_g10_c04 34 +RopeClimbing/v_RopeClimbing_g08_c04 74 +PlayingSitar/v_PlayingSitar_g16_c01 64 +Billiards/v_Billiards_g10_c03 11 +HorseRiding/v_HorseRiding_g13_c03 41 +PommelHorse/v_PommelHorse_g13_c06 68 +PlayingViolin/v_PlayingViolin_g23_c03 66 +BoxingPunchingBag/v_BoxingPunchingBag_g21_c05 16 +Rowing/v_Rowing_g19_c04 75 +BabyCrawling/v_BabyCrawling_g14_c02 3 +BaseballPitch/v_BaseballPitch_g15_c06 6 +PlayingSitar/v_PlayingSitar_g25_c02 64 +Haircut/v_Haircut_g24_c05 33 +SoccerJuggling/v_SoccerJuggling_g09_c03 83 +GolfSwing/v_GolfSwing_g22_c01 32 +BabyCrawling/v_BabyCrawling_g17_c01 3 +BabyCrawling/v_BabyCrawling_g12_c01 3 +BlowingCandles/v_BlowingCandles_g17_c02 13 +RockClimbingIndoor/v_RockClimbingIndoor_g25_c06 73 +CricketBowling/v_CricketBowling_g11_c05 22 +Skijet/v_Skijet_g16_c02 81 +JumpingJack/v_JumpingJack_g13_c02 46 +IceDancing/v_IceDancing_g23_c04 43 +BoxingSpeedBag/v_BoxingSpeedBag_g11_c01 17 +FloorGymnastics/v_FloorGymnastics_g23_c01 29 +CricketBowling/v_CricketBowling_g22_c05 22 +Punch/v_Punch_g21_c05 70 +Haircut/v_Haircut_g11_c05 33 +Kayaking/v_Kayaking_g11_c06 48 +SumoWrestling/v_SumoWrestling_g13_c05 86 +VolleyballSpiking/v_VolleyballSpiking_g14_c01 96 +PlayingViolin/v_PlayingViolin_g18_c03 66 +BasketballDunk/v_BasketballDunk_g19_c03 8 +BreastStroke/v_BreastStroke_g16_c04 18 +BreastStroke/v_BreastStroke_g17_c04 18 +Punch/v_Punch_g09_c04 70 +TrampolineJumping/v_TrampolineJumping_g23_c03 93 +Knitting/v_Knitting_g11_c02 49 +MilitaryParade/v_MilitaryParade_g11_c03 52 +BodyWeightSquats/v_BodyWeightSquats_g17_c02 14 +BalanceBeam/v_BalanceBeam_g25_c01 4 +CleanAndJerk/v_CleanAndJerk_g08_c01 20 +IceDancing/v_IceDancing_g10_c04 43 +BlowingCandles/v_BlowingCandles_g18_c03 13 +BabyCrawling/v_BabyCrawling_g09_c06 3 +PlayingViolin/v_PlayingViolin_g25_c04 66 +IceDancing/v_IceDancing_g08_c02 43 +Mixing/v_Mixing_g11_c07 53 
+BoxingSpeedBag/v_BoxingSpeedBag_g17_c07 17 +Billiards/v_Billiards_g09_c02 11 +CleanAndJerk/v_CleanAndJerk_g13_c03 20 +Surfing/v_Surfing_g08_c06 87 +LongJump/v_LongJump_g12_c06 50 +ApplyEyeMakeup/v_ApplyEyeMakeup_g24_c02 0 +Rafting/v_Rafting_g16_c01 72 +Mixing/v_Mixing_g24_c04 53 +Biking/v_Biking_g25_c01 10 +Mixing/v_Mixing_g24_c03 53 +WritingOnBoard/v_WritingOnBoard_g22_c07 99 +JumpRope/v_JumpRope_g24_c07 47 +PlayingGuitar/v_PlayingGuitar_g10_c02 62 +PlayingCello/v_PlayingCello_g11_c07 58 +TennisSwing/v_TennisSwing_g11_c05 91 +Haircut/v_Haircut_g22_c01 33 +BaseballPitch/v_BaseballPitch_g23_c03 6 +BenchPress/v_BenchPress_g22_c07 9 +Shotput/v_Shotput_g19_c04 78 +Rafting/v_Rafting_g08_c04 72 +PlayingSitar/v_PlayingSitar_g17_c06 64 +ApplyEyeMakeup/v_ApplyEyeMakeup_g16_c01 0 +ApplyLipstick/v_ApplyLipstick_g20_c03 1 +JugglingBalls/v_JugglingBalls_g24_c01 45 +JumpingJack/v_JumpingJack_g08_c02 46 +HammerThrow/v_HammerThrow_g23_c01 35 +PlayingFlute/v_PlayingFlute_g15_c07 61 +PlayingSitar/v_PlayingSitar_g16_c02 64 +MoppingFloor/v_MoppingFloor_g11_c04 54 +ShavingBeard/v_ShavingBeard_g23_c04 77 +Shotput/v_Shotput_g08_c05 78 +Bowling/v_Bowling_g17_c07 15 +PlayingCello/v_PlayingCello_g12_c07 58 +SalsaSpin/v_SalsaSpin_g14_c01 76 +SumoWrestling/v_SumoWrestling_g08_c01 86 +FrontCrawl/v_FrontCrawl_g10_c01 31 +FrontCrawl/v_FrontCrawl_g23_c01 31 +Skiing/v_Skiing_g23_c02 80 +Mixing/v_Mixing_g12_c01 53 +Nunchucks/v_Nunchucks_g13_c06 55 +PlayingSitar/v_PlayingSitar_g23_c03 64 +PlayingViolin/v_PlayingViolin_g12_c03 66 +JumpRope/v_JumpRope_g18_c02 47 +SoccerJuggling/v_SoccerJuggling_g19_c06 83 +SoccerJuggling/v_SoccerJuggling_g09_c06 83 +PlayingGuitar/v_PlayingGuitar_g08_c03 62 +SalsaSpin/v_SalsaSpin_g17_c01 76 +Rafting/v_Rafting_g17_c04 72 +WritingOnBoard/v_WritingOnBoard_g17_c05 99 +Basketball/v_Basketball_g17_c04 7 +BenchPress/v_BenchPress_g22_c01 9 +MilitaryParade/v_MilitaryParade_g19_c04 52 +ThrowDiscus/v_ThrowDiscus_g14_c02 92 +LongJump/v_LongJump_g18_c03 50 
+RockClimbingIndoor/v_RockClimbingIndoor_g21_c05 73 +PizzaTossing/v_PizzaTossing_g09_c03 57 +WalkingWithDog/v_WalkingWithDog_g16_c04 97 +BoxingPunchingBag/v_BoxingPunchingBag_g18_c06 16 +Typing/v_Typing_g08_c05 94 +JugglingBalls/v_JugglingBalls_g24_c02 45 +ParallelBars/v_ParallelBars_g16_c01 56 +IceDancing/v_IceDancing_g14_c06 43 +MoppingFloor/v_MoppingFloor_g16_c03 54 +BandMarching/v_BandMarching_g10_c07 5 +FrisbeeCatch/v_FrisbeeCatch_g20_c01 30 +PlayingDhol/v_PlayingDhol_g22_c05 60 +SoccerPenalty/v_SoccerPenalty_g11_c03 84 +Hammering/v_Hammering_g17_c05 34 +PlayingSitar/v_PlayingSitar_g21_c05 64 +PlayingGuitar/v_PlayingGuitar_g11_c02 62 +UnevenBars/v_UnevenBars_g12_c03 95 +RopeClimbing/v_RopeClimbing_g14_c02 74 +HorseRiding/v_HorseRiding_g12_c06 41 +TableTennisShot/v_TableTennisShot_g23_c03 89 +Billiards/v_Billiards_g08_c01 11 +BoxingSpeedBag/v_BoxingSpeedBag_g21_c03 17 +FrontCrawl/v_FrontCrawl_g22_c02 31 +PlayingGuitar/v_PlayingGuitar_g11_c04 62 +Shotput/v_Shotput_g08_c01 78 +RopeClimbing/v_RopeClimbing_g24_c03 74 +TableTennisShot/v_TableTennisShot_g25_c01 89 +PlayingPiano/v_PlayingPiano_g18_c03 63 +CliffDiving/v_CliffDiving_g19_c01 21 +Rowing/v_Rowing_g08_c02 75 +Bowling/v_Bowling_g09_c06 15 +YoYo/v_YoYo_g24_c03 100 +Skijet/v_Skijet_g09_c03 81 +PlayingDaf/v_PlayingDaf_g25_c05 59 +Hammering/v_Hammering_g10_c06 34 +CuttingInKitchen/v_CuttingInKitchen_g20_c02 24 +WalkingWithDog/v_WalkingWithDog_g13_c02 97 +Rafting/v_Rafting_g22_c02 72 +HammerThrow/v_HammerThrow_g14_c02 35 +JumpingJack/v_JumpingJack_g19_c04 46 +BoxingPunchingBag/v_BoxingPunchingBag_g19_c07 16 +ParallelBars/v_ParallelBars_g20_c02 56 +PizzaTossing/v_PizzaTossing_g23_c05 57 +Rowing/v_Rowing_g23_c04 75 +ApplyEyeMakeup/v_ApplyEyeMakeup_g20_c01 0 +Drumming/v_Drumming_g12_c04 26 +Lunges/v_Lunges_g13_c02 51 +UnevenBars/v_UnevenBars_g18_c02 95 +SkyDiving/v_SkyDiving_g19_c04 82 +CliffDiving/v_CliffDiving_g18_c02 21 +BasketballDunk/v_BasketballDunk_g16_c03 8 +MoppingFloor/v_MoppingFloor_g09_c04 54 
+TennisSwing/v_TennisSwing_g18_c05 91 +BlowDryHair/v_BlowDryHair_g18_c03 12 +ShavingBeard/v_ShavingBeard_g08_c06 77 +BoxingPunchingBag/v_BoxingPunchingBag_g08_c07 16 +SoccerPenalty/v_SoccerPenalty_g14_c04 84 +HandstandPushups/v_HandStandPushups_g18_c06 36 +FrontCrawl/v_FrontCrawl_g25_c03 31 +Archery/v_Archery_g24_c04 2 +TennisSwing/v_TennisSwing_g17_c02 91 +Skijet/v_Skijet_g14_c03 81 +Knitting/v_Knitting_g15_c02 49 +HeadMassage/v_HeadMassage_g22_c02 38 +BoxingSpeedBag/v_BoxingSpeedBag_g11_c06 17 +FrontCrawl/v_FrontCrawl_g14_c02 31 +FieldHockeyPenalty/v_FieldHockeyPenalty_g18_c03 28 +RockClimbingIndoor/v_RockClimbingIndoor_g12_c06 73 +ParallelBars/v_ParallelBars_g21_c02 56 +Swing/v_Swing_g23_c01 88 +Drumming/v_Drumming_g25_c02 26 +PlayingCello/v_PlayingCello_g22_c05 58 +UnevenBars/v_UnevenBars_g22_c03 95 +Skiing/v_Skiing_g12_c01 80 +BrushingTeeth/v_BrushingTeeth_g18_c04 19 +FieldHockeyPenalty/v_FieldHockeyPenalty_g12_c03 28 +JavelinThrow/v_JavelinThrow_g17_c04 44 +Kayaking/v_Kayaking_g09_c01 48 +BoxingPunchingBag/v_BoxingPunchingBag_g13_c03 16 +VolleyballSpiking/v_VolleyballSpiking_g25_c03 96 +CliffDiving/v_CliffDiving_g24_c02 21 +Fencing/v_Fencing_g24_c04 27 +CricketShot/v_CricketShot_g12_c03 23 +RockClimbingIndoor/v_RockClimbingIndoor_g17_c01 73 +StillRings/v_StillRings_g08_c01 85 +HandstandPushups/v_HandStandPushups_g10_c02 36 +ShavingBeard/v_ShavingBeard_g13_c04 77 +JavelinThrow/v_JavelinThrow_g22_c01 44 +SkyDiving/v_SkyDiving_g21_c04 82 +SalsaSpin/v_SalsaSpin_g21_c03 76 +PlayingGuitar/v_PlayingGuitar_g18_c06 62 +Skiing/v_Skiing_g21_c05 80 +PlayingDaf/v_PlayingDaf_g19_c03 59 +JumpRope/v_JumpRope_g24_c02 47 +FloorGymnastics/v_FloorGymnastics_g11_c01 29 +JumpRope/v_JumpRope_g12_c01 47 +Fencing/v_Fencing_g19_c02 27 +Knitting/v_Knitting_g16_c01 49 +CricketBowling/v_CricketBowling_g14_c03 22 +YoYo/v_YoYo_g16_c01 100 +HulaHoop/v_HulaHoop_g19_c04 42 +MoppingFloor/v_MoppingFloor_g08_c02 54 +ApplyLipstick/v_ApplyLipstick_g17_c05 1 +Haircut/v_Haircut_g12_c03 33 
+HammerThrow/v_HammerThrow_g18_c03 35 +Drumming/v_Drumming_g22_c04 26 +Diving/v_Diving_g23_c05 25 +CliffDiving/v_CliffDiving_g24_c05 21 +SumoWrestling/v_SumoWrestling_g25_c02 86 +FrisbeeCatch/v_FrisbeeCatch_g16_c04 30 +FrisbeeCatch/v_FrisbeeCatch_g09_c05 30 +JugglingBalls/v_JugglingBalls_g15_c01 45 +TrampolineJumping/v_TrampolineJumping_g20_c05 93 +HulaHoop/v_HulaHoop_g12_c02 42 +PommelHorse/v_PommelHorse_g20_c01 68 +Bowling/v_Bowling_g23_c01 15 +FrontCrawl/v_FrontCrawl_g23_c03 31 +HulaHoop/v_HulaHoop_g13_c06 42 +SkyDiving/v_SkyDiving_g11_c01 82 +BrushingTeeth/v_BrushingTeeth_g23_c01 19 +HeadMassage/v_HeadMassage_g09_c04 38 +StillRings/v_StillRings_g20_c03 85 +BaseballPitch/v_BaseballPitch_g11_c05 6 +Mixing/v_Mixing_g12_c04 53 +HulaHoop/v_HulaHoop_g22_c04 42 +PlayingDaf/v_PlayingDaf_g10_c04 59 +Bowling/v_Bowling_g17_c06 15 +Rowing/v_Rowing_g10_c02 75 +Basketball/v_Basketball_g16_c02 7 +BlowingCandles/v_BlowingCandles_g19_c01 13 +BaseballPitch/v_BaseballPitch_g18_c01 6 +Archery/v_Archery_g25_c01 2 +BandMarching/v_BandMarching_g20_c03 5 +ApplyLipstick/v_ApplyLipstick_g14_c01 1 +CliffDiving/v_CliffDiving_g24_c01 21 +Basketball/v_Basketball_g08_c01 7 +Diving/v_Diving_g11_c05 25 +WalkingWithDog/v_WalkingWithDog_g14_c02 97 +JumpingJack/v_JumpingJack_g09_c03 46 +Knitting/v_Knitting_g17_c03 49 +SoccerJuggling/v_SoccerJuggling_g19_c04 83 +WritingOnBoard/v_WritingOnBoard_g08_c01 99 +Kayaking/v_Kayaking_g16_c05 48 +HeadMassage/v_HeadMassage_g08_c01 38 +Lunges/v_Lunges_g19_c03 51 +GolfSwing/v_GolfSwing_g20_c01 32 +Mixing/v_Mixing_g14_c01 53 +IceDancing/v_IceDancing_g15_c04 43 +Typing/v_Typing_g21_c03 94 +Skijet/v_Skijet_g08_c01 81 +CricketBowling/v_CricketBowling_g11_c02 22 +SoccerPenalty/v_SoccerPenalty_g16_c03 84 +IceDancing/v_IceDancing_g09_c02 43 +PlayingFlute/v_PlayingFlute_g16_c03 61 +ParallelBars/v_ParallelBars_g20_c04 56 +Drumming/v_Drumming_g20_c06 26 +PlayingCello/v_PlayingCello_g18_c03 58 +IceDancing/v_IceDancing_g14_c02 43 +BasketballDunk/v_BasketballDunk_g10_c01 8 
+JavelinThrow/v_JavelinThrow_g25_c05 44 +SoccerPenalty/v_SoccerPenalty_g19_c03 84 +HammerThrow/v_HammerThrow_g19_c04 35 +Billiards/v_Billiards_g21_c06 11 +BreastStroke/v_BreastStroke_g12_c04 18 +Bowling/v_Bowling_g24_c07 15 +MilitaryParade/v_MilitaryParade_g11_c01 52 +VolleyballSpiking/v_VolleyballSpiking_g20_c04 96 +Nunchucks/v_Nunchucks_g14_c03 55 +IceDancing/v_IceDancing_g12_c04 43 +PlayingSitar/v_PlayingSitar_g22_c02 64 +PlayingDaf/v_PlayingDaf_g11_c03 59 +BaseballPitch/v_BaseballPitch_g10_c02 6 +FloorGymnastics/v_FloorGymnastics_g25_c03 29 +BoxingSpeedBag/v_BoxingSpeedBag_g14_c03 17 +WalkingWithDog/v_WalkingWithDog_g20_c04 97 +JumpRope/v_JumpRope_g13_c05 47 +PoleVault/v_PoleVault_g18_c04 67 +Billiards/v_Billiards_g21_c07 11 +Knitting/v_Knitting_g18_c04 49 +HeadMassage/v_HeadMassage_g16_c03 38 +CuttingInKitchen/v_CuttingInKitchen_g12_c01 24 +FrisbeeCatch/v_FrisbeeCatch_g19_c06 30 +JugglingBalls/v_JugglingBalls_g14_c02 45 +HulaHoop/v_HulaHoop_g08_c04 42 +BasketballDunk/v_BasketballDunk_g22_c04 8 +CleanAndJerk/v_CleanAndJerk_g15_c02 20 +VolleyballSpiking/v_VolleyballSpiking_g22_c04 96 +ParallelBars/v_ParallelBars_g22_c04 56 +ShavingBeard/v_ShavingBeard_g21_c07 77 +Kayaking/v_Kayaking_g08_c05 48 +ApplyEyeMakeup/v_ApplyEyeMakeup_g08_c03 0 +Knitting/v_Knitting_g21_c02 49 +Rowing/v_Rowing_g11_c06 75 +BenchPress/v_BenchPress_g16_c03 9 +Lunges/v_Lunges_g23_c06 51 +GolfSwing/v_GolfSwing_g22_c06 32 +Hammering/v_Hammering_g18_c02 34 +Typing/v_Typing_g09_c07 94 +Biking/v_Biking_g18_c06 10 +CricketShot/v_CricketShot_g19_c01 23 +BrushingTeeth/v_BrushingTeeth_g17_c04 19 +CricketBowling/v_CricketBowling_g08_c03 22 +CleanAndJerk/v_CleanAndJerk_g18_c03 20 +HandstandPushups/v_HandStandPushups_g15_c01 36 +SoccerPenalty/v_SoccerPenalty_g21_c05 84 +BalanceBeam/v_BalanceBeam_g17_c06 4 +RockClimbingIndoor/v_RockClimbingIndoor_g17_c04 73 +PlayingGuitar/v_PlayingGuitar_g12_c07 62 +BlowingCandles/v_BlowingCandles_g21_c01 13 +JavelinThrow/v_JavelinThrow_g13_c04 44 
+HeadMassage/v_HeadMassage_g18_c01 38 +FrontCrawl/v_FrontCrawl_g14_c07 31 +HandstandWalking/v_HandstandWalking_g10_c04 37 +PoleVault/v_PoleVault_g24_c01 67 +HeadMassage/v_HeadMassage_g23_c03 38 +Billiards/v_Billiards_g18_c03 11 +CleanAndJerk/v_CleanAndJerk_g22_c02 20 +FrontCrawl/v_FrontCrawl_g09_c05 31 +SalsaSpin/v_SalsaSpin_g21_c01 76 +Nunchucks/v_Nunchucks_g09_c03 55 +Billiards/v_Billiards_g11_c05 11 +MilitaryParade/v_MilitaryParade_g15_c05 52 +SkateBoarding/v_SkateBoarding_g23_c04 79 +FrontCrawl/v_FrontCrawl_g18_c06 31 +BasketballDunk/v_BasketballDunk_g23_c01 8 +HorseRiding/v_HorseRiding_g12_c05 41 +StillRings/v_StillRings_g14_c02 85 +BrushingTeeth/v_BrushingTeeth_g23_c06 19 +BenchPress/v_BenchPress_g08_c06 9 +Lunges/v_Lunges_g13_c06 51 +CricketShot/v_CricketShot_g20_c04 23 +RockClimbingIndoor/v_RockClimbingIndoor_g10_c04 73 +TennisSwing/v_TennisSwing_g24_c01 91 +PommelHorse/v_PommelHorse_g23_c01 68 +SkyDiving/v_SkyDiving_g21_c02 82 +HorseRiding/v_HorseRiding_g18_c03 41 +PlayingViolin/v_PlayingViolin_g15_c04 66 +Drumming/v_Drumming_g08_c03 26 +LongJump/v_LongJump_g22_c01 50 +PlayingDhol/v_PlayingDhol_g14_c02 60 +HulaHoop/v_HulaHoop_g14_c04 42 +BoxingPunchingBag/v_BoxingPunchingBag_g19_c02 16 +FrisbeeCatch/v_FrisbeeCatch_g21_c01 30 +BodyWeightSquats/v_BodyWeightSquats_g22_c01 14 +BandMarching/v_BandMarching_g19_c06 5 +PlayingFlute/v_PlayingFlute_g18_c02 61 +MilitaryParade/v_MilitaryParade_g14_c03 52 +BalanceBeam/v_BalanceBeam_g15_c01 4 +LongJump/v_LongJump_g19_c02 50 +StillRings/v_StillRings_g21_c05 85 +Drumming/v_Drumming_g24_c04 26 +PlayingFlute/v_PlayingFlute_g17_c07 61 +ThrowDiscus/v_ThrowDiscus_g24_c04 92 +TennisSwing/v_TennisSwing_g20_c03 91 +TennisSwing/v_TennisSwing_g23_c02 91 +ParallelBars/v_ParallelBars_g18_c03 56 +HammerThrow/v_HammerThrow_g13_c01 35 +Punch/v_Punch_g22_c06 70 +PlayingPiano/v_PlayingPiano_g11_c02 63 +PlayingDaf/v_PlayingDaf_g21_c06 59 +FrontCrawl/v_FrontCrawl_g13_c03 31 +IceDancing/v_IceDancing_g16_c03 43 +Diving/v_Diving_g25_c04 25 
+Biking/v_Biking_g11_c06 10 +TaiChi/v_TaiChi_g21_c01 90 +PizzaTossing/v_PizzaTossing_g11_c01 57 +PlayingDaf/v_PlayingDaf_g08_c04 59 +BasketballDunk/v_BasketballDunk_g14_c01 8 +Punch/v_Punch_g15_c03 70 +Mixing/v_Mixing_g16_c02 53 +JumpingJack/v_JumpingJack_g11_c01 46 +Punch/v_Punch_g13_c04 70 +RopeClimbing/v_RopeClimbing_g19_c06 74 +RockClimbingIndoor/v_RockClimbingIndoor_g09_c04 73 +TennisSwing/v_TennisSwing_g11_c01 91 +Basketball/v_Basketball_g18_c01 7 +Skijet/v_Skijet_g19_c03 81 +Basketball/v_Basketball_g20_c05 7 +MilitaryParade/v_MilitaryParade_g24_c01 52 +WritingOnBoard/v_WritingOnBoard_g11_c02 99 +SalsaSpin/v_SalsaSpin_g12_c05 76 +TableTennisShot/v_TableTennisShot_g23_c07 89 +RopeClimbing/v_RopeClimbing_g21_c04 74 +SoccerPenalty/v_SoccerPenalty_g15_c02 84 +ApplyLipstick/v_ApplyLipstick_g10_c02 1 +BabyCrawling/v_BabyCrawling_g23_c03 3 +HorseRace/v_HorseRace_g15_c03 40 +JavelinThrow/v_JavelinThrow_g08_c03 44 +BlowingCandles/v_BlowingCandles_g14_c02 13 +Bowling/v_Bowling_g23_c03 15 +GolfSwing/v_GolfSwing_g17_c06 32 +IceDancing/v_IceDancing_g22_c05 43 +ParallelBars/v_ParallelBars_g11_c03 56 +PoleVault/v_PoleVault_g22_c01 67 +PlayingTabla/v_PlayingTabla_g13_c04 65 +MilitaryParade/v_MilitaryParade_g18_c01 52 +Nunchucks/v_Nunchucks_g23_c03 55 +Shotput/v_Shotput_g11_c03 78 +TrampolineJumping/v_TrampolineJumping_g12_c02 93 +Skiing/v_Skiing_g19_c06 80 +GolfSwing/v_GolfSwing_g08_c01 32 +PlayingDhol/v_PlayingDhol_g13_c06 60 +CricketBowling/v_CricketBowling_g16_c01 22 +ShavingBeard/v_ShavingBeard_g08_c01 77 +Swing/v_Swing_g15_c02 88 +BoxingPunchingBag/v_BoxingPunchingBag_g09_c03 16 +JumpRope/v_JumpRope_g15_c01 47 +PlayingFlute/v_PlayingFlute_g08_c03 61 +BaseballPitch/v_BaseballPitch_g17_c03 6 +BandMarching/v_BandMarching_g20_c05 5 +CricketShot/v_CricketShot_g14_c04 23 +TableTennisShot/v_TableTennisShot_g24_c01 89 +SumoWrestling/v_SumoWrestling_g15_c04 86 +BabyCrawling/v_BabyCrawling_g12_c02 3 +Haircut/v_Haircut_g20_c04 33 +PizzaTossing/v_PizzaTossing_g22_c01 57 
+CleanAndJerk/v_CleanAndJerk_g14_c04 20 +Archery/v_Archery_g21_c02 2 +CuttingInKitchen/v_CuttingInKitchen_g14_c04 24 +TrampolineJumping/v_TrampolineJumping_g09_c02 93 +PlayingDaf/v_PlayingDaf_g12_c03 59 +HorseRace/v_HorseRace_g18_c06 40 +Hammering/v_Hammering_g23_c01 34 +ThrowDiscus/v_ThrowDiscus_g21_c03 92 +JavelinThrow/v_JavelinThrow_g24_c03 44 +GolfSwing/v_GolfSwing_g18_c02 32 +PlayingSitar/v_PlayingSitar_g09_c04 64 +WalkingWithDog/v_WalkingWithDog_g16_c02 97 +JumpRope/v_JumpRope_g11_c01 47 +FrisbeeCatch/v_FrisbeeCatch_g10_c02 30 +PlayingFlute/v_PlayingFlute_g15_c06 61 +Skiing/v_Skiing_g17_c01 80 +HandstandPushups/v_HandStandPushups_g25_c01 36 +WritingOnBoard/v_WritingOnBoard_g19_c03 99 +PizzaTossing/v_PizzaTossing_g23_c03 57 +Shotput/v_Shotput_g24_c02 78 +ApplyEyeMakeup/v_ApplyEyeMakeup_g15_c03 0 +ThrowDiscus/v_ThrowDiscus_g09_c06 92 +HighJump/v_HighJump_g11_c06 39 +BalanceBeam/v_BalanceBeam_g16_c02 4 +PlayingGuitar/v_PlayingGuitar_g11_c06 62 +CuttingInKitchen/v_CuttingInKitchen_g24_c01 24 +ApplyEyeMakeup/v_ApplyEyeMakeup_g25_c07 0 +HulaHoop/v_HulaHoop_g22_c03 42 +JugglingBalls/v_JugglingBalls_g19_c03 45 +ParallelBars/v_ParallelBars_g13_c04 56 +BlowDryHair/v_BlowDryHair_g09_c03 12 +BandMarching/v_BandMarching_g25_c06 5 +BasketballDunk/v_BasketballDunk_g10_c05 8 +Hammering/v_Hammering_g19_c05 34 +BrushingTeeth/v_BrushingTeeth_g09_c02 19 +HulaHoop/v_HulaHoop_g19_c03 42 +BalanceBeam/v_BalanceBeam_g14_c02 4 +PlayingTabla/v_PlayingTabla_g23_c03 65 +Drumming/v_Drumming_g09_c03 26 +BenchPress/v_BenchPress_g11_c01 9 +ApplyEyeMakeup/v_ApplyEyeMakeup_g11_c01 0 +ApplyLipstick/v_ApplyLipstick_g25_c02 1 +PlayingPiano/v_PlayingPiano_g16_c02 63 +RockClimbingIndoor/v_RockClimbingIndoor_g17_c05 73 +Knitting/v_Knitting_g15_c05 49 +ThrowDiscus/v_ThrowDiscus_g21_c04 92 +PizzaTossing/v_PizzaTossing_g10_c01 57 +BlowDryHair/v_BlowDryHair_g25_c01 12 +Fencing/v_Fencing_g10_c04 27 +HandstandPushups/v_HandStandPushups_g20_c02 36 +TrampolineJumping/v_TrampolineJumping_g22_c05 93 
+Surfing/v_Surfing_g21_c02 87 +Punch/v_Punch_g14_c02 70 +WallPushups/v_WallPushups_g12_c04 98 +HandstandPushups/v_HandStandPushups_g09_c02 36 +Shotput/v_Shotput_g16_c02 78 +PushUps/v_PushUps_g20_c03 71 +Typing/v_Typing_g20_c03 94 +SoccerJuggling/v_SoccerJuggling_g25_c04 83 +PoleVault/v_PoleVault_g15_c04 67 +SoccerJuggling/v_SoccerJuggling_g17_c04 83 +YoYo/v_YoYo_g22_c04 100 +BandMarching/v_BandMarching_g16_c07 5 +SkyDiving/v_SkyDiving_g16_c03 82 +FieldHockeyPenalty/v_FieldHockeyPenalty_g10_c01 28 +FieldHockeyPenalty/v_FieldHockeyPenalty_g25_c01 28 +BaseballPitch/v_BaseballPitch_g13_c04 6 +VolleyballSpiking/v_VolleyballSpiking_g13_c02 96 +LongJump/v_LongJump_g24_c02 50 +ShavingBeard/v_ShavingBeard_g15_c02 77 +Archery/v_Archery_g11_c06 2 +RockClimbingIndoor/v_RockClimbingIndoor_g23_c04 73 +TaiChi/v_TaiChi_g22_c02 90 +HorseRace/v_HorseRace_g09_c04 40 +CuttingInKitchen/v_CuttingInKitchen_g22_c03 24 +BoxingSpeedBag/v_BoxingSpeedBag_g10_c04 17 +RockClimbingIndoor/v_RockClimbingIndoor_g17_c03 73 +MoppingFloor/v_MoppingFloor_g19_c01 54 +WritingOnBoard/v_WritingOnBoard_g17_c03 99 +FloorGymnastics/v_FloorGymnastics_g10_c03 29 +UnevenBars/v_UnevenBars_g15_c06 95 +Swing/v_Swing_g15_c04 88 +BenchPress/v_BenchPress_g11_c05 9 +HeadMassage/v_HeadMassage_g18_c02 38 +StillRings/v_StillRings_g25_c02 85 +PlayingViolin/v_PlayingViolin_g15_c02 66 +RopeClimbing/v_RopeClimbing_g19_c01 74 +PlayingFlute/v_PlayingFlute_g25_c07 61 +Kayaking/v_Kayaking_g17_c04 48 +Archery/v_Archery_g22_c05 2 +PullUps/v_PullUps_g08_c03 69 +BenchPress/v_BenchPress_g15_c03 9 +BlowDryHair/v_BlowDryHair_g23_c02 12 +HandstandWalking/v_HandstandWalking_g19_c03 37 +MoppingFloor/v_MoppingFloor_g19_c04 54 +Rowing/v_Rowing_g12_c03 75 +Biking/v_Biking_g12_c01 10 +HandstandPushups/v_HandStandPushups_g08_c04 36 +Punch/v_Punch_g18_c01 70 +PlayingGuitar/v_PlayingGuitar_g19_c05 62 +GolfSwing/v_GolfSwing_g18_c01 32 +Haircut/v_Haircut_g13_c01 33 +WritingOnBoard/v_WritingOnBoard_g23_c04 99 +Bowling/v_Bowling_g12_c01 15 
+BenchPress/v_BenchPress_g12_c03 9 +PlayingGuitar/v_PlayingGuitar_g11_c03 62 +PlayingFlute/v_PlayingFlute_g09_c05 61 +ApplyEyeMakeup/v_ApplyEyeMakeup_g12_c04 0 +Basketball/v_Basketball_g16_c03 7 +PlayingFlute/v_PlayingFlute_g17_c05 61 +RockClimbingIndoor/v_RockClimbingIndoor_g10_c05 73 +Mixing/v_Mixing_g14_c04 53 +FrontCrawl/v_FrontCrawl_g18_c01 31 +HorseRace/v_HorseRace_g23_c04 40 +BreastStroke/v_BreastStroke_g13_c02 18 +PlayingFlute/v_PlayingFlute_g10_c04 61 +BlowingCandles/v_BlowingCandles_g25_c06 13 +PushUps/v_PushUps_g18_c03 71 +JugglingBalls/v_JugglingBalls_g24_c03 45 +Basketball/v_Basketball_g11_c05 7 +BaseballPitch/v_BaseballPitch_g16_c03 6 +BaseballPitch/v_BaseballPitch_g25_c05 6 +GolfSwing/v_GolfSwing_g16_c04 32 +Diving/v_Diving_g22_c04 25 +FrisbeeCatch/v_FrisbeeCatch_g14_c04 30 +IceDancing/v_IceDancing_g12_c06 43 +JavelinThrow/v_JavelinThrow_g17_c01 44 +Fencing/v_Fencing_g23_c03 27 +TrampolineJumping/v_TrampolineJumping_g15_c03 93 +HeadMassage/v_HeadMassage_g21_c06 38 +HorseRace/v_HorseRace_g23_c03 40 +StillRings/v_StillRings_g10_c05 85 +TableTennisShot/v_TableTennisShot_g25_c07 89 +CuttingInKitchen/v_CuttingInKitchen_g09_c01 24 +Drumming/v_Drumming_g23_c04 26 +HandstandPushups/v_HandStandPushups_g21_c03 36 +CliffDiving/v_CliffDiving_g15_c01 21 +HammerThrow/v_HammerThrow_g16_c02 35 +TaiChi/v_TaiChi_g08_c04 90 +BoxingSpeedBag/v_BoxingSpeedBag_g15_c05 17 +BlowingCandles/v_BlowingCandles_g19_c03 13 +Punch/v_Punch_g24_c03 70 +Basketball/v_Basketball_g20_c06 7 +VolleyballSpiking/v_VolleyballSpiking_g22_c01 96 +Haircut/v_Haircut_g09_c04 33 +Bowling/v_Bowling_g23_c07 15 +Drumming/v_Drumming_g15_c06 26 +HandstandPushups/v_HandStandPushups_g19_c01 36 +BoxingSpeedBag/v_BoxingSpeedBag_g25_c02 17 +JugglingBalls/v_JugglingBalls_g13_c04 45 +ParallelBars/v_ParallelBars_g25_c02 56 +BrushingTeeth/v_BrushingTeeth_g19_c02 19 +LongJump/v_LongJump_g09_c05 50 +YoYo/v_YoYo_g20_c01 100 +BabyCrawling/v_BabyCrawling_g14_c04 3 +Billiards/v_Billiards_g10_c05 11 
+ShavingBeard/v_ShavingBeard_g24_c07 77 +BenchPress/v_BenchPress_g15_c04 9 +PlayingFlute/v_PlayingFlute_g12_c03 61 +Mixing/v_Mixing_g23_c02 53 +Rafting/v_Rafting_g10_c05 72 +PizzaTossing/v_PizzaTossing_g16_c02 57 +PlayingDhol/v_PlayingDhol_g08_c03 60 +Hammering/v_Hammering_g16_c01 34 +Archery/v_Archery_g23_c07 2 +TableTennisShot/v_TableTennisShot_g15_c02 89 +TrampolineJumping/v_TrampolineJumping_g25_c02 93 +BenchPress/v_BenchPress_g16_c05 9 +MoppingFloor/v_MoppingFloor_g13_c02 54 +WalkingWithDog/v_WalkingWithDog_g15_c02 97 +BoxingSpeedBag/v_BoxingSpeedBag_g25_c05 17 +Skiing/v_Skiing_g09_c06 80 +TennisSwing/v_TennisSwing_g18_c06 91 +PlayingFlute/v_PlayingFlute_g18_c01 61 +BodyWeightSquats/v_BodyWeightSquats_g16_c03 14 +PlayingTabla/v_PlayingTabla_g14_c03 65 +TennisSwing/v_TennisSwing_g24_c05 91 +LongJump/v_LongJump_g22_c03 50 +BlowingCandles/v_BlowingCandles_g13_c03 13 +WallPushups/v_WallPushups_g08_c03 98 +PlayingTabla/v_PlayingTabla_g11_c03 65 +CricketShot/v_CricketShot_g15_c05 23 +CleanAndJerk/v_CleanAndJerk_g21_c04 20 +ApplyLipstick/v_ApplyLipstick_g24_c05 1 +HulaHoop/v_HulaHoop_g08_c02 42 +PlayingViolin/v_PlayingViolin_g12_c01 66 +SoccerPenalty/v_SoccerPenalty_g20_c01 84 +BandMarching/v_BandMarching_g25_c05 5 +PoleVault/v_PoleVault_g09_c03 67 +HammerThrow/v_HammerThrow_g19_c02 35 +PizzaTossing/v_PizzaTossing_g19_c01 57 +HorseRace/v_HorseRace_g20_c01 40 +BoxingPunchingBag/v_BoxingPunchingBag_g23_c07 16 +UnevenBars/v_UnevenBars_g19_c03 95 +GolfSwing/v_GolfSwing_g25_c02 32 +Basketball/v_Basketball_g19_c06 7 +PlayingFlute/v_PlayingFlute_g15_c01 61 +Diving/v_Diving_g15_c05 25 +WallPushups/v_WallPushups_g08_c02 98 +PizzaTossing/v_PizzaTossing_g12_c02 57 +HorseRace/v_HorseRace_g14_c02 40 +Surfing/v_Surfing_g09_c05 87 +Skiing/v_Skiing_g15_c03 80 +MilitaryParade/v_MilitaryParade_g14_c01 52 +Rowing/v_Rowing_g19_c02 75 +HandstandPushups/v_HandStandPushups_g13_c05 36 +HammerThrow/v_HammerThrow_g17_c01 35 +PlayingTabla/v_PlayingTabla_g08_c03 65 
+BenchPress/v_BenchPress_g18_c07 9 +WalkingWithDog/v_WalkingWithDog_g21_c04 97 +SumoWrestling/v_SumoWrestling_g22_c03 86 +HorseRace/v_HorseRace_g17_c03 40 +FrontCrawl/v_FrontCrawl_g09_c01 31 +CleanAndJerk/v_CleanAndJerk_g09_c01 20 +StillRings/v_StillRings_g17_c01 85 +PizzaTossing/v_PizzaTossing_g09_c04 57 +BrushingTeeth/v_BrushingTeeth_g13_c01 19 +Hammering/v_Hammering_g23_c03 34 +BasketballDunk/v_BasketballDunk_g15_c05 8 +Fencing/v_Fencing_g15_c04 27 +GolfSwing/v_GolfSwing_g08_c02 32 +JugglingBalls/v_JugglingBalls_g17_c04 45 +SkateBoarding/v_SkateBoarding_g18_c06 79 +Biking/v_Biking_g12_c04 10 +CleanAndJerk/v_CleanAndJerk_g23_c01 20 +Shotput/v_Shotput_g22_c03 78 +CliffDiving/v_CliffDiving_g17_c05 21 +BoxingSpeedBag/v_BoxingSpeedBag_g20_c04 17 +Punch/v_Punch_g22_c05 70 +BoxingSpeedBag/v_BoxingSpeedBag_g18_c02 17 +Skiing/v_Skiing_g17_c02 80 +BaseballPitch/v_BaseballPitch_g11_c02 6 +WritingOnBoard/v_WritingOnBoard_g18_c07 99 +HorseRiding/v_HorseRiding_g08_c03 41 +HulaHoop/v_HulaHoop_g09_c01 42 +MilitaryParade/v_MilitaryParade_g22_c03 52 +ApplyEyeMakeup/v_ApplyEyeMakeup_g25_c02 0 +ThrowDiscus/v_ThrowDiscus_g14_c01 92 +HorseRiding/v_HorseRiding_g12_c02 41 +BabyCrawling/v_BabyCrawling_g13_c04 3 +Typing/v_Typing_g12_c02 94 +Surfing/v_Surfing_g17_c03 87 +Diving/v_Diving_g21_c05 25 +RockClimbingIndoor/v_RockClimbingIndoor_g13_c01 73 +Knitting/v_Knitting_g14_c05 49 +HeadMassage/v_HeadMassage_g10_c04 38 +Punch/v_Punch_g09_c01 70 +PoleVault/v_PoleVault_g12_c04 67 +RockClimbingIndoor/v_RockClimbingIndoor_g19_c02 73 +BalanceBeam/v_BalanceBeam_g15_c04 4 +HorseRiding/v_HorseRiding_g11_c06 41 +PushUps/v_PushUps_g23_c04 71 +BoxingPunchingBag/v_BoxingPunchingBag_g25_c01 16 +MoppingFloor/v_MoppingFloor_g10_c04 54 +Diving/v_Diving_g25_c02 25 +Diving/v_Diving_g13_c07 25 +Skijet/v_Skijet_g13_c01 81 +BasketballDunk/v_BasketballDunk_g15_c06 8 +SkateBoarding/v_SkateBoarding_g12_c03 79 +CricketBowling/v_CricketBowling_g19_c05 22 +PlayingViolin/v_PlayingViolin_g10_c01 66 
+SalsaSpin/v_SalsaSpin_g08_c05 76 +HandstandPushups/v_HandStandPushups_g15_c02 36 +CleanAndJerk/v_CleanAndJerk_g11_c02 20 +Haircut/v_Haircut_g15_c04 33 +SoccerJuggling/v_SoccerJuggling_g21_c02 83 +Typing/v_Typing_g20_c06 94 +JavelinThrow/v_JavelinThrow_g16_c04 44 +PlayingCello/v_PlayingCello_g23_c05 58 +JugglingBalls/v_JugglingBalls_g13_c02 45 +WritingOnBoard/v_WritingOnBoard_g16_c02 99 +PlayingGuitar/v_PlayingGuitar_g17_c05 62 +PushUps/v_PushUps_g24_c01 71 +BaseballPitch/v_BaseballPitch_g09_c05 6 +JumpingJack/v_JumpingJack_g14_c02 46 +Typing/v_Typing_g18_c02 94 +Rafting/v_Rafting_g14_c05 72 +BrushingTeeth/v_BrushingTeeth_g25_c07 19 +Basketball/v_Basketball_g17_c01 7 +JavelinThrow/v_JavelinThrow_g14_c05 44 +Haircut/v_Haircut_g09_c07 33 +BaseballPitch/v_BaseballPitch_g15_c03 6 +PlayingCello/v_PlayingCello_g23_c07 58 +Nunchucks/v_Nunchucks_g25_c05 55 +Drumming/v_Drumming_g13_c06 26 +FrontCrawl/v_FrontCrawl_g09_c03 31 +CuttingInKitchen/v_CuttingInKitchen_g18_c03 24 +SkyDiving/v_SkyDiving_g25_c04 82 +BodyWeightSquats/v_BodyWeightSquats_g09_c04 14 +HandstandPushups/v_HandStandPushups_g12_c01 36 +HammerThrow/v_HammerThrow_g17_c03 35 +PushUps/v_PushUps_g21_c04 71 +Biking/v_Biking_g09_c04 10 +VolleyballSpiking/v_VolleyballSpiking_g18_c03 96 +IceDancing/v_IceDancing_g19_c01 43 +BaseballPitch/v_BaseballPitch_g21_c04 6 +HandstandPushups/v_HandStandPushups_g21_c05 36 +SoccerPenalty/v_SoccerPenalty_g17_c04 84 +VolleyballSpiking/v_VolleyballSpiking_g10_c01 96 +FrontCrawl/v_FrontCrawl_g12_c02 31 +HorseRiding/v_HorseRiding_g12_c07 41 +Kayaking/v_Kayaking_g14_c03 48 +Rafting/v_Rafting_g16_c05 72 +Kayaking/v_Kayaking_g08_c06 48 +Skijet/v_Skijet_g13_c04 81 +Haircut/v_Haircut_g10_c01 33 +PlayingPiano/v_PlayingPiano_g17_c03 63 +Basketball/v_Basketball_g15_c04 7 +WalkingWithDog/v_WalkingWithDog_g08_c05 97 +PlayingSitar/v_PlayingSitar_g24_c03 64 +PlayingDhol/v_PlayingDhol_g11_c07 60 +Basketball/v_Basketball_g20_c03 7 +Kayaking/v_Kayaking_g17_c02 48 +Skiing/v_Skiing_g17_c03 80 
+CuttingInKitchen/v_CuttingInKitchen_g21_c03 24 +CricketBowling/v_CricketBowling_g18_c03 22 +BlowDryHair/v_BlowDryHair_g21_c04 12 +ThrowDiscus/v_ThrowDiscus_g23_c03 92 +PlayingGuitar/v_PlayingGuitar_g18_c05 62 +PlayingGuitar/v_PlayingGuitar_g12_c03 62 +JumpingJack/v_JumpingJack_g09_c02 46 +TennisSwing/v_TennisSwing_g14_c05 91 +PlayingCello/v_PlayingCello_g19_c06 58 +PlayingGuitar/v_PlayingGuitar_g15_c03 62 +PlayingDaf/v_PlayingDaf_g23_c04 59 +RockClimbingIndoor/v_RockClimbingIndoor_g09_c03 73 +PlayingCello/v_PlayingCello_g25_c05 58 +Drumming/v_Drumming_g23_c05 26 +PlayingPiano/v_PlayingPiano_g19_c04 63 +FieldHockeyPenalty/v_FieldHockeyPenalty_g13_c04 28 +PullUps/v_PullUps_g23_c03 69 +PushUps/v_PushUps_g20_c01 71 +LongJump/v_LongJump_g24_c04 50 +WallPushups/v_WallPushups_g19_c05 98 +BrushingTeeth/v_BrushingTeeth_g12_c01 19 +HorseRace/v_HorseRace_g24_c03 40 +TennisSwing/v_TennisSwing_g19_c06 91 +PlayingGuitar/v_PlayingGuitar_g12_c04 62 +SoccerPenalty/v_SoccerPenalty_g16_c05 84 +IceDancing/v_IceDancing_g23_c07 43 +CliffDiving/v_CliffDiving_g23_c02 21 +CricketShot/v_CricketShot_g10_c06 23 +Hammering/v_Hammering_g20_c03 34 +Typing/v_Typing_g11_c05 94 +Skijet/v_Skijet_g14_c01 81 +PushUps/v_PushUps_g13_c02 71 +PizzaTossing/v_PizzaTossing_g10_c03 57 +TaiChi/v_TaiChi_g12_c04 90 +BaseballPitch/v_BaseballPitch_g21_c03 6 +PlayingDhol/v_PlayingDhol_g19_c05 60 +Lunges/v_Lunges_g23_c04 51 +TennisSwing/v_TennisSwing_g17_c05 91 +Haircut/v_Haircut_g22_c07 33 +Punch/v_Punch_g25_c05 70 +Typing/v_Typing_g19_c01 94 +CricketBowling/v_CricketBowling_g24_c03 22 +TrampolineJumping/v_TrampolineJumping_g22_c04 93 +UnevenBars/v_UnevenBars_g09_c03 95 +PlayingTabla/v_PlayingTabla_g18_c06 65 +FieldHockeyPenalty/v_FieldHockeyPenalty_g19_c01 28 +PlayingCello/v_PlayingCello_g23_c01 58 +Swing/v_Swing_g18_c04 88 +ParallelBars/v_ParallelBars_g09_c04 56 +Haircut/v_Haircut_g10_c03 33 +Drumming/v_Drumming_g15_c04 26 +WallPushups/v_WallPushups_g19_c02 98 +PlayingPiano/v_PlayingPiano_g08_c04 63 
+Lunges/v_Lunges_g09_c02 51 +VolleyballSpiking/v_VolleyballSpiking_g16_c01 96 +PlayingSitar/v_PlayingSitar_g08_c03 64 +HorseRace/v_HorseRace_g13_c03 40 +BabyCrawling/v_BabyCrawling_g25_c03 3 +Skiing/v_Skiing_g13_c04 80 +BoxingSpeedBag/v_BoxingSpeedBag_g23_c01 17 +HighJump/v_HighJump_g09_c01 39 +BrushingTeeth/v_BrushingTeeth_g16_c01 19 +TaiChi/v_TaiChi_g14_c03 90 +FloorGymnastics/v_FloorGymnastics_g12_c02 29 +Hammering/v_Hammering_g12_c04 34 +PlayingGuitar/v_PlayingGuitar_g14_c07 62 +Nunchucks/v_Nunchucks_g20_c01 55 +HorseRiding/v_HorseRiding_g18_c06 41 +PlayingCello/v_PlayingCello_g13_c05 58 +Swing/v_Swing_g19_c02 88 +Basketball/v_Basketball_g08_c03 7 +Diving/v_Diving_g09_c03 25 +HighJump/v_HighJump_g19_c01 39 +Diving/v_Diving_g12_c04 25 +SkateBoarding/v_SkateBoarding_g12_c04 79 +PlayingDhol/v_PlayingDhol_g08_c06 60 +ShavingBeard/v_ShavingBeard_g19_c06 77 +Rafting/v_Rafting_g13_c01 72 +WritingOnBoard/v_WritingOnBoard_g08_c04 99 +BalanceBeam/v_BalanceBeam_g17_c05 4 +SoccerJuggling/v_SoccerJuggling_g20_c01 83 +PommelHorse/v_PommelHorse_g21_c02 68 +HeadMassage/v_HeadMassage_g21_c03 38 +PlayingTabla/v_PlayingTabla_g24_c02 65 +WalkingWithDog/v_WalkingWithDog_g11_c01 97 +PlayingCello/v_PlayingCello_g16_c05 58 +JavelinThrow/v_JavelinThrow_g25_c02 44 +BlowDryHair/v_BlowDryHair_g12_c03 12 +GolfSwing/v_GolfSwing_g12_c04 32 +WritingOnBoard/v_WritingOnBoard_g10_c02 99 +FrisbeeCatch/v_FrisbeeCatch_g16_c01 30 +CleanAndJerk/v_CleanAndJerk_g21_c02 20 +CliffDiving/v_CliffDiving_g19_c07 21 +BenchPress/v_BenchPress_g10_c02 9 +Fencing/v_Fencing_g23_c01 27 +ApplyEyeMakeup/v_ApplyEyeMakeup_g24_c05 0 +FrisbeeCatch/v_FrisbeeCatch_g10_c03 30 +SoccerPenalty/v_SoccerPenalty_g14_c02 84 +Basketball/v_Basketball_g15_c03 7 +PlayingDhol/v_PlayingDhol_g08_c07 60 +RockClimbingIndoor/v_RockClimbingIndoor_g16_c02 73 +MilitaryParade/v_MilitaryParade_g19_c05 52 +Shotput/v_Shotput_g20_c03 78 +PlayingGuitar/v_PlayingGuitar_g20_c02 62 +BasketballDunk/v_BasketballDunk_g15_c01 8 +Haircut/v_Haircut_g22_c02 
33 +Bowling/v_Bowling_g19_c06 15 +PommelHorse/v_PommelHorse_g21_c01 68 +HorseRace/v_HorseRace_g16_c02 40 +ApplyEyeMakeup/v_ApplyEyeMakeup_g10_c04 0 +SkyDiving/v_SkyDiving_g08_c01 82 +PlayingGuitar/v_PlayingGuitar_g08_c07 62 +PlayingCello/v_PlayingCello_g12_c05 58 +HammerThrow/v_HammerThrow_g24_c04 35 +SumoWrestling/v_SumoWrestling_g08_c03 86 +TrampolineJumping/v_TrampolineJumping_g21_c01 93 +StillRings/v_StillRings_g13_c04 85 +HammerThrow/v_HammerThrow_g09_c02 35 +Haircut/v_Haircut_g20_c02 33 +HorseRiding/v_HorseRiding_g10_c01 41 +ParallelBars/v_ParallelBars_g22_c02 56 +Drumming/v_Drumming_g23_c07 26 +Skiing/v_Skiing_g25_c04 80 +BoxingSpeedBag/v_BoxingSpeedBag_g12_c01 17 +PlayingViolin/v_PlayingViolin_g20_c02 66 +Drumming/v_Drumming_g17_c03 26 +SoccerJuggling/v_SoccerJuggling_g11_c06 83 +PullUps/v_PullUps_g08_c04 69 +Skijet/v_Skijet_g24_c04 81 +BodyWeightSquats/v_BodyWeightSquats_g08_c01 14 +Billiards/v_Billiards_g23_c01 11 +PommelHorse/v_PommelHorse_g22_c01 68 +BoxingPunchingBag/v_BoxingPunchingBag_g11_c01 16 +TableTennisShot/v_TableTennisShot_g21_c03 89 +PlayingFlute/v_PlayingFlute_g19_c03 61 +ApplyLipstick/v_ApplyLipstick_g25_c03 1 +SumoWrestling/v_SumoWrestling_g14_c03 86 +BlowDryHair/v_BlowDryHair_g12_c01 12 +Swing/v_Swing_g23_c02 88 +PlayingViolin/v_PlayingViolin_g19_c04 66 +BoxingSpeedBag/v_BoxingSpeedBag_g09_c07 17 +BreastStroke/v_BreastStroke_g11_c01 18 +PizzaTossing/v_PizzaTossing_g10_c04 57 +Drumming/v_Drumming_g18_c01 26 +IceDancing/v_IceDancing_g13_c01 43 +JumpRope/v_JumpRope_g13_c01 47 +Kayaking/v_Kayaking_g10_c04 48 +Swing/v_Swing_g24_c05 88 +Rowing/v_Rowing_g17_c07 75 +Diving/v_Diving_g22_c02 25 +VolleyballSpiking/v_VolleyballSpiking_g17_c01 96 +CleanAndJerk/v_CleanAndJerk_g10_c04 20 +Rafting/v_Rafting_g19_c02 72 +BenchPress/v_BenchPress_g14_c03 9 +FloorGymnastics/v_FloorGymnastics_g10_c04 29 +Rafting/v_Rafting_g16_c03 72 +Swing/v_Swing_g10_c01 88 +BrushingTeeth/v_BrushingTeeth_g22_c07 19 +PommelHorse/v_PommelHorse_g17_c01 68 
+LongJump/v_LongJump_g12_c03 50 +PlayingDaf/v_PlayingDaf_g19_c04 59 +FrisbeeCatch/v_FrisbeeCatch_g13_c04 30 +UnevenBars/v_UnevenBars_g19_c01 95 +ApplyEyeMakeup/v_ApplyEyeMakeup_g25_c06 0 +BasketballDunk/v_BasketballDunk_g24_c05 8 +Surfing/v_Surfing_g10_c03 87 +PlayingSitar/v_PlayingSitar_g08_c01 64 +HighJump/v_HighJump_g12_c03 39 +PullUps/v_PullUps_g24_c02 69 +FrisbeeCatch/v_FrisbeeCatch_g09_c01 30 +BandMarching/v_BandMarching_g08_c05 5 +Billiards/v_Billiards_g14_c03 11 +Lunges/v_Lunges_g12_c05 51 +PlayingDaf/v_PlayingDaf_g09_c04 59 +SoccerJuggling/v_SoccerJuggling_g11_c01 83 +Nunchucks/v_Nunchucks_g11_c02 55 +PlayingDhol/v_PlayingDhol_g23_c01 60 +Kayaking/v_Kayaking_g18_c04 48 +Fencing/v_Fencing_g20_c02 27 +JumpingJack/v_JumpingJack_g09_c05 46 +BlowDryHair/v_BlowDryHair_g11_c05 12 +JumpingJack/v_JumpingJack_g18_c04 46 +HeadMassage/v_HeadMassage_g21_c05 38 +Knitting/v_Knitting_g10_c07 49 +PoleVault/v_PoleVault_g18_c01 67 +TennisSwing/v_TennisSwing_g16_c04 91 +CuttingInKitchen/v_CuttingInKitchen_g17_c04 24 +Biking/v_Biking_g10_c06 10 +RockClimbingIndoor/v_RockClimbingIndoor_g21_c04 73 +CricketShot/v_CricketShot_g08_c05 23 +RopeClimbing/v_RopeClimbing_g09_c02 74 +FrontCrawl/v_FrontCrawl_g21_c05 31 +Shotput/v_Shotput_g20_c01 78 +Skiing/v_Skiing_g20_c01 80 +PlayingCello/v_PlayingCello_g12_c04 58 +PlayingPiano/v_PlayingPiano_g16_c04 63 +WritingOnBoard/v_WritingOnBoard_g16_c05 99 +BabyCrawling/v_BabyCrawling_g20_c03 3 +PlayingCello/v_PlayingCello_g11_c02 58 +WalkingWithDog/v_WalkingWithDog_g19_c05 97 +MilitaryParade/v_MilitaryParade_g21_c02 52 +PlayingViolin/v_PlayingViolin_g16_c04 66 +BenchPress/v_BenchPress_g10_c01 9 +Fencing/v_Fencing_g19_c03 27 +FrisbeeCatch/v_FrisbeeCatch_g08_c01 30 +SoccerPenalty/v_SoccerPenalty_g24_c02 84 +BaseballPitch/v_BaseballPitch_g19_c01 6 +PlayingTabla/v_PlayingTabla_g10_c05 65 +BoxingPunchingBag/v_BoxingPunchingBag_g17_c01 16 +SumoWrestling/v_SumoWrestling_g24_c04 86 +ThrowDiscus/v_ThrowDiscus_g12_c04 92 +YoYo/v_YoYo_g23_c06 100 
+SalsaSpin/v_SalsaSpin_g15_c03 76 +HulaHoop/v_HulaHoop_g16_c06 42 +Skijet/v_Skijet_g11_c01 81 +PlayingGuitar/v_PlayingGuitar_g08_c04 62 +HulaHoop/v_HulaHoop_g10_c04 42 +GolfSwing/v_GolfSwing_g11_c03 32 +PoleVault/v_PoleVault_g13_c05 67 +TrampolineJumping/v_TrampolineJumping_g16_c03 93 +SoccerPenalty/v_SoccerPenalty_g23_c05 84 +HorseRiding/v_HorseRiding_g08_c06 41 +SumoWrestling/v_SumoWrestling_g19_c05 86 +Skiing/v_Skiing_g25_c02 80 +BalanceBeam/v_BalanceBeam_g17_c02 4 +IceDancing/v_IceDancing_g16_c06 43 +JavelinThrow/v_JavelinThrow_g19_c02 44 +Rafting/v_Rafting_g10_c01 72 +Bowling/v_Bowling_g11_c03 15 +HandstandWalking/v_HandstandWalking_g12_c03 37 +CricketBowling/v_CricketBowling_g18_c06 22 +SoccerJuggling/v_SoccerJuggling_g09_c01 83 +Punch/v_Punch_g22_c02 70 +Kayaking/v_Kayaking_g19_c04 48 +Billiards/v_Billiards_g13_c05 11 +FieldHockeyPenalty/v_FieldHockeyPenalty_g12_c04 28 +Swing/v_Swing_g13_c03 88 +Rowing/v_Rowing_g23_c02 75 +Swing/v_Swing_g08_c04 88 +PommelHorse/v_PommelHorse_g14_c01 68 +PlayingPiano/v_PlayingPiano_g22_c01 63 +HammerThrow/v_HammerThrow_g09_c06 35 +Typing/v_Typing_g08_c06 94 +StillRings/v_StillRings_g19_c03 85 +Punch/v_Punch_g17_c07 70 +TennisSwing/v_TennisSwing_g09_c01 91 +ThrowDiscus/v_ThrowDiscus_g12_c03 92 +Knitting/v_Knitting_g22_c01 49 +PlayingCello/v_PlayingCello_g25_c01 58 +WritingOnBoard/v_WritingOnBoard_g16_c06 99 +Nunchucks/v_Nunchucks_g15_c01 55 +BenchPress/v_BenchPress_g17_c04 9 +Hammering/v_Hammering_g11_c03 34 +Typing/v_Typing_g14_c07 94 +PlayingCello/v_PlayingCello_g13_c06 58 +YoYo/v_YoYo_g16_c05 100 +TrampolineJumping/v_TrampolineJumping_g25_c03 93 +BlowDryHair/v_BlowDryHair_g17_c05 12 +UnevenBars/v_UnevenBars_g12_c01 95 +WritingOnBoard/v_WritingOnBoard_g16_c03 99 +Basketball/v_Basketball_g25_c06 7 +ApplyEyeMakeup/v_ApplyEyeMakeup_g20_c02 0 +Bowling/v_Bowling_g10_c06 15 +Bowling/v_Bowling_g22_c02 15 +Biking/v_Biking_g11_c05 10 +BandMarching/v_BandMarching_g23_c03 5 +JumpingJack/v_JumpingJack_g09_c07 46 
+VolleyballSpiking/v_VolleyballSpiking_g10_c03 96 +FloorGymnastics/v_FloorGymnastics_g20_c03 29 +CricketShot/v_CricketShot_g12_c07 23 +SoccerJuggling/v_SoccerJuggling_g17_c01 83 +PlayingGuitar/v_PlayingGuitar_g12_c01 62 +PlayingSitar/v_PlayingSitar_g20_c07 64 +BoxingSpeedBag/v_BoxingSpeedBag_g17_c06 17 +JumpingJack/v_JumpingJack_g25_c06 46 +HandstandPushups/v_HandStandPushups_g08_c05 36 +HighJump/v_HighJump_g09_c05 39 +Haircut/v_Haircut_g18_c03 33 +SoccerPenalty/v_SoccerPenalty_g09_c05 84 +HandstandWalking/v_HandstandWalking_g10_c01 37 +YoYo/v_YoYo_g18_c03 100 +HorseRiding/v_HorseRiding_g19_c03 41 +PlayingFlute/v_PlayingFlute_g25_c06 61 +ThrowDiscus/v_ThrowDiscus_g11_c03 92 +JavelinThrow/v_JavelinThrow_g19_c03 44 +CricketShot/v_CricketShot_g10_c03 23 +JumpingJack/v_JumpingJack_g24_c03 46 +YoYo/v_YoYo_g11_c05 100 +GolfSwing/v_GolfSwing_g19_c06 32 +Mixing/v_Mixing_g20_c01 53 +BreastStroke/v_BreastStroke_g19_c01 18 +UnevenBars/v_UnevenBars_g17_c04 95 +IceDancing/v_IceDancing_g13_c02 43 +FloorGymnastics/v_FloorGymnastics_g09_c03 29 +Hammering/v_Hammering_g24_c05 34 +HorseRace/v_HorseRace_g11_c01 40 +SumoWrestling/v_SumoWrestling_g19_c02 86 +Drumming/v_Drumming_g14_c03 26 +PlayingTabla/v_PlayingTabla_g14_c04 65 +Hammering/v_Hammering_g13_c03 34 +HammerThrow/v_HammerThrow_g13_c05 35 +ThrowDiscus/v_ThrowDiscus_g13_c03 92 +SumoWrestling/v_SumoWrestling_g11_c03 86 +Basketball/v_Basketball_g25_c04 7 +Swing/v_Swing_g08_c03 88 +TableTennisShot/v_TableTennisShot_g17_c01 89 +Kayaking/v_Kayaking_g22_c04 48 +HandstandPushups/v_HandStandPushups_g23_c02 36 +PommelHorse/v_PommelHorse_g20_c02 68 +Biking/v_Biking_g24_c01 10 +Rowing/v_Rowing_g25_c06 75 +BodyWeightSquats/v_BodyWeightSquats_g19_c03 14 +PlayingTabla/v_PlayingTabla_g18_c04 65 +Fencing/v_Fencing_g16_c01 27 +PommelHorse/v_PommelHorse_g20_c07 68 +FrontCrawl/v_FrontCrawl_g22_c06 31 +ApplyLipstick/v_ApplyLipstick_g24_c04 1 +CliffDiving/v_CliffDiving_g10_c07 21 +FloorGymnastics/v_FloorGymnastics_g08_c04 29 
+WritingOnBoard/v_WritingOnBoard_g08_c03 99 +WritingOnBoard/v_WritingOnBoard_g09_c01 99 +BlowingCandles/v_BlowingCandles_g09_c01 13 +Lunges/v_Lunges_g17_c03 51 +HulaHoop/v_HulaHoop_g12_c04 42 +PlayingDaf/v_PlayingDaf_g17_c04 59 +PlayingPiano/v_PlayingPiano_g24_c02 63 +FloorGymnastics/v_FloorGymnastics_g09_c05 29 +ApplyEyeMakeup/v_ApplyEyeMakeup_g24_c04 0 +WallPushups/v_WallPushups_g08_c01 98 +BoxingSpeedBag/v_BoxingSpeedBag_g17_c03 17 +Archery/v_Archery_g17_c02 2 +PoleVault/v_PoleVault_g20_c05 67 +BoxingSpeedBag/v_BoxingSpeedBag_g25_c01 17 +PlayingFlute/v_PlayingFlute_g11_c02 61 +CricketShot/v_CricketShot_g20_c03 23 +YoYo/v_YoYo_g17_c03 100 +LongJump/v_LongJump_g13_c04 50 +PlayingPiano/v_PlayingPiano_g09_c02 63 +PlayingDhol/v_PlayingDhol_g22_c01 60 +TableTennisShot/v_TableTennisShot_g24_c03 89 +JumpRope/v_JumpRope_g16_c02 47 +CricketBowling/v_CricketBowling_g12_c06 22 +CricketBowling/v_CricketBowling_g16_c04 22 +SumoWrestling/v_SumoWrestling_g17_c05 86 +Basketball/v_Basketball_g14_c05 7 +BreastStroke/v_BreastStroke_g20_c04 18 +PizzaTossing/v_PizzaTossing_g08_c03 57 +Billiards/v_Billiards_g14_c04 11 +WalkingWithDog/v_WalkingWithDog_g24_c06 97 +Lunges/v_Lunges_g11_c02 51 +HighJump/v_HighJump_g11_c05 39 +Bowling/v_Bowling_g14_c04 15 +PlayingGuitar/v_PlayingGuitar_g08_c02 62 +ApplyEyeMakeup/v_ApplyEyeMakeup_g21_c02 0 +PullUps/v_PullUps_g12_c02 69 +Lunges/v_Lunges_g21_c01 51 +CricketShot/v_CricketShot_g11_c07 23 +PlayingFlute/v_PlayingFlute_g12_c02 61 +MoppingFloor/v_MoppingFloor_g17_c04 54 +PlayingDhol/v_PlayingDhol_g17_c04 60 +PommelHorse/v_PommelHorse_g12_c06 68 +Mixing/v_Mixing_g13_c06 53 +SumoWrestling/v_SumoWrestling_g08_c02 86 +HandstandWalking/v_HandstandWalking_g20_c03 37 +Haircut/v_Haircut_g24_c06 33 +Shotput/v_Shotput_g21_c01 78 +SkyDiving/v_SkyDiving_g23_c03 82 +Surfing/v_Surfing_g16_c05 87 +Nunchucks/v_Nunchucks_g19_c02 55 +JumpRope/v_JumpRope_g10_c03 47 +ThrowDiscus/v_ThrowDiscus_g15_c01 92 +Haircut/v_Haircut_g22_c03 33 +PlayingDhol/v_PlayingDhol_g17_c01 
60 +Shotput/v_Shotput_g11_c02 78 +Surfing/v_Surfing_g24_c03 87 +PoleVault/v_PoleVault_g16_c06 67 +WalkingWithDog/v_WalkingWithDog_g16_c01 97 +Lunges/v_Lunges_g10_c05 51 +BandMarching/v_BandMarching_g12_c04 5 +TableTennisShot/v_TableTennisShot_g08_c02 89 +PlayingDhol/v_PlayingDhol_g15_c03 60 +HeadMassage/v_HeadMassage_g08_c06 38 +BodyWeightSquats/v_BodyWeightSquats_g15_c02 14 +ApplyLipstick/v_ApplyLipstick_g22_c02 1 +Fencing/v_Fencing_g16_c03 27 +Skijet/v_Skijet_g15_c04 81 +PlayingGuitar/v_PlayingGuitar_g18_c01 62 +BodyWeightSquats/v_BodyWeightSquats_g20_c04 14 +Drumming/v_Drumming_g20_c07 26 +HandstandPushups/v_HandStandPushups_g21_c01 36 +SumoWrestling/v_SumoWrestling_g13_c01 86 +FloorGymnastics/v_FloorGymnastics_g21_c04 29 +BabyCrawling/v_BabyCrawling_g24_c06 3 +Diving/v_Diving_g14_c03 25 +PlayingFlute/v_PlayingFlute_g15_c05 61 +IceDancing/v_IceDancing_g11_c01 43 +Punch/v_Punch_g09_c02 70 +Billiards/v_Billiards_g16_c02 11 +ShavingBeard/v_ShavingBeard_g17_c03 77 +PlayingDaf/v_PlayingDaf_g15_c05 59 +CricketShot/v_CricketShot_g20_c01 23 +BabyCrawling/v_BabyCrawling_g10_c04 3 +BabyCrawling/v_BabyCrawling_g12_c04 3 +FieldHockeyPenalty/v_FieldHockeyPenalty_g16_c03 28 +Skijet/v_Skijet_g13_c02 81 +HorseRace/v_HorseRace_g12_c02 40 +ApplyEyeMakeup/v_ApplyEyeMakeup_g09_c01 0 +TableTennisShot/v_TableTennisShot_g12_c03 89 +BlowingCandles/v_BlowingCandles_g23_c04 13 +Diving/v_Diving_g23_c03 25 +PlayingTabla/v_PlayingTabla_g18_c01 65 +BrushingTeeth/v_BrushingTeeth_g18_c06 19 +SkateBoarding/v_SkateBoarding_g24_c03 79 +CricketShot/v_CricketShot_g14_c06 23 +ShavingBeard/v_ShavingBeard_g15_c03 77 +PlayingDhol/v_PlayingDhol_g22_c03 60 +SkateBoarding/v_SkateBoarding_g12_c06 79 +SkateBoarding/v_SkateBoarding_g25_c03 79 +IceDancing/v_IceDancing_g23_c05 43 +Biking/v_Biking_g08_c04 10 +HandstandPushups/v_HandStandPushups_g20_c04 36 +Skiing/v_Skiing_g08_c04 80 +PlayingCello/v_PlayingCello_g25_c06 58 +Drumming/v_Drumming_g10_c02 26 +HammerThrow/v_HammerThrow_g09_c01 35 
+Hammering/v_Hammering_g17_c01 34 +Drumming/v_Drumming_g19_c06 26 +BabyCrawling/v_BabyCrawling_g15_c06 3 +Typing/v_Typing_g13_c03 94 +PommelHorse/v_PommelHorse_g11_c02 68 +Fencing/v_Fencing_g09_c03 27 +PommelHorse/v_PommelHorse_g15_c02 68 +PoleVault/v_PoleVault_g08_c03 67 +MoppingFloor/v_MoppingFloor_g19_c03 54 +TennisSwing/v_TennisSwing_g23_c05 91 +ApplyLipstick/v_ApplyLipstick_g22_c05 1 +JumpingJack/v_JumpingJack_g20_c04 46 +Fencing/v_Fencing_g08_c04 27 +BreastStroke/v_BreastStroke_g09_c03 18 +JumpRope/v_JumpRope_g09_c06 47 +Rafting/v_Rafting_g23_c02 72 +PlayingTabla/v_PlayingTabla_g16_c04 65 +SoccerJuggling/v_SoccerJuggling_g08_c02 83 +Swing/v_Swing_g09_c06 88 +JugglingBalls/v_JugglingBalls_g12_c01 45 +Billiards/v_Billiards_g18_c02 11 +JavelinThrow/v_JavelinThrow_g14_c06 44 +BabyCrawling/v_BabyCrawling_g08_c04 3 +PlayingCello/v_PlayingCello_g23_c02 58 +Basketball/v_Basketball_g19_c03 7 +Shotput/v_Shotput_g13_c01 78 +BoxingPunchingBag/v_BoxingPunchingBag_g23_c04 16 +Swing/v_Swing_g10_c04 88 +PizzaTossing/v_PizzaTossing_g16_c04 57 +Kayaking/v_Kayaking_g18_c01 48 +Surfing/v_Surfing_g08_c03 87 +PlayingCello/v_PlayingCello_g08_c06 58 +FrisbeeCatch/v_FrisbeeCatch_g15_c01 30 +Drumming/v_Drumming_g14_c04 26 +BabyCrawling/v_BabyCrawling_g15_c03 3 +WalkingWithDog/v_WalkingWithDog_g25_c01 97 +JumpingJack/v_JumpingJack_g20_c02 46 +FrisbeeCatch/v_FrisbeeCatch_g22_c05 30 +SkyDiving/v_SkyDiving_g23_c01 82 +TableTennisShot/v_TableTennisShot_g12_c04 89 +YoYo/v_YoYo_g09_c01 100 +Archery/v_Archery_g13_c01 2 +StillRings/v_StillRings_g11_c01 85 +Bowling/v_Bowling_g10_c01 15 +CliffDiving/v_CliffDiving_g25_c01 21 +Rafting/v_Rafting_g25_c01 72 +BalanceBeam/v_BalanceBeam_g25_c04 4 +TaiChi/v_TaiChi_g16_c02 90 +TennisSwing/v_TennisSwing_g16_c02 91 +BoxingPunchingBag/v_BoxingPunchingBag_g18_c04 16 +PushUps/v_PushUps_g08_c04 71 +Mixing/v_Mixing_g09_c06 53 +PlayingSitar/v_PlayingSitar_g23_c01 64 +HandstandWalking/v_HandstandWalking_g25_c03 37 +Shotput/v_Shotput_g23_c07 78 
+SumoWrestling/v_SumoWrestling_g11_c02 86 +Basketball/v_Basketball_g21_c05 7 +ApplyEyeMakeup/v_ApplyEyeMakeup_g21_c04 0 +PlayingCello/v_PlayingCello_g15_c01 58 +ShavingBeard/v_ShavingBeard_g15_c04 77 +SkateBoarding/v_SkateBoarding_g22_c05 79 +CricketBowling/v_CricketBowling_g14_c01 22 +WritingOnBoard/v_WritingOnBoard_g18_c05 99 +BaseballPitch/v_BaseballPitch_g25_c06 6 +Haircut/v_Haircut_g25_c03 33 +VolleyballSpiking/v_VolleyballSpiking_g23_c04 96 +BandMarching/v_BandMarching_g09_c02 5 +HighJump/v_HighJump_g17_c01 39 +Rafting/v_Rafting_g19_c03 72 +Fencing/v_Fencing_g25_c01 27 +ShavingBeard/v_ShavingBeard_g23_c05 77 +Fencing/v_Fencing_g18_c03 27 +CuttingInKitchen/v_CuttingInKitchen_g19_c03 24 +Surfing/v_Surfing_g18_c04 87 +TennisSwing/v_TennisSwing_g12_c07 91 +ShavingBeard/v_ShavingBeard_g19_c04 77 +Hammering/v_Hammering_g11_c01 34 +Hammering/v_Hammering_g13_c05 34 +HandstandWalking/v_HandstandWalking_g14_c02 37 +PlayingSitar/v_PlayingSitar_g17_c01 64 +PlayingFlute/v_PlayingFlute_g19_c02 61 +BaseballPitch/v_BaseballPitch_g17_c01 6 +CricketShot/v_CricketShot_g08_c04 23 +PlayingGuitar/v_PlayingGuitar_g23_c04 62 +HulaHoop/v_HulaHoop_g17_c03 42 +CricketShot/v_CricketShot_g23_c05 23 +Rafting/v_Rafting_g21_c02 72 +Surfing/v_Surfing_g14_c01 87 +Biking/v_Biking_g08_c05 10 +WalkingWithDog/v_WalkingWithDog_g20_c07 97 +HorseRace/v_HorseRace_g10_c01 40 +Surfing/v_Surfing_g12_c01 87 +BlowDryHair/v_BlowDryHair_g19_c02 12 +Surfing/v_Surfing_g12_c04 87 +Nunchucks/v_Nunchucks_g14_c04 55 +Rowing/v_Rowing_g10_c06 75 +Punch/v_Punch_g22_c03 70 +TrampolineJumping/v_TrampolineJumping_g20_c01 93 +BabyCrawling/v_BabyCrawling_g22_c02 3 +BoxingPunchingBag/v_BoxingPunchingBag_g19_c03 16 +Drumming/v_Drumming_g21_c07 26 +Billiards/v_Billiards_g08_c04 11 +BabyCrawling/v_BabyCrawling_g17_c04 3 +MilitaryParade/v_MilitaryParade_g13_c01 52 +Basketball/v_Basketball_g21_c02 7 +BrushingTeeth/v_BrushingTeeth_g17_c02 19 +IceDancing/v_IceDancing_g10_c06 43 +BasketballDunk/v_BasketballDunk_g19_c01 8 
+HighJump/v_HighJump_g11_c04 39 +ThrowDiscus/v_ThrowDiscus_g19_c02 92 +ApplyEyeMakeup/v_ApplyEyeMakeup_g24_c07 0 +Shotput/v_Shotput_g25_c01 78 +Drumming/v_Drumming_g21_c04 26 +PizzaTossing/v_PizzaTossing_g14_c01 57 +BodyWeightSquats/v_BodyWeightSquats_g09_c07 14 +Bowling/v_Bowling_g18_c03 15 +Swing/v_Swing_g08_c01 88 +BandMarching/v_BandMarching_g09_c06 5 +StillRings/v_StillRings_g25_c03 85 +Typing/v_Typing_g24_c01 94 +MoppingFloor/v_MoppingFloor_g13_c01 54 +CleanAndJerk/v_CleanAndJerk_g12_c03 20 +Kayaking/v_Kayaking_g16_c06 48 +FrisbeeCatch/v_FrisbeeCatch_g22_c04 30 +Rowing/v_Rowing_g21_c02 75 +PlayingGuitar/v_PlayingGuitar_g21_c01 62 +Nunchucks/v_Nunchucks_g25_c06 55 +GolfSwing/v_GolfSwing_g23_c07 32 +Punch/v_Punch_g16_c01 70 +TennisSwing/v_TennisSwing_g10_c06 91 +PushUps/v_PushUps_g18_c02 71 +TennisSwing/v_TennisSwing_g25_c03 91 +TennisSwing/v_TennisSwing_g09_c02 91 +JumpingJack/v_JumpingJack_g18_c01 46 +Knitting/v_Knitting_g08_c01 49 +Lunges/v_Lunges_g21_c03 51 +CliffDiving/v_CliffDiving_g20_c01 21 +HammerThrow/v_HammerThrow_g14_c03 35 +RockClimbingIndoor/v_RockClimbingIndoor_g17_c02 73 +TennisSwing/v_TennisSwing_g21_c05 91 +YoYo/v_YoYo_g10_c01 100 +WalkingWithDog/v_WalkingWithDog_g12_c03 97 +Typing/v_Typing_g17_c03 94 +PlayingDaf/v_PlayingDaf_g15_c07 59 +Drumming/v_Drumming_g24_c02 26 +Rowing/v_Rowing_g19_c03 75 +ThrowDiscus/v_ThrowDiscus_g23_c02 92 +BandMarching/v_BandMarching_g16_c06 5 +HorseRiding/v_HorseRiding_g08_c01 41 +WritingOnBoard/v_WritingOnBoard_g20_c03 99 +Diving/v_Diving_g24_c02 25 +IceDancing/v_IceDancing_g09_c05 43 +Shotput/v_Shotput_g25_c02 78 +PlayingCello/v_PlayingCello_g17_c07 58 +FloorGymnastics/v_FloorGymnastics_g19_c04 29 +Diving/v_Diving_g15_c06 25 +RockClimbingIndoor/v_RockClimbingIndoor_g20_c06 73 +Fencing/v_Fencing_g12_c01 27 +Drumming/v_Drumming_g17_c05 26 +Hammering/v_Hammering_g08_c02 34 +Swing/v_Swing_g21_c02 88 +PlayingDaf/v_PlayingDaf_g16_c06 59 +MilitaryParade/v_MilitaryParade_g08_c03 52 +HeadMassage/v_HeadMassage_g15_c02 38 
+Billiards/v_Billiards_g08_c03 11 +SumoWrestling/v_SumoWrestling_g22_c02 86 +BabyCrawling/v_BabyCrawling_g11_c02 3 +HighJump/v_HighJump_g13_c02 39 +GolfSwing/v_GolfSwing_g09_c02 32 +MilitaryParade/v_MilitaryParade_g09_c01 52 +Archery/v_Archery_g17_c03 2 +BrushingTeeth/v_BrushingTeeth_g14_c04 19 +Rafting/v_Rafting_g21_c01 72 +LongJump/v_LongJump_g12_c04 50 +Bowling/v_Bowling_g12_c04 15 +HammerThrow/v_HammerThrow_g10_c02 35 +HighJump/v_HighJump_g20_c05 39 +HorseRiding/v_HorseRiding_g08_c05 41 +WalkingWithDog/v_WalkingWithDog_g19_c03 97 +BabyCrawling/v_BabyCrawling_g24_c05 3 +FrisbeeCatch/v_FrisbeeCatch_g09_c03 30 +Skijet/v_Skijet_g14_c04 81 +Kayaking/v_Kayaking_g16_c04 48 +Nunchucks/v_Nunchucks_g18_c02 55 +CliffDiving/v_CliffDiving_g19_c05 21 +PlayingDaf/v_PlayingDaf_g13_c02 59 +Diving/v_Diving_g19_c01 25 +HeadMassage/v_HeadMassage_g25_c06 38 +Fencing/v_Fencing_g11_c03 27 +BrushingTeeth/v_BrushingTeeth_g19_c03 19 +Basketball/v_Basketball_g16_c01 7 +PlayingGuitar/v_PlayingGuitar_g11_c07 62 +FieldHockeyPenalty/v_FieldHockeyPenalty_g11_c01 28 +SoccerJuggling/v_SoccerJuggling_g22_c06 83 +VolleyballSpiking/v_VolleyballSpiking_g16_c04 96 +SkyDiving/v_SkyDiving_g08_c02 82 +UnevenBars/v_UnevenBars_g25_c01 95 +Lunges/v_Lunges_g08_c03 51 +SkateBoarding/v_SkateBoarding_g13_c02 79 +PlayingPiano/v_PlayingPiano_g17_c05 63 +BreastStroke/v_BreastStroke_g24_c01 18 +MilitaryParade/v_MilitaryParade_g09_c05 52 +Bowling/v_Bowling_g24_c04 15 +YoYo/v_YoYo_g16_c02 100 +Bowling/v_Bowling_g13_c02 15 +RockClimbingIndoor/v_RockClimbingIndoor_g25_c02 73 +HorseRace/v_HorseRace_g24_c01 40 +Fencing/v_Fencing_g22_c04 27 +SalsaSpin/v_SalsaSpin_g21_c05 76 +JumpingJack/v_JumpingJack_g12_c03 46 +PlayingCello/v_PlayingCello_g13_c01 58 +BenchPress/v_BenchPress_g12_c01 9 +Kayaking/v_Kayaking_g08_c04 48 +HorseRace/v_HorseRace_g18_c03 40 +TrampolineJumping/v_TrampolineJumping_g16_c04 93 +PlayingDaf/v_PlayingDaf_g16_c05 59 +SkateBoarding/v_SkateBoarding_g14_c03 79 +BandMarching/v_BandMarching_g10_c02 5 
+Rafting/v_Rafting_g08_c03 72 +Swing/v_Swing_g25_c03 88 +IceDancing/v_IceDancing_g21_c07 43 +BandMarching/v_BandMarching_g24_c05 5 +StillRings/v_StillRings_g25_c01 85 +PushUps/v_PushUps_g21_c02 71 +FieldHockeyPenalty/v_FieldHockeyPenalty_g10_c05 28 +HighJump/v_HighJump_g24_c01 39 +PlayingDaf/v_PlayingDaf_g21_c03 59 +PoleVault/v_PoleVault_g19_c01 67 +Skijet/v_Skijet_g23_c01 81 +PlayingCello/v_PlayingCello_g15_c05 58 +HeadMassage/v_HeadMassage_g18_c05 38 +Fencing/v_Fencing_g23_c02 27 +RopeClimbing/v_RopeClimbing_g25_c03 74 +PlayingFlute/v_PlayingFlute_g14_c01 61 +Typing/v_Typing_g18_c01 94 +JugglingBalls/v_JugglingBalls_g25_c04 45 +PlayingSitar/v_PlayingSitar_g11_c07 64 +CliffDiving/v_CliffDiving_g12_c04 21 +BenchPress/v_BenchPress_g24_c05 9 +SumoWrestling/v_SumoWrestling_g17_c01 86 +TableTennisShot/v_TableTennisShot_g10_c02 89 +Punch/v_Punch_g15_c04 70 +JumpRope/v_JumpRope_g15_c06 47 +YoYo/v_YoYo_g13_c05 100 +BabyCrawling/v_BabyCrawling_g25_c05 3 +Basketball/v_Basketball_g18_c05 7 +RopeClimbing/v_RopeClimbing_g11_c01 74 +Knitting/v_Knitting_g17_c01 49 +PlayingDaf/v_PlayingDaf_g17_c01 59 +BasketballDunk/v_BasketballDunk_g20_c05 8 +PlayingViolin/v_PlayingViolin_g09_c01 66 +CliffDiving/v_CliffDiving_g17_c02 21 +Billiards/v_Billiards_g24_c03 11 +IceDancing/v_IceDancing_g21_c02 43 +Rowing/v_Rowing_g19_c05 75 +StillRings/v_StillRings_g10_c02 85 +CricketShot/v_CricketShot_g18_c04 23 +Nunchucks/v_Nunchucks_g24_c02 55 +Billiards/v_Billiards_g25_c06 11 +ParallelBars/v_ParallelBars_g11_c02 56 +SoccerPenalty/v_SoccerPenalty_g24_c04 84 +VolleyballSpiking/v_VolleyballSpiking_g23_c02 96 +TrampolineJumping/v_TrampolineJumping_g10_c04 93 +BalanceBeam/v_BalanceBeam_g18_c02 4 +HulaHoop/v_HulaHoop_g13_c04 42 +BodyWeightSquats/v_BodyWeightSquats_g12_c03 14 +CleanAndJerk/v_CleanAndJerk_g14_c06 20 +PushUps/v_PushUps_g19_c02 71 +Biking/v_Biking_g15_c05 10 +PlayingSitar/v_PlayingSitar_g15_c04 64 +SkateBoarding/v_SkateBoarding_g16_c04 79 +FloorGymnastics/v_FloorGymnastics_g17_c02 29 
+FrontCrawl/v_FrontCrawl_g16_c02 31 +TaiChi/v_TaiChi_g22_c01 90 +CricketShot/v_CricketShot_g16_c06 23 +JumpRope/v_JumpRope_g11_c05 47 +CricketBowling/v_CricketBowling_g19_c01 22 +BoxingPunchingBag/v_BoxingPunchingBag_g23_c06 16 +Punch/v_Punch_g25_c01 70 +CricketShot/v_CricketShot_g09_c07 23 +Shotput/v_Shotput_g21_c04 78 +WritingOnBoard/v_WritingOnBoard_g25_c07 99 +Fencing/v_Fencing_g08_c01 27 +CuttingInKitchen/v_CuttingInKitchen_g09_c03 24 +RockClimbingIndoor/v_RockClimbingIndoor_g10_c01 73 +Skiing/v_Skiing_g16_c04 80 +JumpingJack/v_JumpingJack_g25_c04 46 +HeadMassage/v_HeadMassage_g10_c03 38 +PlayingCello/v_PlayingCello_g16_c03 58 +PlayingDhol/v_PlayingDhol_g21_c01 60 +WritingOnBoard/v_WritingOnBoard_g24_c04 99 +RopeClimbing/v_RopeClimbing_g18_c04 74 +Shotput/v_Shotput_g18_c01 78 +PizzaTossing/v_PizzaTossing_g11_c02 57 +TableTennisShot/v_TableTennisShot_g11_c04 89 +CricketBowling/v_CricketBowling_g20_c03 22 +CuttingInKitchen/v_CuttingInKitchen_g11_c01 24 +HighJump/v_HighJump_g08_c02 39 +ThrowDiscus/v_ThrowDiscus_g21_c06 92 +WritingOnBoard/v_WritingOnBoard_g24_c02 99 +HeadMassage/v_HeadMassage_g22_c06 38 +PoleVault/v_PoleVault_g24_c04 67 +Drumming/v_Drumming_g21_c03 26 +BaseballPitch/v_BaseballPitch_g08_c07 6 +TaiChi/v_TaiChi_g17_c02 90 +TrampolineJumping/v_TrampolineJumping_g17_c04 93 +Haircut/v_Haircut_g08_c02 33 +BaseballPitch/v_BaseballPitch_g08_c06 6 +Archery/v_Archery_g12_c02 2 +PlayingGuitar/v_PlayingGuitar_g20_c05 62 +YoYo/v_YoYo_g17_c02 100 +HighJump/v_HighJump_g17_c04 39 +MoppingFloor/v_MoppingFloor_g12_c04 54 +SoccerJuggling/v_SoccerJuggling_g12_c03 83 +ApplyEyeMakeup/v_ApplyEyeMakeup_g22_c02 0 +PoleVault/v_PoleVault_g15_c01 67 +IceDancing/v_IceDancing_g18_c02 43 +BaseballPitch/v_BaseballPitch_g11_c03 6 +PushUps/v_PushUps_g12_c04 71 +SumoWrestling/v_SumoWrestling_g21_c01 86 +HandstandWalking/v_HandstandWalking_g18_c02 37 +HorseRace/v_HorseRace_g22_c04 40 +PlayingDhol/v_PlayingDhol_g24_c04 60 +Archery/v_Archery_g20_c07 2 
+PizzaTossing/v_PizzaTossing_g14_c03 57 +TennisSwing/v_TennisSwing_g20_c01 91 +BodyWeightSquats/v_BodyWeightSquats_g20_c03 14 +PoleVault/v_PoleVault_g12_c03 67 +SkyDiving/v_SkyDiving_g14_c03 82 +Punch/v_Punch_g19_c04 70 +YoYo/v_YoYo_g14_c01 100 +CricketBowling/v_CricketBowling_g22_c02 22 +HeadMassage/v_HeadMassage_g21_c01 38 +Lunges/v_Lunges_g11_c03 51 +HammerThrow/v_HammerThrow_g10_c05 35 +StillRings/v_StillRings_g18_c03 85 +BlowDryHair/v_BlowDryHair_g15_c02 12 +Rafting/v_Rafting_g12_c04 72 +BoxingPunchingBag/v_BoxingPunchingBag_g11_c03 16 +PlayingCello/v_PlayingCello_g23_c04 58 +Archery/v_Archery_g14_c04 2 +Drumming/v_Drumming_g08_c04 26 +SoccerJuggling/v_SoccerJuggling_g11_c05 83 +SalsaSpin/v_SalsaSpin_g08_c03 76 +HeadMassage/v_HeadMassage_g21_c04 38 +BandMarching/v_BandMarching_g25_c01 5 +BlowDryHair/v_BlowDryHair_g22_c03 12 +Drumming/v_Drumming_g10_c06 26 +CricketShot/v_CricketShot_g13_c01 23 +FloorGymnastics/v_FloorGymnastics_g18_c01 29 +UnevenBars/v_UnevenBars_g25_c03 95 +Shotput/v_Shotput_g09_c04 78 +PoleVault/v_PoleVault_g14_c04 67 +Billiards/v_Billiards_g20_c03 11 +MoppingFloor/v_MoppingFloor_g16_c01 54 +HulaHoop/v_HulaHoop_g08_c05 42 +CricketShot/v_CricketShot_g12_c06 23 +HandstandPushups/v_HandStandPushups_g24_c04 36 +LongJump/v_LongJump_g19_c04 50 +PlayingDhol/v_PlayingDhol_g08_c04 60 +BalanceBeam/v_BalanceBeam_g20_c02 4 +TaiChi/v_TaiChi_g25_c04 90 +ShavingBeard/v_ShavingBeard_g08_c05 77 +FieldHockeyPenalty/v_FieldHockeyPenalty_g21_c03 28 +PlayingDhol/v_PlayingDhol_g16_c02 60 +TaiChi/v_TaiChi_g10_c01 90 +JavelinThrow/v_JavelinThrow_g19_c04 44 +Haircut/v_Haircut_g11_c03 33 +Nunchucks/v_Nunchucks_g24_c03 55 +HorseRiding/v_HorseRiding_g24_c06 41 +PlayingTabla/v_PlayingTabla_g10_c01 65 +PommelHorse/v_PommelHorse_g20_c05 68 +HandstandPushups/v_HandStandPushups_g09_c03 36 +SalsaSpin/v_SalsaSpin_g24_c04 76 +Surfing/v_Surfing_g08_c02 87 +JavelinThrow/v_JavelinThrow_g21_c03 44 +HeadMassage/v_HeadMassage_g15_c05 38 +HandstandPushups/v_HandStandPushups_g13_c01 36 
+Skijet/v_Skijet_g11_c04 81 +Lunges/v_Lunges_g10_c06 51 +HandstandPushups/v_HandStandPushups_g17_c03 36 +CricketShot/v_CricketShot_g20_c07 23 +IceDancing/v_IceDancing_g08_c07 43 +Skiing/v_Skiing_g13_c02 80 +JavelinThrow/v_JavelinThrow_g16_c02 44 +HammerThrow/v_HammerThrow_g18_c05 35 +YoYo/v_YoYo_g13_c04 100 +Kayaking/v_Kayaking_g23_c01 48 +Drumming/v_Drumming_g12_c07 26 +ParallelBars/v_ParallelBars_g14_c04 56 +BabyCrawling/v_BabyCrawling_g12_c06 3 +VolleyballSpiking/v_VolleyballSpiking_g25_c01 96 +PlayingCello/v_PlayingCello_g12_c02 58 +WalkingWithDog/v_WalkingWithDog_g08_c04 97 +FrontCrawl/v_FrontCrawl_g21_c06 31 +PlayingSitar/v_PlayingSitar_g21_c06 64 +Knitting/v_Knitting_g19_c01 49 +Skiing/v_Skiing_g22_c03 80 +Haircut/v_Haircut_g20_c07 33 +BoxingPunchingBag/v_BoxingPunchingBag_g15_c04 16 +VolleyballSpiking/v_VolleyballSpiking_g09_c04 96 +Swing/v_Swing_g14_c01 88 +PlayingSitar/v_PlayingSitar_g10_c06 64 +BaseballPitch/v_BaseballPitch_g13_c05 6 +PlayingCello/v_PlayingCello_g08_c03 58 +TennisSwing/v_TennisSwing_g22_c05 91 +PlayingDaf/v_PlayingDaf_g18_c06 59 +FrisbeeCatch/v_FrisbeeCatch_g10_c01 30 +Punch/v_Punch_g10_c03 70 +PlayingCello/v_PlayingCello_g16_c04 58 +Basketball/v_Basketball_g23_c03 7 +BreastStroke/v_BreastStroke_g25_c01 18 +PullUps/v_PullUps_g24_c04 69 +Kayaking/v_Kayaking_g19_c01 48 +YoYo/v_YoYo_g12_c02 100 +HandstandWalking/v_HandstandWalking_g09_c04 37 +MilitaryParade/v_MilitaryParade_g21_c01 52 +PlayingCello/v_PlayingCello_g19_c02 58 +PlayingGuitar/v_PlayingGuitar_g16_c06 62 +Billiards/v_Billiards_g19_c06 11 +PlayingTabla/v_PlayingTabla_g21_c02 65 +Biking/v_Biking_g25_c04 10 +GolfSwing/v_GolfSwing_g17_c05 32 +TrampolineJumping/v_TrampolineJumping_g19_c04 93 +CuttingInKitchen/v_CuttingInKitchen_g25_c03 24 +Skiing/v_Skiing_g25_c01 80 +BreastStroke/v_BreastStroke_g08_c03 18 +BandMarching/v_BandMarching_g12_c02 5 +BrushingTeeth/v_BrushingTeeth_g23_c04 19 +CricketShot/v_CricketShot_g15_c07 23 +FrisbeeCatch/v_FrisbeeCatch_g16_c02 30 
+HorseRiding/v_HorseRiding_g24_c04 41 +Swing/v_Swing_g09_c03 88 +BrushingTeeth/v_BrushingTeeth_g24_c02 19 +HammerThrow/v_HammerThrow_g12_c06 35 +Skiing/v_Skiing_g14_c01 80 +BlowingCandles/v_BlowingCandles_g24_c02 13 +Surfing/v_Surfing_g15_c05 87 +SoccerJuggling/v_SoccerJuggling_g15_c01 83 +WritingOnBoard/v_WritingOnBoard_g18_c01 99 +FrisbeeCatch/v_FrisbeeCatch_g16_c03 30 +FrontCrawl/v_FrontCrawl_g20_c06 31 +HammerThrow/v_HammerThrow_g09_c04 35 +Drumming/v_Drumming_g20_c03 26 +WalkingWithDog/v_WalkingWithDog_g16_c03 97 +TennisSwing/v_TennisSwing_g19_c01 91 +JugglingBalls/v_JugglingBalls_g17_c01 45 +Rafting/v_Rafting_g09_c04 72 +HighJump/v_HighJump_g23_c05 39 +SoccerPenalty/v_SoccerPenalty_g12_c06 84 +IceDancing/v_IceDancing_g21_c01 43 +HandstandPushups/v_HandStandPushups_g22_c03 36 +FloorGymnastics/v_FloorGymnastics_g22_c03 29 +BandMarching/v_BandMarching_g22_c04 5 +VolleyballSpiking/v_VolleyballSpiking_g13_c06 96 +MilitaryParade/v_MilitaryParade_g18_c04 52 +HorseRace/v_HorseRace_g21_c04 40 +PlayingFlute/v_PlayingFlute_g09_c04 61 +SoccerPenalty/v_SoccerPenalty_g09_c02 84 +Bowling/v_Bowling_g12_c03 15 +FrisbeeCatch/v_FrisbeeCatch_g25_c04 30 +TennisSwing/v_TennisSwing_g17_c06 91 +Knitting/v_Knitting_g10_c05 49 +VolleyballSpiking/v_VolleyballSpiking_g09_c01 96 +CricketBowling/v_CricketBowling_g11_c06 22 +BabyCrawling/v_BabyCrawling_g24_c02 3 +ApplyEyeMakeup/v_ApplyEyeMakeup_g22_c03 0 +PommelHorse/v_PommelHorse_g12_c03 68 +Haircut/v_Haircut_g23_c05 33 +Skiing/v_Skiing_g20_c04 80 +SoccerJuggling/v_SoccerJuggling_g23_c05 83 +ThrowDiscus/v_ThrowDiscus_g15_c02 92 +PlayingGuitar/v_PlayingGuitar_g14_c03 62 +Bowling/v_Bowling_g20_c06 15 +BandMarching/v_BandMarching_g09_c04 5 +PlayingDhol/v_PlayingDhol_g25_c02 60 +ThrowDiscus/v_ThrowDiscus_g10_c01 92 +PullUps/v_PullUps_g24_c03 69 +CricketBowling/v_CricketBowling_g13_c01 22 +Mixing/v_Mixing_g22_c03 53 +Drumming/v_Drumming_g15_c01 26 +StillRings/v_StillRings_g11_c02 85 +Diving/v_Diving_g16_c01 25 +Bowling/v_Bowling_g14_c03 15 
+PommelHorse/v_PommelHorse_g20_c03 68 +SumoWrestling/v_SumoWrestling_g09_c03 86 +Knitting/v_Knitting_g10_c04 49 +ThrowDiscus/v_ThrowDiscus_g11_c05 92 +BaseballPitch/v_BaseballPitch_g09_c07 6 +LongJump/v_LongJump_g14_c05 50 +PlayingSitar/v_PlayingSitar_g22_c03 64 +RopeClimbing/v_RopeClimbing_g13_c04 74 +BoxingPunchingBag/v_BoxingPunchingBag_g15_c06 16 +SkateBoarding/v_SkateBoarding_g12_c05 79 +Archery/v_Archery_g11_c01 2 +PlayingSitar/v_PlayingSitar_g14_c03 64 +Knitting/v_Knitting_g13_c02 49 +Archery/v_Archery_g23_c05 2 +ThrowDiscus/v_ThrowDiscus_g19_c04 92 +HulaHoop/v_HulaHoop_g23_c02 42 +CricketBowling/v_CricketBowling_g21_c04 22 +IceDancing/v_IceDancing_g10_c07 43 +Rowing/v_Rowing_g20_c02 75 +TaiChi/v_TaiChi_g22_c04 90 +PlayingDaf/v_PlayingDaf_g12_c04 59 +BabyCrawling/v_BabyCrawling_g17_c05 3 +PlayingCello/v_PlayingCello_g22_c03 58 +PlayingDaf/v_PlayingDaf_g20_c04 59 +Drumming/v_Drumming_g16_c06 26 +BenchPress/v_BenchPress_g18_c06 9 +IceDancing/v_IceDancing_g24_c03 43 +CricketShot/v_CricketShot_g09_c05 23 +BlowDryHair/v_BlowDryHair_g14_c03 12 +TableTennisShot/v_TableTennisShot_g12_c01 89 +Skijet/v_Skijet_g17_c02 81 +Drumming/v_Drumming_g24_c06 26 +Rafting/v_Rafting_g10_c03 72 +HorseRiding/v_HorseRiding_g21_c06 41 +Skijet/v_Skijet_g18_c03 81 +RockClimbingIndoor/v_RockClimbingIndoor_g14_c05 73 +Biking/v_Biking_g14_c04 10 +BoxingSpeedBag/v_BoxingSpeedBag_g18_c01 17 +YoYo/v_YoYo_g24_c01 100 +BoxingPunchingBag/v_BoxingPunchingBag_g24_c02 16 +Swing/v_Swing_g25_c02 88 +BoxingSpeedBag/v_BoxingSpeedBag_g17_c01 17 +JumpRope/v_JumpRope_g17_c03 47 +TaiChi/v_TaiChi_g22_c03 90 +Shotput/v_Shotput_g08_c04 78 +PoleVault/v_PoleVault_g16_c07 67 +FrisbeeCatch/v_FrisbeeCatch_g25_c03 30 +PoleVault/v_PoleVault_g17_c06 67 +PlayingFlute/v_PlayingFlute_g10_c06 61 +JumpRope/v_JumpRope_g16_c07 47 +CricketBowling/v_CricketBowling_g20_c05 22 +Archery/v_Archery_g20_c02 2 +Surfing/v_Surfing_g17_c04 87 +TableTennisShot/v_TableTennisShot_g17_c02 89 +WalkingWithDog/v_WalkingWithDog_g24_c03 97 
+SoccerJuggling/v_SoccerJuggling_g12_c06 83 +Biking/v_Biking_g15_c01 10 +Diving/v_Diving_g24_c06 25 +PlayingGuitar/v_PlayingGuitar_g24_c01 62 +MilitaryParade/v_MilitaryParade_g23_c03 52 +FrisbeeCatch/v_FrisbeeCatch_g21_c03 30 +CricketBowling/v_CricketBowling_g08_c02 22 +Hammering/v_Hammering_g17_c03 34 +Fencing/v_Fencing_g15_c02 27 +SalsaSpin/v_SalsaSpin_g10_c06 76 +Surfing/v_Surfing_g25_c01 87 +Archery/v_Archery_g18_c05 2 +BaseballPitch/v_BaseballPitch_g16_c05 6 +BrushingTeeth/v_BrushingTeeth_g15_c03 19 +WritingOnBoard/v_WritingOnBoard_g16_c04 99 +ShavingBeard/v_ShavingBeard_g21_c06 77 +Archery/v_Archery_g25_c05 2 +HandstandWalking/v_HandstandWalking_g21_c04 37 +WritingOnBoard/v_WritingOnBoard_g21_c03 99 +Surfing/v_Surfing_g22_c02 87 +Skijet/v_Skijet_g17_c03 81 +CricketBowling/v_CricketBowling_g13_c04 22 +FrisbeeCatch/v_FrisbeeCatch_g17_c03 30 +TrampolineJumping/v_TrampolineJumping_g20_c03 93 +HandstandPushups/v_HandStandPushups_g10_c04 36 +Typing/v_Typing_g11_c04 94 +BlowingCandles/v_BlowingCandles_g24_c03 13 +PlayingFlute/v_PlayingFlute_g15_c04 61 +BandMarching/v_BandMarching_g19_c01 5 +Archery/v_Archery_g22_c01 2 +TennisSwing/v_TennisSwing_g17_c07 91 +Hammering/v_Hammering_g13_c02 34 +FloorGymnastics/v_FloorGymnastics_g17_c06 29 +JumpRope/v_JumpRope_g17_c04 47 +Typing/v_Typing_g10_c06 94 +WritingOnBoard/v_WritingOnBoard_g14_c01 99 +Knitting/v_Knitting_g12_c01 49 +HammerThrow/v_HammerThrow_g17_c04 35 +BreastStroke/v_BreastStroke_g11_c04 18 +JumpRope/v_JumpRope_g18_c07 47 +LongJump/v_LongJump_g10_c01 50 +BoxingPunchingBag/v_BoxingPunchingBag_g23_c01 16 +PlayingSitar/v_PlayingSitar_g09_c02 64 +HammerThrow/v_HammerThrow_g21_c04 35 +StillRings/v_StillRings_g10_c03 85 +BenchPress/v_BenchPress_g21_c04 9 +Archery/v_Archery_g10_c06 2 +Surfing/v_Surfing_g23_c01 87 +SalsaSpin/v_SalsaSpin_g18_c02 76 +PlayingFlute/v_PlayingFlute_g08_c06 61 +PizzaTossing/v_PizzaTossing_g12_c03 57 +BandMarching/v_BandMarching_g20_c01 5 +WalkingWithDog/v_WalkingWithDog_g19_c04 97 
+BoxingSpeedBag/v_BoxingSpeedBag_g15_c02 17 +Archery/v_Archery_g25_c06 2 +SkyDiving/v_SkyDiving_g15_c01 82 +Mixing/v_Mixing_g17_c04 53 +Diving/v_Diving_g10_c01 25 +Surfing/v_Surfing_g22_c01 87 +Rowing/v_Rowing_g19_c01 75 +PlayingSitar/v_PlayingSitar_g20_c01 64 +BasketballDunk/v_BasketballDunk_g24_c02 8 +CliffDiving/v_CliffDiving_g12_c05 21 +CricketBowling/v_CricketBowling_g17_c05 22 +Surfing/v_Surfing_g16_c06 87 +SumoWrestling/v_SumoWrestling_g21_c04 86 +Surfing/v_Surfing_g08_c01 87 +ShavingBeard/v_ShavingBeard_g14_c04 77 +PoleVault/v_PoleVault_g16_c05 67 +SumoWrestling/v_SumoWrestling_g20_c04 86 +SumoWrestling/v_SumoWrestling_g11_c01 86 +HandstandPushups/v_HandStandPushups_g18_c02 36 +TrampolineJumping/v_TrampolineJumping_g11_c05 93 +Punch/v_Punch_g18_c05 70 +CricketBowling/v_CricketBowling_g18_c02 22 +Rafting/v_Rafting_g16_c04 72 +Mixing/v_Mixing_g12_c02 53 +SkateBoarding/v_SkateBoarding_g25_c02 79 +BabyCrawling/v_BabyCrawling_g25_c01 3 +PlayingPiano/v_PlayingPiano_g09_c03 63 +CleanAndJerk/v_CleanAndJerk_g16_c04 20 +BoxingPunchingBag/v_BoxingPunchingBag_g24_c05 16 +StillRings/v_StillRings_g08_c03 85 +RockClimbingIndoor/v_RockClimbingIndoor_g12_c05 73 +BrushingTeeth/v_BrushingTeeth_g13_c02 19 +PlayingPiano/v_PlayingPiano_g21_c03 63 +Billiards/v_Billiards_g12_c04 11 +JavelinThrow/v_JavelinThrow_g19_c01 44 +Mixing/v_Mixing_g11_c01 53 +ShavingBeard/v_ShavingBeard_g20_c04 77 +BlowDryHair/v_BlowDryHair_g21_c01 12 +PullUps/v_PullUps_g10_c01 69 +GolfSwing/v_GolfSwing_g16_c01 32 +BaseballPitch/v_BaseballPitch_g22_c01 6 +JavelinThrow/v_JavelinThrow_g18_c03 44 +Fencing/v_Fencing_g16_c04 27 +FrontCrawl/v_FrontCrawl_g11_c05 31 +HandstandPushups/v_HandStandPushups_g16_c03 36 +BandMarching/v_BandMarching_g24_c06 5 +SoccerPenalty/v_SoccerPenalty_g24_c01 84 +PommelHorse/v_PommelHorse_g11_c03 68 +PlayingDaf/v_PlayingDaf_g16_c03 59 +HandstandWalking/v_HandstandWalking_g14_c04 37 +HighJump/v_HighJump_g13_c04 39 +IceDancing/v_IceDancing_g20_c02 43 
+BodyWeightSquats/v_BodyWeightSquats_g14_c01 14 +BreastStroke/v_BreastStroke_g13_c03 18 +SumoWrestling/v_SumoWrestling_g20_c02 86 +PullUps/v_PullUps_g16_c02 69 +FieldHockeyPenalty/v_FieldHockeyPenalty_g11_c04 28 +HorseRace/v_HorseRace_g15_c04 40 +Skiing/v_Skiing_g08_c06 80 +Shotput/v_Shotput_g16_c04 78 +ShavingBeard/v_ShavingBeard_g18_c05 77 +HammerThrow/v_HammerThrow_g16_c01 35 +JumpingJack/v_JumpingJack_g23_c02 46 +Swing/v_Swing_g13_c01 88 +Basketball/v_Basketball_g20_c07 7 +HandstandWalking/v_HandstandWalking_g24_c06 37 +JugglingBalls/v_JugglingBalls_g09_c01 45 +PullUps/v_PullUps_g14_c03 69 +BlowDryHair/v_BlowDryHair_g15_c04 12 +CliffDiving/v_CliffDiving_g08_c01 21 +PoleVault/v_PoleVault_g10_c03 67 +Lunges/v_Lunges_g25_c03 51 +Skijet/v_Skijet_g24_c02 81 +PlayingDhol/v_PlayingDhol_g09_c04 60 +Swing/v_Swing_g24_c04 88 +FrontCrawl/v_FrontCrawl_g11_c01 31 +Drumming/v_Drumming_g12_c01 26 +BaseballPitch/v_BaseballPitch_g24_c01 6 +HeadMassage/v_HeadMassage_g09_c02 38 +TableTennisShot/v_TableTennisShot_g09_c06 89 +Bowling/v_Bowling_g08_c07 15 +Rowing/v_Rowing_g15_c04 75 +BoxingSpeedBag/v_BoxingSpeedBag_g11_c03 17 +ThrowDiscus/v_ThrowDiscus_g13_c05 92 +PlayingViolin/v_PlayingViolin_g11_c01 66 +PlayingFlute/v_PlayingFlute_g24_c02 61 +Hammering/v_Hammering_g09_c01 34 +Punch/v_Punch_g23_c02 70 +CleanAndJerk/v_CleanAndJerk_g14_c03 20 +Kayaking/v_Kayaking_g14_c01 48 +CricketBowling/v_CricketBowling_g10_c03 22 +MoppingFloor/v_MoppingFloor_g21_c03 54 +ApplyLipstick/v_ApplyLipstick_g22_c04 1 +SkateBoarding/v_SkateBoarding_g15_c06 79 +HandstandPushups/v_HandStandPushups_g23_c03 36 +Archery/v_Archery_g18_c04 2 +HandstandPushups/v_HandStandPushups_g17_c02 36 +Skiing/v_Skiing_g13_c07 80 +Punch/v_Punch_g20_c03 70 +SoccerJuggling/v_SoccerJuggling_g23_c03 83 +HighJump/v_HighJump_g21_c01 39 +BreastStroke/v_BreastStroke_g11_c03 18 +IceDancing/v_IceDancing_g17_c02 43 +StillRings/v_StillRings_g15_c01 85 +HammerThrow/v_HammerThrow_g08_c05 35 +Kayaking/v_Kayaking_g09_c04 48 
+SoccerJuggling/v_SoccerJuggling_g22_c05 83 +PlayingGuitar/v_PlayingGuitar_g16_c02 62 +WalkingWithDog/v_WalkingWithDog_g23_c01 97 +ThrowDiscus/v_ThrowDiscus_g11_c04 92 +StillRings/v_StillRings_g15_c02 85 +CleanAndJerk/v_CleanAndJerk_g12_c05 20 +WallPushups/v_WallPushups_g10_c01 98 +PlayingDaf/v_PlayingDaf_g25_c07 59 +Rowing/v_Rowing_g13_c07 75 +WritingOnBoard/v_WritingOnBoard_g23_c02 99 +CliffDiving/v_CliffDiving_g24_c03 21 +RockClimbingIndoor/v_RockClimbingIndoor_g16_c04 73 +JumpingJack/v_JumpingJack_g11_c03 46 +HammerThrow/v_HammerThrow_g20_c01 35 +VolleyballSpiking/v_VolleyballSpiking_g17_c04 96 +GolfSwing/v_GolfSwing_g20_c03 32 +PlayingTabla/v_PlayingTabla_g08_c04 65 +Shotput/v_Shotput_g16_c01 78 +PizzaTossing/v_PizzaTossing_g08_c01 57 +FloorGymnastics/v_FloorGymnastics_g16_c03 29 +BodyWeightSquats/v_BodyWeightSquats_g13_c02 14 +Nunchucks/v_Nunchucks_g16_c02 55 +Diving/v_Diving_g18_c01 25 +Haircut/v_Haircut_g14_c03 33 +HighJump/v_HighJump_g24_c03 39 +JugglingBalls/v_JugglingBalls_g15_c02 45 +FrontCrawl/v_FrontCrawl_g11_c03 31 +BenchPress/v_BenchPress_g19_c04 9 +JugglingBalls/v_JugglingBalls_g08_c02 45 +HandstandPushups/v_HandStandPushups_g13_c04 36 +ThrowDiscus/v_ThrowDiscus_g19_c03 92 +SumoWrestling/v_SumoWrestling_g20_c01 86 +BasketballDunk/v_BasketballDunk_g24_c04 8 +HandstandPushups/v_HandStandPushups_g15_c03 36 +ThrowDiscus/v_ThrowDiscus_g18_c04 92 +PlayingSitar/v_PlayingSitar_g19_c01 64 +ShavingBeard/v_ShavingBeard_g12_c04 77 +Surfing/v_Surfing_g20_c03 87 +PullUps/v_PullUps_g17_c02 69 +Biking/v_Biking_g18_c02 10 +SkyDiving/v_SkyDiving_g11_c02 82 +SoccerPenalty/v_SoccerPenalty_g13_c01 84 +LongJump/v_LongJump_g22_c02 50 +PlayingPiano/v_PlayingPiano_g17_c06 63 +PlayingSitar/v_PlayingSitar_g11_c04 64 +FloorGymnastics/v_FloorGymnastics_g20_c01 29 +Surfing/v_Surfing_g15_c03 87 +Lunges/v_Lunges_g11_c07 51 +Mixing/v_Mixing_g12_c06 53 +Drumming/v_Drumming_g18_c03 26 +Shotput/v_Shotput_g15_c06 78 +SkateBoarding/v_SkateBoarding_g08_c02 79 +PushUps/v_PushUps_g14_c02 
71 +BlowingCandles/v_BlowingCandles_g15_c04 13 +Skiing/v_Skiing_g23_c03 80 +BreastStroke/v_BreastStroke_g14_c04 18 +ShavingBeard/v_ShavingBeard_g14_c05 77 +PlayingDaf/v_PlayingDaf_g23_c06 59 +HorseRiding/v_HorseRiding_g24_c02 41 +Mixing/v_Mixing_g22_c01 53 +Archery/v_Archery_g15_c05 2 +FrisbeeCatch/v_FrisbeeCatch_g10_c04 30 +MoppingFloor/v_MoppingFloor_g16_c02 54 +Surfing/v_Surfing_g11_c02 87 +PlayingFlute/v_PlayingFlute_g23_c05 61 +BoxingPunchingBag/v_BoxingPunchingBag_g16_c01 16 +GolfSwing/v_GolfSwing_g21_c02 32 +ParallelBars/v_ParallelBars_g13_c01 56 +Bowling/v_Bowling_g20_c03 15 +BoxingPunchingBag/v_BoxingPunchingBag_g15_c03 16 +PoleVault/v_PoleVault_g20_c02 67 +TrampolineJumping/v_TrampolineJumping_g22_c06 93 +StillRings/v_StillRings_g13_c03 85 +BenchPress/v_BenchPress_g24_c04 9 +PlayingCello/v_PlayingCello_g19_c04 58 +CricketShot/v_CricketShot_g25_c01 23 +CuttingInKitchen/v_CuttingInKitchen_g25_c05 24 +TennisSwing/v_TennisSwing_g24_c02 91 +Punch/v_Punch_g21_c02 70 +JumpRope/v_JumpRope_g08_c07 47 +HorseRiding/v_HorseRiding_g14_c02 41 +Drumming/v_Drumming_g11_c02 26 +SoccerJuggling/v_SoccerJuggling_g12_c04 83 +CricketBowling/v_CricketBowling_g13_c02 22 +Shotput/v_Shotput_g10_c01 78 +Lunges/v_Lunges_g11_c04 51 +Fencing/v_Fencing_g20_c04 27 +SoccerPenalty/v_SoccerPenalty_g10_c05 84 +HulaHoop/v_HulaHoop_g13_c01 42 +BoxingPunchingBag/v_BoxingPunchingBag_g16_c06 16 +PlayingCello/v_PlayingCello_g13_c02 58 +Drumming/v_Drumming_g11_c01 26 +Drumming/v_Drumming_g21_c05 26 +Rowing/v_Rowing_g15_c05 75 +Diving/v_Diving_g13_c04 25 +TableTennisShot/v_TableTennisShot_g11_c03 89 +PlayingSitar/v_PlayingSitar_g23_c06 64 +ThrowDiscus/v_ThrowDiscus_g24_c01 92 +BreastStroke/v_BreastStroke_g22_c04 18 +Diving/v_Diving_g12_c07 25 +Diving/v_Diving_g12_c03 25 +SkyDiving/v_SkyDiving_g13_c03 82 +WalkingWithDog/v_WalkingWithDog_g10_c05 97 +Bowling/v_Bowling_g14_c05 15 +UnevenBars/v_UnevenBars_g10_c04 95 +MilitaryParade/v_MilitaryParade_g12_c02 52 +FrontCrawl/v_FrontCrawl_g20_c05 31 
+PlayingPiano/v_PlayingPiano_g21_c01 63 +Rowing/v_Rowing_g10_c05 75 +IceDancing/v_IceDancing_g20_c06 43 +ThrowDiscus/v_ThrowDiscus_g20_c02 92 +HammerThrow/v_HammerThrow_g17_c02 35 +Kayaking/v_Kayaking_g08_c02 48 +CricketBowling/v_CricketBowling_g19_c04 22 +HandstandWalking/v_HandstandWalking_g24_c03 37 +TaiChi/v_TaiChi_g24_c04 90 +PommelHorse/v_PommelHorse_g11_c04 68 +HandstandWalking/v_HandstandWalking_g20_c01 37 +CricketShot/v_CricketShot_g19_c04 23 +PlayingViolin/v_PlayingViolin_g24_c02 66 +BaseballPitch/v_BaseballPitch_g23_c07 6 +BalanceBeam/v_BalanceBeam_g22_c03 4 +HorseRace/v_HorseRace_g15_c02 40 +HandstandPushups/v_HandStandPushups_g13_c07 36 +HeadMassage/v_HeadMassage_g17_c05 38 +Skijet/v_Skijet_g25_c02 81 +Fencing/v_Fencing_g25_c04 27 +BenchPress/v_BenchPress_g10_c03 9 +HandstandWalking/v_HandstandWalking_g13_c02 37 +GolfSwing/v_GolfSwing_g24_c04 32 +Billiards/v_Billiards_g16_c04 11 +SkyDiving/v_SkyDiving_g10_c05 82 +Billiards/v_Billiards_g24_c06 11 +SoccerPenalty/v_SoccerPenalty_g13_c04 84 +VolleyballSpiking/v_VolleyballSpiking_g13_c04 96 +JumpRope/v_JumpRope_g24_c03 47 +WallPushups/v_WallPushups_g13_c01 98 +BreastStroke/v_BreastStroke_g25_c03 18 +Mixing/v_Mixing_g11_c05 53 +Rowing/v_Rowing_g14_c03 75 +Biking/v_Biking_g22_c02 10 +Skiing/v_Skiing_g21_c02 80 +Drumming/v_Drumming_g17_c07 26 +BoxingPunchingBag/v_BoxingPunchingBag_g24_c01 16 +MoppingFloor/v_MoppingFloor_g25_c02 54 +PoleVault/v_PoleVault_g18_c02 67 +PlayingDhol/v_PlayingDhol_g24_c02 60 +FrontCrawl/v_FrontCrawl_g23_c04 31 +HeadMassage/v_HeadMassage_g20_c01 38 +FrontCrawl/v_FrontCrawl_g25_c02 31 +BlowDryHair/v_BlowDryHair_g24_c01 12 +Rafting/v_Rafting_g09_c01 72 +HandstandPushups/v_HandStandPushups_g15_c05 36 +ParallelBars/v_ParallelBars_g15_c01 56 +Lunges/v_Lunges_g24_c02 51 +PizzaTossing/v_PizzaTossing_g20_c02 57 +Mixing/v_Mixing_g10_c03 53 +BaseballPitch/v_BaseballPitch_g10_c01 6 +CleanAndJerk/v_CleanAndJerk_g08_c04 20 +Typing/v_Typing_g24_c03 94 +ApplyEyeMakeup/v_ApplyEyeMakeup_g25_c03 0 
+PushUps/v_PushUps_g22_c02 71 +CricketShot/v_CricketShot_g22_c01 23 +Rafting/v_Rafting_g21_c03 72 +Bowling/v_Bowling_g11_c07 15 +Nunchucks/v_Nunchucks_g19_c03 55 +Haircut/v_Haircut_g22_c05 33 +StillRings/v_StillRings_g10_c06 85 +HulaHoop/v_HulaHoop_g15_c01 42 +BlowDryHair/v_BlowDryHair_g11_c04 12 +PlayingSitar/v_PlayingSitar_g18_c02 64 +BreastStroke/v_BreastStroke_g21_c01 18 +BlowingCandles/v_BlowingCandles_g23_c01 13 +GolfSwing/v_GolfSwing_g10_c04 32 +ParallelBars/v_ParallelBars_g20_c03 56 +Fencing/v_Fencing_g15_c01 27 +MoppingFloor/v_MoppingFloor_g11_c01 54 +TaiChi/v_TaiChi_g18_c01 90 +PlayingTabla/v_PlayingTabla_g09_c05 65 +WallPushups/v_WallPushups_g21_c05 98 +HorseRiding/v_HorseRiding_g20_c02 41 +BandMarching/v_BandMarching_g23_c05 5 +ThrowDiscus/v_ThrowDiscus_g24_c02 92 +JugglingBalls/v_JugglingBalls_g11_c01 45 +ApplyLipstick/v_ApplyLipstick_g11_c02 1 +Archery/v_Archery_g14_c02 2 +RockClimbingIndoor/v_RockClimbingIndoor_g22_c05 73 +GolfSwing/v_GolfSwing_g11_c04 32 +Rafting/v_Rafting_g17_c01 72 +CliffDiving/v_CliffDiving_g22_c03 21 +MoppingFloor/v_MoppingFloor_g11_c03 54 +Rafting/v_Rafting_g09_c03 72 +PoleVault/v_PoleVault_g20_c06 67 +BenchPress/v_BenchPress_g08_c04 9 +Mixing/v_Mixing_g21_c01 53 +Rowing/v_Rowing_g18_c03 75 +BlowingCandles/v_BlowingCandles_g25_c03 13 +PushUps/v_PushUps_g13_c03 71 +PizzaTossing/v_PizzaTossing_g23_c06 57 +WritingOnBoard/v_WritingOnBoard_g21_c05 99 +UnevenBars/v_UnevenBars_g11_c01 95 +BoxingPunchingBag/v_BoxingPunchingBag_g14_c06 16 +BaseballPitch/v_BaseballPitch_g18_c03 6 +Hammering/v_Hammering_g14_c01 34 +BodyWeightSquats/v_BodyWeightSquats_g11_c03 14 +BoxingPunchingBag/v_BoxingPunchingBag_g25_c03 16 +Rafting/v_Rafting_g16_c06 72 +Archery/v_Archery_g12_c03 2 +CliffDiving/v_CliffDiving_g14_c04 21 +PlayingPiano/v_PlayingPiano_g23_c03 63 +IceDancing/v_IceDancing_g17_c01 43 +BenchPress/v_BenchPress_g22_c03 9 +MilitaryParade/v_MilitaryParade_g11_c02 52 +PizzaTossing/v_PizzaTossing_g15_c01 57 +SkateBoarding/v_SkateBoarding_g20_c05 79 
+BrushingTeeth/v_BrushingTeeth_g17_c01 19 +HandstandWalking/v_HandstandWalking_g12_c01 37 +PlayingDhol/v_PlayingDhol_g09_c07 60 +SkyDiving/v_SkyDiving_g11_c03 82 +PommelHorse/v_PommelHorse_g09_c02 68 +PoleVault/v_PoleVault_g10_c05 67 +WallPushups/v_WallPushups_g14_c01 98 +SalsaSpin/v_SalsaSpin_g11_c03 76 +Punch/v_Punch_g19_c01 70 +RopeClimbing/v_RopeClimbing_g23_c01 74 +BrushingTeeth/v_BrushingTeeth_g08_c02 19 +Shotput/v_Shotput_g12_c02 78 +Biking/v_Biking_g19_c04 10 +FloorGymnastics/v_FloorGymnastics_g21_c02 29 +CleanAndJerk/v_CleanAndJerk_g12_c07 20 +Mixing/v_Mixing_g10_c01 53 +GolfSwing/v_GolfSwing_g11_c02 32 +Knitting/v_Knitting_g23_c02 49 +Rafting/v_Rafting_g12_c03 72 +HorseRace/v_HorseRace_g19_c01 40 +BrushingTeeth/v_BrushingTeeth_g22_c06 19 +Nunchucks/v_Nunchucks_g15_c06 55 +BrushingTeeth/v_BrushingTeeth_g08_c05 19 +BodyWeightSquats/v_BodyWeightSquats_g09_c01 14 +BoxingPunchingBag/v_BoxingPunchingBag_g09_c02 16 +TennisSwing/v_TennisSwing_g18_c04 91 +Basketball/v_Basketball_g22_c05 7 +PlayingGuitar/v_PlayingGuitar_g18_c02 62 +TableTennisShot/v_TableTennisShot_g14_c01 89 +Billiards/v_Billiards_g24_c04 11 +ApplyLipstick/v_ApplyLipstick_g08_c03 1 +BlowingCandles/v_BlowingCandles_g08_c01 13 +PlayingGuitar/v_PlayingGuitar_g20_c07 62 +Diving/v_Diving_g22_c05 25 +LongJump/v_LongJump_g10_c03 50 +TaiChi/v_TaiChi_g17_c04 90 +PlayingPiano/v_PlayingPiano_g09_c04 63 +Skiing/v_Skiing_g10_c05 80 +UnevenBars/v_UnevenBars_g24_c01 95 +SkateBoarding/v_SkateBoarding_g22_c01 79 +SalsaSpin/v_SalsaSpin_g17_c03 76 +Hammering/v_Hammering_g24_c03 34 +TableTennisShot/v_TableTennisShot_g25_c05 89 +Rafting/v_Rafting_g25_c03 72 +ApplyEyeMakeup/v_ApplyEyeMakeup_g19_c04 0 +SkateBoarding/v_SkateBoarding_g14_c04 79 +HammerThrow/v_HammerThrow_g23_c04 35 +BlowingCandles/v_BlowingCandles_g18_c02 13 +WallPushups/v_WallPushups_g09_c05 98 +Nunchucks/v_Nunchucks_g16_c01 55 +PlayingSitar/v_PlayingSitar_g20_c04 64 +PlayingCello/v_PlayingCello_g08_c02 58 +Surfing/v_Surfing_g16_c02 87 
+LongJump/v_LongJump_g22_c04 50 +ShavingBeard/v_ShavingBeard_g09_c05 77 +PlayingDhol/v_PlayingDhol_g21_c07 60 +Punch/v_Punch_g11_c01 70 +RockClimbingIndoor/v_RockClimbingIndoor_g23_c05 73 +Bowling/v_Bowling_g21_c01 15 +Rafting/v_Rafting_g13_c04 72 +HeadMassage/v_HeadMassage_g14_c02 38 +WalkingWithDog/v_WalkingWithDog_g10_c04 97 +Kayaking/v_Kayaking_g15_c07 48 +BoxingPunchingBag/v_BoxingPunchingBag_g19_c04 16 +Nunchucks/v_Nunchucks_g21_c03 55 +LongJump/v_LongJump_g13_c05 50 +PlayingDaf/v_PlayingDaf_g10_c07 59 +MoppingFloor/v_MoppingFloor_g12_c02 54 +SoccerPenalty/v_SoccerPenalty_g08_c05 84 +Skijet/v_Skijet_g19_c01 81 +HorseRiding/v_HorseRiding_g20_c07 41 +HorseRace/v_HorseRace_g24_c05 40 +JumpRope/v_JumpRope_g23_c02 47 +Billiards/v_Billiards_g18_c01 11 +RockClimbingIndoor/v_RockClimbingIndoor_g11_c02 73 +HorseRace/v_HorseRace_g19_c05 40 +Hammering/v_Hammering_g24_c04 34 +GolfSwing/v_GolfSwing_g11_c07 32 +PlayingCello/v_PlayingCello_g15_c03 58 +Bowling/v_Bowling_g16_c01 15 +Hammering/v_Hammering_g21_c07 34 +SalsaSpin/v_SalsaSpin_g13_c01 76 +BlowingCandles/v_BlowingCandles_g22_c04 13 +Rowing/v_Rowing_g12_c01 75 +ApplyLipstick/v_ApplyLipstick_g24_c03 1 +CricketShot/v_CricketShot_g14_c03 23 +PlayingFlute/v_PlayingFlute_g22_c03 61 +PoleVault/v_PoleVault_g23_c05 67 +SkateBoarding/v_SkateBoarding_g11_c01 79 +SoccerPenalty/v_SoccerPenalty_g21_c01 84 +RopeClimbing/v_RopeClimbing_g12_c03 74 +PlayingCello/v_PlayingCello_g11_c03 58 +PlayingSitar/v_PlayingSitar_g13_c01 64 +SkyDiving/v_SkyDiving_g22_c03 82 +BasketballDunk/v_BasketballDunk_g25_c04 8 +Biking/v_Biking_g20_c05 10 +BoxingSpeedBag/v_BoxingSpeedBag_g25_c06 17 +Diving/v_Diving_g14_c04 25 +SoccerPenalty/v_SoccerPenalty_g15_c05 84 +Punch/v_Punch_g14_c05 70 +HighJump/v_HighJump_g25_c02 39 +HorseRace/v_HorseRace_g24_c02 40 +TaiChi/v_TaiChi_g21_c03 90 +Mixing/v_Mixing_g15_c05 53 +CricketShot/v_CricketShot_g25_c07 23 +CuttingInKitchen/v_CuttingInKitchen_g10_c05 24 +BenchPress/v_BenchPress_g12_c02 9 
+MilitaryParade/v_MilitaryParade_g16_c04 52 +Billiards/v_Billiards_g08_c06 11 +JumpingJack/v_JumpingJack_g12_c02 46 +HammerThrow/v_HammerThrow_g11_c06 35 +BabyCrawling/v_BabyCrawling_g15_c01 3 +Haircut/v_Haircut_g16_c03 33 +PlayingSitar/v_PlayingSitar_g25_c04 64 +ApplyLipstick/v_ApplyLipstick_g09_c03 1 +PlayingCello/v_PlayingCello_g25_c03 58 +Punch/v_Punch_g09_c03 70 +HandstandWalking/v_HandstandWalking_g17_c03 37 +TaiChi/v_TaiChi_g19_c02 90 +PoleVault/v_PoleVault_g16_c02 67 +BlowDryHair/v_BlowDryHair_g21_c05 12 +BandMarching/v_BandMarching_g18_c05 5 +PlayingSitar/v_PlayingSitar_g25_c01 64 +Billiards/v_Billiards_g15_c01 11 +Billiards/v_Billiards_g18_c04 11 +BodyWeightSquats/v_BodyWeightSquats_g20_c02 14 +GolfSwing/v_GolfSwing_g20_c04 32 +SumoWrestling/v_SumoWrestling_g08_c04 86 +CricketBowling/v_CricketBowling_g21_c02 22 +SkyDiving/v_SkyDiving_g20_c02 82 +WritingOnBoard/v_WritingOnBoard_g19_c02 99 +Skiing/v_Skiing_g16_c01 80 +TableTennisShot/v_TableTennisShot_g16_c04 89 +ApplyEyeMakeup/v_ApplyEyeMakeup_g15_c02 0 +BalanceBeam/v_BalanceBeam_g19_c02 4 +TableTennisShot/v_TableTennisShot_g11_c01 89 +Haircut/v_Haircut_g14_c01 33 +ShavingBeard/v_ShavingBeard_g21_c03 77 +PlayingGuitar/v_PlayingGuitar_g16_c04 62 +PoleVault/v_PoleVault_g25_c01 67 +HorseRiding/v_HorseRiding_g18_c04 41 +Surfing/v_Surfing_g10_c05 87 +HeadMassage/v_HeadMassage_g25_c03 38 +PlayingDaf/v_PlayingDaf_g18_c01 59 +MoppingFloor/v_MoppingFloor_g21_c02 54 +JumpingJack/v_JumpingJack_g09_c01 46 +HorseRiding/v_HorseRiding_g13_c04 41 +CricketBowling/v_CricketBowling_g09_c07 22 +Knitting/v_Knitting_g25_c03 49 +JugglingBalls/v_JugglingBalls_g16_c03 45 +PlayingSitar/v_PlayingSitar_g14_c05 64 +BaseballPitch/v_BaseballPitch_g15_c05 6 +TennisSwing/v_TennisSwing_g16_c05 91 +ApplyLipstick/v_ApplyLipstick_g11_c03 1 +VolleyballSpiking/v_VolleyballSpiking_g08_c02 96 +PlayingSitar/v_PlayingSitar_g24_c05 64 +BlowDryHair/v_BlowDryHair_g24_c02 12 +Skiing/v_Skiing_g09_c03 80 +BreastStroke/v_BreastStroke_g20_c02 18 
+BandMarching/v_BandMarching_g10_c03 5 +PlayingGuitar/v_PlayingGuitar_g13_c06 62 +JugglingBalls/v_JugglingBalls_g21_c03 45 +Biking/v_Biking_g12_c03 10 +GolfSwing/v_GolfSwing_g25_c06 32 +BlowingCandles/v_BlowingCandles_g16_c02 13 +JumpingJack/v_JumpingJack_g10_c03 46 +Biking/v_Biking_g16_c04 10 +BandMarching/v_BandMarching_g13_c02 5 +PlayingDhol/v_PlayingDhol_g17_c06 60 +Hammering/v_Hammering_g09_c04 34 +PlayingGuitar/v_PlayingGuitar_g17_c01 62 +BasketballDunk/v_BasketballDunk_g18_c02 8 +WritingOnBoard/v_WritingOnBoard_g13_c06 99 +BrushingTeeth/v_BrushingTeeth_g09_c01 19 +ParallelBars/v_ParallelBars_g23_c03 56 +ApplyLipstick/v_ApplyLipstick_g20_c01 1 +SoccerPenalty/v_SoccerPenalty_g19_c02 84 +Kayaking/v_Kayaking_g19_c02 48 +Skijet/v_Skijet_g22_c01 81 +SkateBoarding/v_SkateBoarding_g19_c02 79 +BalanceBeam/v_BalanceBeam_g23_c01 4 +RockClimbingIndoor/v_RockClimbingIndoor_g08_c04 73 +Hammering/v_Hammering_g12_c02 34 +MilitaryParade/v_MilitaryParade_g17_c02 52 +ApplyLipstick/v_ApplyLipstick_g13_c02 1 +BodyWeightSquats/v_BodyWeightSquats_g23_c02 14 +Nunchucks/v_Nunchucks_g08_c02 55 +YoYo/v_YoYo_g15_c01 100 +Archery/v_Archery_g25_c02 2 +Nunchucks/v_Nunchucks_g12_c03 55 +HammerThrow/v_HammerThrow_g14_c05 35 +Nunchucks/v_Nunchucks_g09_c02 55 +Lunges/v_Lunges_g14_c02 51 +PlayingViolin/v_PlayingViolin_g15_c01 66 +ApplyEyeMakeup/v_ApplyEyeMakeup_g24_c06 0 +Shotput/v_Shotput_g09_c01 78 +Bowling/v_Bowling_g23_c06 15 +BreastStroke/v_BreastStroke_g14_c03 18 +IceDancing/v_IceDancing_g25_c06 43 +SkateBoarding/v_SkateBoarding_g23_c01 79 +SalsaSpin/v_SalsaSpin_g22_c05 76 +HulaHoop/v_HulaHoop_g13_c02 42 +ParallelBars/v_ParallelBars_g22_c01 56 +BasketballDunk/v_BasketballDunk_g17_c05 8 +BoxingSpeedBag/v_BoxingSpeedBag_g13_c03 17 +Hammering/v_Hammering_g12_c01 34 +FieldHockeyPenalty/v_FieldHockeyPenalty_g18_c01 28 +Rowing/v_Rowing_g17_c02 75 +Fencing/v_Fencing_g21_c01 27 +SoccerPenalty/v_SoccerPenalty_g17_c02 84 +FrontCrawl/v_FrontCrawl_g08_c04 31 +BenchPress/v_BenchPress_g19_c06 9 
+HammerThrow/v_HammerThrow_g19_c07 35 +HighJump/v_HighJump_g10_c02 39 +FloorGymnastics/v_FloorGymnastics_g10_c01 29 +PushUps/v_PushUps_g09_c03 71 +Swing/v_Swing_g14_c05 88 +LongJump/v_LongJump_g09_c04 50 +MoppingFloor/v_MoppingFloor_g25_c04 54 +GolfSwing/v_GolfSwing_g19_c04 32 +VolleyballSpiking/v_VolleyballSpiking_g13_c07 96 +SumoWrestling/v_SumoWrestling_g23_c03 86 +PlayingFlute/v_PlayingFlute_g21_c03 61 +SoccerJuggling/v_SoccerJuggling_g18_c05 83 +PizzaTossing/v_PizzaTossing_g17_c03 57 +CricketBowling/v_CricketBowling_g18_c01 22 +PoleVault/v_PoleVault_g08_c04 67 +BoxingPunchingBag/v_BoxingPunchingBag_g22_c02 16 +CleanAndJerk/v_CleanAndJerk_g25_c03 20 +RockClimbingIndoor/v_RockClimbingIndoor_g22_c06 73 +Rafting/v_Rafting_g14_c03 72 +BabyCrawling/v_BabyCrawling_g18_c05 3 +Lunges/v_Lunges_g22_c02 51 +ParallelBars/v_ParallelBars_g19_c04 56 +Punch/v_Punch_g23_c05 70 +ApplyLipstick/v_ApplyLipstick_g12_c01 1 +Basketball/v_Basketball_g23_c04 7 +PlayingViolin/v_PlayingViolin_g13_c02 66 +HeadMassage/v_HeadMassage_g15_c01 38 +BenchPress/v_BenchPress_g13_c03 9 +UnevenBars/v_UnevenBars_g22_c01 95 +RopeClimbing/v_RopeClimbing_g21_c03 74 +JavelinThrow/v_JavelinThrow_g24_c01 44 +SoccerJuggling/v_SoccerJuggling_g17_c07 83 +Knitting/v_Knitting_g18_c02 49 +Diving/v_Diving_g15_c07 25 +JavelinThrow/v_JavelinThrow_g21_c01 44 +JumpRope/v_JumpRope_g12_c03 47 +Skijet/v_Skijet_g20_c03 81 +WritingOnBoard/v_WritingOnBoard_g25_c06 99 +FieldHockeyPenalty/v_FieldHockeyPenalty_g25_c03 28 +HorseRiding/v_HorseRiding_g17_c04 41 +Rowing/v_Rowing_g17_c05 75 +Punch/v_Punch_g11_c05 70 +SoccerPenalty/v_SoccerPenalty_g12_c03 84 +MoppingFloor/v_MoppingFloor_g14_c01 54 +SumoWrestling/v_SumoWrestling_g18_c05 86 +CleanAndJerk/v_CleanAndJerk_g18_c02 20 +Billiards/v_Billiards_g20_c02 11 +Fencing/v_Fencing_g10_c05 27 +FieldHockeyPenalty/v_FieldHockeyPenalty_g12_c01 28 +BalanceBeam/v_BalanceBeam_g11_c03 4 +Skiing/v_Skiing_g22_c02 80 +Archery/v_Archery_g23_c04 2 +FrisbeeCatch/v_FrisbeeCatch_g11_c04 30 
+TableTennisShot/v_TableTennisShot_g22_c04 89 +HorseRiding/v_HorseRiding_g16_c03 41 +FrontCrawl/v_FrontCrawl_g22_c05 31 +PlayingTabla/v_PlayingTabla_g19_c03 65 +IceDancing/v_IceDancing_g10_c02 43 +BenchPress/v_BenchPress_g14_c06 9 +SkyDiving/v_SkyDiving_g17_c05 82 +Mixing/v_Mixing_g14_c03 53 +CricketBowling/v_CricketBowling_g11_c07 22 +WalkingWithDog/v_WalkingWithDog_g15_c03 97 +SoccerPenalty/v_SoccerPenalty_g10_c04 84 +FieldHockeyPenalty/v_FieldHockeyPenalty_g19_c03 28 +Archery/v_Archery_g18_c07 2 +TableTennisShot/v_TableTennisShot_g20_c02 89 +Shotput/v_Shotput_g10_c04 78 +VolleyballSpiking/v_VolleyballSpiking_g20_c02 96 +Kayaking/v_Kayaking_g23_c04 48 +TableTennisShot/v_TableTennisShot_g20_c06 89 +CliffDiving/v_CliffDiving_g19_c04 21 +Shotput/v_Shotput_g15_c01 78 +Diving/v_Diving_g08_c06 25 +PlayingDhol/v_PlayingDhol_g21_c03 60 +PlayingGuitar/v_PlayingGuitar_g10_c06 62 +PlayingDaf/v_PlayingDaf_g20_c07 59 +PizzaTossing/v_PizzaTossing_g24_c01 57 +PlayingSitar/v_PlayingSitar_g14_c06 64 +Rafting/v_Rafting_g19_c05 72 +ApplyEyeMakeup/v_ApplyEyeMakeup_g14_c03 0 +Biking/v_Biking_g20_c07 10 +Kayaking/v_Kayaking_g11_c05 48 +Surfing/v_Surfing_g17_c07 87 +Fencing/v_Fencing_g25_c02 27 +JugglingBalls/v_JugglingBalls_g08_c04 45 +PommelHorse/v_PommelHorse_g16_c03 68 +PlayingDaf/v_PlayingDaf_g15_c06 59 +PoleVault/v_PoleVault_g12_c02 67 +Swing/v_Swing_g22_c05 88 +BasketballDunk/v_BasketballDunk_g22_c01 8 +TennisSwing/v_TennisSwing_g20_c02 91 +BoxingSpeedBag/v_BoxingSpeedBag_g10_c02 17 +PlayingPiano/v_PlayingPiano_g23_c01 63 +PullUps/v_PullUps_g21_c02 69 +TaiChi/v_TaiChi_g11_c04 90 +HandstandPushups/v_HandStandPushups_g12_c02 36 +CricketShot/v_CricketShot_g21_c04 23 +BoxingSpeedBag/v_BoxingSpeedBag_g23_c02 17 +BreastStroke/v_BreastStroke_g17_c01 18 +FloorGymnastics/v_FloorGymnastics_g18_c06 29 +TennisSwing/v_TennisSwing_g14_c02 91 +ApplyEyeMakeup/v_ApplyEyeMakeup_g11_c03 0 +CliffDiving/v_CliffDiving_g22_c06 21 +Rafting/v_Rafting_g10_c02 72 +Bowling/v_Bowling_g19_c03 15 
+PommelHorse/v_PommelHorse_g10_c03 68 +FrisbeeCatch/v_FrisbeeCatch_g16_c05 30 +Shotput/v_Shotput_g15_c02 78 +CricketBowling/v_CricketBowling_g22_c04 22 +HammerThrow/v_HammerThrow_g12_c02 35 +PommelHorse/v_PommelHorse_g08_c03 68 +Bowling/v_Bowling_g08_c05 15 +PlayingPiano/v_PlayingPiano_g20_c01 63 +HorseRace/v_HorseRace_g24_c07 40 +JumpingJack/v_JumpingJack_g10_c06 46 +YoYo/v_YoYo_g25_c03 100 +VolleyballSpiking/v_VolleyballSpiking_g09_c06 96 +Knitting/v_Knitting_g23_c01 49 +CricketShot/v_CricketShot_g23_c02 23 +Shotput/v_Shotput_g15_c05 78 +TaiChi/v_TaiChi_g20_c02 90 +ThrowDiscus/v_ThrowDiscus_g22_c01 92 +ApplyEyeMakeup/v_ApplyEyeMakeup_g21_c05 0 +PlayingDaf/v_PlayingDaf_g10_c01 59 +SumoWrestling/v_SumoWrestling_g10_c03 86 +Hammering/v_Hammering_g09_c05 34 +SoccerPenalty/v_SoccerPenalty_g22_c04 84 +Typing/v_Typing_g23_c01 94 +BodyWeightSquats/v_BodyWeightSquats_g08_c04 14 +CliffDiving/v_CliffDiving_g20_c07 21 +BenchPress/v_BenchPress_g15_c01 9 +ThrowDiscus/v_ThrowDiscus_g09_c04 92 +SkateBoarding/v_SkateBoarding_g20_c03 79 +Surfing/v_Surfing_g09_c04 87 +PoleVault/v_PoleVault_g24_c02 67 +Surfing/v_Surfing_g10_c02 87 +BalanceBeam/v_BalanceBeam_g16_c03 4 +ApplyLipstick/v_ApplyLipstick_g18_c04 1 +PlayingGuitar/v_PlayingGuitar_g16_c03 62 +RockClimbingIndoor/v_RockClimbingIndoor_g08_c01 73 +PlayingFlute/v_PlayingFlute_g23_c04 61 +CuttingInKitchen/v_CuttingInKitchen_g24_c04 24 +PullUps/v_PullUps_g18_c02 69 +Typing/v_Typing_g12_c04 94 +Nunchucks/v_Nunchucks_g15_c05 55 +RopeClimbing/v_RopeClimbing_g17_c04 74 +ShavingBeard/v_ShavingBeard_g20_c03 77 +HighJump/v_HighJump_g20_c03 39 +BoxingSpeedBag/v_BoxingSpeedBag_g22_c02 17 +VolleyballSpiking/v_VolleyballSpiking_g11_c04 96 +BoxingPunchingBag/v_BoxingPunchingBag_g15_c02 16 +BenchPress/v_BenchPress_g25_c03 9 +HandstandPushups/v_HandStandPushups_g15_c04 36 +JumpingJack/v_JumpingJack_g14_c04 46 +SalsaSpin/v_SalsaSpin_g11_c05 76 +PlayingDaf/v_PlayingDaf_g17_c03 59 +PommelHorse/v_PommelHorse_g21_c04 68 
+HammerThrow/v_HammerThrow_g24_c03 35 +PlayingPiano/v_PlayingPiano_g15_c04 63 +HeadMassage/v_HeadMassage_g15_c04 38 +SkateBoarding/v_SkateBoarding_g24_c02 79 +CricketBowling/v_CricketBowling_g10_c04 22 +Rowing/v_Rowing_g24_c02 75 +Drumming/v_Drumming_g10_c05 26 +SalsaSpin/v_SalsaSpin_g10_c01 76 +HorseRace/v_HorseRace_g22_c03 40 +HulaHoop/v_HulaHoop_g24_c04 42 +Biking/v_Biking_g20_c02 10 +JumpingJack/v_JumpingJack_g15_c03 46 +HammerThrow/v_HammerThrow_g23_c02 35 +Mixing/v_Mixing_g18_c01 53 +MilitaryParade/v_MilitaryParade_g17_c04 52 +Fencing/v_Fencing_g14_c01 27 +TennisSwing/v_TennisSwing_g08_c06 91 +Archery/v_Archery_g08_c02 2 +JugglingBalls/v_JugglingBalls_g22_c06 45 +WalkingWithDog/v_WalkingWithDog_g22_c02 97 +Basketball/v_Basketball_g24_c03 7 +Hammering/v_Hammering_g16_c03 34 +JavelinThrow/v_JavelinThrow_g15_c04 44 +WalkingWithDog/v_WalkingWithDog_g16_c05 97 +FloorGymnastics/v_FloorGymnastics_g25_c02 29 +TrampolineJumping/v_TrampolineJumping_g15_c02 93 +PlayingViolin/v_PlayingViolin_g08_c01 66 +BaseballPitch/v_BaseballPitch_g11_c04 6 +Haircut/v_Haircut_g14_c04 33 +HeadMassage/v_HeadMassage_g23_c02 38 +PullUps/v_PullUps_g09_c01 69 +HandstandPushups/v_HandStandPushups_g25_c02 36 +WritingOnBoard/v_WritingOnBoard_g17_c01 99 +HorseRiding/v_HorseRiding_g23_c06 41 +PullUps/v_PullUps_g10_c03 69 +Hammering/v_Hammering_g23_c06 34 +PlayingDaf/v_PlayingDaf_g09_c05 59 +TableTennisShot/v_TableTennisShot_g25_c06 89 +SalsaSpin/v_SalsaSpin_g14_c03 76 +BoxingPunchingBag/v_BoxingPunchingBag_g09_c06 16 +PlayingFlute/v_PlayingFlute_g08_c07 61 +TrampolineJumping/v_TrampolineJumping_g19_c06 93 +TrampolineJumping/v_TrampolineJumping_g18_c04 93 +TennisSwing/v_TennisSwing_g20_c04 91 +BandMarching/v_BandMarching_g12_c03 5 +Punch/v_Punch_g08_c04 70 +ThrowDiscus/v_ThrowDiscus_g08_c03 92 +HighJump/v_HighJump_g23_c03 39 +HighJump/v_HighJump_g25_c03 39 +ParallelBars/v_ParallelBars_g24_c03 56 +Kayaking/v_Kayaking_g09_c05 48 +PlayingCello/v_PlayingCello_g13_c03 58 +Nunchucks/v_Nunchucks_g21_c06 
55 +BoxingPunchingBag/v_BoxingPunchingBag_g18_c03 16 +MilitaryParade/v_MilitaryParade_g18_c05 52 +FloorGymnastics/v_FloorGymnastics_g14_c02 29 +Lunges/v_Lunges_g14_c01 51 +UnevenBars/v_UnevenBars_g15_c03 95 +PlayingGuitar/v_PlayingGuitar_g08_c05 62 +Skijet/v_Skijet_g10_c04 81 +HeadMassage/v_HeadMassage_g13_c04 38 +Kayaking/v_Kayaking_g24_c06 48 +Nunchucks/v_Nunchucks_g12_c01 55 +CleanAndJerk/v_CleanAndJerk_g10_c02 20 +BandMarching/v_BandMarching_g18_c01 5 +GolfSwing/v_GolfSwing_g21_c03 32 +JavelinThrow/v_JavelinThrow_g14_c01 44 +TaiChi/v_TaiChi_g09_c04 90 +Typing/v_Typing_g11_c01 94 +Basketball/v_Basketball_g15_c02 7 +BlowingCandles/v_BlowingCandles_g25_c02 13 +IceDancing/v_IceDancing_g17_c03 43 +HammerThrow/v_HammerThrow_g20_c03 35 +BoxingPunchingBag/v_BoxingPunchingBag_g20_c01 16 +TennisSwing/v_TennisSwing_g18_c03 91 +WalkingWithDog/v_WalkingWithDog_g20_c03 97 +Swing/v_Swing_g09_c04 88 +ShavingBeard/v_ShavingBeard_g09_c06 77 +Punch/v_Punch_g08_c05 70 +BlowDryHair/v_BlowDryHair_g18_c05 12 +Diving/v_Diving_g15_c03 25 +PommelHorse/v_PommelHorse_g18_c02 68 +PlayingPiano/v_PlayingPiano_g17_c01 63 +SoccerPenalty/v_SoccerPenalty_g22_c03 84 +PullUps/v_PullUps_g20_c02 69 +ApplyLipstick/v_ApplyLipstick_g20_c05 1 +BoxingPunchingBag/v_BoxingPunchingBag_g16_c04 16 +HulaHoop/v_HulaHoop_g18_c01 42 +PlayingPiano/v_PlayingPiano_g21_c04 63 +HeadMassage/v_HeadMassage_g20_c03 38 +BalanceBeam/v_BalanceBeam_g09_c04 4 +IceDancing/v_IceDancing_g24_c02 43 +Diving/v_Diving_g08_c05 25 +Mixing/v_Mixing_g22_c02 53 +RopeClimbing/v_RopeClimbing_g09_c01 74 +YoYo/v_YoYo_g21_c05 100 +PlayingViolin/v_PlayingViolin_g24_c01 66 +PlayingTabla/v_PlayingTabla_g16_c01 65 +SoccerPenalty/v_SoccerPenalty_g22_c01 84 +ThrowDiscus/v_ThrowDiscus_g16_c02 92 +Punch/v_Punch_g15_c02 70 +BalanceBeam/v_BalanceBeam_g12_c03 4 +TaiChi/v_TaiChi_g18_c03 90 +YoYo/v_YoYo_g23_c04 100 +BalanceBeam/v_BalanceBeam_g16_c01 4 +Nunchucks/v_Nunchucks_g18_c03 55 +WalkingWithDog/v_WalkingWithDog_g21_c03 97 
+JugglingBalls/v_JugglingBalls_g11_c04 45 +FloorGymnastics/v_FloorGymnastics_g15_c04 29 +ParallelBars/v_ParallelBars_g23_c04 56 +HammerThrow/v_HammerThrow_g08_c03 35 +JumpRope/v_JumpRope_g08_c01 47 +BenchPress/v_BenchPress_g15_c05 9 +BrushingTeeth/v_BrushingTeeth_g10_c01 19 +BrushingTeeth/v_BrushingTeeth_g22_c03 19 +PizzaTossing/v_PizzaTossing_g18_c02 57 +PlayingGuitar/v_PlayingGuitar_g11_c05 62 +Punch/v_Punch_g14_c03 70 +CricketBowling/v_CricketBowling_g12_c05 22 +JumpRope/v_JumpRope_g09_c05 47 +GolfSwing/v_GolfSwing_g12_c01 32 +BreastStroke/v_BreastStroke_g10_c03 18 +Basketball/v_Basketball_g12_c04 7 +BandMarching/v_BandMarching_g17_c03 5 +BoxingSpeedBag/v_BoxingSpeedBag_g09_c03 17 +WallPushups/v_WallPushups_g24_c02 98 +SoccerJuggling/v_SoccerJuggling_g18_c02 83 +CricketShot/v_CricketShot_g16_c03 23 +FloorGymnastics/v_FloorGymnastics_g21_c05 29 +SumoWrestling/v_SumoWrestling_g16_c04 86 +PlayingTabla/v_PlayingTabla_g11_c04 65 +TennisSwing/v_TennisSwing_g15_c06 91 +WallPushups/v_WallPushups_g14_c03 98 +Lunges/v_Lunges_g08_c04 51 +Surfing/v_Surfing_g12_c03 87 +LongJump/v_LongJump_g23_c03 50 +SumoWrestling/v_SumoWrestling_g13_c03 86 +HandstandWalking/v_HandstandWalking_g08_c01 37 +SoccerJuggling/v_SoccerJuggling_g13_c03 83 +CleanAndJerk/v_CleanAndJerk_g15_c04 20 +PlayingFlute/v_PlayingFlute_g13_c03 61 +HulaHoop/v_HulaHoop_g18_c03 42 +Mixing/v_Mixing_g08_c02 53 +WalkingWithDog/v_WalkingWithDog_g13_c05 97 +HulaHoop/v_HulaHoop_g21_c02 42 +SalsaSpin/v_SalsaSpin_g16_c01 76 +BandMarching/v_BandMarching_g15_c03 5 +SoccerJuggling/v_SoccerJuggling_g22_c07 83 +Skijet/v_Skijet_g22_c04 81 +BandMarching/v_BandMarching_g10_c05 5 +ShavingBeard/v_ShavingBeard_g12_c01 77 +MoppingFloor/v_MoppingFloor_g21_c04 54 +JugglingBalls/v_JugglingBalls_g12_c04 45 +MoppingFloor/v_MoppingFloor_g12_c01 54 +JugglingBalls/v_JugglingBalls_g25_c05 45 +HorseRiding/v_HorseRiding_g15_c03 41 +JavelinThrow/v_JavelinThrow_g22_c04 44 +PlayingSitar/v_PlayingSitar_g10_c05 64 +JumpRope/v_JumpRope_g23_c01 47 
+ShavingBeard/v_ShavingBeard_g10_c06 77 +BlowDryHair/v_BlowDryHair_g11_c06 12 +PoleVault/v_PoleVault_g21_c05 67 +CliffDiving/v_CliffDiving_g10_c04 21 +HorseRiding/v_HorseRiding_g18_c01 41 +MoppingFloor/v_MoppingFloor_g22_c03 54 +PlayingGuitar/v_PlayingGuitar_g16_c05 62 +ApplyEyeMakeup/v_ApplyEyeMakeup_g13_c02 0 +HulaHoop/v_HulaHoop_g23_c05 42 +PlayingFlute/v_PlayingFlute_g13_c02 61 +PlayingGuitar/v_PlayingGuitar_g19_c03 62 +CleanAndJerk/v_CleanAndJerk_g14_c01 20 +PlayingDaf/v_PlayingDaf_g25_c01 59 +WallPushups/v_WallPushups_g16_c04 98 +SoccerPenalty/v_SoccerPenalty_g18_c03 84 +Knitting/v_Knitting_g08_c03 49 +TaiChi/v_TaiChi_g17_c03 90 +Kayaking/v_Kayaking_g10_c01 48 +Shotput/v_Shotput_g11_c07 78 +WallPushups/v_WallPushups_g16_c03 98 +Biking/v_Biking_g21_c05 10 +JavelinThrow/v_JavelinThrow_g18_c04 44 +PlayingCello/v_PlayingCello_g14_c04 58 +PlayingTabla/v_PlayingTabla_g11_c01 65 +MilitaryParade/v_MilitaryParade_g25_c03 52 +SoccerJuggling/v_SoccerJuggling_g24_c02 83 +SkyDiving/v_SkyDiving_g20_c06 82 +VolleyballSpiking/v_VolleyballSpiking_g17_c02 96 +HandstandWalking/v_HandstandWalking_g22_c02 37 +ApplyLipstick/v_ApplyLipstick_g12_c05 1 +SkyDiving/v_SkyDiving_g17_c04 82 +Drumming/v_Drumming_g16_c05 26 +SkyDiving/v_SkyDiving_g16_c04 82 +SkyDiving/v_SkyDiving_g20_c04 82 +ShavingBeard/v_ShavingBeard_g13_c03 77 +ShavingBeard/v_ShavingBeard_g22_c03 77 +CliffDiving/v_CliffDiving_g09_c06 21 +CricketShot/v_CricketShot_g14_c01 23 +ApplyLipstick/v_ApplyLipstick_g22_c07 1 +Mixing/v_Mixing_g23_c04 53 +RopeClimbing/v_RopeClimbing_g15_c01 74 +HulaHoop/v_HulaHoop_g25_c01 42 +BrushingTeeth/v_BrushingTeeth_g18_c03 19 +BenchPress/v_BenchPress_g23_c03 9 +IceDancing/v_IceDancing_g15_c03 43 +Hammering/v_Hammering_g19_c04 34 +Bowling/v_Bowling_g25_c01 15 +HorseRiding/v_HorseRiding_g17_c03 41 +BaseballPitch/v_BaseballPitch_g14_c03 6 +Skiing/v_Skiing_g19_c01 80 +TrampolineJumping/v_TrampolineJumping_g24_c04 93 +ShavingBeard/v_ShavingBeard_g22_c05 77 +Typing/v_Typing_g10_c07 94 
+HeadMassage/v_HeadMassage_g11_c02 38 +Billiards/v_Billiards_g25_c03 11 +StillRings/v_StillRings_g21_c04 85 +Lunges/v_Lunges_g16_c03 51 +SoccerJuggling/v_SoccerJuggling_g08_c03 83 +RockClimbingIndoor/v_RockClimbingIndoor_g20_c02 73 +Nunchucks/v_Nunchucks_g12_c04 55 +BrushingTeeth/v_BrushingTeeth_g10_c02 19 +JumpingJack/v_JumpingJack_g12_c04 46 +BaseballPitch/v_BaseballPitch_g08_c04 6 +TableTennisShot/v_TableTennisShot_g15_c03 89 +Swing/v_Swing_g12_c02 88 +Drumming/v_Drumming_g16_c01 26 +MilitaryParade/v_MilitaryParade_g19_c03 52 +JavelinThrow/v_JavelinThrow_g17_c02 44 +Billiards/v_Billiards_g13_c04 11 +SoccerPenalty/v_SoccerPenalty_g20_c03 84 +BasketballDunk/v_BasketballDunk_g13_c02 8 +UnevenBars/v_UnevenBars_g09_c01 95 +BlowingCandles/v_BlowingCandles_g08_c03 13 +FrontCrawl/v_FrontCrawl_g10_c02 31 +PlayingGuitar/v_PlayingGuitar_g24_c03 62 +PizzaTossing/v_PizzaTossing_g18_c04 57 +ParallelBars/v_ParallelBars_g25_c04 56 +PlayingTabla/v_PlayingTabla_g17_c03 65 +BenchPress/v_BenchPress_g18_c03 9 +SkateBoarding/v_SkateBoarding_g11_c04 79 +PlayingTabla/v_PlayingTabla_g25_c03 65 +Shotput/v_Shotput_g11_c06 78 +PlayingDaf/v_PlayingDaf_g21_c07 59 +BalanceBeam/v_BalanceBeam_g22_c02 4 +BaseballPitch/v_BaseballPitch_g19_c03 6 +FloorGymnastics/v_FloorGymnastics_g11_c03 29 +BasketballDunk/v_BasketballDunk_g12_c05 8 +JumpingJack/v_JumpingJack_g19_c01 46 +BoxingPunchingBag/v_BoxingPunchingBag_g14_c03 16 +ThrowDiscus/v_ThrowDiscus_g17_c04 92 +FrisbeeCatch/v_FrisbeeCatch_g11_c03 30 +PlayingGuitar/v_PlayingGuitar_g17_c07 62 +CliffDiving/v_CliffDiving_g21_c04 21 +BlowDryHair/v_BlowDryHair_g16_c04 12 +ParallelBars/v_ParallelBars_g08_c01 56 +BodyWeightSquats/v_BodyWeightSquats_g18_c01 14 +CliffDiving/v_CliffDiving_g08_c04 21 +HeadMassage/v_HeadMassage_g14_c05 38 +FrisbeeCatch/v_FrisbeeCatch_g18_c05 30 +TennisSwing/v_TennisSwing_g15_c07 91 +Mixing/v_Mixing_g24_c02 53 +PlayingFlute/v_PlayingFlute_g18_c04 61 +IceDancing/v_IceDancing_g12_c02 43 +HorseRiding/v_HorseRiding_g20_c04 41 
+HighJump/v_HighJump_g18_c02 39 +JumpingJack/v_JumpingJack_g23_c01 46 +HorseRace/v_HorseRace_g13_c04 40 +HorseRiding/v_HorseRiding_g14_c07 41 +FrontCrawl/v_FrontCrawl_g15_c03 31 +PizzaTossing/v_PizzaTossing_g22_c03 57 +Fencing/v_Fencing_g16_c02 27 +PommelHorse/v_PommelHorse_g15_c04 68 +HeadMassage/v_HeadMassage_g23_c06 38 +BalanceBeam/v_BalanceBeam_g09_c01 4 +Bowling/v_Bowling_g18_c01 15 +JavelinThrow/v_JavelinThrow_g10_c01 44 +FrisbeeCatch/v_FrisbeeCatch_g24_c03 30 +PullUps/v_PullUps_g19_c02 69 +HulaHoop/v_HulaHoop_g11_c03 42 +ApplyEyeMakeup/v_ApplyEyeMakeup_g08_c05 0 +PlayingDhol/v_PlayingDhol_g11_c02 60 +Drumming/v_Drumming_g25_c04 26 +BenchPress/v_BenchPress_g09_c02 9 +PizzaTossing/v_PizzaTossing_g22_c04 57 +HeadMassage/v_HeadMassage_g14_c01 38 +CliffDiving/v_CliffDiving_g08_c02 21 +Fencing/v_Fencing_g11_c04 27 +FieldHockeyPenalty/v_FieldHockeyPenalty_g10_c03 28 +Diving/v_Diving_g16_c03 25 +BlowDryHair/v_BlowDryHair_g17_c02 12 +PlayingPiano/v_PlayingPiano_g10_c02 63 +YoYo/v_YoYo_g08_c01 100 +BoxingPunchingBag/v_BoxingPunchingBag_g10_c03 16 +SoccerJuggling/v_SoccerJuggling_g19_c07 83 +StillRings/v_StillRings_g14_c03 85 +TennisSwing/v_TennisSwing_g21_c01 91 +CricketBowling/v_CricketBowling_g16_c06 22 +RopeClimbing/v_RopeClimbing_g09_c04 74 +Biking/v_Biking_g10_c02 10 +BrushingTeeth/v_BrushingTeeth_g10_c04 19 +LongJump/v_LongJump_g15_c03 50 +BabyCrawling/v_BabyCrawling_g13_c06 3 +PlayingViolin/v_PlayingViolin_g15_c03 66 +JumpRope/v_JumpRope_g08_c06 47 +Swing/v_Swing_g20_c04 88 +Billiards/v_Billiards_g15_c05 11 +SkateBoarding/v_SkateBoarding_g18_c05 79 +Punch/v_Punch_g13_c07 70 +MilitaryParade/v_MilitaryParade_g13_c03 52 +BalanceBeam/v_BalanceBeam_g24_c01 4 +BalanceBeam/v_BalanceBeam_g18_c04 4 +Rafting/v_Rafting_g10_c04 72 +TennisSwing/v_TennisSwing_g13_c06 91 +PlayingCello/v_PlayingCello_g21_c03 58 +Mixing/v_Mixing_g18_c04 53 +PlayingPiano/v_PlayingPiano_g13_c03 63 +ParallelBars/v_ParallelBars_g17_c01 56 +ApplyEyeMakeup/v_ApplyEyeMakeup_g23_c06 0 
+Basketball/v_Basketball_g09_c01 7 +JavelinThrow/v_JavelinThrow_g25_c04 44 +PommelHorse/v_PommelHorse_g14_c04 68 +Shotput/v_Shotput_g14_c03 78 +PommelHorse/v_PommelHorse_g15_c01 68 +ParallelBars/v_ParallelBars_g19_c03 56 +WritingOnBoard/v_WritingOnBoard_g24_c05 99 +IceDancing/v_IceDancing_g19_c02 43 +PlayingGuitar/v_PlayingGuitar_g20_c03 62 +SalsaSpin/v_SalsaSpin_g11_c06 76 +VolleyballSpiking/v_VolleyballSpiking_g19_c01 96 +BenchPress/v_BenchPress_g18_c04 9 +VolleyballSpiking/v_VolleyballSpiking_g08_c03 96 +BoxingPunchingBag/v_BoxingPunchingBag_g15_c05 16 +WritingOnBoard/v_WritingOnBoard_g22_c01 99 +BasketballDunk/v_BasketballDunk_g16_c01 8 +TableTennisShot/v_TableTennisShot_g24_c05 89 +Nunchucks/v_Nunchucks_g15_c03 55 +VolleyballSpiking/v_VolleyballSpiking_g19_c03 96 +Fencing/v_Fencing_g11_c02 27 +HulaHoop/v_HulaHoop_g17_c01 42 +PlayingDhol/v_PlayingDhol_g24_c03 60 +WallPushups/v_WallPushups_g21_c04 98 +Nunchucks/v_Nunchucks_g18_c06 55 +PushUps/v_PushUps_g17_c04 71 +FloorGymnastics/v_FloorGymnastics_g22_c01 29 +PlayingFlute/v_PlayingFlute_g09_c02 61 +Rafting/v_Rafting_g20_c02 72 +TennisSwing/v_TennisSwing_g22_c02 91 +SkyDiving/v_SkyDiving_g24_c04 82 +BaseballPitch/v_BaseballPitch_g23_c01 6 +BandMarching/v_BandMarching_g12_c05 5 +LongJump/v_LongJump_g09_c02 50 +SoccerJuggling/v_SoccerJuggling_g11_c03 83 +PlayingFlute/v_PlayingFlute_g10_c05 61 +Knitting/v_Knitting_g10_c02 49 +Lunges/v_Lunges_g22_c04 51 +TaiChi/v_TaiChi_g23_c01 90 +YoYo/v_YoYo_g11_c04 100 +BabyCrawling/v_BabyCrawling_g19_c01 3 +BodyWeightSquats/v_BodyWeightSquats_g14_c02 14 +BlowDryHair/v_BlowDryHair_g19_c01 12 +JumpRope/v_JumpRope_g11_c04 47 +Mixing/v_Mixing_g13_c01 53 +SoccerJuggling/v_SoccerJuggling_g24_c03 83 +PlayingFlute/v_PlayingFlute_g19_c07 61 +PullUps/v_PullUps_g23_c02 69 +BrushingTeeth/v_BrushingTeeth_g13_c04 19 +Drumming/v_Drumming_g22_c03 26 +PlayingCello/v_PlayingCello_g24_c03 58 +Punch/v_Punch_g11_c06 70 +Haircut/v_Haircut_g23_c02 33 +ParallelBars/v_ParallelBars_g25_c01 56 
+Hammering/v_Hammering_g11_c02 34 +SoccerPenalty/v_SoccerPenalty_g18_c01 84 +BlowingCandles/v_BlowingCandles_g15_c02 13 +PlayingCello/v_PlayingCello_g09_c01 58 +Bowling/v_Bowling_g09_c02 15 +SoccerJuggling/v_SoccerJuggling_g21_c05 83 +JumpRope/v_JumpRope_g20_c07 47 +TrampolineJumping/v_TrampolineJumping_g24_c01 93 +Archery/v_Archery_g15_c01 2 +YoYo/v_YoYo_g08_c03 100 +Swing/v_Swing_g22_c03 88 +ApplyLipstick/v_ApplyLipstick_g15_c04 1 +BaseballPitch/v_BaseballPitch_g22_c03 6 +PoleVault/v_PoleVault_g13_c01 67 +SkateBoarding/v_SkateBoarding_g24_c05 79 +BlowDryHair/v_BlowDryHair_g25_c03 12 +PlayingDhol/v_PlayingDhol_g11_c04 60 +BreastStroke/v_BreastStroke_g15_c03 18 +Basketball/v_Basketball_g10_c04 7 +StillRings/v_StillRings_g09_c01 85 +Punch/v_Punch_g08_c07 70 +ApplyLipstick/v_ApplyLipstick_g18_c01 1 +RopeClimbing/v_RopeClimbing_g19_c03 74 +FrontCrawl/v_FrontCrawl_g17_c01 31 +PlayingTabla/v_PlayingTabla_g12_c03 65 +StillRings/v_StillRings_g12_c01 85 +HeadMassage/v_HeadMassage_g17_c04 38 +RopeClimbing/v_RopeClimbing_g19_c05 74 +BoxingSpeedBag/v_BoxingSpeedBag_g16_c04 17 +GolfSwing/v_GolfSwing_g20_c05 32 +SkyDiving/v_SkyDiving_g18_c03 82 +PlayingDaf/v_PlayingDaf_g22_c06 59 +ThrowDiscus/v_ThrowDiscus_g18_c03 92 +Surfing/v_Surfing_g24_c04 87 +FieldHockeyPenalty/v_FieldHockeyPenalty_g16_c01 28 +Drumming/v_Drumming_g17_c01 26 +WalkingWithDog/v_WalkingWithDog_g22_c03 97 +BaseballPitch/v_BaseballPitch_g08_c05 6 +ShavingBeard/v_ShavingBeard_g11_c02 77 +RopeClimbing/v_RopeClimbing_g10_c05 74 +MilitaryParade/v_MilitaryParade_g13_c04 52 +JavelinThrow/v_JavelinThrow_g23_c03 44 +Skiing/v_Skiing_g14_c03 80 +Shotput/v_Shotput_g21_c02 78 +GolfSwing/v_GolfSwing_g08_c06 32 +PlayingFlute/v_PlayingFlute_g17_c06 61 +ParallelBars/v_ParallelBars_g11_c05 56 +SoccerPenalty/v_SoccerPenalty_g11_c04 84 +PlayingFlute/v_PlayingFlute_g21_c01 61 +ThrowDiscus/v_ThrowDiscus_g21_c01 92 +WritingOnBoard/v_WritingOnBoard_g12_c01 99 +BoxingSpeedBag/v_BoxingSpeedBag_g16_c05 17 
+BandMarching/v_BandMarching_g13_c01 5 +BoxingPunchingBag/v_BoxingPunchingBag_g21_c01 16 +GolfSwing/v_GolfSwing_g20_c06 32 +CricketShot/v_CricketShot_g11_c03 23 +Billiards/v_Billiards_g23_c03 11 +HorseRiding/v_HorseRiding_g20_c05 41 +ShavingBeard/v_ShavingBeard_g24_c06 77 +BaseballPitch/v_BaseballPitch_g12_c03 6 +HulaHoop/v_HulaHoop_g20_c06 42 +LongJump/v_LongJump_g08_c06 50 +Knitting/v_Knitting_g16_c02 49 +Shotput/v_Shotput_g20_c04 78 +Typing/v_Typing_g10_c04 94 +HeadMassage/v_HeadMassage_g12_c03 38 +Billiards/v_Billiards_g16_c05 11 +Haircut/v_Haircut_g17_c03 33 +Typing/v_Typing_g16_c01 94 +TennisSwing/v_TennisSwing_g18_c02 91 +CuttingInKitchen/v_CuttingInKitchen_g13_c01 24 +BaseballPitch/v_BaseballPitch_g24_c06 6 +BodyWeightSquats/v_BodyWeightSquats_g24_c05 14 +PlayingDhol/v_PlayingDhol_g14_c05 60 +Drumming/v_Drumming_g17_c02 26 +PlayingGuitar/v_PlayingGuitar_g22_c05 62 +ThrowDiscus/v_ThrowDiscus_g11_c01 92 +ShavingBeard/v_ShavingBeard_g16_c03 77 +TennisSwing/v_TennisSwing_g15_c03 91 +HorseRace/v_HorseRace_g15_c01 40 +BandMarching/v_BandMarching_g13_c04 5 +PlayingSitar/v_PlayingSitar_g24_c02 64 +PommelHorse/v_PommelHorse_g16_c02 68 +SumoWrestling/v_SumoWrestling_g23_c01 86 +HandstandPushups/v_HandStandPushups_g17_c01 36 +BalanceBeam/v_BalanceBeam_g10_c03 4 +Kayaking/v_Kayaking_g23_c05 48 +BodyWeightSquats/v_BodyWeightSquats_g13_c03 14 +CricketBowling/v_CricketBowling_g11_c03 22 +HorseRace/v_HorseRace_g10_c03 40 +IceDancing/v_IceDancing_g22_c02 43 +Drumming/v_Drumming_g12_c03 26 +StillRings/v_StillRings_g19_c02 85 +BlowingCandles/v_BlowingCandles_g16_c01 13 +UnevenBars/v_UnevenBars_g21_c01 95 +Archery/v_Archery_g14_c01 2 +IceDancing/v_IceDancing_g22_c06 43 +Archery/v_Archery_g21_c03 2 +FieldHockeyPenalty/v_FieldHockeyPenalty_g21_c02 28 +Lunges/v_Lunges_g14_c04 51 +VolleyballSpiking/v_VolleyballSpiking_g24_c03 96 +Haircut/v_Haircut_g25_c04 33 +SoccerJuggling/v_SoccerJuggling_g22_c02 83 +BasketballDunk/v_BasketballDunk_g20_c02 8 +ShavingBeard/v_ShavingBeard_g10_c05 
77 +WritingOnBoard/v_WritingOnBoard_g12_c03 99 +JumpRope/v_JumpRope_g12_c05 47 +Nunchucks/v_Nunchucks_g08_c05 55 +BaseballPitch/v_BaseballPitch_g10_c05 6 +WalkingWithDog/v_WalkingWithDog_g09_c06 97 +BoxingPunchingBag/v_BoxingPunchingBag_g20_c03 16 +Typing/v_Typing_g22_c01 94 +Mixing/v_Mixing_g15_c07 53 +GolfSwing/v_GolfSwing_g18_c06 32 +Basketball/v_Basketball_g09_c03 7 +PommelHorse/v_PommelHorse_g09_c04 68 +Biking/v_Biking_g24_c05 10 +ThrowDiscus/v_ThrowDiscus_g13_c02 92 +PlayingGuitar/v_PlayingGuitar_g14_c06 62 +MoppingFloor/v_MoppingFloor_g19_c02 54 +Shotput/v_Shotput_g08_c06 78 +SumoWrestling/v_SumoWrestling_g22_c01 86 +Biking/v_Biking_g17_c01 10 +Basketball/v_Basketball_g10_c05 7 +Basketball/v_Basketball_g19_c05 7 +Biking/v_Biking_g24_c04 10 +Punch/v_Punch_g17_c01 70 +Rowing/v_Rowing_g11_c05 75 +SalsaSpin/v_SalsaSpin_g20_c01 76 +HorseRace/v_HorseRace_g21_c01 40 +ParallelBars/v_ParallelBars_g10_c03 56 +PlayingViolin/v_PlayingViolin_g17_c02 66 +RockClimbingIndoor/v_RockClimbingIndoor_g19_c01 73 +BandMarching/v_BandMarching_g11_c07 5 +SkyDiving/v_SkyDiving_g12_c02 82 +PlayingDhol/v_PlayingDhol_g24_c01 60 +PlayingSitar/v_PlayingSitar_g18_c03 64 +ShavingBeard/v_ShavingBeard_g22_c01 77 +PlayingSitar/v_PlayingSitar_g19_c04 64 +IceDancing/v_IceDancing_g18_c05 43 +SkateBoarding/v_SkateBoarding_g16_c02 79 +Skijet/v_Skijet_g18_c02 81 +HandstandPushups/v_HandStandPushups_g24_c01 36 +FrisbeeCatch/v_FrisbeeCatch_g12_c04 30 +JugglingBalls/v_JugglingBalls_g09_c06 45 +SkyDiving/v_SkyDiving_g22_c01 82 +PlayingSitar/v_PlayingSitar_g16_c04 64 +GolfSwing/v_GolfSwing_g24_c07 32 +FloorGymnastics/v_FloorGymnastics_g24_c02 29 +Skiing/v_Skiing_g21_c04 80 +WallPushups/v_WallPushups_g20_c03 98 +PlayingPiano/v_PlayingPiano_g14_c01 63 +Drumming/v_Drumming_g25_c06 26 +PommelHorse/v_PommelHorse_g19_c01 68 +PlayingFlute/v_PlayingFlute_g20_c04 61 +JavelinThrow/v_JavelinThrow_g11_c06 44 +CliffDiving/v_CliffDiving_g19_c02 21 +PlayingViolin/v_PlayingViolin_g09_c02 66 
+BaseballPitch/v_BaseballPitch_g16_c06 6 +TrampolineJumping/v_TrampolineJumping_g13_c03 93 +HulaHoop/v_HulaHoop_g08_c03 42 +TableTennisShot/v_TableTennisShot_g19_c07 89 +BabyCrawling/v_BabyCrawling_g24_c03 3 +Skiing/v_Skiing_g23_c04 80 +HulaHoop/v_HulaHoop_g21_c01 42 +Diving/v_Diving_g09_c05 25 +BoxingSpeedBag/v_BoxingSpeedBag_g25_c07 17 +SkateBoarding/v_SkateBoarding_g23_c03 79 +BreastStroke/v_BreastStroke_g10_c01 18 +BlowDryHair/v_BlowDryHair_g08_c06 12 +Hammering/v_Hammering_g16_c02 34 +StillRings/v_StillRings_g24_c02 85 +CuttingInKitchen/v_CuttingInKitchen_g21_c02 24 +Lunges/v_Lunges_g08_c02 51 +TableTennisShot/v_TableTennisShot_g24_c07 89 +PoleVault/v_PoleVault_g21_c02 67 +UnevenBars/v_UnevenBars_g19_c02 95 +ThrowDiscus/v_ThrowDiscus_g11_c07 92 +YoYo/v_YoYo_g22_c05 100 +WallPushups/v_WallPushups_g24_c04 98 +HighJump/v_HighJump_g23_c04 39 +BalanceBeam/v_BalanceBeam_g13_c04 4 +JumpingJack/v_JumpingJack_g08_c01 46 +Biking/v_Biking_g23_c03 10 +YoYo/v_YoYo_g25_c02 100 +FrisbeeCatch/v_FrisbeeCatch_g13_c02 30 +TennisSwing/v_TennisSwing_g09_c03 91 +Nunchucks/v_Nunchucks_g21_c04 55 +Punch/v_Punch_g24_c01 70 +JumpingJack/v_JumpingJack_g09_c04 46 +HorseRace/v_HorseRace_g21_c05 40 +SoccerPenalty/v_SoccerPenalty_g12_c01 84 +Basketball/v_Basketball_g10_c01 7 +Biking/v_Biking_g18_c05 10 +Billiards/v_Billiards_g14_c05 11 +Typing/v_Typing_g17_c04 94 +RopeClimbing/v_RopeClimbing_g10_c04 74 +BenchPress/v_BenchPress_g21_c01 9 +FrontCrawl/v_FrontCrawl_g22_c03 31 +HammerThrow/v_HammerThrow_g13_c07 35 +YoYo/v_YoYo_g20_c02 100 +CleanAndJerk/v_CleanAndJerk_g15_c03 20 +PlayingTabla/v_PlayingTabla_g08_c01 65 +Skiing/v_Skiing_g24_c04 80 +PlayingSitar/v_PlayingSitar_g09_c07 64 +Biking/v_Biking_g17_c05 10 +BreastStroke/v_BreastStroke_g11_c02 18 +HulaHoop/v_HulaHoop_g14_c05 42 +CliffDiving/v_CliffDiving_g13_c03 21 +GolfSwing/v_GolfSwing_g18_c04 32 +BoxingPunchingBag/v_BoxingPunchingBag_g08_c05 16 +HorseRace/v_HorseRace_g23_c01 40 +SumoWrestling/v_SumoWrestling_g25_c04 86 
+FieldHockeyPenalty/v_FieldHockeyPenalty_g09_c01 28 +ApplyEyeMakeup/v_ApplyEyeMakeup_g17_c03 0 +Billiards/v_Billiards_g21_c03 11 +RockClimbingIndoor/v_RockClimbingIndoor_g08_c06 73 +Bowling/v_Bowling_g09_c05 15 +Lunges/v_Lunges_g12_c04 51 +PlayingTabla/v_PlayingTabla_g08_c02 65 +Hammering/v_Hammering_g21_c02 34 +BodyWeightSquats/v_BodyWeightSquats_g25_c06 14 +Hammering/v_Hammering_g14_c06 34 +HandstandWalking/v_HandstandWalking_g18_c03 37 +BoxingSpeedBag/v_BoxingSpeedBag_g20_c02 17 +Mixing/v_Mixing_g10_c02 53 +BoxingPunchingBag/v_BoxingPunchingBag_g17_c02 16 +SkateBoarding/v_SkateBoarding_g20_c01 79 +SoccerJuggling/v_SoccerJuggling_g09_c05 83 +PizzaTossing/v_PizzaTossing_g13_c03 57 +FloorGymnastics/v_FloorGymnastics_g16_c02 29 +SoccerPenalty/v_SoccerPenalty_g13_c06 84 +BandMarching/v_BandMarching_g14_c05 5 +PommelHorse/v_PommelHorse_g18_c04 68 +HighJump/v_HighJump_g08_c05 39 +Diving/v_Diving_g09_c02 25 +UnevenBars/v_UnevenBars_g15_c05 95 +BenchPress/v_BenchPress_g13_c02 9 +Skiing/v_Skiing_g22_c01 80 +BodyWeightSquats/v_BodyWeightSquats_g24_c04 14 +SoccerJuggling/v_SoccerJuggling_g24_c04 83 +PlayingFlute/v_PlayingFlute_g17_c04 61 +Hammering/v_Hammering_g19_c01 34 +Kayaking/v_Kayaking_g20_c04 48 +PlayingFlute/v_PlayingFlute_g20_c03 61 +SalsaSpin/v_SalsaSpin_g13_c03 76 +BrushingTeeth/v_BrushingTeeth_g09_c03 19 +SkyDiving/v_SkyDiving_g17_c03 82 +Rafting/v_Rafting_g23_c03 72 +Swing/v_Swing_g11_c05 88 +Archery/v_Archery_g09_c05 2 +JumpRope/v_JumpRope_g08_c04 47 +PlayingTabla/v_PlayingTabla_g25_c04 65 +ApplyEyeMakeup/v_ApplyEyeMakeup_g08_c02 0 +ApplyLipstick/v_ApplyLipstick_g16_c03 1 +Punch/v_Punch_g08_c01 70 +Skiing/v_Skiing_g08_c03 80 +ParallelBars/v_ParallelBars_g17_c03 56 +Swing/v_Swing_g22_c02 88 +HandstandPushups/v_HandStandPushups_g11_c04 36 +FrontCrawl/v_FrontCrawl_g22_c01 31 +HandstandWalking/v_HandstandWalking_g25_c04 37 +PlayingDaf/v_PlayingDaf_g24_c03 59 +BaseballPitch/v_BaseballPitch_g25_c02 6 +Billiards/v_Billiards_g09_c01 11 +Typing/v_Typing_g13_c04 94 
+BlowDryHair/v_BlowDryHair_g11_c02 12 +BasketballDunk/v_BasketballDunk_g17_c01 8 +Archery/v_Archery_g08_c05 2 +YoYo/v_YoYo_g11_c02 100 +CricketShot/v_CricketShot_g18_c02 23 +PlayingGuitar/v_PlayingGuitar_g21_c04 62 +Punch/v_Punch_g15_c01 70 +PommelHorse/v_PommelHorse_g25_c03 68 +HorseRace/v_HorseRace_g19_c04 40 +Basketball/v_Basketball_g16_c05 7 +Drumming/v_Drumming_g20_c04 26 +Kayaking/v_Kayaking_g12_c04 48 +IceDancing/v_IceDancing_g14_c03 43 +Rowing/v_Rowing_g17_c01 75 +PommelHorse/v_PommelHorse_g20_c04 68 +CricketShot/v_CricketShot_g08_c02 23 +Rafting/v_Rafting_g20_c03 72 +ApplyLipstick/v_ApplyLipstick_g12_c03 1 +CuttingInKitchen/v_CuttingInKitchen_g22_c01 24 +SkyDiving/v_SkyDiving_g19_c03 82 +FrisbeeCatch/v_FrisbeeCatch_g23_c03 30 +Hammering/v_Hammering_g08_c01 34 +Punch/v_Punch_g25_c02 70 +TennisSwing/v_TennisSwing_g12_c03 91 +PlayingViolin/v_PlayingViolin_g19_c03 66 +CliffDiving/v_CliffDiving_g14_c05 21 +PlayingSitar/v_PlayingSitar_g22_c06 64 +SoccerPenalty/v_SoccerPenalty_g19_c04 84 +PoleVault/v_PoleVault_g17_c05 67 +IceDancing/v_IceDancing_g19_c07 43 +WallPushups/v_WallPushups_g08_c05 98 +Swing/v_Swing_g09_c02 88 +UnevenBars/v_UnevenBars_g16_c03 95 +BasketballDunk/v_BasketballDunk_g18_c04 8 +BoxingPunchingBag/v_BoxingPunchingBag_g20_c02 16 +IceDancing/v_IceDancing_g25_c02 43 +BlowDryHair/v_BlowDryHair_g18_c02 12 +Skijet/v_Skijet_g16_c03 81 +PoleVault/v_PoleVault_g18_c05 67 +TennisSwing/v_TennisSwing_g21_c04 91 +SalsaSpin/v_SalsaSpin_g19_c01 76 +SkateBoarding/v_SkateBoarding_g20_c04 79 +PlayingFlute/v_PlayingFlute_g18_c05 61 +BlowingCandles/v_BlowingCandles_g22_c03 13 +WallPushups/v_WallPushups_g15_c05 98 +YoYo/v_YoYo_g23_c02 100 +Kayaking/v_Kayaking_g09_c06 48 +TrampolineJumping/v_TrampolineJumping_g10_c05 93 +FieldHockeyPenalty/v_FieldHockeyPenalty_g23_c01 28 +HighJump/v_HighJump_g11_c02 39 +Diving/v_Diving_g23_c04 25 +WritingOnBoard/v_WritingOnBoard_g11_c01 99 +Archery/v_Archery_g10_c02 2 +ApplyLipstick/v_ApplyLipstick_g09_c01 1 +Biking/v_Biking_g21_c02 
10 +HeadMassage/v_HeadMassage_g19_c03 38 +HandstandWalking/v_HandstandWalking_g09_c03 37 +Knitting/v_Knitting_g08_c02 49 +PizzaTossing/v_PizzaTossing_g23_c01 57 +BoxingPunchingBag/v_BoxingPunchingBag_g14_c02 16 +PlayingPiano/v_PlayingPiano_g23_c02 63 +HulaHoop/v_HulaHoop_g15_c02 42 +PlayingFlute/v_PlayingFlute_g13_c01 61 +Surfing/v_Surfing_g17_c02 87 +Haircut/v_Haircut_g20_c05 33 +PullUps/v_PullUps_g11_c04 69 +Knitting/v_Knitting_g23_c03 49 +CricketShot/v_CricketShot_g12_c02 23 +Billiards/v_Billiards_g14_c06 11 +Swing/v_Swing_g19_c01 88 +JumpingJack/v_JumpingJack_g20_c03 46 +Diving/v_Diving_g20_c03 25 +IceDancing/v_IceDancing_g19_c03 43 +HighJump/v_HighJump_g13_c05 39 +PlayingFlute/v_PlayingFlute_g16_c04 61 +Biking/v_Biking_g17_c03 10 +TennisSwing/v_TennisSwing_g21_c06 91 +FrontCrawl/v_FrontCrawl_g24_c04 31 +TableTennisShot/v_TableTennisShot_g19_c06 89 +Haircut/v_Haircut_g17_c02 33 +BlowingCandles/v_BlowingCandles_g13_c02 13 +TableTennisShot/v_TableTennisShot_g19_c05 89 +Typing/v_Typing_g10_c03 94 +BlowingCandles/v_BlowingCandles_g25_c01 13 +HandstandWalking/v_HandstandWalking_g15_c04 37 +Kayaking/v_Kayaking_g20_c07 48 +CricketBowling/v_CricketBowling_g14_c02 22 +Bowling/v_Bowling_g16_c02 15 +PlayingTabla/v_PlayingTabla_g19_c04 65 +BoxingSpeedBag/v_BoxingSpeedBag_g12_c03 17 +PlayingFlute/v_PlayingFlute_g08_c02 61 +CliffDiving/v_CliffDiving_g22_c02 21 +SoccerJuggling/v_SoccerJuggling_g09_c07 83 +Rafting/v_Rafting_g08_c06 72 +RockClimbingIndoor/v_RockClimbingIndoor_g13_c04 73 +Kayaking/v_Kayaking_g17_c05 48 +PlayingCello/v_PlayingCello_g09_c06 58 +PlayingFlute/v_PlayingFlute_g09_c07 61 +MoppingFloor/v_MoppingFloor_g20_c02 54 +PushUps/v_PushUps_g20_c04 71 +HammerThrow/v_HammerThrow_g15_c03 35 +HorseRace/v_HorseRace_g20_c02 40 +WalkingWithDog/v_WalkingWithDog_g13_c01 97 +PlayingSitar/v_PlayingSitar_g12_c01 64 +BoxingPunchingBag/v_BoxingPunchingBag_g19_c05 16 +BlowDryHair/v_BlowDryHair_g24_c03 12 +BasketballDunk/v_BasketballDunk_g23_c02 8 
+CliffDiving/v_CliffDiving_g12_c06 21 +BlowingCandles/v_BlowingCandles_g10_c01 13 +PlayingViolin/v_PlayingViolin_g10_c02 66 +PushUps/v_PushUps_g25_c01 71 +Archery/v_Archery_g13_c07 2 +BaseballPitch/v_BaseballPitch_g24_c04 6 +Rowing/v_Rowing_g21_c01 75 +Skijet/v_Skijet_g12_c01 81 +PushUps/v_PushUps_g09_c01 71 +Nunchucks/v_Nunchucks_g17_c07 55 +SalsaSpin/v_SalsaSpin_g25_c01 76 +PlayingTabla/v_PlayingTabla_g24_c04 65 +PlayingCello/v_PlayingCello_g22_c02 58 +JavelinThrow/v_JavelinThrow_g22_c02 44 +BrushingTeeth/v_BrushingTeeth_g17_c05 19 +Basketball/v_Basketball_g23_c05 7 +Knitting/v_Knitting_g19_c02 49 +PlayingDaf/v_PlayingDaf_g14_c03 59 +Billiards/v_Billiards_g21_c04 11 +RockClimbingIndoor/v_RockClimbingIndoor_g08_c03 73 +Hammering/v_Hammering_g24_c06 34 +BenchPress/v_BenchPress_g12_c04 9 +PlayingGuitar/v_PlayingGuitar_g23_c02 62 +WritingOnBoard/v_WritingOnBoard_g23_c01 99 +PlayingGuitar/v_PlayingGuitar_g24_c04 62 +FloorGymnastics/v_FloorGymnastics_g16_c01 29 +HandstandWalking/v_HandstandWalking_g15_c03 37 +WalkingWithDog/v_WalkingWithDog_g19_c01 97 +TennisSwing/v_TennisSwing_g22_c01 91 +Typing/v_Typing_g16_c04 94 +PullUps/v_PullUps_g20_c03 69 +WallPushups/v_WallPushups_g22_c01 98 +Lunges/v_Lunges_g20_c03 51 +Surfing/v_Surfing_g18_c03 87 +BlowingCandles/v_BlowingCandles_g23_c02 13 +PlayingGuitar/v_PlayingGuitar_g17_c04 62 +PullUps/v_PullUps_g23_c04 69 +Haircut/v_Haircut_g24_c01 33 +Skijet/v_Skijet_g09_c01 81 +SoccerPenalty/v_SoccerPenalty_g20_c05 84 +Skiing/v_Skiing_g19_c04 80 +PlayingSitar/v_PlayingSitar_g19_c07 64 +Rafting/v_Rafting_g22_c03 72 +Biking/v_Biking_g13_c06 10 +PullUps/v_PullUps_g21_c03 69 +BlowDryHair/v_BlowDryHair_g08_c05 12 +Archery/v_Archery_g23_c06 2 +CleanAndJerk/v_CleanAndJerk_g09_c02 20 +HorseRiding/v_HorseRiding_g11_c05 41 +Billiards/v_Billiards_g10_c04 11 +WalkingWithDog/v_WalkingWithDog_g18_c04 97 +HammerThrow/v_HammerThrow_g13_c03 35 +HorseRiding/v_HorseRiding_g16_c06 41 +BoxingPunchingBag/v_BoxingPunchingBag_g22_c07 16 
+BandMarching/v_BandMarching_g25_c07 5 +Punch/v_Punch_g13_c03 70 +HorseRiding/v_HorseRiding_g14_c06 41 +Skijet/v_Skijet_g09_c04 81 +ApplyEyeMakeup/v_ApplyEyeMakeup_g14_c04 0 +MoppingFloor/v_MoppingFloor_g14_c04 54 +BalanceBeam/v_BalanceBeam_g24_c04 4 +Mixing/v_Mixing_g08_c01 53 +MilitaryParade/v_MilitaryParade_g10_c02 52 +SalsaSpin/v_SalsaSpin_g20_c04 76 +Haircut/v_Haircut_g21_c02 33 +JumpingJack/v_JumpingJack_g14_c03 46 +HighJump/v_HighJump_g11_c07 39 +HeadMassage/v_HeadMassage_g11_c04 38 +Diving/v_Diving_g11_c01 25 +BlowDryHair/v_BlowDryHair_g11_c03 12 +WallPushups/v_WallPushups_g13_c02 98 +SkateBoarding/v_SkateBoarding_g13_c04 79 +UnevenBars/v_UnevenBars_g08_c03 95 +Hammering/v_Hammering_g13_c06 34 +ShavingBeard/v_ShavingBeard_g19_c02 77 +BandMarching/v_BandMarching_g11_c05 5 +BoxingPunchingBag/v_BoxingPunchingBag_g25_c07 16 +SoccerJuggling/v_SoccerJuggling_g25_c01 83 +Biking/v_Biking_g08_c01 10 +PlayingDhol/v_PlayingDhol_g20_c02 60 +HorseRace/v_HorseRace_g14_c01 40 +HorseRace/v_HorseRace_g12_c03 40 +HulaHoop/v_HulaHoop_g19_c01 42 +FloorGymnastics/v_FloorGymnastics_g18_c04 29 +Swing/v_Swing_g22_c04 88 +Basketball/v_Basketball_g23_c02 7 +Basketball/v_Basketball_g25_c07 7 +PlayingDaf/v_PlayingDaf_g18_c04 59 +Rowing/v_Rowing_g22_c01 75 +CricketBowling/v_CricketBowling_g25_c03 22 +BoxingSpeedBag/v_BoxingSpeedBag_g15_c07 17 +HammerThrow/v_HammerThrow_g21_c03 35 +FloorGymnastics/v_FloorGymnastics_g15_c05 29 +YoYo/v_YoYo_g12_c01 100 +Diving/v_Diving_g16_c04 25 +MilitaryParade/v_MilitaryParade_g17_c01 52 +TableTennisShot/v_TableTennisShot_g13_c04 89 +BoxingPunchingBag/v_BoxingPunchingBag_g12_c04 16 +BlowDryHair/v_BlowDryHair_g12_c06 12 +BandMarching/v_BandMarching_g23_c02 5 +HighJump/v_HighJump_g11_c03 39 +FrontCrawl/v_FrontCrawl_g14_c05 31 +HorseRace/v_HorseRace_g20_c05 40 +Bowling/v_Bowling_g19_c04 15 +ParallelBars/v_ParallelBars_g10_c02 56 +SkateBoarding/v_SkateBoarding_g15_c03 79 +Kayaking/v_Kayaking_g21_c02 48 +TableTennisShot/v_TableTennisShot_g16_c02 89 
+PizzaTossing/v_PizzaTossing_g14_c04 57 +FrisbeeCatch/v_FrisbeeCatch_g13_c01 30 +Fencing/v_Fencing_g17_c04 27 +WritingOnBoard/v_WritingOnBoard_g22_c03 99 +BlowDryHair/v_BlowDryHair_g21_c06 12 +Archery/v_Archery_g08_c04 2 +Rafting/v_Rafting_g15_c01 72 +HandstandPushups/v_HandStandPushups_g16_c06 36 +HorseRiding/v_HorseRiding_g25_c07 41 +Knitting/v_Knitting_g09_c02 49 +PlayingCello/v_PlayingCello_g24_c01 58 +PlayingPiano/v_PlayingPiano_g15_c01 63 +SoccerJuggling/v_SoccerJuggling_g13_c02 83 +WritingOnBoard/v_WritingOnBoard_g23_c06 99 +StillRings/v_StillRings_g21_c01 85 +PushUps/v_PushUps_g22_c01 71 +PullUps/v_PullUps_g16_c03 69 +Biking/v_Biking_g09_c03 10 +Rowing/v_Rowing_g24_c05 75 +Punch/v_Punch_g16_c05 70 +PlayingDhol/v_PlayingDhol_g10_c06 60 +PlayingViolin/v_PlayingViolin_g08_c04 66 +PlayingGuitar/v_PlayingGuitar_g25_c03 62 +HorseRace/v_HorseRace_g12_c01 40 +HorseRiding/v_HorseRiding_g21_c02 41 +StillRings/v_StillRings_g20_c01 85 +Knitting/v_Knitting_g21_c03 49 +PlayingDhol/v_PlayingDhol_g22_c04 60 +BlowingCandles/v_BlowingCandles_g12_c05 13 +Haircut/v_Haircut_g19_c02 33 +Hammering/v_Hammering_g10_c05 34 +BenchPress/v_BenchPress_g16_c02 9 +GolfSwing/v_GolfSwing_g09_c01 32 +JavelinThrow/v_JavelinThrow_g12_c03 44 +WritingOnBoard/v_WritingOnBoard_g09_c06 99 +Rafting/v_Rafting_g11_c02 72 +PlayingViolin/v_PlayingViolin_g23_c04 66 +JavelinThrow/v_JavelinThrow_g18_c02 44 +SumoWrestling/v_SumoWrestling_g18_c04 86 +Basketball/v_Basketball_g11_c01 7 +Skiing/v_Skiing_g11_c01 80 +WallPushups/v_WallPushups_g19_c04 98 +CricketBowling/v_CricketBowling_g25_c07 22 +PlayingSitar/v_PlayingSitar_g25_c03 64 +PlayingTabla/v_PlayingTabla_g18_c05 65 +WritingOnBoard/v_WritingOnBoard_g09_c03 99 +BalanceBeam/v_BalanceBeam_g22_c01 4 +JumpRope/v_JumpRope_g22_c05 47 +Fencing/v_Fencing_g21_c04 27 +PlayingFlute/v_PlayingFlute_g24_c04 61 +CliffDiving/v_CliffDiving_g14_c01 21 +VolleyballSpiking/v_VolleyballSpiking_g25_c04 96 +CricketBowling/v_CricketBowling_g16_c05 22 
+BenchPress/v_BenchPress_g14_c02 9 +Surfing/v_Surfing_g25_c02 87 +JumpingJack/v_JumpingJack_g25_c02 46 +HorseRiding/v_HorseRiding_g13_c02 41 +BoxingPunchingBag/v_BoxingPunchingBag_g25_c06 16 +PlayingDaf/v_PlayingDaf_g14_c07 59 +PullUps/v_PullUps_g22_c02 69 +JumpRope/v_JumpRope_g15_c04 47 +Kayaking/v_Kayaking_g18_c03 48 +WallPushups/v_WallPushups_g16_c02 98 +Drumming/v_Drumming_g21_c06 26 +ApplyEyeMakeup/v_ApplyEyeMakeup_g18_c04 0 +CleanAndJerk/v_CleanAndJerk_g12_c04 20 +CricketShot/v_CricketShot_g17_c05 23 +Billiards/v_Billiards_g16_c01 11 +SumoWrestling/v_SumoWrestling_g18_c01 86 +HighJump/v_HighJump_g12_c02 39 +PullUps/v_PullUps_g18_c03 69 +PlayingSitar/v_PlayingSitar_g16_c03 64 +Archery/v_Archery_g15_c04 2 +Hammering/v_Hammering_g15_c07 34 +FrontCrawl/v_FrontCrawl_g12_c03 31 +PlayingGuitar/v_PlayingGuitar_g19_c07 62 +PizzaTossing/v_PizzaTossing_g25_c04 57 +PlayingDaf/v_PlayingDaf_g14_c05 59 +GolfSwing/v_GolfSwing_g09_c04 32 +ShavingBeard/v_ShavingBeard_g25_c06 77 +SalsaSpin/v_SalsaSpin_g20_c02 76 +PlayingViolin/v_PlayingViolin_g10_c03 66 +MoppingFloor/v_MoppingFloor_g08_c01 54 +Archery/v_Archery_g20_c04 2 +WallPushups/v_WallPushups_g13_c03 98 +BoxingSpeedBag/v_BoxingSpeedBag_g09_c05 17 +Punch/v_Punch_g22_c01 70 +PlayingGuitar/v_PlayingGuitar_g17_c02 62 +Archery/v_Archery_g20_c01 2 +UnevenBars/v_UnevenBars_g10_c03 95 +Drumming/v_Drumming_g12_c05 26 +RopeClimbing/v_RopeClimbing_g20_c01 74 +WallPushups/v_WallPushups_g25_c05 98 +BlowingCandles/v_BlowingCandles_g19_c04 13 +Surfing/v_Surfing_g10_c01 87 +FrisbeeCatch/v_FrisbeeCatch_g12_c05 30 +Rafting/v_Rafting_g25_c04 72 +BaseballPitch/v_BaseballPitch_g22_c02 6 +Bowling/v_Bowling_g09_c07 15 +Shotput/v_Shotput_g08_c02 78 +CricketBowling/v_CricketBowling_g16_c02 22 +HighJump/v_HighJump_g08_c01 39 +Fencing/v_Fencing_g10_c01 27 +PlayingViolin/v_PlayingViolin_g25_c01 66 +Knitting/v_Knitting_g14_c02 49 +PushUps/v_PushUps_g14_c04 71 +LongJump/v_LongJump_g13_c06 50 +HorseRiding/v_HorseRiding_g12_c04 41 
+BrushingTeeth/v_BrushingTeeth_g24_c07 19 +HulaHoop/v_HulaHoop_g24_c02 42 +BenchPress/v_BenchPress_g25_c02 9 +Bowling/v_Bowling_g25_c04 15 +JumpRope/v_JumpRope_g20_c06 47 +JugglingBalls/v_JugglingBalls_g23_c01 45 +Punch/v_Punch_g20_c04 70 +Basketball/v_Basketball_g23_c01 7 +SoccerPenalty/v_SoccerPenalty_g16_c01 84 +BodyWeightSquats/v_BodyWeightSquats_g08_c03 14 +HighJump/v_HighJump_g15_c04 39 +ThrowDiscus/v_ThrowDiscus_g12_c02 92 +Archery/v_Archery_g20_c06 2 +PlayingTabla/v_PlayingTabla_g21_c03 65 +PlayingTabla/v_PlayingTabla_g15_c02 65 +ApplyEyeMakeup/v_ApplyEyeMakeup_g22_c05 0 +Basketball/v_Basketball_g12_c03 7 +WallPushups/v_WallPushups_g10_c05 98 +BodyWeightSquats/v_BodyWeightSquats_g25_c01 14 +BenchPress/v_BenchPress_g25_c05 9 +PlayingViolin/v_PlayingViolin_g21_c01 66 +RockClimbingIndoor/v_RockClimbingIndoor_g21_c02 73 +BlowingCandles/v_BlowingCandles_g10_c02 13 +Skijet/v_Skijet_g15_c02 81 +Rowing/v_Rowing_g16_c01 75 +BoxingSpeedBag/v_BoxingSpeedBag_g15_c03 17 +GolfSwing/v_GolfSwing_g08_c03 32 +BlowDryHair/v_BlowDryHair_g22_c04 12 +PlayingDhol/v_PlayingDhol_g11_c03 60 +BabyCrawling/v_BabyCrawling_g11_c03 3 +IceDancing/v_IceDancing_g08_c01 43 +BaseballPitch/v_BaseballPitch_g17_c02 6 +Basketball/v_Basketball_g13_c01 7 +RopeClimbing/v_RopeClimbing_g20_c03 74 +BoxingSpeedBag/v_BoxingSpeedBag_g13_c07 17 +WallPushups/v_WallPushups_g22_c03 98 +Swing/v_Swing_g12_c01 88 +MoppingFloor/v_MoppingFloor_g08_c03 54 +HighJump/v_HighJump_g18_c03 39 +WalkingWithDog/v_WalkingWithDog_g18_c02 97 +Lunges/v_Lunges_g12_c01 51 +VolleyballSpiking/v_VolleyballSpiking_g08_c05 96 +PushUps/v_PushUps_g19_c01 71 +PlayingSitar/v_PlayingSitar_g14_c01 64 +MoppingFloor/v_MoppingFloor_g17_c03 54 +BreastStroke/v_BreastStroke_g25_c02 18 +RopeClimbing/v_RopeClimbing_g15_c04 74 +PommelHorse/v_PommelHorse_g19_c03 68 +ApplyEyeMakeup/v_ApplyEyeMakeup_g21_c03 0 +Hammering/v_Hammering_g08_c04 34 +RopeClimbing/v_RopeClimbing_g11_c02 74 +MilitaryParade/v_MilitaryParade_g14_c02 52 
+StillRings/v_StillRings_g15_c04 85 +ThrowDiscus/v_ThrowDiscus_g13_c04 92 +FrisbeeCatch/v_FrisbeeCatch_g18_c03 30 +WallPushups/v_WallPushups_g17_c07 98 +FieldHockeyPenalty/v_FieldHockeyPenalty_g22_c01 28 +HeadMassage/v_HeadMassage_g22_c05 38 +PlayingDhol/v_PlayingDhol_g13_c01 60 +Punch/v_Punch_g24_c04 70 +WalkingWithDog/v_WalkingWithDog_g22_c04 97 +PlayingSitar/v_PlayingSitar_g17_c04 64 +HulaHoop/v_HulaHoop_g14_c01 42 +CliffDiving/v_CliffDiving_g20_c05 21 +Surfing/v_Surfing_g20_c01 87 +Shotput/v_Shotput_g16_c06 78 +Shotput/v_Shotput_g23_c06 78 +PoleVault/v_PoleVault_g16_c03 67 +MilitaryParade/v_MilitaryParade_g10_c05 52 +CricketShot/v_CricketShot_g22_c03 23 +RockClimbingIndoor/v_RockClimbingIndoor_g14_c02 73 +PlayingTabla/v_PlayingTabla_g20_c04 65 +CliffDiving/v_CliffDiving_g15_c04 21 +PlayingTabla/v_PlayingTabla_g12_c05 65 +PizzaTossing/v_PizzaTossing_g11_c03 57 +JavelinThrow/v_JavelinThrow_g11_c01 44 +Hammering/v_Hammering_g18_c01 34 +TennisSwing/v_TennisSwing_g17_c03 91 +RockClimbingIndoor/v_RockClimbingIndoor_g23_c03 73 +BenchPress/v_BenchPress_g11_c03 9 +HulaHoop/v_HulaHoop_g09_c04 42 +BenchPress/v_BenchPress_g08_c03 9 +Typing/v_Typing_g18_c03 94 +PlayingDhol/v_PlayingDhol_g18_c05 60 +PlayingFlute/v_PlayingFlute_g21_c02 61 +Hammering/v_Hammering_g23_c05 34 +Nunchucks/v_Nunchucks_g25_c03 55 +Surfing/v_Surfing_g24_c01 87 +WritingOnBoard/v_WritingOnBoard_g25_c03 99 +Drumming/v_Drumming_g24_c01 26 +Swing/v_Swing_g11_c01 88 +MoppingFloor/v_MoppingFloor_g23_c04 54 +TrampolineJumping/v_TrampolineJumping_g15_c05 93 +LongJump/v_LongJump_g15_c02 50 +BlowDryHair/v_BlowDryHair_g20_c05 12 +PlayingCello/v_PlayingCello_g17_c01 58 +MilitaryParade/v_MilitaryParade_g08_c04 52 +Kayaking/v_Kayaking_g11_c03 48 +FrisbeeCatch/v_FrisbeeCatch_g24_c02 30 +PizzaTossing/v_PizzaTossing_g15_c02 57 +PlayingDhol/v_PlayingDhol_g14_c03 60 +BaseballPitch/v_BaseballPitch_g08_c01 6 +BrushingTeeth/v_BrushingTeeth_g24_c03 19 +PlayingPiano/v_PlayingPiano_g13_c02 63 
+CricketBowling/v_CricketBowling_g16_c03 22 +BalanceBeam/v_BalanceBeam_g18_c01 4 +MilitaryParade/v_MilitaryParade_g24_c03 52 +SalsaSpin/v_SalsaSpin_g19_c03 76 +Drumming/v_Drumming_g14_c01 26 +BaseballPitch/v_BaseballPitch_g16_c01 6 +WallPushups/v_WallPushups_g08_c04 98 +ThrowDiscus/v_ThrowDiscus_g25_c04 92 +HorseRiding/v_HorseRiding_g21_c04 41 +Basketball/v_Basketball_g13_c04 7 +Surfing/v_Surfing_g18_c01 87 +GolfSwing/v_GolfSwing_g19_c01 32 +PlayingSitar/v_PlayingSitar_g10_c03 64 +ShavingBeard/v_ShavingBeard_g19_c07 77 +SoccerJuggling/v_SoccerJuggling_g14_c02 83 +CliffDiving/v_CliffDiving_g20_c03 21 +PlayingDhol/v_PlayingDhol_g12_c01 60 +Rowing/v_Rowing_g11_c01 75 +BoxingPunchingBag/v_BoxingPunchingBag_g16_c03 16 +CricketShot/v_CricketShot_g18_c01 23 +BlowDryHair/v_BlowDryHair_g10_c07 12 +LongJump/v_LongJump_g11_c01 50 +Rowing/v_Rowing_g25_c02 75 +WallPushups/v_WallPushups_g23_c04 98 +Knitting/v_Knitting_g11_c06 49 +TaiChi/v_TaiChi_g21_c02 90 +MilitaryParade/v_MilitaryParade_g10_c03 52 +Fencing/v_Fencing_g13_c01 27 +GolfSwing/v_GolfSwing_g25_c04 32 +StillRings/v_StillRings_g12_c03 85 +Punch/v_Punch_g22_c04 70 +SalsaSpin/v_SalsaSpin_g16_c04 76 +SkateBoarding/v_SkateBoarding_g15_c01 79 +ThrowDiscus/v_ThrowDiscus_g21_c02 92 +BlowDryHair/v_BlowDryHair_g21_c02 12 +HeadMassage/v_HeadMassage_g25_c01 38 +CricketShot/v_CricketShot_g18_c05 23 +Knitting/v_Knitting_g15_c06 49 +FrisbeeCatch/v_FrisbeeCatch_g17_c01 30 +BlowDryHair/v_BlowDryHair_g10_c06 12 +CricketBowling/v_CricketBowling_g09_c02 22 +WritingOnBoard/v_WritingOnBoard_g23_c07 99 +BabyCrawling/v_BabyCrawling_g13_c03 3 +BoxingSpeedBag/v_BoxingSpeedBag_g09_c01 17 +FrontCrawl/v_FrontCrawl_g24_c01 31 +SoccerPenalty/v_SoccerPenalty_g08_c03 84 +BasketballDunk/v_BasketballDunk_g15_c03 8 +ApplyEyeMakeup/v_ApplyEyeMakeup_g16_c02 0 +VolleyballSpiking/v_VolleyballSpiking_g17_c03 96 +BodyWeightSquats/v_BodyWeightSquats_g24_c02 14 +Knitting/v_Knitting_g24_c01 49 +BoxingSpeedBag/v_BoxingSpeedBag_g14_c05 17 
+PlayingTabla/v_PlayingTabla_g19_c01 65 +Skijet/v_Skijet_g13_c03 81 +Rafting/v_Rafting_g12_c02 72 +CuttingInKitchen/v_CuttingInKitchen_g12_c04 24 +Archery/v_Archery_g10_c03 2 +Basketball/v_Basketball_g08_c04 7 +Biking/v_Biking_g08_c06 10 +PoleVault/v_PoleVault_g16_c04 67 +SoccerPenalty/v_SoccerPenalty_g23_c03 84 +FieldHockeyPenalty/v_FieldHockeyPenalty_g20_c04 28 +Skiing/v_Skiing_g18_c03 80 +RockClimbingIndoor/v_RockClimbingIndoor_g20_c03 73 +PlayingDaf/v_PlayingDaf_g15_c03 59 +CliffDiving/v_CliffDiving_g21_c03 21 +HammerThrow/v_HammerThrow_g13_c06 35 +Skiing/v_Skiing_g23_c01 80 +Bowling/v_Bowling_g22_c04 15 +WritingOnBoard/v_WritingOnBoard_g19_c01 99 +MilitaryParade/v_MilitaryParade_g22_c04 52 +BlowDryHair/v_BlowDryHair_g15_c03 12 +Bowling/v_Bowling_g18_c02 15 +Surfing/v_Surfing_g08_c07 87 +HorseRiding/v_HorseRiding_g10_c05 41 +FrontCrawl/v_FrontCrawl_g16_c03 31 +SumoWrestling/v_SumoWrestling_g14_c02 86 +SalsaSpin/v_SalsaSpin_g11_c02 76 +PlayingPiano/v_PlayingPiano_g16_c03 63 +Archery/v_Archery_g20_c05 2 +TableTennisShot/v_TableTennisShot_g15_c01 89 +FloorGymnastics/v_FloorGymnastics_g13_c03 29 +PlayingGuitar/v_PlayingGuitar_g13_c04 62 +ParallelBars/v_ParallelBars_g13_c02 56 +HighJump/v_HighJump_g19_c04 39 +SoccerJuggling/v_SoccerJuggling_g16_c06 83 +HighJump/v_HighJump_g21_c04 39 +IceDancing/v_IceDancing_g18_c04 43 +BoxingSpeedBag/v_BoxingSpeedBag_g21_c04 17 +RockClimbingIndoor/v_RockClimbingIndoor_g22_c07 73 +Diving/v_Diving_g20_c06 25 +CricketShot/v_CricketShot_g10_c02 23 +ParallelBars/v_ParallelBars_g21_c04 56 +HighJump/v_HighJump_g21_c02 39 +PlayingDaf/v_PlayingDaf_g25_c02 59 +ParallelBars/v_ParallelBars_g15_c02 56 +PlayingSitar/v_PlayingSitar_g11_c05 64 +PushUps/v_PushUps_g25_c03 71 +Billiards/v_Billiards_g10_c02 11 +BasketballDunk/v_BasketballDunk_g19_c02 8 +BlowingCandles/v_BlowingCandles_g24_c04 13 +WritingOnBoard/v_WritingOnBoard_g13_c05 99 +Knitting/v_Knitting_g08_c04 49 +Rafting/v_Rafting_g25_c02 72 +SoccerJuggling/v_SoccerJuggling_g18_c01 83 
+BrushingTeeth/v_BrushingTeeth_g16_c03 19 +CricketBowling/v_CricketBowling_g12_c03 22 +Surfing/v_Surfing_g14_c03 87 +IceDancing/v_IceDancing_g19_c05 43 +UnevenBars/v_UnevenBars_g20_c02 95 +YoYo/v_YoYo_g16_c06 100 +BodyWeightSquats/v_BodyWeightSquats_g25_c03 14 +IceDancing/v_IceDancing_g17_c04 43 +Shotput/v_Shotput_g24_c03 78 +BlowDryHair/v_BlowDryHair_g08_c04 12 +TableTennisShot/v_TableTennisShot_g10_c04 89 +Typing/v_Typing_g17_c02 94 +SkateBoarding/v_SkateBoarding_g08_c05 79 +HighJump/v_HighJump_g17_c03 39 +PushUps/v_PushUps_g19_c04 71 +PizzaTossing/v_PizzaTossing_g14_c02 57 +JugglingBalls/v_JugglingBalls_g17_c03 45 +SumoWrestling/v_SumoWrestling_g19_c04 86 +PlayingSitar/v_PlayingSitar_g17_c05 64 +PlayingDaf/v_PlayingDaf_g19_c01 59 +Kayaking/v_Kayaking_g10_c02 48 +BalanceBeam/v_BalanceBeam_g11_c01 4 +HammerThrow/v_HammerThrow_g12_c01 35 +CricketShot/v_CricketShot_g16_c02 23 +HeadMassage/v_HeadMassage_g15_c07 38 +PlayingPiano/v_PlayingPiano_g20_c02 63 +MilitaryParade/v_MilitaryParade_g23_c01 52 +WalkingWithDog/v_WalkingWithDog_g15_c01 97 +FieldHockeyPenalty/v_FieldHockeyPenalty_g08_c04 28 +VolleyballSpiking/v_VolleyballSpiking_g08_c04 96 +CliffDiving/v_CliffDiving_g25_c02 21 +PlayingGuitar/v_PlayingGuitar_g25_c04 62 +BasketballDunk/v_BasketballDunk_g15_c07 8 +Bowling/v_Bowling_g25_c02 15 +WritingOnBoard/v_WritingOnBoard_g18_c02 99 +CliffDiving/v_CliffDiving_g11_c06 21 +StillRings/v_StillRings_g23_c01 85 +CleanAndJerk/v_CleanAndJerk_g18_c01 20 +BlowingCandles/v_BlowingCandles_g11_c03 13 +Nunchucks/v_Nunchucks_g25_c04 55 +BoxingPunchingBag/v_BoxingPunchingBag_g20_c06 16 +Shotput/v_Shotput_g13_c04 78 +Rafting/v_Rafting_g20_c01 72 +IceDancing/v_IceDancing_g14_c04 43 +CricketShot/v_CricketShot_g22_c06 23 +Knitting/v_Knitting_g16_c03 49 +HandstandWalking/v_HandstandWalking_g18_c04 37 +SoccerJuggling/v_SoccerJuggling_g24_c05 83 +Skiing/v_Skiing_g08_c02 80 +FrisbeeCatch/v_FrisbeeCatch_g18_c02 30 +Hammering/v_Hammering_g21_c03 34 
+BoxingPunchingBag/v_BoxingPunchingBag_g20_c05 16 +HammerThrow/v_HammerThrow_g08_c01 35 +MilitaryParade/v_MilitaryParade_g18_c02 52 +CleanAndJerk/v_CleanAndJerk_g10_c01 20 +PlayingPiano/v_PlayingPiano_g10_c03 63 +CliffDiving/v_CliffDiving_g13_c02 21 +Nunchucks/v_Nunchucks_g14_c01 55 +BandMarching/v_BandMarching_g10_c01 5 +PushUps/v_PushUps_g23_c02 71 +ShavingBeard/v_ShavingBeard_g17_c07 77 +HandstandWalking/v_HandstandWalking_g22_c03 37 +WritingOnBoard/v_WritingOnBoard_g21_c02 99 +PlayingSitar/v_PlayingSitar_g11_c02 64 +TaiChi/v_TaiChi_g11_c02 90 +StillRings/v_StillRings_g11_c04 85 +Hammering/v_Hammering_g22_c06 34 +UnevenBars/v_UnevenBars_g15_c01 95 +Nunchucks/v_Nunchucks_g16_c03 55 +GolfSwing/v_GolfSwing_g23_c06 32 +Archery/v_Archery_g12_c04 2 +BodyWeightSquats/v_BodyWeightSquats_g22_c03 14 +Bowling/v_Bowling_g24_c02 15 +CliffDiving/v_CliffDiving_g17_c01 21 +Billiards/v_Billiards_g17_c01 11 +Punch/v_Punch_g11_c02 70 +BandMarching/v_BandMarching_g24_c02 5 +Rowing/v_Rowing_g23_c03 75 +SalsaSpin/v_SalsaSpin_g09_c02 76 +PlayingSitar/v_PlayingSitar_g21_c01 64 +BandMarching/v_BandMarching_g23_c04 5 +RockClimbingIndoor/v_RockClimbingIndoor_g25_c05 73 +Diving/v_Diving_g24_c05 25 +CuttingInKitchen/v_CuttingInKitchen_g08_c03 24 +Surfing/v_Surfing_g16_c07 87 +FieldHockeyPenalty/v_FieldHockeyPenalty_g25_c04 28 +PullUps/v_PullUps_g13_c02 69 +JumpRope/v_JumpRope_g12_c04 47 +PlayingTabla/v_PlayingTabla_g09_c02 65 +HorseRiding/v_HorseRiding_g21_c03 41 +CleanAndJerk/v_CleanAndJerk_g17_c02 20 +BenchPress/v_BenchPress_g11_c02 9 +FieldHockeyPenalty/v_FieldHockeyPenalty_g25_c05 28 +WallPushups/v_WallPushups_g16_c01 98 +TennisSwing/v_TennisSwing_g24_c03 91 +CuttingInKitchen/v_CuttingInKitchen_g16_c01 24 +BabyCrawling/v_BabyCrawling_g22_c03 3 +MilitaryParade/v_MilitaryParade_g21_c04 52 +JugglingBalls/v_JugglingBalls_g22_c02 45 +SkateBoarding/v_SkateBoarding_g09_c02 79 +SkyDiving/v_SkyDiving_g19_c02 82 +Nunchucks/v_Nunchucks_g18_c01 55 +Fencing/v_Fencing_g11_c01 27 
+FloorGymnastics/v_FloorGymnastics_g21_c03 29 +LongJump/v_LongJump_g24_c01 50 +Billiards/v_Billiards_g19_c02 11 +Shotput/v_Shotput_g12_c04 78 +YoYo/v_YoYo_g22_c03 100 +PlayingDaf/v_PlayingDaf_g15_c01 59 +FieldHockeyPenalty/v_FieldHockeyPenalty_g21_c04 28 +BalanceBeam/v_BalanceBeam_g17_c03 4 +FieldHockeyPenalty/v_FieldHockeyPenalty_g12_c02 28 +BrushingTeeth/v_BrushingTeeth_g25_c01 19 +HorseRace/v_HorseRace_g17_c01 40 +LongJump/v_LongJump_g13_c02 50 +Typing/v_Typing_g14_c06 94 +SkyDiving/v_SkyDiving_g09_c04 82 +TableTennisShot/v_TableTennisShot_g23_c06 89 +PoleVault/v_PoleVault_g10_c06 67 +PlayingSitar/v_PlayingSitar_g09_c05 64 +CliffDiving/v_CliffDiving_g23_c04 21 +SumoWrestling/v_SumoWrestling_g17_c04 86 +CleanAndJerk/v_CleanAndJerk_g18_c04 20 +WritingOnBoard/v_WritingOnBoard_g09_c05 99 +Typing/v_Typing_g09_c01 94 +ShavingBeard/v_ShavingBeard_g19_c01 77 +Punch/v_Punch_g23_c01 70 +HeadMassage/v_HeadMassage_g08_c03 38 +GolfSwing/v_GolfSwing_g17_c07 32 +CuttingInKitchen/v_CuttingInKitchen_g10_c03 24 +BoxingSpeedBag/v_BoxingSpeedBag_g24_c04 17 +Archery/v_Archery_g10_c04 2 +Rafting/v_Rafting_g11_c04 72 +Skijet/v_Skijet_g21_c01 81 +CricketBowling/v_CricketBowling_g08_c04 22 +ApplyLipstick/v_ApplyLipstick_g25_c01 1 +PlayingDhol/v_PlayingDhol_g12_c02 60 +PlayingDaf/v_PlayingDaf_g14_c04 59 +FloorGymnastics/v_FloorGymnastics_g18_c03 29 +WalkingWithDog/v_WalkingWithDog_g18_c01 97 +Archery/v_Archery_g09_c02 2 +BasketballDunk/v_BasketballDunk_g09_c02 8 +Biking/v_Biking_g20_c04 10 +CricketShot/v_CricketShot_g21_c01 23 +PushUps/v_PushUps_g11_c03 71 +SkyDiving/v_SkyDiving_g09_c02 82 +BenchPress/v_BenchPress_g21_c03 9 +UnevenBars/v_UnevenBars_g24_c03 95 +Hammering/v_Hammering_g20_c02 34 +Surfing/v_Surfing_g21_c04 87 +PoleVault/v_PoleVault_g18_c06 67 +TennisSwing/v_TennisSwing_g10_c01 91 +Knitting/v_Knitting_g13_c03 49 +BoxingSpeedBag/v_BoxingSpeedBag_g19_c05 17 +Shotput/v_Shotput_g25_c04 78 +CliffDiving/v_CliffDiving_g10_c02 21 +MilitaryParade/v_MilitaryParade_g21_c03 52 
+BlowingCandles/v_BlowingCandles_g12_c06 13 +FieldHockeyPenalty/v_FieldHockeyPenalty_g20_c01 28 +TrampolineJumping/v_TrampolineJumping_g24_c03 93 +PlayingDhol/v_PlayingDhol_g13_c03 60 +VolleyballSpiking/v_VolleyballSpiking_g09_c03 96 +FrisbeeCatch/v_FrisbeeCatch_g25_c02 30 +PlayingDhol/v_PlayingDhol_g13_c07 60 +PlayingFlute/v_PlayingFlute_g08_c01 61 +JumpRope/v_JumpRope_g08_c02 47 +SumoWrestling/v_SumoWrestling_g15_c02 86 +HorseRace/v_HorseRace_g25_c03 40 +Fencing/v_Fencing_g17_c02 27 +BlowingCandles/v_BlowingCandles_g09_c02 13 +FrisbeeCatch/v_FrisbeeCatch_g24_c04 30 +ParallelBars/v_ParallelBars_g10_c01 56 +UnevenBars/v_UnevenBars_g24_c02 95 +Fencing/v_Fencing_g09_c06 27 +BlowingCandles/v_BlowingCandles_g23_c03 13 +TrampolineJumping/v_TrampolineJumping_g20_c02 93 +HorseRiding/v_HorseRiding_g23_c03 41 +ParallelBars/v_ParallelBars_g15_c03 56 +BoxingSpeedBag/v_BoxingSpeedBag_g08_c05 17 +SumoWrestling/v_SumoWrestling_g08_c05 86 +WalkingWithDog/v_WalkingWithDog_g17_c04 97 +BandMarching/v_BandMarching_g23_c01 5 +WallPushups/v_WallPushups_g17_c03 98 +RopeClimbing/v_RopeClimbing_g23_c04 74 +Archery/v_Archery_g18_c02 2 +PlayingViolin/v_PlayingViolin_g21_c02 66 +PlayingCello/v_PlayingCello_g15_c04 58 +SoccerPenalty/v_SoccerPenalty_g08_c04 84 +YoYo/v_YoYo_g09_c02 100 +HulaHoop/v_HulaHoop_g11_c01 42 +HandstandWalking/v_HandstandWalking_g15_c02 37 +SoccerJuggling/v_SoccerJuggling_g25_c05 83 +Mixing/v_Mixing_g23_c01 53 +ThrowDiscus/v_ThrowDiscus_g24_c03 92 +PlayingDhol/v_PlayingDhol_g15_c01 60 +RockClimbingIndoor/v_RockClimbingIndoor_g16_c03 73 +StillRings/v_StillRings_g23_c03 85 +Hammering/v_Hammering_g11_c04 34 +PlayingGuitar/v_PlayingGuitar_g21_c06 62 +WritingOnBoard/v_WritingOnBoard_g25_c01 99 +JumpingJack/v_JumpingJack_g13_c05 46 +MilitaryParade/v_MilitaryParade_g20_c01 52 +CricketShot/v_CricketShot_g24_c01 23 +LongJump/v_LongJump_g11_c04 50 +Mixing/v_Mixing_g11_c04 53 +PlayingDhol/v_PlayingDhol_g19_c02 60 +MoppingFloor/v_MoppingFloor_g15_c02 54 
+Hammering/v_Hammering_g21_c01 34 +YoYo/v_YoYo_g24_c02 100 +Archery/v_Archery_g08_c03 2 +Hammering/v_Hammering_g10_c03 34 +UnevenBars/v_UnevenBars_g25_c04 95 +VolleyballSpiking/v_VolleyballSpiking_g08_c06 96 +SkyDiving/v_SkyDiving_g15_c02 82 +PlayingDhol/v_PlayingDhol_g08_c05 60 +PlayingCello/v_PlayingCello_g17_c04 58 +HandstandWalking/v_HandstandWalking_g12_c02 37 +HorseRace/v_HorseRace_g11_c07 40 +HeadMassage/v_HeadMassage_g15_c03 38 +HorseRiding/v_HorseRiding_g25_c03 41 +TennisSwing/v_TennisSwing_g11_c02 91 +StillRings/v_StillRings_g18_c04 85 +IceDancing/v_IceDancing_g09_c01 43 +RopeClimbing/v_RopeClimbing_g19_c04 74 +TennisSwing/v_TennisSwing_g13_c03 91 +PlayingViolin/v_PlayingViolin_g09_c04 66 +WalkingWithDog/v_WalkingWithDog_g12_c05 97 +CricketShot/v_CricketShot_g08_c01 23 +SalsaSpin/v_SalsaSpin_g10_c03 76 +WallPushups/v_WallPushups_g18_c01 98 +BenchPress/v_BenchPress_g23_c04 9 +Biking/v_Biking_g21_c03 10 +BodyWeightSquats/v_BodyWeightSquats_g09_c06 14 +Punch/v_Punch_g19_c05 70 +ShavingBeard/v_ShavingBeard_g24_c03 77 +ShavingBeard/v_ShavingBeard_g16_c01 77 +PlayingDaf/v_PlayingDaf_g17_c02 59 +ParallelBars/v_ParallelBars_g09_c05 56 +ShavingBeard/v_ShavingBeard_g16_c02 77 +StillRings/v_StillRings_g17_c02 85 +ApplyLipstick/v_ApplyLipstick_g09_c02 1 +HammerThrow/v_HammerThrow_g20_c02 35 +HammerThrow/v_HammerThrow_g11_c04 35 +YoYo/v_YoYo_g18_c02 100 +JumpingJack/v_JumpingJack_g18_c03 46 +Diving/v_Diving_g12_c05 25 +Lunges/v_Lunges_g16_c01 51 +StillRings/v_StillRings_g19_c01 85 +Knitting/v_Knitting_g21_c01 49 +BrushingTeeth/v_BrushingTeeth_g25_c06 19 +HulaHoop/v_HulaHoop_g11_c04 42 +FloorGymnastics/v_FloorGymnastics_g17_c03 29 +HandstandPushups/v_HandStandPushups_g16_c05 36 +LongJump/v_LongJump_g25_c03 50 +Surfing/v_Surfing_g20_c06 87 +TennisSwing/v_TennisSwing_g25_c06 91 +Surfing/v_Surfing_g14_c02 87 +Punch/v_Punch_g17_c02 70 +Drumming/v_Drumming_g11_c03 26 +TableTennisShot/v_TableTennisShot_g08_c03 89 +HammerThrow/v_HammerThrow_g12_c04 35 
+FieldHockeyPenalty/v_FieldHockeyPenalty_g12_c05 28 +SoccerJuggling/v_SoccerJuggling_g24_c06 83 +BrushingTeeth/v_BrushingTeeth_g20_c04 19 +Kayaking/v_Kayaking_g23_c06 48 +BlowingCandles/v_BlowingCandles_g25_c05 13 +TrampolineJumping/v_TrampolineJumping_g22_c03 93 +PlayingDaf/v_PlayingDaf_g20_c06 59 +StillRings/v_StillRings_g19_c04 85 +LongJump/v_LongJump_g13_c03 50 +PushUps/v_PushUps_g22_c04 71 +JavelinThrow/v_JavelinThrow_g23_c01 44 +Hammering/v_Hammering_g25_c02 34 +CricketShot/v_CricketShot_g19_c03 23 +HammerThrow/v_HammerThrow_g09_c03 35 +BodyWeightSquats/v_BodyWeightSquats_g14_c03 14 +VolleyballSpiking/v_VolleyballSpiking_g11_c05 96 +PommelHorse/v_PommelHorse_g23_c02 68 +Kayaking/v_Kayaking_g20_c03 48 +Diving/v_Diving_g10_c06 25 +PlayingPiano/v_PlayingPiano_g13_c04 63 +SkateBoarding/v_SkateBoarding_g18_c04 79 +BasketballDunk/v_BasketballDunk_g17_c04 8 +CricketShot/v_CricketShot_g08_c07 23 +TableTennisShot/v_TableTennisShot_g09_c05 89 +FloorGymnastics/v_FloorGymnastics_g22_c02 29 +BlowDryHair/v_BlowDryHair_g20_c01 12 +PlayingFlute/v_PlayingFlute_g25_c05 61 +MilitaryParade/v_MilitaryParade_g12_c01 52 +CliffDiving/v_CliffDiving_g24_c04 21 +ApplyEyeMakeup/v_ApplyEyeMakeup_g17_c01 0 +HighJump/v_HighJump_g22_c01 39 +Skijet/v_Skijet_g17_c01 81 +HandstandPushups/v_HandStandPushups_g25_c06 36 +FrontCrawl/v_FrontCrawl_g18_c04 31 +BasketballDunk/v_BasketballDunk_g20_c06 8 +FrisbeeCatch/v_FrisbeeCatch_g24_c01 30 +PlayingTabla/v_PlayingTabla_g17_c02 65 +WallPushups/v_WallPushups_g25_c01 98 +PushUps/v_PushUps_g25_c04 71 +UnevenBars/v_UnevenBars_g09_c02 95 +TableTennisShot/v_TableTennisShot_g09_c02 89 +CuttingInKitchen/v_CuttingInKitchen_g10_c02 24 +Rowing/v_Rowing_g15_c06 75 +BoxingSpeedBag/v_BoxingSpeedBag_g09_c06 17 +GolfSwing/v_GolfSwing_g08_c05 32 +BandMarching/v_BandMarching_g14_c01 5 +Shotput/v_Shotput_g13_c02 78 +Hammering/v_Hammering_g18_c05 34 +Diving/v_Diving_g13_c01 25 +HorseRace/v_HorseRace_g11_c04 40 +BasketballDunk/v_BasketballDunk_g16_c02 8 
+Mixing/v_Mixing_g12_c05 53 +TennisSwing/v_TennisSwing_g08_c03 91 +ParallelBars/v_ParallelBars_g16_c03 56 +SalsaSpin/v_SalsaSpin_g18_c01 76 +PlayingViolin/v_PlayingViolin_g20_c04 66 +WalkingWithDog/v_WalkingWithDog_g11_c04 97 +HulaHoop/v_HulaHoop_g25_c04 42 +HorseRiding/v_HorseRiding_g11_c02 41 +Nunchucks/v_Nunchucks_g13_c03 55 +YoYo/v_YoYo_g19_c01 100 +Lunges/v_Lunges_g11_c06 51 +Diving/v_Diving_g11_c06 25 +JumpRope/v_JumpRope_g16_c01 47 +PlayingDhol/v_PlayingDhol_g18_c02 60 +Biking/v_Biking_g24_c07 10 +Punch/v_Punch_g19_c06 70 +WalkingWithDog/v_WalkingWithDog_g24_c04 97 +ShavingBeard/v_ShavingBeard_g14_c06 77 +FrontCrawl/v_FrontCrawl_g18_c05 31 +JumpingJack/v_JumpingJack_g25_c03 46 +PlayingSitar/v_PlayingSitar_g10_c01 64 +LongJump/v_LongJump_g23_c04 50 +HorseRiding/v_HorseRiding_g16_c04 41 +LongJump/v_LongJump_g15_c01 50 +Punch/v_Punch_g24_c06 70 +BandMarching/v_BandMarching_g17_c02 5 +BandMarching/v_BandMarching_g16_c02 5 +FrisbeeCatch/v_FrisbeeCatch_g08_c05 30 +BrushingTeeth/v_BrushingTeeth_g19_c01 19 +ApplyEyeMakeup/v_ApplyEyeMakeup_g10_c03 0 +HulaHoop/v_HulaHoop_g12_c03 42 +HorseRiding/v_HorseRiding_g17_c06 41 +PlayingCello/v_PlayingCello_g16_c07 58 +PlayingCello/v_PlayingCello_g16_c01 58 +Skiing/v_Skiing_g19_c02 80 +SoccerJuggling/v_SoccerJuggling_g10_c01 83 +JavelinThrow/v_JavelinThrow_g09_c03 44 +Drumming/v_Drumming_g10_c01 26 +MilitaryParade/v_MilitaryParade_g18_c03 52 +JumpRope/v_JumpRope_g22_c01 47 +BlowDryHair/v_BlowDryHair_g21_c03 12 +ApplyLipstick/v_ApplyLipstick_g24_c01 1 +FloorGymnastics/v_FloorGymnastics_g25_c04 29 +TrampolineJumping/v_TrampolineJumping_g15_c04 93 +PlayingDhol/v_PlayingDhol_g18_c04 60 +BoxingPunchingBag/v_BoxingPunchingBag_g22_c05 16 +Punch/v_Punch_g19_c07 70 +Bowling/v_Bowling_g10_c03 15 +BrushingTeeth/v_BrushingTeeth_g20_c03 19 +HandstandPushups/v_HandStandPushups_g11_c02 36 +BodyWeightSquats/v_BodyWeightSquats_g18_c03 14 +Mixing/v_Mixing_g14_c02 53 +SoccerJuggling/v_SoccerJuggling_g11_c04 83 +PlayingCello/v_PlayingCello_g18_c06 
58 +PlayingTabla/v_PlayingTabla_g21_c01 65 +PoleVault/v_PoleVault_g12_c07 67 +HandstandPushups/v_HandStandPushups_g08_c03 36 +JugglingBalls/v_JugglingBalls_g17_c02 45 +JumpingJack/v_JumpingJack_g24_c01 46 +Haircut/v_Haircut_g23_c03 33 +HandstandPushups/v_HandStandPushups_g13_c06 36 +YoYo/v_YoYo_g23_c03 100 +RopeClimbing/v_RopeClimbing_g14_c01 74 +JumpRope/v_JumpRope_g20_c03 47 +Skijet/v_Skijet_g22_c03 81 +HeadMassage/v_HeadMassage_g25_c04 38 +PommelHorse/v_PommelHorse_g23_c04 68 +CricketShot/v_CricketShot_g24_c05 23 +BoxingPunchingBag/v_BoxingPunchingBag_g09_c04 16 +HighJump/v_HighJump_g16_c01 39 +Surfing/v_Surfing_g20_c05 87 +JugglingBalls/v_JugglingBalls_g11_c03 45 +PlayingCello/v_PlayingCello_g10_c01 58 +Rowing/v_Rowing_g11_c03 75 +PlayingCello/v_PlayingCello_g20_c07 58 +IceDancing/v_IceDancing_g19_c06 43 +TableTennisShot/v_TableTennisShot_g12_c02 89 +BoxingPunchingBag/v_BoxingPunchingBag_g18_c01 16 +BenchPress/v_BenchPress_g11_c04 9 +PushUps/v_PushUps_g16_c02 71 +HandstandPushups/v_HandStandPushups_g23_c04 36 +ShavingBeard/v_ShavingBeard_g25_c01 77 +Skiing/v_Skiing_g21_c01 80 +HulaHoop/v_HulaHoop_g14_c03 42 +VolleyballSpiking/v_VolleyballSpiking_g19_c02 96 +BlowDryHair/v_BlowDryHair_g13_c03 12 +YoYo/v_YoYo_g22_c01 100 +BodyWeightSquats/v_BodyWeightSquats_g21_c02 14 +BoxingSpeedBag/v_BoxingSpeedBag_g24_c03 17 +RopeClimbing/v_RopeClimbing_g24_c02 74 +PlayingViolin/v_PlayingViolin_g11_c03 66 +Punch/v_Punch_g19_c02 70 +BenchPress/v_BenchPress_g17_c02 9 +RopeClimbing/v_RopeClimbing_g18_c02 74 +Haircut/v_Haircut_g10_c05 33 +CliffDiving/v_CliffDiving_g25_c03 21 +Nunchucks/v_Nunchucks_g10_c04 55 +Skiing/v_Skiing_g19_c05 80 +Rowing/v_Rowing_g19_c06 75 +PlayingDhol/v_PlayingDhol_g25_c07 60 +ParallelBars/v_ParallelBars_g10_c04 56 +WalkingWithDog/v_WalkingWithDog_g11_c03 97 +Bowling/v_Bowling_g08_c02 15 +PlayingFlute/v_PlayingFlute_g16_c06 61 +Biking/v_Biking_g19_c02 10 +ParallelBars/v_ParallelBars_g19_c02 56 +ParallelBars/v_ParallelBars_g14_c01 56 
+HandstandWalking/v_HandstandWalking_g23_c03 37 +UnevenBars/v_UnevenBars_g08_c01 95 +Bowling/v_Bowling_g10_c02 15 +Basketball/v_Basketball_g12_c02 7 +Nunchucks/v_Nunchucks_g10_c03 55 +Typing/v_Typing_g16_c02 94 +BabyCrawling/v_BabyCrawling_g09_c03 3 +TaiChi/v_TaiChi_g14_c01 90 +Drumming/v_Drumming_g20_c01 26 +HeadMassage/v_HeadMassage_g13_c02 38 +Kayaking/v_Kayaking_g13_c04 48 +Haircut/v_Haircut_g23_c07 33 +SalsaSpin/v_SalsaSpin_g22_c02 76 +Diving/v_Diving_g13_c03 25 +ApplyLipstick/v_ApplyLipstick_g14_c02 1 +TableTennisShot/v_TableTennisShot_g20_c03 89 +SumoWrestling/v_SumoWrestling_g09_c04 86 +PlayingTabla/v_PlayingTabla_g09_c01 65 +GolfSwing/v_GolfSwing_g25_c07 32 +CricketShot/v_CricketShot_g10_c01 23 +Shotput/v_Shotput_g15_c07 78 +Archery/v_Archery_g22_c04 2 +Rafting/v_Rafting_g14_c01 72 +WallPushups/v_WallPushups_g12_c05 98 +Billiards/v_Billiards_g09_c07 11 +TrampolineJumping/v_TrampolineJumping_g25_c01 93 +PizzaTossing/v_PizzaTossing_g08_c02 57 +MilitaryParade/v_MilitaryParade_g22_c01 52 +ParallelBars/v_ParallelBars_g23_c02 56 +Typing/v_Typing_g19_c02 94 +FrontCrawl/v_FrontCrawl_g23_c02 31 +PlayingTabla/v_PlayingTabla_g10_c03 65 +Skijet/v_Skijet_g20_c04 81 +PizzaTossing/v_PizzaTossing_g21_c04 57 +CleanAndJerk/v_CleanAndJerk_g19_c02 20 +LongJump/v_LongJump_g12_c02 50 +Rafting/v_Rafting_g20_c04 72 +PommelHorse/v_PommelHorse_g25_c01 68 +CricketBowling/v_CricketBowling_g18_c04 22 +JugglingBalls/v_JugglingBalls_g12_c05 45 +Biking/v_Biking_g19_c01 10 +BasketballDunk/v_BasketballDunk_g23_c04 8 +FloorGymnastics/v_FloorGymnastics_g23_c02 29 +Bowling/v_Bowling_g21_c02 15 +BenchPress/v_BenchPress_g20_c07 9 +HandstandWalking/v_HandstandWalking_g11_c03 37 +TrampolineJumping/v_TrampolineJumping_g23_c02 93 +BreastStroke/v_BreastStroke_g08_c01 18 +PlayingSitar/v_PlayingSitar_g08_c05 64 +PlayingViolin/v_PlayingViolin_g16_c03 66 +BoxingSpeedBag/v_BoxingSpeedBag_g20_c01 17 +FrontCrawl/v_FrontCrawl_g15_c02 31 +Rowing/v_Rowing_g16_c04 75 
+TrampolineJumping/v_TrampolineJumping_g11_c06 93 +Swing/v_Swing_g25_c04 88 +CuttingInKitchen/v_CuttingInKitchen_g20_c01 24 +VolleyballSpiking/v_VolleyballSpiking_g14_c04 96 +CricketBowling/v_CricketBowling_g15_c02 22 +PlayingGuitar/v_PlayingGuitar_g09_c04 62 +ShavingBeard/v_ShavingBeard_g16_c05 77 +WallPushups/v_WallPushups_g17_c04 98 +FieldHockeyPenalty/v_FieldHockeyPenalty_g10_c07 28 +LongJump/v_LongJump_g17_c02 50 +Skijet/v_Skijet_g08_c04 81 +JumpingJack/v_JumpingJack_g15_c01 46 +Lunges/v_Lunges_g18_c01 51 +BrushingTeeth/v_BrushingTeeth_g24_c05 19 +MilitaryParade/v_MilitaryParade_g19_c07 52 +RockClimbingIndoor/v_RockClimbingIndoor_g16_c01 73 +HorseRace/v_HorseRace_g12_c04 40 +CliffDiving/v_CliffDiving_g18_c04 21 +HorseRiding/v_HorseRiding_g11_c07 41 +HorseRiding/v_HorseRiding_g09_c01 41 +CuttingInKitchen/v_CuttingInKitchen_g16_c04 24 +Rowing/v_Rowing_g08_c03 75 +LongJump/v_LongJump_g23_c02 50 +HandstandPushups/v_HandStandPushups_g23_c05 36 +PoleVault/v_PoleVault_g19_c07 67 +BoxingPunchingBag/v_BoxingPunchingBag_g21_c04 16 +Swing/v_Swing_g23_c04 88 +TableTennisShot/v_TableTennisShot_g24_c02 89 +Shotput/v_Shotput_g14_c04 78 +FloorGymnastics/v_FloorGymnastics_g18_c02 29 +PlayingDhol/v_PlayingDhol_g16_c04 60 +BreastStroke/v_BreastStroke_g18_c03 18 +ShavingBeard/v_ShavingBeard_g09_c03 77 +JumpRope/v_JumpRope_g21_c01 47 +PlayingSitar/v_PlayingSitar_g23_c04 64 +HorseRiding/v_HorseRiding_g15_c05 41 +HammerThrow/v_HammerThrow_g10_c04 35 +FieldHockeyPenalty/v_FieldHockeyPenalty_g20_c03 28 +JumpRope/v_JumpRope_g14_c03 47 +BandMarching/v_BandMarching_g16_c01 5 +SoccerPenalty/v_SoccerPenalty_g15_c03 84 +PoleVault/v_PoleVault_g17_c04 67 +Diving/v_Diving_g24_c07 25 +SoccerJuggling/v_SoccerJuggling_g08_c01 83 +LongJump/v_LongJump_g21_c01 50 +BenchPress/v_BenchPress_g25_c07 9 +TableTennisShot/v_TableTennisShot_g19_c01 89 +RockClimbingIndoor/v_RockClimbingIndoor_g16_c05 73 +HammerThrow/v_HammerThrow_g14_c01 35 +SumoWrestling/v_SumoWrestling_g21_c02 86 
+UnevenBars/v_UnevenBars_g08_c02 95 +FloorGymnastics/v_FloorGymnastics_g25_c05 29 +HandstandPushups/v_HandStandPushups_g25_c04 36 +GolfSwing/v_GolfSwing_g20_c02 32 +Nunchucks/v_Nunchucks_g15_c02 55 +PlayingDaf/v_PlayingDaf_g20_c01 59 +PlayingCello/v_PlayingCello_g17_c02 58 +ShavingBeard/v_ShavingBeard_g25_c03 77 +HammerThrow/v_HammerThrow_g08_c02 35 +GolfSwing/v_GolfSwing_g18_c05 32 +Lunges/v_Lunges_g24_c01 51 +Basketball/v_Basketball_g24_c01 7 +Biking/v_Biking_g25_c03 10 +ShavingBeard/v_ShavingBeard_g14_c03 77 +SalsaSpin/v_SalsaSpin_g22_c04 76 +Rafting/v_Rafting_g24_c01 72 +Biking/v_Biking_g21_c07 10 +TableTennisShot/v_TableTennisShot_g11_c02 89 +HandstandWalking/v_HandstandWalking_g24_c01 37 +Punch/v_Punch_g20_c01 70 +SkateBoarding/v_SkateBoarding_g17_c01 79 +Swing/v_Swing_g21_c05 88 +MoppingFloor/v_MoppingFloor_g20_c01 54 +SoccerJuggling/v_SoccerJuggling_g13_c01 83 +Rowing/v_Rowing_g21_c04 75 +YoYo/v_YoYo_g19_c03 100 +BandMarching/v_BandMarching_g08_c04 5 +Knitting/v_Knitting_g15_c03 49 +Punch/v_Punch_g18_c03 70 +Rowing/v_Rowing_g25_c05 75 +BaseballPitch/v_BaseballPitch_g20_c01 6 +Billiards/v_Billiards_g13_c06 11 +Mixing/v_Mixing_g22_c04 53 +Haircut/v_Haircut_g20_c01 33 +BenchPress/v_BenchPress_g22_c05 9 +LongJump/v_LongJump_g15_c04 50 +PommelHorse/v_PommelHorse_g13_c05 68 +JugglingBalls/v_JugglingBalls_g13_c03 45 +SalsaSpin/v_SalsaSpin_g18_c04 76 +FrisbeeCatch/v_FrisbeeCatch_g12_c02 30 +YoYo/v_YoYo_g23_c01 100 +TableTennisShot/v_TableTennisShot_g09_c07 89 +Surfing/v_Surfing_g08_c04 87 +PlayingDaf/v_PlayingDaf_g18_c07 59 +BodyWeightSquats/v_BodyWeightSquats_g23_c04 14 +FieldHockeyPenalty/v_FieldHockeyPenalty_g10_c06 28 +ParallelBars/v_ParallelBars_g24_c01 56 +Biking/v_Biking_g19_c03 10 +Skiing/v_Skiing_g21_c07 80 +RockClimbingIndoor/v_RockClimbingIndoor_g19_c03 73 +BlowingCandles/v_BlowingCandles_g09_c04 13 +BoxingSpeedBag/v_BoxingSpeedBag_g23_c05 17 +WalkingWithDog/v_WalkingWithDog_g23_c03 97 +HandstandWalking/v_HandstandWalking_g14_c01 37 
+TableTennisShot/v_TableTennisShot_g08_c04 89 +MilitaryParade/v_MilitaryParade_g09_c03 52 +Biking/v_Biking_g23_c02 10 +Punch/v_Punch_g21_c03 70 +BreastStroke/v_BreastStroke_g19_c04 18 +BreastStroke/v_BreastStroke_g13_c04 18 +LongJump/v_LongJump_g25_c04 50 +HulaHoop/v_HulaHoop_g22_c05 42 +HorseRiding/v_HorseRiding_g10_c04 41 +FieldHockeyPenalty/v_FieldHockeyPenalty_g14_c03 28 +PoleVault/v_PoleVault_g15_c02 67 +JumpRope/v_JumpRope_g25_c01 47 +FrontCrawl/v_FrontCrawl_g11_c04 31 +HammerThrow/v_HammerThrow_g22_c07 35 +CricketBowling/v_CricketBowling_g21_c05 22 +HulaHoop/v_HulaHoop_g10_c02 42 +Kayaking/v_Kayaking_g11_c04 48 +BoxingSpeedBag/v_BoxingSpeedBag_g24_c01 17 +Diving/v_Diving_g09_c06 25 +Kayaking/v_Kayaking_g25_c04 48 +TableTennisShot/v_TableTennisShot_g25_c02 89 +WritingOnBoard/v_WritingOnBoard_g13_c04 99 +Bowling/v_Bowling_g09_c04 15 +HeadMassage/v_HeadMassage_g08_c04 38 +HorseRiding/v_HorseRiding_g14_c04 41 +BlowingCandles/v_BlowingCandles_g11_c04 13 +Swing/v_Swing_g16_c03 88 +HulaHoop/v_HulaHoop_g22_c01 42 +FloorGymnastics/v_FloorGymnastics_g15_c06 29 +JugglingBalls/v_JugglingBalls_g21_c01 45 +PullUps/v_PullUps_g10_c04 69 +PlayingViolin/v_PlayingViolin_g20_c01 66 +Surfing/v_Surfing_g19_c04 87 +BandMarching/v_BandMarching_g10_c04 5 +FrisbeeCatch/v_FrisbeeCatch_g09_c04 30 +Billiards/v_Billiards_g25_c01 11 +Skijet/v_Skijet_g12_c04 81 +Drumming/v_Drumming_g12_c02 26 +SoccerPenalty/v_SoccerPenalty_g17_c01 84 +WalkingWithDog/v_WalkingWithDog_g09_c03 97 +PushUps/v_PushUps_g13_c01 71 +GolfSwing/v_GolfSwing_g13_c04 32 +HammerThrow/v_HammerThrow_g10_c01 35 +Shotput/v_Shotput_g25_c03 78 +TrampolineJumping/v_TrampolineJumping_g10_c02 93 +BoxingSpeedBag/v_BoxingSpeedBag_g17_c02 17 +PlayingDhol/v_PlayingDhol_g22_c07 60 +WritingOnBoard/v_WritingOnBoard_g25_c02 99 +BoxingPunchingBag/v_BoxingPunchingBag_g08_c02 16 +BaseballPitch/v_BaseballPitch_g14_c04 6 +Kayaking/v_Kayaking_g16_c03 48 +PlayingFlute/v_PlayingFlute_g17_c02 61 +Haircut/v_Haircut_g18_c02 33 
+Punch/v_Punch_g16_c06 70 +FrisbeeCatch/v_FrisbeeCatch_g11_c01 30 +FloorGymnastics/v_FloorGymnastics_g10_c05 29 +GolfSwing/v_GolfSwing_g19_c02 32 +ThrowDiscus/v_ThrowDiscus_g22_c02 92 +ParallelBars/v_ParallelBars_g21_c01 56 +ParallelBars/v_ParallelBars_g12_c03 56 +ThrowDiscus/v_ThrowDiscus_g10_c05 92 +LongJump/v_LongJump_g08_c03 50 +PlayingSitar/v_PlayingSitar_g10_c04 64 +BenchPress/v_BenchPress_g23_c01 9 +SkateBoarding/v_SkateBoarding_g16_c03 79 +BlowDryHair/v_BlowDryHair_g15_c05 12 +TaiChi/v_TaiChi_g18_c04 90 +SumoWrestling/v_SumoWrestling_g08_c07 86 +Biking/v_Biking_g22_c03 10 +WallPushups/v_WallPushups_g18_c05 98 +BrushingTeeth/v_BrushingTeeth_g10_c05 19 +Basketball/v_Basketball_g20_c04 7 +BrushingTeeth/v_BrushingTeeth_g21_c02 19 +HulaHoop/v_HulaHoop_g24_c03 42 +PlayingPiano/v_PlayingPiano_g20_c03 63 +Drumming/v_Drumming_g08_c01 26 +YoYo/v_YoYo_g14_c04 100 +BabyCrawling/v_BabyCrawling_g15_c02 3 +TennisSwing/v_TennisSwing_g20_c06 91 +TennisSwing/v_TennisSwing_g23_c04 91 +SkyDiving/v_SkyDiving_g14_c02 82 +RockClimbingIndoor/v_RockClimbingIndoor_g15_c02 73 +HandstandWalking/v_HandstandWalking_g18_c05 37 +Lunges/v_Lunges_g13_c03 51 +CricketShot/v_CricketShot_g17_c06 23 +JumpingJack/v_JumpingJack_g11_c02 46 +StillRings/v_StillRings_g21_c07 85 +RockClimbingIndoor/v_RockClimbingIndoor_g10_c06 73 +HeadMassage/v_HeadMassage_g12_c02 38 +HammerThrow/v_HammerThrow_g14_c04 35 +PlayingFlute/v_PlayingFlute_g11_c01 61 +ShavingBeard/v_ShavingBeard_g21_c02 77 +JumpingJack/v_JumpingJack_g19_c05 46 +Mixing/v_Mixing_g11_c03 53 +Mixing/v_Mixing_g13_c05 53 +RopeClimbing/v_RopeClimbing_g13_c02 74 +PullUps/v_PullUps_g13_c04 69 +HammerThrow/v_HammerThrow_g25_c02 35 +ApplyEyeMakeup/v_ApplyEyeMakeup_g14_c05 0 +SkyDiving/v_SkyDiving_g10_c02 82 +FieldHockeyPenalty/v_FieldHockeyPenalty_g17_c01 28 +GolfSwing/v_GolfSwing_g15_c01 32 +CleanAndJerk/v_CleanAndJerk_g22_c03 20 +Archery/v_Archery_g16_c01 2 +Haircut/v_Haircut_g11_c02 33 +Nunchucks/v_Nunchucks_g10_c05 55 
+PommelHorse/v_PommelHorse_g19_c04 68 +FieldHockeyPenalty/v_FieldHockeyPenalty_g14_c02 28 +PlayingDaf/v_PlayingDaf_g10_c05 59 +BrushingTeeth/v_BrushingTeeth_g25_c02 19 +HorseRiding/v_HorseRiding_g22_c04 41 +HammerThrow/v_HammerThrow_g22_c01 35 +PommelHorse/v_PommelHorse_g21_c03 68 +HandstandPushups/v_HandStandPushups_g14_c04 36 +YoYo/v_YoYo_g19_c06 100 +ThrowDiscus/v_ThrowDiscus_g14_c04 92 +Rafting/v_Rafting_g18_c04 72 +RockClimbingIndoor/v_RockClimbingIndoor_g23_c06 73 +WalkingWithDog/v_WalkingWithDog_g23_c02 97 +CleanAndJerk/v_CleanAndJerk_g20_c05 20 +PlayingDhol/v_PlayingDhol_g11_c05 60 +BrushingTeeth/v_BrushingTeeth_g21_c03 19 +WallPushups/v_WallPushups_g19_c01 98 +BandMarching/v_BandMarching_g23_c06 5 +Hammering/v_Hammering_g09_c02 34 +BasketballDunk/v_BasketballDunk_g21_c03 8 +TrampolineJumping/v_TrampolineJumping_g19_c05 93 +BrushingTeeth/v_BrushingTeeth_g20_c05 19 +TaiChi/v_TaiChi_g19_c04 90 +HorseRace/v_HorseRace_g19_c02 40 +MilitaryParade/v_MilitaryParade_g25_c02 52 +BlowDryHair/v_BlowDryHair_g16_c03 12 +Diving/v_Diving_g23_c06 25 +BrushingTeeth/v_BrushingTeeth_g13_c03 19 +BlowDryHair/v_BlowDryHair_g18_c04 12 +Haircut/v_Haircut_g18_c04 33 +WritingOnBoard/v_WritingOnBoard_g20_c01 99 +MoppingFloor/v_MoppingFloor_g14_c02 54 +SalsaSpin/v_SalsaSpin_g17_c02 76 +PlayingSitar/v_PlayingSitar_g23_c07 64 +FieldHockeyPenalty/v_FieldHockeyPenalty_g08_c01 28 +SalsaSpin/v_SalsaSpin_g25_c02 76 +CuttingInKitchen/v_CuttingInKitchen_g15_c02 24 +BasketballDunk/v_BasketballDunk_g11_c02 8 +TaiChi/v_TaiChi_g16_c04 90 +CleanAndJerk/v_CleanAndJerk_g13_c04 20 +PushUps/v_PushUps_g21_c01 71 +WallPushups/v_WallPushups_g21_c01 98 +Punch/v_Punch_g09_c07 70 +Haircut/v_Haircut_g13_c03 33 +Skijet/v_Skijet_g24_c01 81 +BoxingPunchingBag/v_BoxingPunchingBag_g24_c04 16 +Typing/v_Typing_g11_c03 94 +Skiing/v_Skiing_g12_c04 80 +SoccerJuggling/v_SoccerJuggling_g18_c03 83 +Mixing/v_Mixing_g14_c06 53 +HandstandPushups/v_HandStandPushups_g20_c07 36 +FrisbeeCatch/v_FrisbeeCatch_g11_c02 30 
+Typing/v_Typing_g12_c07 94 +Bowling/v_Bowling_g13_c03 15 +Skijet/v_Skijet_g08_c03 81 +Swing/v_Swing_g11_c04 88 +TrampolineJumping/v_TrampolineJumping_g25_c04 93 +YoYo/v_YoYo_g17_c04 100 +HammerThrow/v_HammerThrow_g25_c01 35 +RockClimbingIndoor/v_RockClimbingIndoor_g20_c01 73 +FieldHockeyPenalty/v_FieldHockeyPenalty_g13_c01 28 +SkyDiving/v_SkyDiving_g13_c01 82 +HandstandPushups/v_HandStandPushups_g16_c07 36 +Nunchucks/v_Nunchucks_g22_c04 55 +PlayingDhol/v_PlayingDhol_g09_c02 60 +BasketballDunk/v_BasketballDunk_g21_c04 8 +StillRings/v_StillRings_g25_c06 85 +Nunchucks/v_Nunchucks_g10_c06 55 +TennisSwing/v_TennisSwing_g25_c02 91 +JumpRope/v_JumpRope_g19_c03 47 +TrampolineJumping/v_TrampolineJumping_g12_c05 93 +ApplyEyeMakeup/v_ApplyEyeMakeup_g18_c05 0 +PlayingTabla/v_PlayingTabla_g09_c04 65 +PoleVault/v_PoleVault_g23_c06 67 +BasketballDunk/v_BasketballDunk_g22_c02 8 +Basketball/v_Basketball_g16_c06 7 +PlayingSitar/v_PlayingSitar_g14_c04 64 +IceDancing/v_IceDancing_g23_c06 43 +TrampolineJumping/v_TrampolineJumping_g09_c03 93 +JavelinThrow/v_JavelinThrow_g24_c05 44 +WritingOnBoard/v_WritingOnBoard_g22_c04 99 +HorseRiding/v_HorseRiding_g12_c03 41 +JugglingBalls/v_JugglingBalls_g15_c03 45 +CuttingInKitchen/v_CuttingInKitchen_g20_c04 24 +Mixing/v_Mixing_g08_c04 53 +IceDancing/v_IceDancing_g20_c01 43 +BandMarching/v_BandMarching_g22_c05 5 +HeadMassage/v_HeadMassage_g18_c03 38 +CleanAndJerk/v_CleanAndJerk_g09_c03 20 +FloorGymnastics/v_FloorGymnastics_g12_c06 29 +ApplyEyeMakeup/v_ApplyEyeMakeup_g09_c05 0 +BabyCrawling/v_BabyCrawling_g22_c01 3 +Knitting/v_Knitting_g13_c05 49 +Bowling/v_Bowling_g21_c04 15 +Swing/v_Swing_g17_c03 88 +JumpingJack/v_JumpingJack_g13_c07 46 +Rowing/v_Rowing_g18_c04 75 +JumpRope/v_JumpRope_g23_c03 47 +BoxingPunchingBag/v_BoxingPunchingBag_g24_c06 16 +JumpingJack/v_JumpingJack_g18_c02 46 +PushUps/v_PushUps_g13_c04 71 +PlayingCello/v_PlayingCello_g09_c03 58 +SkyDiving/v_SkyDiving_g24_c01 82 +Mixing/v_Mixing_g16_c03 53 +UnevenBars/v_UnevenBars_g10_c02 95 
+SoccerPenalty/v_SoccerPenalty_g15_c04 84 +Bowling/v_Bowling_g09_c01 15 +ShavingBeard/v_ShavingBeard_g20_c06 77 +FieldHockeyPenalty/v_FieldHockeyPenalty_g24_c02 28 +BreastStroke/v_BreastStroke_g24_c02 18 +Drumming/v_Drumming_g23_c03 26 +Surfing/v_Surfing_g17_c05 87 +Surfing/v_Surfing_g09_c03 87 +StillRings/v_StillRings_g11_c03 85 +BodyWeightSquats/v_BodyWeightSquats_g25_c07 14 +Billiards/v_Billiards_g11_c06 11 +PoleVault/v_PoleVault_g09_c02 67 +BandMarching/v_BandMarching_g19_c03 5 +Mixing/v_Mixing_g25_c01 53 +BenchPress/v_BenchPress_g19_c02 9 +FrisbeeCatch/v_FrisbeeCatch_g20_c06 30 +Drumming/v_Drumming_g16_c03 26 +Shotput/v_Shotput_g14_c01 78 +LongJump/v_LongJump_g16_c02 50 +MilitaryParade/v_MilitaryParade_g15_c03 52 +Diving/v_Diving_g21_c01 25 +RopeClimbing/v_RopeClimbing_g12_c01 74 +SoccerPenalty/v_SoccerPenalty_g23_c06 84 +UnevenBars/v_UnevenBars_g23_c01 95 +Bowling/v_Bowling_g10_c07 15 +PullUps/v_PullUps_g19_c03 69 +PoleVault/v_PoleVault_g22_c04 67 +TennisSwing/v_TennisSwing_g13_c07 91 +Skiing/v_Skiing_g14_c04 80 +Punch/v_Punch_g16_c03 70 +Haircut/v_Haircut_g19_c03 33 +YoYo/v_YoYo_g08_c02 100 +PlayingDaf/v_PlayingDaf_g19_c07 59 +Fencing/v_Fencing_g14_c04 27 +Diving/v_Diving_g19_c04 25 +PlayingSitar/v_PlayingSitar_g14_c07 64 +HeadMassage/v_HeadMassage_g18_c04 38 +Lunges/v_Lunges_g23_c02 51 +MoppingFloor/v_MoppingFloor_g16_c04 54 +FieldHockeyPenalty/v_FieldHockeyPenalty_g11_c02 28 +BlowingCandles/v_BlowingCandles_g09_c03 13 +Archery/v_Archery_g25_c03 2 +FrisbeeCatch/v_FrisbeeCatch_g19_c02 30 +MoppingFloor/v_MoppingFloor_g09_c03 54 +CliffDiving/v_CliffDiving_g10_c03 21 +GolfSwing/v_GolfSwing_g13_c03 32 +Knitting/v_Knitting_g17_c02 49 +Lunges/v_Lunges_g16_c02 51 +Skiing/v_Skiing_g20_c07 80 +JumpRope/v_JumpRope_g21_c06 47 +Rafting/v_Rafting_g15_c03 72 +Kayaking/v_Kayaking_g08_c03 48 +IceDancing/v_IceDancing_g12_c01 43 +BoxingSpeedBag/v_BoxingSpeedBag_g23_c03 17 +CricketShot/v_CricketShot_g20_c05 23 +JugglingBalls/v_JugglingBalls_g23_c04 45 
+Hammering/v_Hammering_g15_c03 34 +PlayingCello/v_PlayingCello_g16_c06 58 +BoxingSpeedBag/v_BoxingSpeedBag_g20_c03 17 +Shotput/v_Shotput_g12_c07 78 +SoccerPenalty/v_SoccerPenalty_g14_c05 84 +TrampolineJumping/v_TrampolineJumping_g14_c03 93 +JumpingJack/v_JumpingJack_g22_c04 46 +YoYo/v_YoYo_g21_c01 100 +CliffDiving/v_CliffDiving_g20_c02 21 +Nunchucks/v_Nunchucks_g17_c04 55 +Mixing/v_Mixing_g09_c01 53 +BoxingPunchingBag/v_BoxingPunchingBag_g22_c03 16 +PlayingDhol/v_PlayingDhol_g16_c01 60 +Biking/v_Biking_g11_c03 10 +FrisbeeCatch/v_FrisbeeCatch_g21_c02 30 +PlayingDhol/v_PlayingDhol_g21_c06 60 +Surfing/v_Surfing_g10_c04 87 +PlayingSitar/v_PlayingSitar_g18_c06 64 +BenchPress/v_BenchPress_g25_c04 9 +Fencing/v_Fencing_g15_c03 27 +PlayingCello/v_PlayingCello_g18_c04 58 +Biking/v_Biking_g10_c03 10 +BenchPress/v_BenchPress_g08_c02 9 +JugglingBalls/v_JugglingBalls_g19_c04 45 +TennisSwing/v_TennisSwing_g19_c04 91 +IceDancing/v_IceDancing_g13_c03 43 +WallPushups/v_WallPushups_g16_c05 98 +HorseRiding/v_HorseRiding_g20_c06 41 +SalsaSpin/v_SalsaSpin_g15_c06 76 +BenchPress/v_BenchPress_g18_c01 9 +JugglingBalls/v_JugglingBalls_g19_c01 45 +PlayingDhol/v_PlayingDhol_g19_c01 60 +FloorGymnastics/v_FloorGymnastics_g17_c01 29 +VolleyballSpiking/v_VolleyballSpiking_g13_c01 96 +Haircut/v_Haircut_g18_c01 33 +SoccerJuggling/v_SoccerJuggling_g23_c06 83 +Shotput/v_Shotput_g23_c03 78 +PoleVault/v_PoleVault_g25_c03 67 +Knitting/v_Knitting_g13_c01 49 +Shotput/v_Shotput_g10_c06 78 +JumpRope/v_JumpRope_g20_c01 47 +JumpingJack/v_JumpingJack_g13_c04 46 +BasketballDunk/v_BasketballDunk_g17_c07 8 +BaseballPitch/v_BaseballPitch_g09_c01 6 +CleanAndJerk/v_CleanAndJerk_g24_c02 20 +MilitaryParade/v_MilitaryParade_g12_c04 52 +JavelinThrow/v_JavelinThrow_g16_c05 44 +HandstandWalking/v_HandstandWalking_g14_c03 37 +MilitaryParade/v_MilitaryParade_g15_c06 52 +Shotput/v_Shotput_g12_c05 78 +Drumming/v_Drumming_g10_c07 26 +CliffDiving/v_CliffDiving_g11_c02 21 +Diving/v_Diving_g17_c03 25 
+PlayingDaf/v_PlayingDaf_g11_c02 59 +SoccerPenalty/v_SoccerPenalty_g08_c01 84 +CricketBowling/v_CricketBowling_g23_c02 22 +PlayingViolin/v_PlayingViolin_g16_c02 66 +Biking/v_Biking_g22_c01 10 +PizzaTossing/v_PizzaTossing_g12_c01 57 +Kayaking/v_Kayaking_g18_c05 48 +PushUps/v_PushUps_g10_c02 71 +Typing/v_Typing_g21_c02 94 +Lunges/v_Lunges_g14_c06 51 +JumpingJack/v_JumpingJack_g13_c06 46 +Skijet/v_Skijet_g15_c01 81 +SkateBoarding/v_SkateBoarding_g14_c02 79 +BoxingSpeedBag/v_BoxingSpeedBag_g10_c03 17 +ParallelBars/v_ParallelBars_g14_c03 56 +RockClimbingIndoor/v_RockClimbingIndoor_g08_c05 73 +MoppingFloor/v_MoppingFloor_g11_c02 54 +UnevenBars/v_UnevenBars_g25_c02 95 +FieldHockeyPenalty/v_FieldHockeyPenalty_g15_c04 28 +Mixing/v_Mixing_g21_c04 53 +ApplyLipstick/v_ApplyLipstick_g23_c04 1 +Archery/v_Archery_g10_c05 2 +CleanAndJerk/v_CleanAndJerk_g13_c01 20 +Drumming/v_Drumming_g10_c03 26 +Nunchucks/v_Nunchucks_g13_c02 55 +ApplyLipstick/v_ApplyLipstick_g21_c04 1 +YoYo/v_YoYo_g21_c02 100 +Haircut/v_Haircut_g13_c04 33 +Bowling/v_Bowling_g15_c03 15 +PizzaTossing/v_PizzaTossing_g19_c05 57 +Skijet/v_Skijet_g25_c04 81 +JumpRope/v_JumpRope_g11_c06 47 +Knitting/v_Knitting_g19_c04 49 +Rowing/v_Rowing_g09_c06 75 +PlayingPiano/v_PlayingPiano_g12_c02 63 +Hammering/v_Hammering_g25_c04 34 +SalsaSpin/v_SalsaSpin_g14_c05 76 +HandstandWalking/v_HandstandWalking_g22_c04 37 +MoppingFloor/v_MoppingFloor_g23_c01 54 +HeadMassage/v_HeadMassage_g24_c04 38 +Typing/v_Typing_g10_c01 94 +Kayaking/v_Kayaking_g18_c06 48 +CricketShot/v_CricketShot_g25_c04 23 +CricketShot/v_CricketShot_g20_c02 23 +FieldHockeyPenalty/v_FieldHockeyPenalty_g19_c02 28 +Lunges/v_Lunges_g12_c02 51 +Swing/v_Swing_g17_c02 88 +SoccerPenalty/v_SoccerPenalty_g25_c02 84 +VolleyballSpiking/v_VolleyballSpiking_g12_c01 96 +Rowing/v_Rowing_g10_c01 75 +PushUps/v_PushUps_g10_c03 71 +BlowingCandles/v_BlowingCandles_g15_c01 13 +HighJump/v_HighJump_g14_c04 39 +PlayingGuitar/v_PlayingGuitar_g18_c03 62 +CricketBowling/v_CricketBowling_g15_c01 22 
+TaiChi/v_TaiChi_g20_c03 90 +Fencing/v_Fencing_g21_c03 27 +SalsaSpin/v_SalsaSpin_g24_c02 76 +ShavingBeard/v_ShavingBeard_g09_c01 77 +Archery/v_Archery_g25_c04 2 +BasketballDunk/v_BasketballDunk_g25_c01 8 +BoxingSpeedBag/v_BoxingSpeedBag_g12_c05 17 +Bowling/v_Bowling_g17_c05 15 +Hammering/v_Hammering_g14_c03 34 +PlayingCello/v_PlayingCello_g19_c01 58 +SoccerPenalty/v_SoccerPenalty_g20_c02 84 +TrampolineJumping/v_TrampolineJumping_g23_c01 93 +PlayingDhol/v_PlayingDhol_g21_c02 60 +PlayingDaf/v_PlayingDaf_g22_c04 59 +Typing/v_Typing_g15_c02 94 +Basketball/v_Basketball_g19_c01 7 +PushUps/v_PushUps_g08_c01 71 +TrampolineJumping/v_TrampolineJumping_g17_c03 93 +Rowing/v_Rowing_g18_c02 75 +PlayingDhol/v_PlayingDhol_g20_c03 60 +Kayaking/v_Kayaking_g08_c01 48 +SalsaSpin/v_SalsaSpin_g23_c01 76 +Haircut/v_Haircut_g10_c04 33 +SoccerPenalty/v_SoccerPenalty_g25_c04 84 +FrisbeeCatch/v_FrisbeeCatch_g08_c03 30 +HorseRiding/v_HorseRiding_g17_c01 41 +BoxingSpeedBag/v_BoxingSpeedBag_g21_c01 17 +BlowingCandles/v_BlowingCandles_g17_c04 13 +Fencing/v_Fencing_g17_c01 27 +ThrowDiscus/v_ThrowDiscus_g23_c05 92 +BabyCrawling/v_BabyCrawling_g11_c01 3 +Nunchucks/v_Nunchucks_g11_c06 55 +RockClimbingIndoor/v_RockClimbingIndoor_g22_c03 73 +Swing/v_Swing_g17_c04 88 +CricketBowling/v_CricketBowling_g17_c04 22 +WritingOnBoard/v_WritingOnBoard_g22_c05 99 +PlayingDaf/v_PlayingDaf_g09_c03 59 +Rowing/v_Rowing_g25_c01 75 +PoleVault/v_PoleVault_g20_c04 67 +Rowing/v_Rowing_g11_c02 75 +Diving/v_Diving_g24_c01 25 +SkateBoarding/v_SkateBoarding_g17_c04 79 +CricketShot/v_CricketShot_g21_c06 23 +PushUps/v_PushUps_g24_c04 71 +Swing/v_Swing_g15_c06 88 +Punch/v_Punch_g12_c05 70 +ShavingBeard/v_ShavingBeard_g10_c02 77 +Typing/v_Typing_g24_c02 94 +PlayingPiano/v_PlayingPiano_g11_c01 63 +HighJump/v_HighJump_g19_c05 39 +RockClimbingIndoor/v_RockClimbingIndoor_g14_c01 73 +Rafting/v_Rafting_g24_c03 72 +HammerThrow/v_HammerThrow_g22_c03 35 +BodyWeightSquats/v_BodyWeightSquats_g19_c01 14 +HulaHoop/v_HulaHoop_g08_c01 42 
+BandMarching/v_BandMarching_g12_c06 5 +UnevenBars/v_UnevenBars_g22_c02 95 +BoxingPunchingBag/v_BoxingPunchingBag_g19_c06 16 +JumpRope/v_JumpRope_g25_c04 47 +TennisSwing/v_TennisSwing_g10_c05 91 +Hammering/v_Hammering_g15_c01 34 +BalanceBeam/v_BalanceBeam_g10_c01 4 +Mixing/v_Mixing_g24_c01 53 +SalsaSpin/v_SalsaSpin_g14_c02 76 +Rafting/v_Rafting_g15_c04 72 +BabyCrawling/v_BabyCrawling_g21_c01 3 +PlayingCello/v_PlayingCello_g14_c03 58 +CricketShot/v_CricketShot_g15_c04 23 +JumpingJack/v_JumpingJack_g24_c02 46 +Rafting/v_Rafting_g18_c03 72 +TaiChi/v_TaiChi_g18_c02 90 +BandMarching/v_BandMarching_g19_c05 5 +HeadMassage/v_HeadMassage_g25_c05 38 +PullUps/v_PullUps_g14_c02 69 +BoxingPunchingBag/v_BoxingPunchingBag_g20_c07 16 +HighJump/v_HighJump_g23_c06 39 +Diving/v_Diving_g13_c02 25 +SoccerJuggling/v_SoccerJuggling_g18_c07 83 +Drumming/v_Drumming_g25_c05 26 +Nunchucks/v_Nunchucks_g09_c01 55 +ApplyEyeMakeup/v_ApplyEyeMakeup_g22_c04 0 +GolfSwing/v_GolfSwing_g24_c03 32 +BenchPress/v_BenchPress_g19_c01 9 +Archery/v_Archery_g15_c06 2 +Billiards/v_Billiards_g18_c06 11 +YoYo/v_YoYo_g17_c01 100 +HulaHoop/v_HulaHoop_g10_c03 42 +MoppingFloor/v_MoppingFloor_g24_c02 54 +Nunchucks/v_Nunchucks_g08_c04 55 +HulaHoop/v_HulaHoop_g14_c02 42 +TaiChi/v_TaiChi_g15_c02 90 +Skijet/v_Skijet_g18_c04 81 +BrushingTeeth/v_BrushingTeeth_g12_c02 19 +BasketballDunk/v_BasketballDunk_g14_c05 8 +Hammering/v_Hammering_g13_c07 34 +BrushingTeeth/v_BrushingTeeth_g22_c02 19 +Surfing/v_Surfing_g10_c06 87 +CuttingInKitchen/v_CuttingInKitchen_g25_c01 24 +HeadMassage/v_HeadMassage_g17_c06 38 +ShavingBeard/v_ShavingBeard_g18_c03 77 +Diving/v_Diving_g10_c04 25 +HandstandPushups/v_HandStandPushups_g19_c02 36 +SoccerPenalty/v_SoccerPenalty_g23_c04 84 +WalkingWithDog/v_WalkingWithDog_g24_c05 97 +BaseballPitch/v_BaseballPitch_g20_c03 6 +CuttingInKitchen/v_CuttingInKitchen_g19_c04 24 +JavelinThrow/v_JavelinThrow_g24_c04 44 +TennisSwing/v_TennisSwing_g15_c02 91 +SkyDiving/v_SkyDiving_g18_c04 82 
+RopeClimbing/v_RopeClimbing_g16_c03 74 +StillRings/v_StillRings_g23_c02 85 +HandstandWalking/v_HandstandWalking_g23_c02 37 +CuttingInKitchen/v_CuttingInKitchen_g24_c03 24 +ApplyEyeMakeup/v_ApplyEyeMakeup_g16_c05 0 +PlayingViolin/v_PlayingViolin_g09_c03 66 +PlayingDhol/v_PlayingDhol_g13_c04 60 +PlayingCello/v_PlayingCello_g23_c06 58 +TableTennisShot/v_TableTennisShot_g18_c02 89 +CliffDiving/v_CliffDiving_g11_c04 21 +ApplyEyeMakeup/v_ApplyEyeMakeup_g12_c03 0 +TaiChi/v_TaiChi_g11_c03 90 +RockClimbingIndoor/v_RockClimbingIndoor_g17_c06 73 +BabyCrawling/v_BabyCrawling_g18_c03 3 +ShavingBeard/v_ShavingBeard_g20_c01 77 +Diving/v_Diving_g21_c03 25 +PushUps/v_PushUps_g18_c04 71 +IceDancing/v_IceDancing_g25_c07 43 +PlayingGuitar/v_PlayingGuitar_g15_c04 62 +HammerThrow/v_HammerThrow_g19_c06 35 +BasketballDunk/v_BasketballDunk_g16_c04 8 +PlayingSitar/v_PlayingSitar_g09_c03 64 +WallPushups/v_WallPushups_g12_c03 98 +Swing/v_Swing_g14_c04 88 +HulaHoop/v_HulaHoop_g19_c02 42 +BenchPress/v_BenchPress_g15_c06 9 +HeadMassage/v_HeadMassage_g25_c02 38 +Lunges/v_Lunges_g14_c05 51 +CricketShot/v_CricketShot_g21_c03 23 +CleanAndJerk/v_CleanAndJerk_g14_c02 20 +FieldHockeyPenalty/v_FieldHockeyPenalty_g24_c01 28 +ShavingBeard/v_ShavingBeard_g17_c04 77 +PlayingGuitar/v_PlayingGuitar_g19_c06 62 +Diving/v_Diving_g18_c03 25 +Billiards/v_Billiards_g22_c01 11 +BenchPress/v_BenchPress_g18_c05 9 +ApplyEyeMakeup/v_ApplyEyeMakeup_g08_c01 0 +FloorGymnastics/v_FloorGymnastics_g20_c05 29 +CliffDiving/v_CliffDiving_g12_c02 21 +Drumming/v_Drumming_g18_c06 26 +TaiChi/v_TaiChi_g10_c02 90 +RockClimbingIndoor/v_RockClimbingIndoor_g24_c01 73 +JumpingJack/v_JumpingJack_g17_c04 46 +TaiChi/v_TaiChi_g23_c03 90 +TennisSwing/v_TennisSwing_g19_c02 91 +PlayingViolin/v_PlayingViolin_g19_c02 66 +CuttingInKitchen/v_CuttingInKitchen_g11_c03 24 +CricketBowling/v_CricketBowling_g20_c02 22 +SalsaSpin/v_SalsaSpin_g19_c04 76 +Bowling/v_Bowling_g17_c03 15 +SumoWrestling/v_SumoWrestling_g10_c04 86 +HorseRace/v_HorseRace_g25_c01 
40 +Diving/v_Diving_g25_c01 25 +SkateBoarding/v_SkateBoarding_g08_c01 79 +BalanceBeam/v_BalanceBeam_g08_c03 4 +JumpingJack/v_JumpingJack_g08_c03 46 +MoppingFloor/v_MoppingFloor_g09_c02 54 +TaiChi/v_TaiChi_g14_c04 90 +HorseRace/v_HorseRace_g16_c04 40 +WalkingWithDog/v_WalkingWithDog_g20_c02 97 +PlayingDaf/v_PlayingDaf_g08_c07 59 +PushUps/v_PushUps_g12_c01 71 +MilitaryParade/v_MilitaryParade_g14_c05 52 +Nunchucks/v_Nunchucks_g10_c02 55 +IceDancing/v_IceDancing_g16_c04 43 +PlayingGuitar/v_PlayingGuitar_g25_c07 62 +Fencing/v_Fencing_g08_c02 27 +PlayingGuitar/v_PlayingGuitar_g09_c03 62 +Shotput/v_Shotput_g09_c02 78 +Rowing/v_Rowing_g17_c06 75 +Hammering/v_Hammering_g24_c02 34 +BandMarching/v_BandMarching_g15_c04 5 +PommelHorse/v_PommelHorse_g12_c01 68 +PlayingTabla/v_PlayingTabla_g25_c02 65 +SkateBoarding/v_SkateBoarding_g10_c05 79 +PlayingCello/v_PlayingCello_g10_c03 58 +LongJump/v_LongJump_g17_c01 50 +TrampolineJumping/v_TrampolineJumping_g16_c01 93 +Billiards/v_Billiards_g20_c01 11 +Punch/v_Punch_g24_c05 70 +PlayingSitar/v_PlayingSitar_g22_c01 64 +JavelinThrow/v_JavelinThrow_g10_c02 44 +Mixing/v_Mixing_g20_c03 53 +PlayingPiano/v_PlayingPiano_g24_c03 63 +BaseballPitch/v_BaseballPitch_g24_c02 6 +SkateBoarding/v_SkateBoarding_g09_c03 79 +PlayingDaf/v_PlayingDaf_g11_c05 59 +Drumming/v_Drumming_g15_c05 26 +SkyDiving/v_SkyDiving_g16_c01 82 +JumpingJack/v_JumpingJack_g19_c07 46 +HandstandPushups/v_HandStandPushups_g12_c07 36 +ShavingBeard/v_ShavingBeard_g22_c06 77 +HeadMassage/v_HeadMassage_g16_c02 38 +PlayingGuitar/v_PlayingGuitar_g23_c03 62 +PlayingDaf/v_PlayingDaf_g23_c01 59 +IceDancing/v_IceDancing_g08_c04 43 +HorseRace/v_HorseRace_g18_c02 40 +BreastStroke/v_BreastStroke_g15_c04 18 +WallPushups/v_WallPushups_g18_c04 98 +BandMarching/v_BandMarching_g11_c02 5 +Lunges/v_Lunges_g11_c01 51 +LongJump/v_LongJump_g18_c01 50 +BreastStroke/v_BreastStroke_g23_c02 18 +PlayingCello/v_PlayingCello_g19_c05 58 +SumoWrestling/v_SumoWrestling_g23_c04 86 
+PlayingCello/v_PlayingCello_g24_c02 58 +SalsaSpin/v_SalsaSpin_g25_c04 76 +Typing/v_Typing_g09_c02 94 +LongJump/v_LongJump_g18_c05 50 +LongJump/v_LongJump_g23_c01 50 +FrisbeeCatch/v_FrisbeeCatch_g15_c02 30 +PizzaTossing/v_PizzaTossing_g09_c01 57 +Drumming/v_Drumming_g25_c03 26 +TrampolineJumping/v_TrampolineJumping_g14_c04 93 +HorseRiding/v_HorseRiding_g19_c04 41 +BabyCrawling/v_BabyCrawling_g20_c02 3 +LongJump/v_LongJump_g17_c05 50 +SkateBoarding/v_SkateBoarding_g15_c04 79 +PlayingPiano/v_PlayingPiano_g18_c01 63 +JavelinThrow/v_JavelinThrow_g10_c05 44 +HighJump/v_HighJump_g13_c03 39 +PlayingDaf/v_PlayingDaf_g24_c04 59 +Nunchucks/v_Nunchucks_g16_c04 55 +Billiards/v_Billiards_g25_c04 11 +HorseRace/v_HorseRace_g09_c02 40 +HighJump/v_HighJump_g19_c03 39 +CliffDiving/v_CliffDiving_g14_c02 21 +HandstandPushups/v_HandStandPushups_g14_c02 36 +RopeClimbing/v_RopeClimbing_g18_c05 74 +CricketShot/v_CricketShot_g11_c01 23 +BreastStroke/v_BreastStroke_g21_c02 18 +SoccerJuggling/v_SoccerJuggling_g20_c06 83 +Lunges/v_Lunges_g21_c04 51 +TableTennisShot/v_TableTennisShot_g23_c05 89 +FloorGymnastics/v_FloorGymnastics_g09_c02 29 +SoccerJuggling/v_SoccerJuggling_g24_c07 83 +Rowing/v_Rowing_g24_c04 75 +Haircut/v_Haircut_g10_c02 33 +BlowingCandles/v_BlowingCandles_g08_c02 13 +TrampolineJumping/v_TrampolineJumping_g23_c04 93 +Archery/v_Archery_g19_c01 2 +CuttingInKitchen/v_CuttingInKitchen_g10_c07 24 +HorseRiding/v_HorseRiding_g13_c01 41 +ApplyLipstick/v_ApplyLipstick_g22_c03 1 +ShavingBeard/v_ShavingBeard_g17_c06 77 +BasketballDunk/v_BasketballDunk_g14_c07 8 +Bowling/v_Bowling_g25_c03 15 +JumpingJack/v_JumpingJack_g16_c03 46 +HandstandPushups/v_HandStandPushups_g21_c04 36 +TennisSwing/v_TennisSwing_g13_c04 91 +Typing/v_Typing_g08_c02 94 +Knitting/v_Knitting_g22_c03 49 +HeadMassage/v_HeadMassage_g23_c07 38 +CricketShot/v_CricketShot_g09_c06 23 +Diving/v_Diving_g21_c07 25 +MoppingFloor/v_MoppingFloor_g20_c03 54 +LongJump/v_LongJump_g11_c03 50 +JumpingJack/v_JumpingJack_g16_c04 46 
+Diving/v_Diving_g08_c03 25 +Punch/v_Punch_g09_c05 70 +IceDancing/v_IceDancing_g11_c04 43 +HighJump/v_HighJump_g14_c01 39 +TrampolineJumping/v_TrampolineJumping_g17_c02 93 +PlayingFlute/v_PlayingFlute_g20_c01 61 +UnevenBars/v_UnevenBars_g11_c02 95 +JugglingBalls/v_JugglingBalls_g10_c03 45 +HeadMassage/v_HeadMassage_g18_c06 38 +Bowling/v_Bowling_g15_c07 15 +BaseballPitch/v_BaseballPitch_g17_c07 6 +PlayingCello/v_PlayingCello_g10_c05 58 +MilitaryParade/v_MilitaryParade_g15_c02 52 +RockClimbingIndoor/v_RockClimbingIndoor_g09_c01 73 +HorseRace/v_HorseRace_g17_c05 40 +PlayingFlute/v_PlayingFlute_g09_c01 61 +PlayingDaf/v_PlayingDaf_g10_c02 59 +Knitting/v_Knitting_g10_c06 49 +BenchPress/v_BenchPress_g20_c03 9 +ShavingBeard/v_ShavingBeard_g12_c06 77 +BoxingPunchingBag/v_BoxingPunchingBag_g23_c05 16 +PlayingDhol/v_PlayingDhol_g25_c05 60 +CricketShot/v_CricketShot_g13_c06 23 +Kayaking/v_Kayaking_g18_c07 48 +Nunchucks/v_Nunchucks_g20_c02 55 +BoxingPunchingBag/v_BoxingPunchingBag_g22_c04 16 +BreastStroke/v_BreastStroke_g23_c03 18 +SumoWrestling/v_SumoWrestling_g15_c03 86 +TennisSwing/v_TennisSwing_g20_c05 91 +BreastStroke/v_BreastStroke_g23_c01 18 +Kayaking/v_Kayaking_g19_c05 48 +ApplyEyeMakeup/v_ApplyEyeMakeup_g21_c01 0 +BlowDryHair/v_BlowDryHair_g09_c01 12 +CuttingInKitchen/v_CuttingInKitchen_g15_c04 24 +PlayingTabla/v_PlayingTabla_g24_c03 65 +PushUps/v_PushUps_g20_c02 71 +Mixing/v_Mixing_g08_c03 53 +TableTennisShot/v_TableTennisShot_g22_c03 89 +TrampolineJumping/v_TrampolineJumping_g13_c02 93 +Drumming/v_Drumming_g19_c01 26 +Knitting/v_Knitting_g14_c01 49 +ThrowDiscus/v_ThrowDiscus_g09_c03 92 +Punch/v_Punch_g12_c04 70 +HorseRace/v_HorseRace_g19_c06 40 +Skiing/v_Skiing_g15_c01 80 +FrisbeeCatch/v_FrisbeeCatch_g22_c02 30 +CleanAndJerk/v_CleanAndJerk_g19_c04 20 +BaseballPitch/v_BaseballPitch_g20_c04 6 +BenchPress/v_BenchPress_g22_c06 9 +HeadMassage/v_HeadMassage_g11_c03 38 +UnevenBars/v_UnevenBars_g21_c04 95 +Typing/v_Typing_g08_c03 94 +CricketShot/v_CricketShot_g15_c06 23 
+JumpRope/v_JumpRope_g24_c04 47 +GolfSwing/v_GolfSwing_g09_c03 32 +JumpingJack/v_JumpingJack_g19_c03 46 +SalsaSpin/v_SalsaSpin_g23_c02 76 +PlayingFlute/v_PlayingFlute_g12_c05 61 +PlayingFlute/v_PlayingFlute_g10_c02 61 +PushUps/v_PushUps_g17_c01 71 +PushUps/v_PushUps_g14_c03 71 +Diving/v_Diving_g14_c02 25 +ApplyEyeMakeup/v_ApplyEyeMakeup_g23_c04 0 +HorseRiding/v_HorseRiding_g09_c06 41 +PlayingDhol/v_PlayingDhol_g16_c06 60 +BodyWeightSquats/v_BodyWeightSquats_g19_c04 14 +SoccerPenalty/v_SoccerPenalty_g18_c04 84 +CricketBowling/v_CricketBowling_g18_c05 22 +BodyWeightSquats/v_BodyWeightSquats_g08_c02 14 +CliffDiving/v_CliffDiving_g14_c07 21 +PlayingTabla/v_PlayingTabla_g16_c03 65 +Punch/v_Punch_g14_c04 70 +ShavingBeard/v_ShavingBeard_g19_c03 77 +PlayingGuitar/v_PlayingGuitar_g08_c06 62 +SoccerPenalty/v_SoccerPenalty_g11_c05 84 +BenchPress/v_BenchPress_g09_c06 9 +BandMarching/v_BandMarching_g18_c03 5 +CleanAndJerk/v_CleanAndJerk_g22_c01 20 +Diving/v_Diving_g25_c03 25 +TrampolineJumping/v_TrampolineJumping_g08_c01 93 +Bowling/v_Bowling_g24_c05 15 +BenchPress/v_BenchPress_g25_c06 9 +Archery/v_Archery_g21_c04 2 +SumoWrestling/v_SumoWrestling_g09_c02 86 +BreastStroke/v_BreastStroke_g12_c02 18 +PlayingViolin/v_PlayingViolin_g11_c02 66 +Swing/v_Swing_g25_c01 88 +BreastStroke/v_BreastStroke_g18_c05 18 +BabyCrawling/v_BabyCrawling_g22_c07 3 +IceDancing/v_IceDancing_g24_c04 43 +CricketBowling/v_CricketBowling_g24_c04 22 +HorseRiding/v_HorseRiding_g22_c07 41 +BabyCrawling/v_BabyCrawling_g22_c06 3 +Shotput/v_Shotput_g23_c04 78 +TennisSwing/v_TennisSwing_g10_c02 91 +Lunges/v_Lunges_g15_c01 51 +FieldHockeyPenalty/v_FieldHockeyPenalty_g13_c03 28 +Diving/v_Diving_g19_c03 25 +Typing/v_Typing_g20_c07 94 +SalsaSpin/v_SalsaSpin_g13_c02 76 +HulaHoop/v_HulaHoop_g11_c02 42 +Kayaking/v_Kayaking_g21_c05 48 +HorseRiding/v_HorseRiding_g23_c04 41 +CleanAndJerk/v_CleanAndJerk_g16_c01 20 +Nunchucks/v_Nunchucks_g22_c01 55 +PullUps/v_PullUps_g09_c02 69 +BabyCrawling/v_BabyCrawling_g08_c01 3 
+CricketShot/v_CricketShot_g23_c04 23 +UnevenBars/v_UnevenBars_g13_c03 95 +Lunges/v_Lunges_g19_c07 51 +HandstandPushups/v_HandStandPushups_g18_c04 36 +MilitaryParade/v_MilitaryParade_g11_c05 52 +YoYo/v_YoYo_g18_c04 100 +Haircut/v_Haircut_g23_c04 33 +Shotput/v_Shotput_g23_c05 78 +TennisSwing/v_TennisSwing_g24_c06 91 +BodyWeightSquats/v_BodyWeightSquats_g17_c04 14 +CricketShot/v_CricketShot_g13_c02 23 +UnevenBars/v_UnevenBars_g16_c05 95 +PlayingFlute/v_PlayingFlute_g14_c06 61 +PlayingGuitar/v_PlayingGuitar_g23_c01 62 +Typing/v_Typing_g10_c05 94 +SoccerPenalty/v_SoccerPenalty_g23_c02 84 +Rafting/v_Rafting_g22_c01 72 +UnevenBars/v_UnevenBars_g15_c07 95 +SkateBoarding/v_SkateBoarding_g21_c02 79 +Skiing/v_Skiing_g08_c01 80 +PlayingViolin/v_PlayingViolin_g10_c04 66 +FrontCrawl/v_FrontCrawl_g14_c03 31 +Rowing/v_Rowing_g13_c02 75 +BenchPress/v_BenchPress_g12_c07 9 +MilitaryParade/v_MilitaryParade_g23_c02 52 +Drumming/v_Drumming_g20_c02 26 +Typing/v_Typing_g21_c04 94 +BodyWeightSquats/v_BodyWeightSquats_g25_c05 14 +Rafting/v_Rafting_g08_c02 72 +MilitaryParade/v_MilitaryParade_g15_c01 52 +PushUps/v_PushUps_g15_c01 71 +Rafting/v_Rafting_g18_c02 72 +TaiChi/v_TaiChi_g25_c01 90 +PlayingDaf/v_PlayingDaf_g09_c02 59 +Fencing/v_Fencing_g23_c04 27 +WalkingWithDog/v_WalkingWithDog_g18_c03 97 +HeadMassage/v_HeadMassage_g17_c02 38 +Archery/v_Archery_g24_c05 2 +Swing/v_Swing_g21_c04 88 +CricketShot/v_CricketShot_g22_c07 23 +Surfing/v_Surfing_g14_c04 87 +BaseballPitch/v_BaseballPitch_g23_c04 6 +BandMarching/v_BandMarching_g21_c06 5 +ShavingBeard/v_ShavingBeard_g11_c01 77 +PlayingPiano/v_PlayingPiano_g24_c01 63 +Typing/v_Typing_g20_c02 94 +WalkingWithDog/v_WalkingWithDog_g24_c02 97 +PlayingGuitar/v_PlayingGuitar_g10_c04 62 +UnevenBars/v_UnevenBars_g11_c04 95 +BodyWeightSquats/v_BodyWeightSquats_g23_c01 14 +Knitting/v_Knitting_g25_c04 49 +Lunges/v_Lunges_g10_c02 51 +FrontCrawl/v_FrontCrawl_g08_c02 31 +Diving/v_Diving_g22_c01 25 +Rowing/v_Rowing_g09_c05 75 
+HandstandPushups/v_HandStandPushups_g25_c03 36 +Swing/v_Swing_g10_c03 88 +HammerThrow/v_HammerThrow_g24_c02 35 +Hammering/v_Hammering_g14_c02 34 +TennisSwing/v_TennisSwing_g08_c05 91 +Swing/v_Swing_g12_c04 88 +HeadMassage/v_HeadMassage_g11_c07 38 +Kayaking/v_Kayaking_g13_c01 48 +WritingOnBoard/v_WritingOnBoard_g22_c06 99 +HulaHoop/v_HulaHoop_g23_c03 42 +FloorGymnastics/v_FloorGymnastics_g14_c05 29 +HulaHoop/v_HulaHoop_g16_c03 42 +Punch/v_Punch_g10_c06 70 +CricketBowling/v_CricketBowling_g15_c06 22 +CricketShot/v_CricketShot_g25_c05 23 +YoYo/v_YoYo_g11_c06 100 +BandMarching/v_BandMarching_g09_c07 5 +RopeClimbing/v_RopeClimbing_g18_c03 74 +Surfing/v_Surfing_g17_c06 87 +PlayingCello/v_PlayingCello_g22_c04 58 +Fencing/v_Fencing_g18_c04 27 +RockClimbingIndoor/v_RockClimbingIndoor_g22_c04 73 +BasketballDunk/v_BasketballDunk_g12_c04 8 +HighJump/v_HighJump_g09_c02 39 +PlayingCello/v_PlayingCello_g17_c06 58 +BoxingSpeedBag/v_BoxingSpeedBag_g14_c06 17 +FloorGymnastics/v_FloorGymnastics_g09_c01 29 +Basketball/v_Basketball_g15_c06 7 +Skiing/v_Skiing_g22_c04 80 +FloorGymnastics/v_FloorGymnastics_g14_c03 29 +BasketballDunk/v_BasketballDunk_g09_c01 8 +CuttingInKitchen/v_CuttingInKitchen_g20_c03 24 +SkateBoarding/v_SkateBoarding_g19_c03 79 +Diving/v_Diving_g19_c02 25 +Basketball/v_Basketball_g20_c01 7 +CuttingInKitchen/v_CuttingInKitchen_g17_c03 24 +YoYo/v_YoYo_g20_c03 100 +PlayingCello/v_PlayingCello_g09_c07 58 +SumoWrestling/v_SumoWrestling_g15_c01 86 +HeadMassage/v_HeadMassage_g11_c06 38 +CricketShot/v_CricketShot_g16_c07 23 +SumoWrestling/v_SumoWrestling_g18_c06 86 +HandstandWalking/v_HandstandWalking_g18_c07 37 +HighJump/v_HighJump_g24_c05 39 +IceDancing/v_IceDancing_g18_c01 43 +BabyCrawling/v_BabyCrawling_g16_c06 3 +MoppingFloor/v_MoppingFloor_g17_c01 54 +Shotput/v_Shotput_g15_c04 78 +MoppingFloor/v_MoppingFloor_g25_c03 54 +BaseballPitch/v_BaseballPitch_g25_c03 6 +IceDancing/v_IceDancing_g25_c03 43 +Biking/v_Biking_g13_c01 10 +PlayingCello/v_PlayingCello_g15_c06 58 
+Drumming/v_Drumming_g11_c07 26 +PlayingViolin/v_PlayingViolin_g18_c01 66 +RockClimbingIndoor/v_RockClimbingIndoor_g19_c04 73 +Drumming/v_Drumming_g25_c07 26 +Hammering/v_Hammering_g19_c06 34 +VolleyballSpiking/v_VolleyballSpiking_g22_c02 96 +Basketball/v_Basketball_g25_c02 7 +LongJump/v_LongJump_g19_c05 50 +PizzaTossing/v_PizzaTossing_g16_c01 57 +BreastStroke/v_BreastStroke_g17_c02 18 +JugglingBalls/v_JugglingBalls_g22_c03 45 +BaseballPitch/v_BaseballPitch_g09_c02 6 +Punch/v_Punch_g10_c05 70 +Diving/v_Diving_g09_c04 25 +BrushingTeeth/v_BrushingTeeth_g18_c01 19 +Punch/v_Punch_g18_c04 70 +HandstandWalking/v_HandstandWalking_g16_c04 37 +SumoWrestling/v_SumoWrestling_g20_c03 86 +BodyWeightSquats/v_BodyWeightSquats_g17_c03 14 +JugglingBalls/v_JugglingBalls_g23_c02 45 +MilitaryParade/v_MilitaryParade_g08_c05 52 +Shotput/v_Shotput_g24_c04 78 +HeadMassage/v_HeadMassage_g17_c01 38 +PommelHorse/v_PommelHorse_g23_c03 68 +HorseRace/v_HorseRace_g23_c05 40 +PlayingSitar/v_PlayingSitar_g19_c06 64 +Lunges/v_Lunges_g17_c01 51 +BandMarching/v_BandMarching_g10_c06 5 +ApplyEyeMakeup/v_ApplyEyeMakeup_g17_c05 0 +BlowDryHair/v_BlowDryHair_g16_c02 12 +BaseballPitch/v_BaseballPitch_g22_c05 6 +SkateBoarding/v_SkateBoarding_g18_c02 79 +Fencing/v_Fencing_g20_c01 27 +WritingOnBoard/v_WritingOnBoard_g20_c05 99 +SkyDiving/v_SkyDiving_g24_c02 82 +JumpRope/v_JumpRope_g10_c04 47 +ApplyEyeMakeup/v_ApplyEyeMakeup_g16_c04 0 +BasketballDunk/v_BasketballDunk_g23_c05 8 +HandstandPushups/v_HandStandPushups_g12_c04 36 +BlowDryHair/v_BlowDryHair_g14_c01 12 +UnevenBars/v_UnevenBars_g17_c03 95 +CuttingInKitchen/v_CuttingInKitchen_g17_c02 24 +Billiards/v_Billiards_g25_c02 11 +Swing/v_Swing_g14_c03 88 +Lunges/v_Lunges_g25_c04 51 +BasketballDunk/v_BasketballDunk_g08_c02 8 +FrontCrawl/v_FrontCrawl_g16_c01 31 +Rafting/v_Rafting_g17_c06 72 +JugglingBalls/v_JugglingBalls_g20_c02 45 +BoxingPunchingBag/v_BoxingPunchingBag_g12_c07 16 +Surfing/v_Surfing_g09_c02 87 +PlayingCello/v_PlayingCello_g25_c07 58 
+CliffDiving/v_CliffDiving_g17_c04 21 +Fencing/v_Fencing_g24_c03 27 +BoxingPunchingBag/v_BoxingPunchingBag_g11_c02 16 +HeadMassage/v_HeadMassage_g08_c02 38 +PlayingFlute/v_PlayingFlute_g14_c04 61 +SalsaSpin/v_SalsaSpin_g14_c04 76 +PizzaTossing/v_PizzaTossing_g10_c02 57 +PizzaTossing/v_PizzaTossing_g13_c02 57 +HammerThrow/v_HammerThrow_g18_c01 35 +PommelHorse/v_PommelHorse_g13_c02 68 +SkateBoarding/v_SkateBoarding_g11_c03 79 +Rowing/v_Rowing_g14_c07 75 +PlayingTabla/v_PlayingTabla_g16_c02 65 +PlayingDaf/v_PlayingDaf_g10_c06 59 +RopeClimbing/v_RopeClimbing_g17_c02 74 +PlayingDaf/v_PlayingDaf_g25_c03 59 +PlayingViolin/v_PlayingViolin_g08_c02 66 +HighJump/v_HighJump_g08_c04 39 +Archery/v_Archery_g13_c05 2 +HammerThrow/v_HammerThrow_g08_c04 35 +HeadMassage/v_HeadMassage_g20_c02 38 +Hammering/v_Hammering_g18_c03 34 +Typing/v_Typing_g20_c05 94 +PlayingDaf/v_PlayingDaf_g20_c05 59 +ShavingBeard/v_ShavingBeard_g17_c05 77 +SkyDiving/v_SkyDiving_g25_c03 82 +BodyWeightSquats/v_BodyWeightSquats_g10_c03 14 +Haircut/v_Haircut_g16_c05 33 +PlayingGuitar/v_PlayingGuitar_g19_c02 62 +Diving/v_Diving_g20_c07 25 +HandstandPushups/v_HandStandPushups_g20_c03 36 +ApplyEyeMakeup/v_ApplyEyeMakeup_g12_c06 0 +BreastStroke/v_BreastStroke_g22_c02 18 +WritingOnBoard/v_WritingOnBoard_g25_c05 99 +FrontCrawl/v_FrontCrawl_g23_c05 31 +BasketballDunk/v_BasketballDunk_g17_c02 8 +Typing/v_Typing_g19_c04 94 +BandMarching/v_BandMarching_g21_c05 5 +BoxingSpeedBag/v_BoxingSpeedBag_g08_c01 17 +SumoWrestling/v_SumoWrestling_g13_c04 86 +SalsaSpin/v_SalsaSpin_g24_c01 76 +ApplyLipstick/v_ApplyLipstick_g22_c06 1 +Mixing/v_Mixing_g13_c04 53 +RockClimbingIndoor/v_RockClimbingIndoor_g24_c07 73 +PlayingFlute/v_PlayingFlute_g22_c05 61 +Mixing/v_Mixing_g09_c05 53 +Rafting/v_Rafting_g17_c03 72 +Mixing/v_Mixing_g25_c02 53 +PlayingViolin/v_PlayingViolin_g13_c04 66 +PommelHorse/v_PommelHorse_g22_c06 68 +Archery/v_Archery_g11_c04 2 +JavelinThrow/v_JavelinThrow_g23_c05 44 +HorseRiding/v_HorseRiding_g10_c06 41 
+PullUps/v_PullUps_g21_c01 69 +HandstandWalking/v_HandstandWalking_g22_c01 37 +HighJump/v_HighJump_g16_c03 39 +Swing/v_Swing_g15_c01 88 +PlayingViolin/v_PlayingViolin_g12_c04 66 +ApplyLipstick/v_ApplyLipstick_g12_c04 1 +BandMarching/v_BandMarching_g09_c05 5 +BlowDryHair/v_BlowDryHair_g13_c04 12 +RockClimbingIndoor/v_RockClimbingIndoor_g12_c03 73 +CricketBowling/v_CricketBowling_g17_c02 22 +BandMarching/v_BandMarching_g15_c06 5 +WallPushups/v_WallPushups_g22_c02 98 +HeadMassage/v_HeadMassage_g14_c06 38 +PlayingTabla/v_PlayingTabla_g15_c04 65 +Lunges/v_Lunges_g19_c05 51 +GolfSwing/v_GolfSwing_g14_c01 32 +PlayingPiano/v_PlayingPiano_g17_c04 63 +Drumming/v_Drumming_g13_c05 26 +HandstandWalking/v_HandstandWalking_g17_c02 37 +IceDancing/v_IceDancing_g16_c02 43 +PlayingSitar/v_PlayingSitar_g13_c04 64 +LongJump/v_LongJump_g12_c05 50 +YoYo/v_YoYo_g22_c06 100 +ApplyEyeMakeup/v_ApplyEyeMakeup_g23_c03 0 +CricketShot/v_CricketShot_g17_c03 23 +BrushingTeeth/v_BrushingTeeth_g22_c01 19 +ParallelBars/v_ParallelBars_g12_c01 56 +YoYo/v_YoYo_g16_c03 100 +ParallelBars/v_ParallelBars_g25_c03 56 +Rafting/v_Rafting_g14_c02 72 +CleanAndJerk/v_CleanAndJerk_g19_c03 20 +TrampolineJumping/v_TrampolineJumping_g19_c03 93 +StillRings/v_StillRings_g14_c01 85 +BoxingSpeedBag/v_BoxingSpeedBag_g11_c05 17 +Biking/v_Biking_g10_c01 10 +HorseRiding/v_HorseRiding_g16_c02 41 +JumpingJack/v_JumpingJack_g15_c04 46 +BabyCrawling/v_BabyCrawling_g21_c03 3 +RockClimbingIndoor/v_RockClimbingIndoor_g11_c03 73 +PoleVault/v_PoleVault_g11_c05 67 +PommelHorse/v_PommelHorse_g22_c04 68 +ApplyEyeMakeup/v_ApplyEyeMakeup_g09_c06 0 +HulaHoop/v_HulaHoop_g22_c02 42 +BlowingCandles/v_BlowingCandles_g18_c04 13 +PullUps/v_PullUps_g15_c03 69 +Mixing/v_Mixing_g15_c01 53 +PlayingFlute/v_PlayingFlute_g23_c02 61 +IceDancing/v_IceDancing_g20_c04 43 +PlayingPiano/v_PlayingPiano_g16_c06 63 +YoYo/v_YoYo_g17_c06 100 +PlayingDaf/v_PlayingDaf_g11_c01 59 +RockClimbingIndoor/v_RockClimbingIndoor_g14_c03 73 
+VolleyballSpiking/v_VolleyballSpiking_g15_c01 96 +PlayingCello/v_PlayingCello_g11_c01 58 +RopeClimbing/v_RopeClimbing_g21_c01 74 +Drumming/v_Drumming_g23_c02 26 +SkyDiving/v_SkyDiving_g25_c02 82 +WallPushups/v_WallPushups_g17_c01 98 +PlayingPiano/v_PlayingPiano_g11_c03 63 +PlayingTabla/v_PlayingTabla_g22_c03 65 +PizzaTossing/v_PizzaTossing_g20_c04 57 +Biking/v_Biking_g24_c03 10 +StillRings/v_StillRings_g24_c04 85 +YoYo/v_YoYo_g13_c02 100 +TableTennisShot/v_TableTennisShot_g09_c01 89 +Billiards/v_Billiards_g12_c06 11 +YoYo/v_YoYo_g10_c04 100 +ThrowDiscus/v_ThrowDiscus_g16_c04 92 +TaiChi/v_TaiChi_g13_c01 90 +Skiing/v_Skiing_g18_c02 80 +SalsaSpin/v_SalsaSpin_g20_c03 76 +HulaHoop/v_HulaHoop_g15_c03 42 +ParallelBars/v_ParallelBars_g08_c05 56 +Lunges/v_Lunges_g18_c04 51 +FloorGymnastics/v_FloorGymnastics_g12_c05 29 +PlayingViolin/v_PlayingViolin_g22_c02 66 +Biking/v_Biking_g15_c04 10 +PlayingGuitar/v_PlayingGuitar_g20_c01 62 +BalanceBeam/v_BalanceBeam_g21_c01 4 +FrontCrawl/v_FrontCrawl_g10_c04 31 +TrampolineJumping/v_TrampolineJumping_g09_c05 93 +LongJump/v_LongJump_g17_c04 50 +VolleyballSpiking/v_VolleyballSpiking_g12_c04 96 +PlayingFlute/v_PlayingFlute_g09_c03 61 +BodyWeightSquats/v_BodyWeightSquats_g09_c05 14 +Rowing/v_Rowing_g14_c05 75 +MoppingFloor/v_MoppingFloor_g13_c03 54 +HeadMassage/v_HeadMassage_g22_c07 38 +TaiChi/v_TaiChi_g14_c02 90 +BodyWeightSquats/v_BodyWeightSquats_g18_c04 14 +Billiards/v_Billiards_g13_c07 11 +Swing/v_Swing_g12_c03 88 +YoYo/v_YoYo_g09_c03 100 +PlayingPiano/v_PlayingPiano_g18_c02 63 +PlayingDhol/v_PlayingDhol_g23_c05 60 +BoxingSpeedBag/v_BoxingSpeedBag_g14_c01 17 +Billiards/v_Billiards_g14_c07 11 +MilitaryParade/v_MilitaryParade_g25_c04 52 +PlayingViolin/v_PlayingViolin_g14_c04 66 +HorseRiding/v_HorseRiding_g10_c03 41 +CricketBowling/v_CricketBowling_g15_c03 22 +PlayingSitar/v_PlayingSitar_g24_c01 64 +CuttingInKitchen/v_CuttingInKitchen_g21_c01 24 +IceDancing/v_IceDancing_g18_c03 43 +LongJump/v_LongJump_g10_c04 50 +Skiing/v_Skiing_g21_c03 
80 +HeadMassage/v_HeadMassage_g19_c04 38 +HorseRace/v_HorseRace_g22_c01 40 +SoccerJuggling/v_SoccerJuggling_g09_c04 83 +BalanceBeam/v_BalanceBeam_g23_c03 4 +TaiChi/v_TaiChi_g09_c03 90 +JumpingJack/v_JumpingJack_g13_c03 46 +PushUps/v_PushUps_g12_c02 71 +FieldHockeyPenalty/v_FieldHockeyPenalty_g14_c04 28 +Skiing/v_Skiing_g24_c05 80 +Nunchucks/v_Nunchucks_g23_c04 55 +Haircut/v_Haircut_g16_c02 33 +ThrowDiscus/v_ThrowDiscus_g21_c05 92 +JavelinThrow/v_JavelinThrow_g24_c02 44 +BoxingSpeedBag/v_BoxingSpeedBag_g25_c04 17 +TaiChi/v_TaiChi_g24_c01 90 +SumoWrestling/v_SumoWrestling_g12_c03 86 +Kayaking/v_Kayaking_g12_c02 48 +CricketShot/v_CricketShot_g19_c05 23 +HeadMassage/v_HeadMassage_g12_c07 38 +JumpRope/v_JumpRope_g19_c01 47 +TennisSwing/v_TennisSwing_g08_c04 91 +Basketball/v_Basketball_g13_c02 7 +JumpRope/v_JumpRope_g11_c03 47 +BenchPress/v_BenchPress_g23_c02 9 +PoleVault/v_PoleVault_g19_c02 67 +BasketballDunk/v_BasketballDunk_g24_c03 8 +HorseRace/v_HorseRace_g24_c04 40 +SoccerJuggling/v_SoccerJuggling_g23_c01 83 +SkyDiving/v_SkyDiving_g21_c03 82 +JavelinThrow/v_JavelinThrow_g20_c02 44 +BenchPress/v_BenchPress_g13_c07 9 +Diving/v_Diving_g13_c05 25 +Knitting/v_Knitting_g16_c05 49 +Archery/v_Archery_g15_c03 2 +BrushingTeeth/v_BrushingTeeth_g22_c04 19 +HandstandPushups/v_HandStandPushups_g14_c01 36 +BenchPress/v_BenchPress_g08_c07 9 +VolleyballSpiking/v_VolleyballSpiking_g11_c02 96 +SkateBoarding/v_SkateBoarding_g10_c06 79 +PullUps/v_PullUps_g24_c01 69 +SoccerJuggling/v_SoccerJuggling_g21_c04 83 +Bowling/v_Bowling_g20_c07 15 +Drumming/v_Drumming_g13_c03 26 +BandMarching/v_BandMarching_g18_c06 5 +ShavingBeard/v_ShavingBeard_g09_c04 77 +HammerThrow/v_HammerThrow_g23_c06 35 +ThrowDiscus/v_ThrowDiscus_g18_c01 92 +BandMarching/v_BandMarching_g21_c02 5 +BasketballDunk/v_BasketballDunk_g14_c04 8 +FieldHockeyPenalty/v_FieldHockeyPenalty_g24_c03 28 +HammerThrow/v_HammerThrow_g21_c02 35 +CricketBowling/v_CricketBowling_g24_c02 22 +TaiChi/v_TaiChi_g20_c01 90 
+ParallelBars/v_ParallelBars_g15_c04 56 +Haircut/v_Haircut_g23_c01 33 +SumoWrestling/v_SumoWrestling_g19_c01 86 +BalanceBeam/v_BalanceBeam_g17_c01 4 +Lunges/v_Lunges_g13_c04 51 +RopeClimbing/v_RopeClimbing_g25_c05 74 +ParallelBars/v_ParallelBars_g08_c04 56 +BasketballDunk/v_BasketballDunk_g25_c03 8 +FloorGymnastics/v_FloorGymnastics_g18_c05 29 +PoleVault/v_PoleVault_g25_c04 67 +Biking/v_Biking_g08_c02 10 +StillRings/v_StillRings_g09_c04 85 +VolleyballSpiking/v_VolleyballSpiking_g21_c01 96 +Nunchucks/v_Nunchucks_g23_c02 55 +TaiChi/v_TaiChi_g12_c02 90 +Shotput/v_Shotput_g23_c02 78 +FrontCrawl/v_FrontCrawl_g18_c07 31 +UnevenBars/v_UnevenBars_g14_c04 95 +BenchPress/v_BenchPress_g14_c05 9 +Diving/v_Diving_g21_c06 25 +RopeClimbing/v_RopeClimbing_g13_c05 74 +PullUps/v_PullUps_g17_c04 69 +HandstandPushups/v_HandStandPushups_g22_c01 36 +PlayingCello/v_PlayingCello_g25_c02 58 +Kayaking/v_Kayaking_g24_c02 48 +SkateBoarding/v_SkateBoarding_g25_c01 79 +JumpRope/v_JumpRope_g15_c02 47 +IceDancing/v_IceDancing_g16_c01 43 +WallPushups/v_WallPushups_g10_c03 98 +TaiChi/v_TaiChi_g13_c04 90 +WallPushups/v_WallPushups_g20_c06 98 +LongJump/v_LongJump_g18_c02 50 +WritingOnBoard/v_WritingOnBoard_g20_c02 99 +PlayingDhol/v_PlayingDhol_g23_c07 60 +IceDancing/v_IceDancing_g12_c05 43 +Billiards/v_Billiards_g24_c02 11 +Bowling/v_Bowling_g15_c01 15 +PlayingCello/v_PlayingCello_g11_c05 58 +Nunchucks/v_Nunchucks_g13_c04 55 +WalkingWithDog/v_WalkingWithDog_g20_c06 97 +BaseballPitch/v_BaseballPitch_g24_c07 6 +FrontCrawl/v_FrontCrawl_g13_c01 31 +Basketball/v_Basketball_g21_c01 7 +BoxingSpeedBag/v_BoxingSpeedBag_g22_c01 17 +SoccerPenalty/v_SoccerPenalty_g17_c03 84 +WritingOnBoard/v_WritingOnBoard_g24_c03 99 +BrushingTeeth/v_BrushingTeeth_g08_c03 19 +BlowingCandles/v_BlowingCandles_g17_c01 13 +HorseRiding/v_HorseRiding_g11_c04 41 +Skiing/v_Skiing_g13_c06 80 +JugglingBalls/v_JugglingBalls_g18_c02 45 +PoleVault/v_PoleVault_g14_c07 67 +ApplyEyeMakeup/v_ApplyEyeMakeup_g10_c01 0 +Drumming/v_Drumming_g12_c06 
26 +CricketBowling/v_CricketBowling_g23_c04 22 +PoleVault/v_PoleVault_g10_c02 67 +Biking/v_Biking_g16_c01 10 +Rowing/v_Rowing_g14_c01 75 +PizzaTossing/v_PizzaTossing_g21_c01 57 +Typing/v_Typing_g15_c03 94 +PlayingPiano/v_PlayingPiano_g23_c04 63 +SkateBoarding/v_SkateBoarding_g17_c02 79 +PlayingCello/v_PlayingCello_g11_c06 58 +HorseRace/v_HorseRace_g23_c02 40 +BandMarching/v_BandMarching_g25_c04 5 +BasketballDunk/v_BasketballDunk_g10_c03 8 +PlayingDhol/v_PlayingDhol_g23_c03 60 +BandMarching/v_BandMarching_g21_c04 5 +BenchPress/v_BenchPress_g19_c05 9 +TennisSwing/v_TennisSwing_g11_c03 91 +BrushingTeeth/v_BrushingTeeth_g18_c05 19 +Typing/v_Typing_g12_c01 94 +MoppingFloor/v_MoppingFloor_g17_c06 54 +PlayingSitar/v_PlayingSitar_g08_c04 64 +WallPushups/v_WallPushups_g16_c07 98 +MilitaryParade/v_MilitaryParade_g14_c04 52 +UnevenBars/v_UnevenBars_g12_c04 95 +ShavingBeard/v_ShavingBeard_g23_c02 77 +TrampolineJumping/v_TrampolineJumping_g16_c06 93 +ThrowDiscus/v_ThrowDiscus_g12_c06 92 +BabyCrawling/v_BabyCrawling_g23_c02 3 +ApplyEyeMakeup/v_ApplyEyeMakeup_g23_c02 0 +Knitting/v_Knitting_g22_c04 49 +TrampolineJumping/v_TrampolineJumping_g12_c04 93 +BalanceBeam/v_BalanceBeam_g21_c05 4 +Billiards/v_Billiards_g22_c05 11 +Bowling/v_Bowling_g19_c05 15 +Skiing/v_Skiing_g13_c01 80 +HeadMassage/v_HeadMassage_g23_c05 38 +Shotput/v_Shotput_g13_c03 78 +CricketShot/v_CricketShot_g13_c03 23 +WallPushups/v_WallPushups_g15_c04 98 +ParallelBars/v_ParallelBars_g14_c02 56 +Hammering/v_Hammering_g17_c04 34 +ParallelBars/v_ParallelBars_g09_c01 56 +RopeClimbing/v_RopeClimbing_g14_c03 74 +CricketShot/v_CricketShot_g12_c01 23 +PlayingTabla/v_PlayingTabla_g25_c05 65 +Punch/v_Punch_g11_c03 70 +CuttingInKitchen/v_CuttingInKitchen_g14_c02 24 +CricketShot/v_CricketShot_g13_c05 23 +Surfing/v_Surfing_g12_c07 87 +WalkingWithDog/v_WalkingWithDog_g10_c01 97 +FrisbeeCatch/v_FrisbeeCatch_g17_c04 30 +Basketball/v_Basketball_g21_c04 7 +BandMarching/v_BandMarching_g15_c07 5 +HorseRace/v_HorseRace_g16_c03 40 
+BreastStroke/v_BreastStroke_g10_c04 18 +LongJump/v_LongJump_g14_c03 50 +IceDancing/v_IceDancing_g23_c01 43 +FrontCrawl/v_FrontCrawl_g14_c06 31 +Billiards/v_Billiards_g24_c05 11 +RopeClimbing/v_RopeClimbing_g18_c01 74 +VolleyballSpiking/v_VolleyballSpiking_g24_c01 96 +Drumming/v_Drumming_g16_c02 26 +TrampolineJumping/v_TrampolineJumping_g11_c01 93 +Hammering/v_Hammering_g18_c07 34 +JumpRope/v_JumpRope_g12_c02 47 +PlayingTabla/v_PlayingTabla_g19_c02 65 +HorseRace/v_HorseRace_g08_c01 40 +BaseballPitch/v_BaseballPitch_g23_c05 6 +TrampolineJumping/v_TrampolineJumping_g20_c04 93 +BodyWeightSquats/v_BodyWeightSquats_g24_c01 14 +Drumming/v_Drumming_g19_c07 26 +TableTennisShot/v_TableTennisShot_g16_c05 89 +StillRings/v_StillRings_g21_c06 85 +SoccerPenalty/v_SoccerPenalty_g16_c04 84 +PlayingPiano/v_PlayingPiano_g19_c03 63 +JumpRope/v_JumpRope_g15_c05 47 +WritingOnBoard/v_WritingOnBoard_g18_c06 99 +SoccerJuggling/v_SoccerJuggling_g17_c05 83 +PlayingDaf/v_PlayingDaf_g08_c03 59 +HorseRace/v_HorseRace_g11_c05 40 +WritingOnBoard/v_WritingOnBoard_g18_c04 99 +Archery/v_Archery_g08_c01 2 +FrontCrawl/v_FrontCrawl_g08_c01 31 +Hammering/v_Hammering_g15_c05 34 +BabyCrawling/v_BabyCrawling_g16_c02 3 +JavelinThrow/v_JavelinThrow_g17_c03 44 +BandMarching/v_BandMarching_g11_c01 5 +PushUps/v_PushUps_g11_c02 71 +Nunchucks/v_Nunchucks_g08_c01 55 +JavelinThrow/v_JavelinThrow_g24_c06 44 +HandstandWalking/v_HandstandWalking_g13_c04 37 +PullUps/v_PullUps_g13_c03 69 +ApplyEyeMakeup/v_ApplyEyeMakeup_g20_c06 0 +PlayingPiano/v_PlayingPiano_g08_c01 63 +HulaHoop/v_HulaHoop_g25_c03 42 +Billiards/v_Billiards_g18_c07 11 +PlayingTabla/v_PlayingTabla_g15_c03 65 +Kayaking/v_Kayaking_g21_c06 48 +CricketShot/v_CricketShot_g15_c03 23 +LongJump/v_LongJump_g25_c01 50 +CliffDiving/v_CliffDiving_g19_c03 21 +Surfing/v_Surfing_g19_c02 87 +MilitaryParade/v_MilitaryParade_g11_c04 52 +HulaHoop/v_HulaHoop_g20_c03 42 +WallPushups/v_WallPushups_g17_c06 98 +Haircut/v_Haircut_g19_c06 33 +PlayingCello/v_PlayingCello_g14_c02 
58 +BenchPress/v_BenchPress_g21_c02 9 +Basketball/v_Basketball_g25_c05 7 +PlayingGuitar/v_PlayingGuitar_g22_c03 62 +WalkingWithDog/v_WalkingWithDog_g21_c01 97 +JumpingJack/v_JumpingJack_g17_c03 46 +BlowingCandles/v_BlowingCandles_g14_c01 13 +Punch/v_Punch_g08_c03 70 +Haircut/v_Haircut_g17_c01 33 +PoleVault/v_PoleVault_g23_c02 67 +CricketShot/v_CricketShot_g21_c02 23 +HeadMassage/v_HeadMassage_g15_c06 38 +BoxingPunchingBag/v_BoxingPunchingBag_g15_c01 16 +Archery/v_Archery_g23_c02 2 +StillRings/v_StillRings_g08_c02 85 +SoccerJuggling/v_SoccerJuggling_g17_c06 83 +CliffDiving/v_CliffDiving_g13_c01 21 +Billiards/v_Billiards_g23_c04 11 +Mixing/v_Mixing_g25_c03 53 +Surfing/v_Surfing_g15_c07 87 +Rowing/v_Rowing_g09_c04 75 +ApplyLipstick/v_ApplyLipstick_g21_c02 1 +BoxingSpeedBag/v_BoxingSpeedBag_g13_c04 17 +Skiing/v_Skiing_g09_c01 80 +WritingOnBoard/v_WritingOnBoard_g13_c01 99 +Hammering/v_Hammering_g12_c05 34 +TennisSwing/v_TennisSwing_g22_c04 91 +HorseRiding/v_HorseRiding_g10_c07 41 +Bowling/v_Bowling_g11_c01 15 +Biking/v_Biking_g23_c01 10 +PlayingDhol/v_PlayingDhol_g12_c03 60 +Diving/v_Diving_g20_c02 25 +HulaHoop/v_HulaHoop_g10_c01 42 +CuttingInKitchen/v_CuttingInKitchen_g10_c04 24 +HandstandWalking/v_HandstandWalking_g23_c04 37 +BrushingTeeth/v_BrushingTeeth_g15_c04 19 +TaiChi/v_TaiChi_g24_c03 90 +Swing/v_Swing_g21_c03 88 +HandstandWalking/v_HandstandWalking_g25_c02 37 +Punch/v_Punch_g19_c03 70 +BlowDryHair/v_BlowDryHair_g25_c04 12 +Skiing/v_Skiing_g22_c05 80 +PommelHorse/v_PommelHorse_g13_c03 68 +TennisSwing/v_TennisSwing_g14_c03 91 +Hammering/v_Hammering_g10_c01 34 +SoccerJuggling/v_SoccerJuggling_g15_c03 83 +Rowing/v_Rowing_g09_c03 75 +SkateBoarding/v_SkateBoarding_g11_c05 79 +HandstandWalking/v_HandstandWalking_g08_c02 37 +PullUps/v_PullUps_g22_c03 69 +Archery/v_Archery_g11_c05 2 +BabyCrawling/v_BabyCrawling_g13_c01 3 +PlayingCello/v_PlayingCello_g09_c04 58 +VolleyballSpiking/v_VolleyballSpiking_g15_c03 96 +SkyDiving/v_SkyDiving_g11_c04 82 +Mixing/v_Mixing_g19_c03 
53 +HighJump/v_HighJump_g09_c03 39 +Punch/v_Punch_g18_c02 70 +HammerThrow/v_HammerThrow_g12_c07 35 +BalanceBeam/v_BalanceBeam_g25_c03 4 +JugglingBalls/v_JugglingBalls_g22_c04 45 +PlayingGuitar/v_PlayingGuitar_g09_c01 62 +Surfing/v_Surfing_g13_c05 87 +Diving/v_Diving_g10_c02 25 +Basketball/v_Basketball_g19_c07 7 +HulaHoop/v_HulaHoop_g16_c04 42 +HighJump/v_HighJump_g25_c01 39 +TableTennisShot/v_TableTennisShot_g13_c02 89 +BrushingTeeth/v_BrushingTeeth_g20_c02 19 +BandMarching/v_BandMarching_g08_c01 5 +JumpingJack/v_JumpingJack_g22_c01 46 +Hammering/v_Hammering_g17_c07 34 +HorseRiding/v_HorseRiding_g15_c01 41 +CricketBowling/v_CricketBowling_g19_c07 22 +ApplyLipstick/v_ApplyLipstick_g17_c03 1 +GolfSwing/v_GolfSwing_g21_c01 32 +TrampolineJumping/v_TrampolineJumping_g11_c04 93 +ApplyEyeMakeup/v_ApplyEyeMakeup_g15_c06 0 +RockClimbingIndoor/v_RockClimbingIndoor_g24_c03 73 +GolfSwing/v_GolfSwing_g12_c03 32 +BasketballDunk/v_BasketballDunk_g11_c05 8 +StillRings/v_StillRings_g15_c03 85 +Hammering/v_Hammering_g23_c04 34 +Kayaking/v_Kayaking_g24_c01 48 +TaiChi/v_TaiChi_g15_c04 90 +IceDancing/v_IceDancing_g09_c06 43 +BasketballDunk/v_BasketballDunk_g18_c01 8 +CuttingInKitchen/v_CuttingInKitchen_g14_c01 24 +BaseballPitch/v_BaseballPitch_g18_c07 6 +PlayingDhol/v_PlayingDhol_g18_c03 60 +CliffDiving/v_CliffDiving_g18_c06 21 +HeadMassage/v_HeadMassage_g09_c01 38 +BalanceBeam/v_BalanceBeam_g20_c04 4 +JumpRope/v_JumpRope_g10_c01 47 +ShavingBeard/v_ShavingBeard_g15_c07 77 +FrisbeeCatch/v_FrisbeeCatch_g19_c01 30 +Drumming/v_Drumming_g18_c02 26 +MilitaryParade/v_MilitaryParade_g12_c05 52 +Rafting/v_Rafting_g08_c01 72 +TrampolineJumping/v_TrampolineJumping_g10_c06 93 +JugglingBalls/v_JugglingBalls_g09_c02 45 +Drumming/v_Drumming_g11_c04 26 +Hammering/v_Hammering_g18_c06 34 +VolleyballSpiking/v_VolleyballSpiking_g20_c03 96 +PoleVault/v_PoleVault_g11_c02 67 +GolfSwing/v_GolfSwing_g11_c05 32 +IceDancing/v_IceDancing_g13_c05 43 +Biking/v_Biking_g14_c03 10 +Drumming/v_Drumming_g18_c05 26 
+WritingOnBoard/v_WritingOnBoard_g14_c03 99 +PullUps/v_PullUps_g13_c01 69 +BaseballPitch/v_BaseballPitch_g17_c06 6 +SumoWrestling/v_SumoWrestling_g25_c01 86 +Swing/v_Swing_g20_c01 88 +JavelinThrow/v_JavelinThrow_g23_c02 44 +PlayingGuitar/v_PlayingGuitar_g10_c05 62 +Hammering/v_Hammering_g08_c03 34 +MoppingFloor/v_MoppingFloor_g18_c01 54 +Biking/v_Biking_g16_c02 10 +WritingOnBoard/v_WritingOnBoard_g12_c04 99 +Typing/v_Typing_g09_c04 94 +WallPushups/v_WallPushups_g15_c01 98 +Basketball/v_Basketball_g18_c03 7 +PoleVault/v_PoleVault_g21_c03 67 +PlayingFlute/v_PlayingFlute_g25_c01 61 +HorseRiding/v_HorseRiding_g19_c05 41 +Hammering/v_Hammering_g09_c03 34 +BreastStroke/v_BreastStroke_g18_c01 18 +BandMarching/v_BandMarching_g24_c04 5 +BenchPress/v_BenchPress_g12_c05 9 +BenchPress/v_BenchPress_g13_c01 9 +TableTennisShot/v_TableTennisShot_g09_c04 89 +PizzaTossing/v_PizzaTossing_g19_c02 57 +Swing/v_Swing_g13_c02 88 +SoccerPenalty/v_SoccerPenalty_g24_c06 84 +PlayingSitar/v_PlayingSitar_g22_c04 64 +RockClimbingIndoor/v_RockClimbingIndoor_g22_c01 73 +Rowing/v_Rowing_g08_c01 75 +PoleVault/v_PoleVault_g14_c01 67 +PullUps/v_PullUps_g14_c04 69 +BenchPress/v_BenchPress_g15_c02 9 +ParallelBars/v_ParallelBars_g18_c02 56 +BlowingCandles/v_BlowingCandles_g19_c02 13 +BasketballDunk/v_BasketballDunk_g14_c06 8 +BodyWeightSquats/v_BodyWeightSquats_g10_c02 14 +BoxingSpeedBag/v_BoxingSpeedBag_g13_c02 17 +BenchPress/v_BenchPress_g09_c04 9 +IceDancing/v_IceDancing_g13_c04 43 +ShavingBeard/v_ShavingBeard_g13_c01 77 +PlayingPiano/v_PlayingPiano_g16_c05 63 +CleanAndJerk/v_CleanAndJerk_g25_c01 20 +StillRings/v_StillRings_g16_c01 85 +PlayingDaf/v_PlayingDaf_g14_c01 59 +Shotput/v_Shotput_g08_c03 78 +FloorGymnastics/v_FloorGymnastics_g08_c01 29 +Swing/v_Swing_g18_c01 88 +SalsaSpin/v_SalsaSpin_g22_c03 76 +IceDancing/v_IceDancing_g25_c05 43 +BandMarching/v_BandMarching_g20_c02 5 +PlayingViolin/v_PlayingViolin_g21_c04 66 +PlayingGuitar/v_PlayingGuitar_g23_c05 62 +PlayingDaf/v_PlayingDaf_g16_c04 59 
+MoppingFloor/v_MoppingFloor_g18_c05 54 +BoxingSpeedBag/v_BoxingSpeedBag_g08_c04 17 +BoxingPunchingBag/v_BoxingPunchingBag_g18_c02 16 +Nunchucks/v_Nunchucks_g17_c01 55 +Lunges/v_Lunges_g15_c03 51 +Biking/v_Biking_g13_c05 10 +Archery/v_Archery_g23_c01 2 +BoxingPunchingBag/v_BoxingPunchingBag_g22_c06 16 +TableTennisShot/v_TableTennisShot_g19_c04 89 +Nunchucks/v_Nunchucks_g21_c01 55 +Diving/v_Diving_g17_c04 25 +Fencing/v_Fencing_g13_c04 27 +SoccerJuggling/v_SoccerJuggling_g19_c03 83 +Kayaking/v_Kayaking_g14_c04 48 +HeadMassage/v_HeadMassage_g22_c04 38 +HeadMassage/v_HeadMassage_g09_c03 38 +WallPushups/v_WallPushups_g25_c03 98 +Skiing/v_Skiing_g24_c03 80 +SalsaSpin/v_SalsaSpin_g18_c03 76 +Lunges/v_Lunges_g23_c03 51 +Typing/v_Typing_g21_c01 94 +CleanAndJerk/v_CleanAndJerk_g17_c04 20 +Punch/v_Punch_g17_c03 70 +PlayingCello/v_PlayingCello_g18_c02 58 +Bowling/v_Bowling_g13_c01 15 +SoccerJuggling/v_SoccerJuggling_g10_c02 83 +YoYo/v_YoYo_g13_c01 100 +CuttingInKitchen/v_CuttingInKitchen_g13_c03 24 +Fencing/v_Fencing_g22_c01 27 +BrushingTeeth/v_BrushingTeeth_g15_c02 19 +SkyDiving/v_SkyDiving_g08_c04 82 +Hammering/v_Hammering_g13_c01 34 +PlayingTabla/v_PlayingTabla_g16_c05 65 +LongJump/v_LongJump_g20_c01 50 +PlayingGuitar/v_PlayingGuitar_g25_c06 62 +BenchPress/v_BenchPress_g22_c02 9 +HammerThrow/v_HammerThrow_g13_c02 35 +ParallelBars/v_ParallelBars_g22_c03 56 +SoccerPenalty/v_SoccerPenalty_g13_c02 84 +SoccerJuggling/v_SoccerJuggling_g16_c02 83 +WallPushups/v_WallPushups_g12_c02 98 +BoxingPunchingBag/v_BoxingPunchingBag_g10_c04 16 +HammerThrow/v_HammerThrow_g19_c03 35 +ApplyEyeMakeup/v_ApplyEyeMakeup_g12_c05 0 +HammerThrow/v_HammerThrow_g24_c01 35 +Knitting/v_Knitting_g24_c03 49 +TaiChi/v_TaiChi_g24_c02 90 +CliffDiving/v_CliffDiving_g25_c04 21 +SoccerJuggling/v_SoccerJuggling_g12_c02 83 +YoYo/v_YoYo_g15_c02 100 +BrushingTeeth/v_BrushingTeeth_g08_c06 19 +ParallelBars/v_ParallelBars_g19_c01 56 +PommelHorse/v_PommelHorse_g08_c01 68 +ShavingBeard/v_ShavingBeard_g18_c04 77 
+PlayingPiano/v_PlayingPiano_g08_c03 63 +SoccerJuggling/v_SoccerJuggling_g14_c07 83 +TableTennisShot/v_TableTennisShot_g11_c05 89 +PlayingFlute/v_PlayingFlute_g19_c05 61 +PullUps/v_PullUps_g19_c04 69 +BaseballPitch/v_BaseballPitch_g18_c05 6 +PlayingTabla/v_PlayingTabla_g23_c01 65 +Diving/v_Diving_g10_c07 25 +SumoWrestling/v_SumoWrestling_g17_c03 86 +FieldHockeyPenalty/v_FieldHockeyPenalty_g09_c03 28 +PlayingTabla/v_PlayingTabla_g20_c02 65 +Punch/v_Punch_g13_c02 70 +Mixing/v_Mixing_g16_c01 53 +GolfSwing/v_GolfSwing_g22_c05 32 +Basketball/v_Basketball_g25_c01 7 +Basketball/v_Basketball_g09_c05 7 +Drumming/v_Drumming_g08_c02 26 +Kayaking/v_Kayaking_g09_c02 48 +PlayingGuitar/v_PlayingGuitar_g16_c07 62 +Nunchucks/v_Nunchucks_g08_c06 55 +YoYo/v_YoYo_g08_c04 100 +YoYo/v_YoYo_g25_c04 100 +Knitting/v_Knitting_g14_c03 49 +PlayingFlute/v_PlayingFlute_g22_c04 61 +PlayingPiano/v_PlayingPiano_g22_c04 63 +SumoWrestling/v_SumoWrestling_g10_c02 86 +Rowing/v_Rowing_g21_c03 75 +TennisSwing/v_TennisSwing_g22_c03 91 +HandstandPushups/v_HandStandPushups_g18_c03 36 +WritingOnBoard/v_WritingOnBoard_g17_c02 99 +IceDancing/v_IceDancing_g20_c07 43 +HammerThrow/v_HammerThrow_g10_c07 35 +Typing/v_Typing_g25_c02 94 +Archery/v_Archery_g17_c04 2 +FloorGymnastics/v_FloorGymnastics_g12_c01 29 +HammerThrow/v_HammerThrow_g20_c05 35 +BoxingPunchingBag/v_BoxingPunchingBag_g21_c06 16 +MoppingFloor/v_MoppingFloor_g15_c03 54 +JumpRope/v_JumpRope_g13_c02 47 +ApplyEyeMakeup/v_ApplyEyeMakeup_g24_c03 0 +Hammering/v_Hammering_g20_c01 34 +RopeClimbing/v_RopeClimbing_g16_c04 74 +PlayingCello/v_PlayingCello_g12_c01 58 +Skiing/v_Skiing_g11_c02 80 +PlayingGuitar/v_PlayingGuitar_g24_c02 62 +FloorGymnastics/v_FloorGymnastics_g25_c01 29 +YoYo/v_YoYo_g10_c02 100 +BreastStroke/v_BreastStroke_g09_c02 18 +VolleyballSpiking/v_VolleyballSpiking_g09_c02 96 +PlayingViolin/v_PlayingViolin_g16_c01 66 +BenchPress/v_BenchPress_g08_c05 9 +Bowling/v_Bowling_g18_c04 15 +WritingOnBoard/v_WritingOnBoard_g24_c06 99 
+JugglingBalls/v_JugglingBalls_g10_c04 45 +ShavingBeard/v_ShavingBeard_g16_c04 77 +BlowDryHair/v_BlowDryHair_g23_c03 12 +HandstandPushups/v_HandStandPushups_g23_c01 36 +PizzaTossing/v_PizzaTossing_g24_c04 57 +SkateBoarding/v_SkateBoarding_g08_c03 79 +Shotput/v_Shotput_g10_c02 78 +ThrowDiscus/v_ThrowDiscus_g10_c02 92 +CricketBowling/v_CricketBowling_g25_c02 22 +GolfSwing/v_GolfSwing_g19_c05 32 +CricketBowling/v_CricketBowling_g14_c05 22 +PullUps/v_PullUps_g25_c04 69 +VolleyballSpiking/v_VolleyballSpiking_g15_c02 96 +MilitaryParade/v_MilitaryParade_g10_c04 52 +BreastStroke/v_BreastStroke_g14_c02 18 +Diving/v_Diving_g17_c02 25 +BrushingTeeth/v_BrushingTeeth_g24_c01 19 +HandstandPushups/v_HandStandPushups_g11_c05 36 +WalkingWithDog/v_WalkingWithDog_g20_c05 97 +JavelinThrow/v_JavelinThrow_g15_c05 44 +Mixing/v_Mixing_g17_c02 53 +BabyCrawling/v_BabyCrawling_g09_c02 3 +TennisSwing/v_TennisSwing_g23_c06 91 +BandMarching/v_BandMarching_g08_c06 5 +HeadMassage/v_HeadMassage_g22_c01 38 +SalsaSpin/v_SalsaSpin_g09_c04 76 +BenchPress/v_BenchPress_g16_c01 9 +Nunchucks/v_Nunchucks_g19_c01 55 +TennisSwing/v_TennisSwing_g13_c02 91 +Knitting/v_Knitting_g13_c04 49 +Swing/v_Swing_g08_c02 88 +PushUps/v_PushUps_g18_c01 71 +HorseRace/v_HorseRace_g25_c04 40 +HorseRace/v_HorseRace_g13_c01 40 +HulaHoop/v_HulaHoop_g20_c01 42 +StillRings/v_StillRings_g24_c03 85 +FloorGymnastics/v_FloorGymnastics_g15_c02 29 +Biking/v_Biking_g21_c01 10 +RopeClimbing/v_RopeClimbing_g14_c04 74 +Swing/v_Swing_g14_c02 88 +HorseRiding/v_HorseRiding_g15_c02 41 +PlayingTabla/v_PlayingTabla_g24_c01 65 +Bowling/v_Bowling_g22_c01 15 +Knitting/v_Knitting_g11_c01 49 +BasketballDunk/v_BasketballDunk_g21_c01 8 +PoleVault/v_PoleVault_g21_c01 67 +ShavingBeard/v_ShavingBeard_g15_c06 77 +MoppingFloor/v_MoppingFloor_g18_c06 54 +ApplyLipstick/v_ApplyLipstick_g10_c03 1 +VolleyballSpiking/v_VolleyballSpiking_g20_c01 96 +Haircut/v_Haircut_g21_c01 33 +HighJump/v_HighJump_g23_c01 39 +CliffDiving/v_CliffDiving_g18_c01 21 
+PizzaTossing/v_PizzaTossing_g19_c03 57 +YoYo/v_YoYo_g18_c01 100 +Skiing/v_Skiing_g16_c05 80 +Haircut/v_Haircut_g11_c04 33 +Billiards/v_Billiards_g21_c02 11 +BalanceBeam/v_BalanceBeam_g23_c05 4 +Archery/v_Archery_g18_c06 2 +Lunges/v_Lunges_g24_c04 51 +PushUps/v_PushUps_g15_c03 71 +BandMarching/v_BandMarching_g25_c03 5 +Drumming/v_Drumming_g14_c05 26 +PlayingGuitar/v_PlayingGuitar_g22_c01 62 +WritingOnBoard/v_WritingOnBoard_g14_c04 99 +Bowling/v_Bowling_g20_c02 15 +BaseballPitch/v_BaseballPitch_g10_c04 6 +Typing/v_Typing_g22_c04 94 +HammerThrow/v_HammerThrow_g15_c05 35 +SalsaSpin/v_SalsaSpin_g12_c03 76 +CuttingInKitchen/v_CuttingInKitchen_g23_c04 24 +PlayingTabla/v_PlayingTabla_g12_c02 65 +PoleVault/v_PoleVault_g25_c07 67 +TaiChi/v_TaiChi_g12_c03 90 +PlayingFlute/v_PlayingFlute_g11_c06 61 +SalsaSpin/v_SalsaSpin_g22_c01 76 +HorseRiding/v_HorseRiding_g12_c01 41 +Mixing/v_Mixing_g20_c04 53 +PushUps/v_PushUps_g09_c04 71 +PlayingFlute/v_PlayingFlute_g10_c03 61 +SoccerPenalty/v_SoccerPenalty_g15_c01 84 +Lunges/v_Lunges_g25_c02 51 +Punch/v_Punch_g17_c05 70 +FloorGymnastics/v_FloorGymnastics_g12_c04 29 +HighJump/v_HighJump_g10_c01 39 +BenchPress/v_BenchPress_g09_c05 9 +Hammering/v_Hammering_g22_c04 34 +Drumming/v_Drumming_g16_c04 26 +BreastStroke/v_BreastStroke_g15_c02 18 +BalanceBeam/v_BalanceBeam_g19_c01 4 +PlayingTabla/v_PlayingTabla_g13_c05 65 +BandMarching/v_BandMarching_g22_c03 5 +CricketBowling/v_CricketBowling_g24_c01 22 +MoppingFloor/v_MoppingFloor_g10_c01 54 +HorseRace/v_HorseRace_g08_c02 40 +Basketball/v_Basketball_g22_c03 7 +CricketBowling/v_CricketBowling_g23_c01 22 +Basketball/v_Basketball_g19_c02 7 +Billiards/v_Billiards_g21_c05 11 +FloorGymnastics/v_FloorGymnastics_g11_c04 29 +ShavingBeard/v_ShavingBeard_g12_c02 77 +Rafting/v_Rafting_g21_c04 72 +BasketballDunk/v_BasketballDunk_g09_c04 8 +FloorGymnastics/v_FloorGymnastics_g09_c06 29 +Biking/v_Biking_g18_c04 10 +WalkingWithDog/v_WalkingWithDog_g14_c01 97 +ShavingBeard/v_ShavingBeard_g22_c04 77 
+HorseRiding/v_HorseRiding_g24_c01 41 +FieldHockeyPenalty/v_FieldHockeyPenalty_g20_c02 28 +SumoWrestling/v_SumoWrestling_g17_c02 86 +PommelHorse/v_PommelHorse_g09_c03 68 +PlayingDaf/v_PlayingDaf_g08_c02 59 +HammerThrow/v_HammerThrow_g10_c03 35 +Mixing/v_Mixing_g18_c02 53 +JugglingBalls/v_JugglingBalls_g16_c01 45 +LongJump/v_LongJump_g16_c03 50 +ApplyLipstick/v_ApplyLipstick_g20_c04 1 +BrushingTeeth/v_BrushingTeeth_g25_c05 19 +Skiing/v_Skiing_g21_c06 80 +ParallelBars/v_ParallelBars_g17_c02 56 +Surfing/v_Surfing_g16_c01 87 +LongJump/v_LongJump_g21_c02 50 +BenchPress/v_BenchPress_g24_c03 9 +FloorGymnastics/v_FloorGymnastics_g15_c01 29 +Basketball/v_Basketball_g22_c06 7 +Knitting/v_Knitting_g21_c04 49 +HandstandWalking/v_HandstandWalking_g19_c01 37 +JumpRope/v_JumpRope_g21_c02 47 +BoxingPunchingBag/v_BoxingPunchingBag_g18_c05 16 +ThrowDiscus/v_ThrowDiscus_g14_c03 92 +BabyCrawling/v_BabyCrawling_g18_c01 3 +BandMarching/v_BandMarching_g08_c03 5 +FieldHockeyPenalty/v_FieldHockeyPenalty_g18_c02 28 +HeadMassage/v_HeadMassage_g11_c05 38 +CricketShot/v_CricketShot_g21_c05 23 +BandMarching/v_BandMarching_g20_c06 5 +HandstandWalking/v_HandstandWalking_g09_c02 37 +BalanceBeam/v_BalanceBeam_g24_c03 4 +CricketBowling/v_CricketBowling_g22_c06 22 +BandMarching/v_BandMarching_g11_c04 5 +TableTennisShot/v_TableTennisShot_g10_c01 89 +JumpingJack/v_JumpingJack_g10_c02 46 +StillRings/v_StillRings_g13_c02 85 +BaseballPitch/v_BaseballPitch_g19_c05 6 +BasketballDunk/v_BasketballDunk_g20_c03 8 +GolfSwing/v_GolfSwing_g24_c06 32 +Kayaking/v_Kayaking_g20_c06 48 +Typing/v_Typing_g13_c02 94 +Lunges/v_Lunges_g18_c05 51 +BalanceBeam/v_BalanceBeam_g13_c03 4 +HeadMassage/v_HeadMassage_g16_c04 38 +RockClimbingIndoor/v_RockClimbingIndoor_g23_c02 73 +BandMarching/v_BandMarching_g22_c02 5 +ApplyEyeMakeup/v_ApplyEyeMakeup_g18_c01 0 +TrampolineJumping/v_TrampolineJumping_g08_c02 93 +HorseRace/v_HorseRace_g17_c04 40 +PlayingDhol/v_PlayingDhol_g24_c05 60 +TableTennisShot/v_TableTennisShot_g21_c02 89 
+PlayingPiano/v_PlayingPiano_g10_c01 63 +CricketBowling/v_CricketBowling_g19_c03 22 +Bowling/v_Bowling_g13_c05 15 +PoleVault/v_PoleVault_g21_c06 67 +PullUps/v_PullUps_g12_c01 69 +Drumming/v_Drumming_g15_c07 26 +IceDancing/v_IceDancing_g23_c03 43 +FloorGymnastics/v_FloorGymnastics_g14_c01 29 +Hammering/v_Hammering_g13_c04 34 +ThrowDiscus/v_ThrowDiscus_g25_c03 92 +HandstandWalking/v_HandstandWalking_g20_c02 37 +Basketball/v_Basketball_g14_c01 7 +CliffDiving/v_CliffDiving_g21_c01 21 +Archery/v_Archery_g15_c02 2 +BreastStroke/v_BreastStroke_g21_c04 18 +WalkingWithDog/v_WalkingWithDog_g12_c01 97 +PommelHorse/v_PommelHorse_g25_c02 68 +PlayingPiano/v_PlayingPiano_g17_c02 63 +Fencing/v_Fencing_g09_c05 27 +PlayingGuitar/v_PlayingGuitar_g23_c07 62 +Biking/v_Biking_g23_c04 10 +TaiChi/v_TaiChi_g16_c03 90 +JavelinThrow/v_JavelinThrow_g12_c04 44 +PlayingPiano/v_PlayingPiano_g16_c07 63 +PlayingSitar/v_PlayingSitar_g21_c07 64 +TableTennisShot/v_TableTennisShot_g21_c04 89 +Hammering/v_Hammering_g21_c04 34 +Swing/v_Swing_g16_c01 88 +BabyCrawling/v_BabyCrawling_g09_c01 3 +JugglingBalls/v_JugglingBalls_g14_c04 45 +CricketBowling/v_CricketBowling_g12_c01 22 +Drumming/v_Drumming_g22_c01 26 +Bowling/v_Bowling_g11_c04 15 +Drumming/v_Drumming_g09_c01 26 +CricketShot/v_CricketShot_g22_c02 23 +BodyWeightSquats/v_BodyWeightSquats_g12_c02 14 +SoccerJuggling/v_SoccerJuggling_g25_c06 83 +HandstandPushups/v_HandStandPushups_g12_c05 36 +HammerThrow/v_HammerThrow_g12_c03 35 +Bowling/v_Bowling_g08_c04 15 +WritingOnBoard/v_WritingOnBoard_g21_c07 99 +BenchPress/v_BenchPress_g13_c04 9 +IceDancing/v_IceDancing_g22_c04 43 +HorseRace/v_HorseRace_g18_c01 40 +PlayingDaf/v_PlayingDaf_g09_c01 59 +VolleyballSpiking/v_VolleyballSpiking_g11_c06 96 +JavelinThrow/v_JavelinThrow_g12_c01 44 +ApplyEyeMakeup/v_ApplyEyeMakeup_g08_c04 0 +Rafting/v_Rafting_g16_c02 72 +JugglingBalls/v_JugglingBalls_g14_c05 45 +SoccerPenalty/v_SoccerPenalty_g25_c01 84 +BabyCrawling/v_BabyCrawling_g21_c02 3 
+BrushingTeeth/v_BrushingTeeth_g08_c04 19 +BabyCrawling/v_BabyCrawling_g10_c05 3 +WallPushups/v_WallPushups_g20_c02 98 +HandstandWalking/v_HandstandWalking_g12_c04 37 +VolleyballSpiking/v_VolleyballSpiking_g23_c01 96 +MoppingFloor/v_MoppingFloor_g18_c03 54 +ParallelBars/v_ParallelBars_g18_c04 56 +Kayaking/v_Kayaking_g24_c03 48 +Skijet/v_Skijet_g12_c02 81 +HorseRiding/v_HorseRiding_g09_c04 41 +SalsaSpin/v_SalsaSpin_g23_c03 76 +PlayingFlute/v_PlayingFlute_g16_c05 61 +SkateBoarding/v_SkateBoarding_g11_c02 79 +BlowDryHair/v_BlowDryHair_g17_c04 12 +HandstandPushups/v_HandStandPushups_g14_c03 36 +Haircut/v_Haircut_g09_c01 33 +PlayingCello/v_PlayingCello_g21_c04 58 +FrontCrawl/v_FrontCrawl_g20_c04 31 +CricketBowling/v_CricketBowling_g13_c03 22 +Haircut/v_Haircut_g19_c05 33 +PlayingDaf/v_PlayingDaf_g20_c02 59 +BlowDryHair/v_BlowDryHair_g23_c06 12 +Shotput/v_Shotput_g12_c01 78 +PlayingGuitar/v_PlayingGuitar_g25_c01 62 +TrampolineJumping/v_TrampolineJumping_g19_c01 93 +Biking/v_Biking_g21_c06 10 +CliffDiving/v_CliffDiving_g12_c01 21 +PlayingSitar/v_PlayingSitar_g18_c05 64 +TennisSwing/v_TennisSwing_g19_c05 91 +BodyWeightSquats/v_BodyWeightSquats_g11_c01 14 +BandMarching/v_BandMarching_g12_c01 5 +PullUps/v_PullUps_g09_c04 69 +Archery/v_Archery_g23_c03 2 +SalsaSpin/v_SalsaSpin_g12_c02 76 +FrisbeeCatch/v_FrisbeeCatch_g21_c05 30 +JugglingBalls/v_JugglingBalls_g18_c01 45 +SoccerPenalty/v_SoccerPenalty_g10_c03 84 +CricketBowling/v_CricketBowling_g19_c06 22 +PizzaTossing/v_PizzaTossing_g08_c04 57 +Billiards/v_Billiards_g17_c02 11 +BasketballDunk/v_BasketballDunk_g17_c06 8 +PlayingPiano/v_PlayingPiano_g16_c01 63 +BasketballDunk/v_BasketballDunk_g08_c01 8 +PlayingGuitar/v_PlayingGuitar_g13_c01 62 +PoleVault/v_PoleVault_g25_c05 67 +BoxingPunchingBag/v_BoxingPunchingBag_g21_c07 16 +Punch/v_Punch_g15_c06 70 +HammerThrow/v_HammerThrow_g11_c01 35 +CricketBowling/v_CricketBowling_g13_c05 22 +BlowDryHair/v_BlowDryHair_g13_c02 12 +Knitting/v_Knitting_g10_c01 49 +PushUps/v_PushUps_g23_c03 71 
+CricketShot/v_CricketShot_g25_c02 23 +MilitaryParade/v_MilitaryParade_g18_c06 52 +Diving/v_Diving_g08_c01 25 +PlayingDaf/v_PlayingDaf_g18_c02 59 +MilitaryParade/v_MilitaryParade_g25_c05 52 +Surfing/v_Surfing_g22_c04 87 +Kayaking/v_Kayaking_g11_c01 48 +PlayingDhol/v_PlayingDhol_g14_c07 60 +BodyWeightSquats/v_BodyWeightSquats_g13_c01 14 +PlayingViolin/v_PlayingViolin_g19_c01 66 +BandMarching/v_BandMarching_g15_c05 5 +HammerThrow/v_HammerThrow_g16_c03 35 +TaiChi/v_TaiChi_g23_c02 90 +JumpingJack/v_JumpingJack_g21_c01 46 +PizzaTossing/v_PizzaTossing_g25_c02 57 +FrisbeeCatch/v_FrisbeeCatch_g08_c02 30 +HorseRiding/v_HorseRiding_g11_c01 41 +BlowDryHair/v_BlowDryHair_g15_c01 12 +SoccerPenalty/v_SoccerPenalty_g18_c02 84 +PushUps/v_PushUps_g08_c03 71 +Nunchucks/v_Nunchucks_g12_c02 55 +BandMarching/v_BandMarching_g18_c04 5 +BaseballPitch/v_BaseballPitch_g23_c02 6 +HammerThrow/v_HammerThrow_g25_c04 35 +Biking/v_Biking_g09_c02 10 +BalanceBeam/v_BalanceBeam_g11_c04 4 +PlayingDaf/v_PlayingDaf_g22_c02 59 +Bowling/v_Bowling_g13_c06 15 +HighJump/v_HighJump_g15_c03 39 +PlayingCello/v_PlayingCello_g23_c03 58 +BodyWeightSquats/v_BodyWeightSquats_g10_c04 14 +Swing/v_Swing_g24_c02 88 +Fencing/v_Fencing_g20_c03 27 +BlowDryHair/v_BlowDryHair_g16_c01 12 +PommelHorse/v_PommelHorse_g12_c04 68 +ThrowDiscus/v_ThrowDiscus_g11_c06 92 +PlayingFlute/v_PlayingFlute_g21_c06 61 +CliffDiving/v_CliffDiving_g08_c03 21 +Skiing/v_Skiing_g25_c05 80 +BalanceBeam/v_BalanceBeam_g23_c02 4 +PoleVault/v_PoleVault_g13_c04 67 +WalkingWithDog/v_WalkingWithDog_g11_c05 97 +Typing/v_Typing_g25_c03 94 +PullUps/v_PullUps_g15_c04 69 +Archery/v_Archery_g16_c04 2 +PlayingPiano/v_PlayingPiano_g15_c02 63 +SalsaSpin/v_SalsaSpin_g13_c04 76 +Shotput/v_Shotput_g22_c05 78 +CricketBowling/v_CricketBowling_g08_c05 22 +BaseballPitch/v_BaseballPitch_g17_c05 6 +Shotput/v_Shotput_g09_c06 78 +ParallelBars/v_ParallelBars_g17_c04 56 +BodyWeightSquats/v_BodyWeightSquats_g20_c01 14 +FrontCrawl/v_FrontCrawl_g21_c03 31 
+JumpRope/v_JumpRope_g20_c05 47 +MilitaryParade/v_MilitaryParade_g19_c06 52 +JugglingBalls/v_JugglingBalls_g22_c01 45 +BlowingCandles/v_BlowingCandles_g14_c04 13 +Swing/v_Swing_g15_c05 88 +Rowing/v_Rowing_g14_c06 75 +Fencing/v_Fencing_g09_c04 27 +HorseRiding/v_HorseRiding_g15_c06 41 +PlayingViolin/v_PlayingViolin_g22_c01 66 +Skiing/v_Skiing_g20_c02 80 +BaseballPitch/v_BaseballPitch_g14_c01 6 +Skiing/v_Skiing_g08_c07 80 +FieldHockeyPenalty/v_FieldHockeyPenalty_g12_c06 28 +BreastStroke/v_BreastStroke_g19_c02 18 +SkyDiving/v_SkyDiving_g18_c01 82 +HandstandWalking/v_HandstandWalking_g16_c02 37 +HandstandPushups/v_HandStandPushups_g21_c02 36 +Swing/v_Swing_g20_c02 88 +JumpingJack/v_JumpingJack_g12_c01 46 +PlayingDhol/v_PlayingDhol_g23_c02 60 +PommelHorse/v_PommelHorse_g10_c01 68 +Nunchucks/v_Nunchucks_g18_c04 55 +HorseRiding/v_HorseRiding_g09_c03 41 +Rowing/v_Rowing_g13_c05 75 +ApplyLipstick/v_ApplyLipstick_g15_c05 1 +MilitaryParade/v_MilitaryParade_g16_c02 52 +SumoWrestling/v_SumoWrestling_g12_c05 86 +TableTennisShot/v_TableTennisShot_g13_c01 89 +IceDancing/v_IceDancing_g18_c06 43 +CricketShot/v_CricketShot_g25_c06 23 +BabyCrawling/v_BabyCrawling_g17_c03 3 +PizzaTossing/v_PizzaTossing_g15_c04 57 +Rafting/v_Rafting_g08_c05 72 +Hammering/v_Hammering_g19_c02 34 +JavelinThrow/v_JavelinThrow_g15_c02 44 +LongJump/v_LongJump_g24_c03 50 +FieldHockeyPenalty/v_FieldHockeyPenalty_g14_c06 28 +GolfSwing/v_GolfSwing_g15_c03 32 +WallPushups/v_WallPushups_g11_c02 98 +FieldHockeyPenalty/v_FieldHockeyPenalty_g23_c02 28 +RockClimbingIndoor/v_RockClimbingIndoor_g21_c01 73 +Billiards/v_Billiards_g09_c06 11 +PushUps/v_PushUps_g25_c02 71 +PlayingCello/v_PlayingCello_g20_c01 58 +PlayingGuitar/v_PlayingGuitar_g09_c02 62 +JugglingBalls/v_JugglingBalls_g18_c03 45 +PlayingDaf/v_PlayingDaf_g08_c01 59 +FieldHockeyPenalty/v_FieldHockeyPenalty_g09_c02 28 +VolleyballSpiking/v_VolleyballSpiking_g24_c02 96 +BodyWeightSquats/v_BodyWeightSquats_g24_c03 14 +Diving/v_Diving_g20_c01 25 
+CricketShot/v_CricketShot_g16_c04 23 +WritingOnBoard/v_WritingOnBoard_g09_c02 99 +TennisSwing/v_TennisSwing_g15_c01 91 +Drumming/v_Drumming_g11_c05 26 +Lunges/v_Lunges_g13_c05 51 +ApplyLipstick/v_ApplyLipstick_g13_c01 1 +Skiing/v_Skiing_g10_c02 80 +Rowing/v_Rowing_g24_c03 75 +FieldHockeyPenalty/v_FieldHockeyPenalty_g09_c05 28 +HulaHoop/v_HulaHoop_g18_c04 42 +PlayingDhol/v_PlayingDhol_g11_c06 60 +Billiards/v_Billiards_g15_c03 11 +JugglingBalls/v_JugglingBalls_g25_c03 45 +Fencing/v_Fencing_g12_c05 27 +JumpRope/v_JumpRope_g16_c05 47 +VolleyballSpiking/v_VolleyballSpiking_g18_c02 96 +FrisbeeCatch/v_FrisbeeCatch_g23_c01 30 +HorseRace/v_HorseRace_g08_c04 40 +JumpRope/v_JumpRope_g24_c01 47 +VolleyballSpiking/v_VolleyballSpiking_g12_c02 96 +Diving/v_Diving_g08_c02 25 +PlayingCello/v_PlayingCello_g11_c04 58 +TableTennisShot/v_TableTennisShot_g16_c01 89 +Typing/v_Typing_g09_c06 94 +HandstandPushups/v_HandStandPushups_g16_c02 36 +UnevenBars/v_UnevenBars_g14_c03 95 +Fencing/v_Fencing_g25_c03 27 +CliffDiving/v_CliffDiving_g23_c05 21 +HorseRiding/v_HorseRiding_g19_c02 41 +HandstandPushups/v_HandStandPushups_g10_c01 36 +Surfing/v_Surfing_g11_c04 87 +PlayingDhol/v_PlayingDhol_g25_c04 60 +BlowingCandles/v_BlowingCandles_g13_c01 13 +PlayingDaf/v_PlayingDaf_g22_c05 59 +Nunchucks/v_Nunchucks_g08_c07 55 +Biking/v_Biking_g11_c02 10 +ApplyEyeMakeup/v_ApplyEyeMakeup_g09_c04 0 +Surfing/v_Surfing_g16_c03 87 +Haircut/v_Haircut_g24_c07 33 +PlayingDaf/v_PlayingDaf_g13_c04 59 +Typing/v_Typing_g25_c04 94 +TrampolineJumping/v_TrampolineJumping_g24_c02 93 +MoppingFloor/v_MoppingFloor_g24_c01 54 +PlayingCello/v_PlayingCello_g13_c04 58 +BlowingCandles/v_BlowingCandles_g16_c04 13 +HammerThrow/v_HammerThrow_g18_c02 35 +PlayingGuitar/v_PlayingGuitar_g11_c01 62 +SkateBoarding/v_SkateBoarding_g09_c04 79 +HorseRace/v_HorseRace_g18_c04 40 +SkateBoarding/v_SkateBoarding_g20_c02 79 +Typing/v_Typing_g09_c05 94 +ParallelBars/v_ParallelBars_g21_c03 56 +GolfSwing/v_GolfSwing_g24_c02 32 
+MilitaryParade/v_MilitaryParade_g24_c04 52 +BenchPress/v_BenchPress_g09_c03 9 +PlayingDaf/v_PlayingDaf_g22_c03 59 +BabyCrawling/v_BabyCrawling_g17_c02 3 +HandstandPushups/v_HandStandPushups_g08_c02 36 +Skijet/v_Skijet_g09_c02 81 +Biking/v_Biking_g24_c02 10 +HighJump/v_HighJump_g20_c01 39 +Drumming/v_Drumming_g20_c05 26 +HighJump/v_HighJump_g11_c01 39 +ApplyEyeMakeup/v_ApplyEyeMakeup_g09_c02 0 +Swing/v_Swing_g21_c06 88 +CricketShot/v_CricketShot_g08_c03 23 +PizzaTossing/v_PizzaTossing_g25_c03 57 +PlayingDaf/v_PlayingDaf_g21_c01 59 +HandstandPushups/v_HandStandPushups_g19_c04 36 +WritingOnBoard/v_WritingOnBoard_g14_c02 99 +BabyCrawling/v_BabyCrawling_g16_c05 3 +BreastStroke/v_BreastStroke_g17_c03 18 +BoxingSpeedBag/v_BoxingSpeedBag_g12_c04 17 +FrisbeeCatch/v_FrisbeeCatch_g23_c02 30 +BlowDryHair/v_BlowDryHair_g18_c01 12 +BandMarching/v_BandMarching_g14_c06 5 +CricketBowling/v_CricketBowling_g08_c01 22 +JugglingBalls/v_JugglingBalls_g21_c02 45 +BalanceBeam/v_BalanceBeam_g09_c03 4 +PlayingFlute/v_PlayingFlute_g14_c05 61 +PlayingViolin/v_PlayingViolin_g23_c02 66 +CliffDiving/v_CliffDiving_g13_c04 21 +PlayingGuitar/v_PlayingGuitar_g25_c05 62 +HulaHoop/v_HulaHoop_g20_c07 42 +Bowling/v_Bowling_g16_c03 15 +Archery/v_Archery_g21_c01 2 +WritingOnBoard/v_WritingOnBoard_g10_c04 99 +PlayingFlute/v_PlayingFlute_g25_c02 61 +Rowing/v_Rowing_g24_c01 75 +PlayingDhol/v_PlayingDhol_g22_c06 60 +HorseRace/v_HorseRace_g24_c06 40 +PlayingSitar/v_PlayingSitar_g19_c03 64 +PizzaTossing/v_PizzaTossing_g09_c02 57 +SalsaSpin/v_SalsaSpin_g16_c02 76 +BoxingPunchingBag/v_BoxingPunchingBag_g16_c05 16 +PushUps/v_PushUps_g14_c01 71 +Punch/v_Punch_g14_c06 70 +PoleVault/v_PoleVault_g17_c02 67 +JumpingJack/v_JumpingJack_g16_c02 46 +ThrowDiscus/v_ThrowDiscus_g16_c01 92 +SkyDiving/v_SkyDiving_g12_c01 82 +BoxingPunchingBag/v_BoxingPunchingBag_g08_c01 16 +Lunges/v_Lunges_g18_c02 51 +LongJump/v_LongJump_g14_c04 50 +Typing/v_Typing_g15_c01 94 +YoYo/v_YoYo_g19_c04 100 +PoleVault/v_PoleVault_g13_c03 67 
+StillRings/v_StillRings_g20_c02 85 +SoccerJuggling/v_SoccerJuggling_g08_c04 83 +Hammering/v_Hammering_g09_c07 34 +Skijet/v_Skijet_g12_c03 81 +Rafting/v_Rafting_g15_c02 72 +PlayingGuitar/v_PlayingGuitar_g19_c04 62 +Haircut/v_Haircut_g24_c03 33 +Shotput/v_Shotput_g10_c03 78 +PlayingGuitar/v_PlayingGuitar_g25_c02 62 +Skiing/v_Skiing_g09_c04 80 +JavelinThrow/v_JavelinThrow_g25_c03 44 +PlayingTabla/v_PlayingTabla_g15_c01 65 +CuttingInKitchen/v_CuttingInKitchen_g11_c04 24 +Mixing/v_Mixing_g15_c02 53 +ApplyLipstick/v_ApplyLipstick_g09_c04 1 +PizzaTossing/v_PizzaTossing_g13_c01 57 +PlayingPiano/v_PlayingPiano_g13_c01 63 +TrampolineJumping/v_TrampolineJumping_g18_c01 93 +FieldHockeyPenalty/v_FieldHockeyPenalty_g17_c03 28 +PoleVault/v_PoleVault_g19_c03 67 +Rowing/v_Rowing_g20_c03 75 +JumpRope/v_JumpRope_g16_c04 47 +Fencing/v_Fencing_g09_c02 27 +PullUps/v_PullUps_g08_c01 69 +HammerThrow/v_HammerThrow_g13_c04 35 +RopeClimbing/v_RopeClimbing_g22_c01 74 +PlayingSitar/v_PlayingSitar_g21_c03 64 +PoleVault/v_PoleVault_g25_c06 67 +SalsaSpin/v_SalsaSpin_g12_c06 76 +ShavingBeard/v_ShavingBeard_g15_c01 77 +HammerThrow/v_HammerThrow_g10_c06 35 +StillRings/v_StillRings_g21_c02 85 +ShavingBeard/v_ShavingBeard_g09_c02 77 +Drumming/v_Drumming_g15_c02 26 +BasketballDunk/v_BasketballDunk_g18_c03 8 +Haircut/v_Haircut_g09_c05 33 +Kayaking/v_Kayaking_g21_c04 48 +PlayingFlute/v_PlayingFlute_g15_c03 61 +UnevenBars/v_UnevenBars_g14_c01 95 +WallPushups/v_WallPushups_g17_c02 98 +HandstandPushups/v_HandStandPushups_g19_c03 36 +BodyWeightSquats/v_BodyWeightSquats_g11_c02 14 +HighJump/v_HighJump_g15_c01 39 +FieldHockeyPenalty/v_FieldHockeyPenalty_g14_c01 28 +JumpingJack/v_JumpingJack_g25_c05 46 +PushUps/v_PushUps_g22_c03 71 +JumpRope/v_JumpRope_g22_c03 47 +JavelinThrow/v_JavelinThrow_g10_c04 44 +WalkingWithDog/v_WalkingWithDog_g13_c04 97 +SoccerPenalty/v_SoccerPenalty_g12_c04 84 +WritingOnBoard/v_WritingOnBoard_g20_c04 99 +JavelinThrow/v_JavelinThrow_g10_c03 44 
+FieldHockeyPenalty/v_FieldHockeyPenalty_g22_c02 28 +SoccerJuggling/v_SoccerJuggling_g19_c02 83 +WritingOnBoard/v_WritingOnBoard_g25_c04 99 +HulaHoop/v_HulaHoop_g15_c04 42 +Biking/v_Biking_g11_c04 10 +ShavingBeard/v_ShavingBeard_g21_c01 77 +PlayingFlute/v_PlayingFlute_g23_c01 61 +Typing/v_Typing_g14_c05 94 +Rafting/v_Rafting_g11_c03 72 +BlowDryHair/v_BlowDryHair_g14_c05 12 +SoccerJuggling/v_SoccerJuggling_g10_c03 83 +Kayaking/v_Kayaking_g15_c02 48 +BabyCrawling/v_BabyCrawling_g12_c03 3 +TennisSwing/v_TennisSwing_g11_c07 91 +BandMarching/v_BandMarching_g18_c02 5 +HorseRiding/v_HorseRiding_g14_c03 41 +JavelinThrow/v_JavelinThrow_g16_c03 44 +Skijet/v_Skijet_g21_c04 81 +PlayingCello/v_PlayingCello_g22_c07 58 +CricketShot/v_CricketShot_g11_c05 23 +HighJump/v_HighJump_g22_c02 39 +MilitaryParade/v_MilitaryParade_g16_c01 52 +WallPushups/v_WallPushups_g20_c04 98 +BalanceBeam/v_BalanceBeam_g25_c02 4 +Bowling/v_Bowling_g21_c03 15 +WritingOnBoard/v_WritingOnBoard_g20_c07 99 +PoleVault/v_PoleVault_g23_c04 67 +BodyWeightSquats/v_BodyWeightSquats_g18_c02 14 +Basketball/v_Basketball_g19_c04 7 +BoxingSpeedBag/v_BoxingSpeedBag_g09_c04 17 +FrontCrawl/v_FrontCrawl_g11_c06 31 +FrisbeeCatch/v_FrisbeeCatch_g21_c04 30 +Mixing/v_Mixing_g19_c04 53 +ThrowDiscus/v_ThrowDiscus_g09_c01 92 +Haircut/v_Haircut_g16_c01 33 +TennisSwing/v_TennisSwing_g17_c04 91 +PlayingDhol/v_PlayingDhol_g13_c05 60 +Haircut/v_Haircut_g17_c04 33 +BasketballDunk/v_BasketballDunk_g23_c03 8 +Typing/v_Typing_g13_c01 94 +BabyCrawling/v_BabyCrawling_g19_c02 3 +StillRings/v_StillRings_g16_c03 85 +Shotput/v_Shotput_g23_c01 78 +RopeClimbing/v_RopeClimbing_g11_c03 74 +Biking/v_Biking_g15_c02 10 +SalsaSpin/v_SalsaSpin_g25_c03 76 +Lunges/v_Lunges_g20_c01 51 +Hammering/v_Hammering_g22_c07 34 +Haircut/v_Haircut_g24_c02 33 +Typing/v_Typing_g09_c03 94 +ThrowDiscus/v_ThrowDiscus_g21_c07 92 +Rowing/v_Rowing_g09_c02 75 +BrushingTeeth/v_BrushingTeeth_g12_c04 19 +UnevenBars/v_UnevenBars_g18_c01 95 +BoxingSpeedBag/v_BoxingSpeedBag_g15_c06 
17 +BalanceBeam/v_BalanceBeam_g21_c04 4 +ApplyEyeMakeup/v_ApplyEyeMakeup_g23_c05 0 +PizzaTossing/v_PizzaTossing_g21_c02 57 +HammerThrow/v_HammerThrow_g23_c05 35 +Shotput/v_Shotput_g08_c07 78 +ApplyLipstick/v_ApplyLipstick_g23_c02 1 +SoccerPenalty/v_SoccerPenalty_g09_c04 84 +RopeClimbing/v_RopeClimbing_g16_c05 74 +Archery/v_Archery_g13_c04 2 +WalkingWithDog/v_WalkingWithDog_g25_c04 97 +BandMarching/v_BandMarching_g20_c04 5 +PullUps/v_PullUps_g25_c01 69 +JumpRope/v_JumpRope_g17_c02 47 +FrontCrawl/v_FrontCrawl_g12_c01 31 +JumpRope/v_JumpRope_g16_c06 47 +Skijet/v_Skijet_g14_c02 81 +FrisbeeCatch/v_FrisbeeCatch_g19_c05 30 +Haircut/v_Haircut_g14_c02 33 +PoleVault/v_PoleVault_g11_c03 67 +ApplyEyeMakeup/v_ApplyEyeMakeup_g13_c05 0 +PlayingDaf/v_PlayingDaf_g24_c01 59 +FloorGymnastics/v_FloorGymnastics_g21_c01 29 +BaseballPitch/v_BaseballPitch_g15_c07 6 +BodyWeightSquats/v_BodyWeightSquats_g12_c04 14 +BoxingPunchingBag/v_BoxingPunchingBag_g11_c04 16 +HammerThrow/v_HammerThrow_g16_c04 35 +FloorGymnastics/v_FloorGymnastics_g16_c04 29 +TableTennisShot/v_TableTennisShot_g17_c04 89 +WalkingWithDog/v_WalkingWithDog_g12_c04 97 +PommelHorse/v_PommelHorse_g15_c03 68 +Typing/v_Typing_g12_c06 94 +CleanAndJerk/v_CleanAndJerk_g11_c01 20 +PoleVault/v_PoleVault_g12_c06 67 +Mixing/v_Mixing_g14_c05 53 +VolleyballSpiking/v_VolleyballSpiking_g21_c02 96 +FieldHockeyPenalty/v_FieldHockeyPenalty_g15_c06 28 +Surfing/v_Surfing_g15_c01 87 +WritingOnBoard/v_WritingOnBoard_g08_c02 99 +CliffDiving/v_CliffDiving_g14_c03 21 +CleanAndJerk/v_CleanAndJerk_g20_c06 20 +Surfing/v_Surfing_g13_c04 87 +ApplyEyeMakeup/v_ApplyEyeMakeup_g10_c05 0 +BlowDryHair/v_BlowDryHair_g10_c04 12 +PlayingFlute/v_PlayingFlute_g11_c05 61 +BlowingCandles/v_BlowingCandles_g16_c03 13 +SkateBoarding/v_SkateBoarding_g13_c03 79 +Bowling/v_Bowling_g20_c04 15 +SalsaSpin/v_SalsaSpin_g14_c06 76 +TennisSwing/v_TennisSwing_g25_c01 91 +Fencing/v_Fencing_g14_c02 27 +CliffDiving/v_CliffDiving_g17_c03 21 +ApplyLipstick/v_ApplyLipstick_g13_c03 1 
+PizzaTossing/v_PizzaTossing_g18_c01 57 +MoppingFloor/v_MoppingFloor_g09_c01 54 +CuttingInKitchen/v_CuttingInKitchen_g11_c02 24 +JumpingJack/v_JumpingJack_g17_c01 46 +Biking/v_Biking_g25_c02 10 +TaiChi/v_TaiChi_g19_c01 90 +PlayingSitar/v_PlayingSitar_g13_c02 64 +Typing/v_Typing_g23_c03 94 +JumpingJack/v_JumpingJack_g10_c04 46 +Archery/v_Archery_g24_c03 2 +ThrowDiscus/v_ThrowDiscus_g20_c03 92 +RockClimbingIndoor/v_RockClimbingIndoor_g24_c05 73 +Typing/v_Typing_g22_c03 94 +SkateBoarding/v_SkateBoarding_g13_c01 79 +RopeClimbing/v_RopeClimbing_g10_c03 74 +PlayingFlute/v_PlayingFlute_g23_c03 61 diff --git a/examples/LRCN_activity_recognition/verify_videos.sh b/examples/LRCN_activity_recognition/verify_videos.sh new file mode 100755 index 00000000000..c4cd572b30c --- /dev/null +++ b/examples/LRCN_activity_recognition/verify_videos.sh @@ -0,0 +1,111 @@ +#!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# Example of invoking script: +# ./verify_videos.sh ./frames list_of_video_images_to_test + +FRAMES_DIR="./frames/" + +CATEGORIES=`ls $FRAMES_DIR | cut -d "_" -f 1-2 | uniq` + +RunVideosClassification() +{ +COUNTER=0 +SUCCESS_COUNTER=0 +# Get Label out of class name and remove white characters +LABEL=`echo $1 | cut -d "_" -f 2 | sed 's: ::g'` + +for movie_name in `ls $FRAMES_DIR | grep $1`; do + echo "movie_name: $movie_name" + RESPONSE=`./classify_video.py $movie_name 2>/dev/null` + echo "$RESPONSE" + CLASSIFICATION=`echo $RESPONSE | cut -d ":" -f 2 | cut -d "." -f 1 | sed 's: ::g'` + + if [ "$LABEL" == "$CLASSIFICATION" ]; then + echo "OK (Label: $LABEL, Classification: $CLASSIFICATION)" + ((SUCCESS_COUNTER+=1)) + else + echo "Fail!!! 
(Label: $LABEL, Classification: $CLASSIFICATION)" + fi + ((COUNTER+=1)) +done +echo "CLASS($LABEL) Accuracy: 0"`bc <<< "scale=2;$SUCCESS_COUNTER/$COUNTER"` +} + +RunUCF101TestSplit01Classification() +{ + COUNTER=0 + SUCCESS_COUNTER=0 + while read -r line || [[ -n "$line" ]]; do + movie_name=`echo $line | cut -d "/" -f 2 | cut -d " " -f 1` + echo "Movie name: $movie_name" + LABEL=`echo $movie_name | cut -d "_" -f 2 | sed 's: ::g'` +# RESPONSE=`./classify_video.py $movie_name 2>/dev/null` + RESPONSE=`./classify_video.py $movie_name $FRAMES_DIR 2>/tmp/verify_video_log.$COUNTER | tee -a /tmp/verify_video_log.$COUNTER` + echo "RESPONSE: $RESPONSE" + CLASSIFICATION=`echo $RESPONSE | cut -d ":" -f 2 | cut -d "." -f 1 | sed 's: ::g'` + if [ "$LABEL" == "$CLASSIFICATION" ]; then + echo "OK (Label: $LABEL, Classification: $CLASSIFICATION)" + ((SUCCESS_COUNTER+=1)) + else + echo "Fail!!! (Label: $LABEL, Classification: $CLASSIFICATION)" + fi + ((COUNTER+=1)) + done < ucf101_split1_testVideos.txt + echo "---> Accuracy: 0"`bc <<< "scale=2;$SUCCESS_COUNTER/$COUNTER"` +} + + +if [ "$#" -ne 1 ]; then + echo "" + echo " Error: Illegal number of parameters. 
" + echo "" + echo " Syntax : " + echo " ./verify_videos.sh " + echo "" +else + + # Make classification of first standard test split of UCF-101 + # and present overall accuracy + RunUCF101TestSplit01Classification + + # Run classification on whole UCF-101 + # and present per category accuracy + #for movie_categorie in $CATEGORIES; do + # RunVideosClassification $movie_categorie + #done +fi diff --git a/examples/cifar10/cifar10_full_sigmoid_solver.prototxt b/examples/cifar10/cifar10_full_sigmoid_solver.prototxt index 7dd3ecb9d8e..f4429d6cbef 100644 --- a/examples/cifar10/cifar10_full_sigmoid_solver.prototxt +++ b/examples/cifar10/cifar10_full_sigmoid_solver.prototxt @@ -25,4 +25,4 @@ max_iter: 60000 snapshot: 10000 snapshot_prefix: "examples/cifar10_full_sigmoid" # solver mode: CPU or GPU -solver_mode: GPU +solver_mode: CPU diff --git a/examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt b/examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt index a57b280fd1e..b2c5797ec86 100644 --- a/examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt +++ b/examples/cifar10/cifar10_full_sigmoid_solver_bn.prototxt @@ -25,4 +25,4 @@ max_iter: 60000 snapshot: 10000 snapshot_prefix: "examples/cifar10_full_sigmoid_bn" # solver mode: CPU or GPU -solver_mode: GPU +solver_mode: CPU diff --git a/examples/cifar10/cifar10_full_solver.prototxt b/examples/cifar10/cifar10_full_solver.prototxt index 882daa2d2b5..94809e7f216 100644 --- a/examples/cifar10/cifar10_full_solver.prototxt +++ b/examples/cifar10/cifar10_full_solver.prototxt @@ -16,7 +16,7 @@ weight_decay: 0.004 # The learning rate policy lr_policy: "fixed" # Display every 200 iterations -display: 200 +display: 10 # The maximum number of iterations max_iter: 60000 # snapshot intermediate results @@ -24,4 +24,4 @@ snapshot: 10000 snapshot_format: HDF5 snapshot_prefix: "examples/cifar10/cifar10_full" # solver mode: CPU or GPU -solver_mode: GPU +solver_mode: CPU diff --git a/examples/cifar10/cifar10_full_solver_lr1.prototxt 
b/examples/cifar10/cifar10_full_solver_lr1.prototxt index 55f4be44053..c5467bdfb7d 100644 --- a/examples/cifar10/cifar10_full_solver_lr1.prototxt +++ b/examples/cifar10/cifar10_full_solver_lr1.prototxt @@ -24,4 +24,4 @@ snapshot: 5000 snapshot_format: HDF5 snapshot_prefix: "examples/cifar10/cifar10_full" # solver mode: CPU or GPU -solver_mode: GPU +solver_mode: CPU diff --git a/examples/cifar10/cifar10_full_solver_lr2.prototxt b/examples/cifar10/cifar10_full_solver_lr2.prototxt index 7c3d2da31de..cd5d12006e3 100644 --- a/examples/cifar10/cifar10_full_solver_lr2.prototxt +++ b/examples/cifar10/cifar10_full_solver_lr2.prototxt @@ -24,4 +24,4 @@ snapshot: 5000 snapshot_format: HDF5 snapshot_prefix: "examples/cifar10/cifar10_full" # solver mode: CPU or GPU -solver_mode: GPU +solver_mode: CPU diff --git a/examples/cifar10/cifar10_full_train_test.prototxt b/examples/cifar10/cifar10_full_train_test.prototxt index d45fc61e120..09349d66fdd 100644 --- a/examples/cifar10/cifar10_full_train_test.prototxt +++ b/examples/cifar10/cifar10_full_train_test.prototxt @@ -11,6 +11,7 @@ layer { mean_file: "examples/cifar10/mean.binaryproto" } data_param { + shuffle: true source: "examples/cifar10/cifar10_train_lmdb" batch_size: 100 backend: LMDB diff --git a/examples/cifar10/cifar10_quick_solver.prototxt b/examples/cifar10/cifar10_quick_solver.prototxt index 5de276f722f..4befd0a2427 100644 --- a/examples/cifar10/cifar10_quick_solver.prototxt +++ b/examples/cifar10/cifar10_quick_solver.prototxt @@ -23,4 +23,4 @@ snapshot: 4000 snapshot_format: HDF5 snapshot_prefix: "examples/cifar10/cifar10_quick" # solver mode: CPU or GPU -solver_mode: GPU +solver_mode: CPU diff --git a/examples/cifar10/cifar10_quick_solver_lr1.prototxt b/examples/cifar10/cifar10_quick_solver_lr1.prototxt index f8f1efd54af..86c954fcd63 100644 --- a/examples/cifar10/cifar10_quick_solver_lr1.prototxt +++ b/examples/cifar10/cifar10_quick_solver_lr1.prototxt @@ -23,4 +23,4 @@ snapshot: 5000 snapshot_format: HDF5 
snapshot_prefix: "examples/cifar10/cifar10_quick" # solver mode: CPU or GPU -solver_mode: GPU +solver_mode: CPU diff --git a/examples/cifar10/convert_cifar_data.cpp b/examples/cifar10/convert_cifar_data.cpp index 7385a74a679..c1be571da55 100644 --- a/examples/cifar10/convert_cifar_data.cpp +++ b/examples/cifar10/convert_cifar_data.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + // // This script converts the CIFAR dataset to the leveldb format used // by caffe to perform classification. diff --git a/examples/cifar10/create_cifar10.sh b/examples/cifar10/create_cifar10.sh index 7ee1d6ad0a0..65664c02726 100755 --- a/examples/cifar10/create_cifar10.sh +++ b/examples/cifar10/create_cifar10.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# # This script converts the cifar data into leveldb format. set -e diff --git a/examples/cifar10/train_full.sh b/examples/cifar10/train_full.sh index 06ecc2dccb0..15b680a97bb 100755 --- a/examples/cifar10/train_full.sh +++ b/examples/cifar10/train_full.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# set -e TOOLS=./build/tools diff --git a/examples/cifar10/train_full_multinode_mpi.sh b/examples/cifar10/train_full_multinode_mpi.sh new file mode 100755 index 00000000000..082e9db1988 --- /dev/null +++ b/examples/cifar10/train_full_multinode_mpi.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# + +TOOLS=./build/tools + +echo "executing 4 nodes with mpirun" + +OMP_NUM_THREADS=1 \ +mpirun -l -host 127.0.0.1 -n 4 \ +$TOOLS/caffe train --solver=examples/cifar10/cifar10_full_solver.prototxt --param_server=mpi diff --git a/examples/cifar10/train_full_sigmoid.sh b/examples/cifar10/train_full_sigmoid.sh index 9b5d5213b2a..de2ecdd64fc 100755 --- a/examples/cifar10/train_full_sigmoid.sh +++ b/examples/cifar10/train_full_sigmoid.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# set -e TOOLS=./build/tools diff --git a/examples/cifar10/train_full_sigmoid_bn.sh b/examples/cifar10/train_full_sigmoid_bn.sh index 05547f3a104..c792b727200 100755 --- a/examples/cifar10/train_full_sigmoid_bn.sh +++ b/examples/cifar10/train_full_sigmoid_bn.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# set -e TOOLS=./build/tools diff --git a/examples/cifar10/train_quick.sh b/examples/cifar10/train_quick.sh index d2b875340ee..b3b90ffda38 100755 --- a/examples/cifar10/train_quick.sh +++ b/examples/cifar10/train_quick.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# set -e TOOLS=./build/tools diff --git a/examples/cpp_classification/batch_classification.cpp b/examples/cpp_classification/batch_classification.cpp new file mode 100644 index 00000000000..374671baa26 --- /dev/null +++ b/examples/cpp_classification/batch_classification.cpp @@ -0,0 +1,452 @@ +/* + All modification made by Intel Corporation: © 2016 Intel Corporation + + All contributions by the University of California: + Copyright (c) 2014, 2015, The Regents of the University of California (Regents) + All rights reserved. + + All other contributions: + Copyright (c) 2014, 2015, the respective contributors + All rights reserved. + For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#ifdef USE_OPENCV +#include +#include +#include +#endif // USE_OPENCV +#include +#include +#include +#include +#include +#include +#include + +#ifdef USE_OPENCV +using namespace caffe; // NOLINT(build/namespaces) +using std::string; +using std::cout; +using std::cerr; +using std::endl; +using std::vector; + +DEFINE_string(model, "", + "Required; The model definition protocol buffer text file."); + +DEFINE_string(weights, "", + "Required; The pretrained weights."); + +DEFINE_string(input, "", + "Required; File that contain the path of input images line by line"); + +DEFINE_string(label_file, "", + "Required; The label file."); + +DEFINE_string(engine, "", + "Optional; Engine can only be CAFFE | MKL2017 | MKLDNN"); + +DEFINE_string(mean_file, "", + "Optional; The mean file used to subtract from the input image."); + +DEFINE_string(mean_value, "104,117,123", + "Optional; If specified, can be one value or can be same as image channels" + " - would subtract from the corresponding channel). 
Separated by ','."); + +DEFINE_int32(batch_size, 1, + "Optional; batch size, default 1"); + +typedef std::pair Prediction; + +class Classifier { + public: + Classifier(const string& model_file, + const string& trained_file, + const string& mean_file, + const string& mean_value, + const string& label_file, + const string& engine, + const size_t batch_size, + const size_t topN = 5 + ); + vector > ClassifyBatch(vector& imgs); + + private: + void SetMean(const string& mean_file, const string& mean_value); + + vector PredictBatch(vector& imgs); + + void WrapInputLayerBatch(vector >* input_channels_batch); + void WriteImgToInput(const vector& imgs, vector >* input_channels_batch); + void Preprocess(cv::Mat& img); + + void PreprocessBatch(vector& imgs); + + private: + shared_ptr > net_; + cv::Size input_geometry_; + int num_channels_; + cv::Mat mean_; + size_t batch_size_; + size_t topN_; + std::vector labels_; +}; + +Classifier::Classifier(const string& model_file, + const string& trained_file, + const string& mean_file, + const string& mean_value, + const string& label_file, + const string& engine, + const size_t batch_size, + const size_t topN + ) { +#ifdef CPU_ONLY + Caffe::set_mode(Caffe::CPU); +#else + Caffe::set_mode(Caffe::GPU); +#endif + + /* Load the network. */ + net_.reset(new Net(model_file, TEST, 0, NULL, NULL, engine)); + net_->CopyTrainedLayersFrom(trained_file); + + CHECK_EQ(net_->num_inputs(), 1) << "Network should have exactly one input."; + CHECK_EQ(net_->num_outputs(), 1) << "Network should have exactly one output."; + + Blob* input_layer = net_->input_blobs()[0]; + num_channels_ = input_layer->channels(); + CHECK(num_channels_ == 3 || num_channels_ == 1) + << "Input layer should have 1 or 3 channels."; + input_geometry_ = cv::Size(input_layer->width(), input_layer->height()); + + SetMean(mean_file, mean_value); + + batch_size_ = batch_size; + topN_ = topN; + + if(!label_file.empty()) { + /* Load labels. 
*/ + std::ifstream labels(label_file.c_str()); + CHECK(labels) << "Unable to open labels file " << label_file; + string line; + while (std::getline(labels, line)) + labels_.push_back(string(line)); + + Blob* output_layer = net_->output_blobs()[0]; + CHECK_EQ(labels_.size(), output_layer->channels()) + << "Number of labels is different from the output layer dimension."; + } + +} + + +static bool PairCompare(const std::pair& lhs, + const std::pair& rhs) { + return lhs.first > rhs.first; +} + +/* Return the indices of the top N values of vector v. */ +static vector Argmax(const vector& v, int N) { + vector > pairs; + for (size_t i = 0; i < v.size(); ++i) + pairs.push_back(std::make_pair(v[i], i)); + std::partial_sort(pairs.begin(), pairs.begin() + N, pairs.end(), PairCompare); + + vector result; + for (int i = 0; i < N; ++i) + result.push_back(pairs[i].second); + return result; +} + +/* Return the top N predictions. */ +vector > Classifier::ClassifyBatch(vector& imgs) { + vector output_batch = PredictBatch(imgs); + vector > predictionsBatch; + int output_channels = net_->output_blobs()[0]->channels(); + for (size_t i = 0; i < batch_size_; ++i) { + vector output(output_batch.begin() + i*output_channels, output_batch.begin()+(i+1)*output_channels); + vector maxN = Argmax(output, topN_); + vector predictions; + for (int i = 0; i < topN_; ++i) { + int idx = maxN[i]; + if(labels_.empty()) { + predictions.push_back(std::make_pair(std::to_string(idx), output[idx])); + } else{ + predictions.push_back(std::make_pair(labels_[idx], output[idx])); + } + } + predictionsBatch.push_back(predictions); + } + return predictionsBatch; +} + +/* Load the mean file in binaryproto format. 
*/ +void Classifier::SetMean(const string& mean_file, const string& mean_value) { + cv::Scalar channel_mean; + if(!mean_file.empty()) { + BlobProto blob_proto; + ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto); + + /* Convert from BlobProto to Blob */ + Blob mean_blob; + mean_blob.FromProto(blob_proto); + CHECK_EQ(mean_blob.channels(), num_channels_) + << "Number of channels of mean file doesn't match input layer."; + + /* The format of the mean file is planar 32-bit float BGR or grayscale. */ + vector channels; + float* data = mean_blob.mutable_cpu_data(); + for (int i = 0; i < num_channels_; ++i) { + /* Extract an individual channel. */ + cv::Mat channel(mean_blob.height(), mean_blob.width(), CV_32FC1, data); + channels.push_back(channel); + data += mean_blob.height() * mean_blob.width(); + } + + /* Merge the separate channels into a single image. */ + cv::Mat mean; + cv::merge(channels, mean); + + /* Compute the global mean pixel value and create a mean image + * filled with this value. */ + channel_mean = cv::mean(mean); + mean_ = cv::Mat(input_geometry_, mean.type(), channel_mean); + } + if (!mean_value.empty()) { + stringstream ss(mean_value); + vector values; + string item; + while (getline(ss, item, ',')) { + float value = std::atof(item.c_str()); + values.push_back(value); + } + CHECK(values.size() == 1 || values.size() == num_channels_) << + "Specify either 1 mean_value or as many as channels: " << num_channels_; + + std::vector channels; + for (int i = 0; i < num_channels_; ++i) { + /* Extract an individual channel. */ + cv::Mat channel(input_geometry_.height, input_geometry_.width, CV_32FC1, + cv::Scalar(values[i])); + channels.push_back(channel); + } + cv::merge(channels, mean_); + } +} + +vector Classifier::PredictBatch(vector& imgs) { + Blob* input_layer = net_->input_blobs()[0]; + input_layer->Reshape(batch_size_, num_channels_, + input_geometry_.height, input_geometry_.width); + /* Forward dimension change to all layers. 
*/ + net_->Reshape(); + + vector > input_channels_batch; + WrapInputLayerBatch(&input_channels_batch); + PreprocessBatch(imgs); + WriteImgToInput(imgs, &input_channels_batch); + + net_->Forward(); + + /* Copy the output layer to a vector */ + Blob* output_layer = net_->output_blobs()[0]; + const float* begin = output_layer->cpu_data(); + const float* end = begin + output_layer->channels() * batch_size_; + printf("output_layer->channels: %d\n", output_layer->channels()); + return vector(begin, end); +} + +/* Wrap the input layer of the network in separate cv::Mat objects + * (one per channel). This way we save one memcpy operation and we + * don't need to rely on cudaMemcpy2D. The last preprocessing + * operation will write the separate channels directly to the input + * layer. */ +void Classifier::WrapInputLayerBatch(vector >* input_channels_batch) { + Blob* input_layer = net_->input_blobs()[0]; + + int width = input_layer->width(); + int height = input_layer->height(); + float* input_data = input_layer->mutable_cpu_data(); + int num = input_layer->num(); + for( int j = 0; j < num; ++j) { + vector input_channels; + for (int i = 0; i < input_layer->channels(); ++i) { + cv::Mat channel(height, width, CV_32FC1, input_data); + input_channels.push_back(channel); + input_data += width * height; + } + input_channels_batch->push_back(input_channels); + } +} + +void Classifier::WriteImgToInput(const vector& imgs, + vector >* input_channels_batch) +{ + for(size_t i=0; iat(i)); + } +} + +void Classifier::PreprocessBatch(vector& imgs) { + for(size_t i=0; i loadImgBatch(vector imgNames) { + vector imgs; + for(size_t i=0; i predictions) { + /* Print the top N predictions. 
 */ + for (size_t i = 0; i < predictions.size(); ++i) { + Prediction p = predictions[i]; + cout << std::fixed << std::setprecision(4) << p.second << " - \"" + << p.first << "\"" << endl; + } +} + +void printPredictionsBatch(vector imgNames, + vector > predictionsBatch) { + for( size_t i = 0; i < predictionsBatch.size(); ++i) { + cout << "---------- "<< i + 1 <<": Prediction for " + << imgNames[i] << " ----------" << endl; + printPrediction(predictionsBatch[i]); + } +} + +vector readImgListFromPath(string file) { + vector rawImgNames; + std::ifstream input_lines(file.c_str()); + CHECK(input_lines) << "Unable to open file " << file; + string line; + while (std::getline(input_lines, line)) + rawImgNames.push_back(string(line)); + return rawImgNames; +} +int main(int argc, char** argv) { + + ::google::InitGoogleLogging(argv[0]); + +#ifndef GFLAGS_GFLAGS_H_ + namespace gflags = google; +#endif + + gflags::SetUsageMessage("Image classification.\n" + "Usage:\n" + "batch_classification \n" + "Example: ./batch_classification --model --weights --input --batch_size " + ); + gflags::ParseCommandLineFlags(&argc, &argv, true); + + + CHECK_GT(FLAGS_model.size(), 0) << "Need a model definition to score."; + CHECK_GT(FLAGS_weights.size(), 0) << "Need model weights to score."; + CHECK_GT(FLAGS_input.size(), 0) << "Need an input file to score."; + + cout<<"Use batch size: "<< FLAGS_batch_size << endl; + + if (FLAGS_mean_file.empty()) { + cout<<"Use mean value: "<< FLAGS_mean_value< rawImgNames = readImgListFromPath(FLAGS_input); + + if(rawImgNames.size() > 0 && rawImgNames.size() < FLAGS_batch_size) { + while(rawImgNames.size() < FLAGS_batch_size) { + rawImgNames.insert(rawImgNames.end(), rawImgNames.begin(), rawImgNames.end()); + } + } + + vector imgNames(rawImgNames.begin(), rawImgNames.begin() + FLAGS_batch_size); + vector imgs = loadImgBatch(rawImgNames); + + vector > predictionsBatch = classifier.ClassifyBatch(imgs); + + printPredictionsBatch(imgNames, predictionsBatch); + + 
return 0; +} + + + +#else +int main(int argc, char** argv) { + LOG(FATAL) << "This example requires OpenCV; compile with USE_OPENCV."; +} +#endif // USE_OPENCV diff --git a/examples/cpp_classification/classification.cpp b/examples/cpp_classification/classification.cpp index 6b67c537a47..5b8aa21e46a 100644 --- a/examples/cpp_classification/classification.cpp +++ b/examples/cpp_classification/classification.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #ifdef USE_OPENCV #include @@ -23,7 +60,8 @@ class Classifier { Classifier(const string& model_file, const string& trained_file, const string& mean_file, - const string& label_file); + const string& label_file, + const string& engine); std::vector Classify(const cv::Mat& img, int N = 5); @@ -48,7 +86,8 @@ class Classifier { Classifier::Classifier(const string& model_file, const string& trained_file, const string& mean_file, - const string& label_file) { + const string& label_file, + const string& engine) { #ifdef CPU_ONLY Caffe::set_mode(Caffe::CPU); #else @@ -56,7 +95,7 @@ Classifier::Classifier(const string& model_file, #endif /* Load the network. 
*/ - net_.reset(new Net(model_file, TEST)); + net_.reset(new Net(model_file, TEST, 0, NULL, NULL, engine)); net_->CopyTrainedLayersFrom(trained_file); CHECK_EQ(net_->num_inputs(), 1) << "Network should have exactly one input."; @@ -227,10 +266,10 @@ void Classifier::Preprocess(const cv::Mat& img, } int main(int argc, char** argv) { - if (argc != 6) { + if (argc < 6) { std::cerr << "Usage: " << argv[0] << " deploy.prototxt network.caffemodel" - << " mean.binaryproto labels.txt img.jpg" << std::endl; + << " mean.binaryproto labels.txt img.jpg [CAFFE|MKL2017|MKLDNN]" << std::endl; return 1; } @@ -240,9 +279,14 @@ int main(int argc, char** argv) { string trained_file = argv[2]; string mean_file = argv[3]; string label_file = argv[4]; - Classifier classifier(model_file, trained_file, mean_file, label_file); + string file = argv[5]; + string engine = ""; + if (argc > 6) { + engine = argv[6]; + } + + Classifier classifier(model_file, trained_file, mean_file, label_file, engine); - string file = argv[5]; std::cout << "---------- Prediction for " << file << " ----------" << std::endl; diff --git a/examples/finetune_flickr_style/assemble_data.py b/examples/finetune_flickr_style/assemble_data.py index 09bfa2618a4..7c83d0e88fc 100755 --- a/examples/finetune_flickr_style/assemble_data.py +++ b/examples/finetune_flickr_style/assemble_data.py @@ -1,4 +1,40 @@ #!/usr/bin/env python +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# """ Form a subset of the Flickr Style data, download images to dirname, and write Caffe ImagesDataLayer training file. 
diff --git a/examples/imagenet/create_imagenet.sh b/examples/imagenet/create_imagenet.sh index 1bf08b1aa8f..e699a111c44 100755 --- a/examples/imagenet/create_imagenet.sh +++ b/examples/imagenet/create_imagenet.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# # Create the imagenet lmdb inputs # N.B. set the path to the imagenet train + val data dirs set -e @@ -21,6 +57,17 @@ else RESIZE_WIDTH=0 fi +# Set ENCODE=true to encode the images as compressed JPEGs stored in the LMDB. +# Leave as false for uncompressed (raw) images. +ENCODE=false +if $ENCODE; then + ENCODE_FLAG='--encoded=true' + ENCODE_TYPE_FLAG='--encode_type=jpg' +else + ENCODE_FLAG='--encoded=false' + ENCODE_TYPE_FLAG='' +fi + if [ ! -d "$TRAIN_DATA_ROOT" ]; then echo "Error: TRAIN_DATA_ROOT is not a path to a directory: $TRAIN_DATA_ROOT" echo "Set the TRAIN_DATA_ROOT variable in create_imagenet.sh to the path" \ @@ -40,6 +87,8 @@ echo "Creating train lmdb..." GLOG_logtostderr=1 $TOOLS/convert_imageset \ --resize_height=$RESIZE_HEIGHT \ --resize_width=$RESIZE_WIDTH \ + $ENCODE_FLAG \ + $ENCODE_TYPE_FLAG \ --shuffle \ $TRAIN_DATA_ROOT \ $DATA/train.txt \ @@ -50,6 +99,8 @@ echo "Creating val lmdb..." 
GLOG_logtostderr=1 $TOOLS/convert_imageset \ --resize_height=$RESIZE_HEIGHT \ --resize_width=$RESIZE_WIDTH \ + $ENCODE_FLAG \ + $ENCODE_TYPE_FLAG \ --shuffle \ $VAL_DATA_ROOT \ $DATA/val.txt \ diff --git a/examples/imagenet/make_imagenet_mean.sh b/examples/imagenet/make_imagenet_mean.sh index 57f43766c4b..1f09a04df69 100755 --- a/examples/imagenet/make_imagenet_mean.sh +++ b/examples/imagenet/make_imagenet_mean.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# # Compute the mean image from the imagenet training lmdb # N.B. this is available in data/ilsvrc12 diff --git a/examples/imagenet/resume_training.sh b/examples/imagenet/resume_training.sh index 4aef204368e..8ad5be77660 100755 --- a/examples/imagenet/resume_training.sh +++ b/examples/imagenet/resume_training.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# set -e ./build/tools/caffe train \ diff --git a/examples/imagenet/train_caffenet.sh b/examples/imagenet/train_caffenet.sh index a5094d44ae0..2e8909980a3 100755 --- a/examples/imagenet/train_caffenet.sh +++ b/examples/imagenet/train_caffenet.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# set -e ./build/tools/caffe train \ diff --git a/examples/mnist/convert_mnist_data.cpp b/examples/mnist/convert_mnist_data.cpp index 57ddef77074..cf1fc8e7ca7 100644 --- a/examples/mnist/convert_mnist_data.cpp +++ b/examples/mnist/convert_mnist_data.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + // This script converts the MNIST dataset to a lmdb (default) or // leveldb (--backend=leveldb) format used by caffe to load data. // Usage: @@ -70,8 +107,10 @@ void convert_dataset(const char* image_filename, const char* label_filename, image_file.read(reinterpret_cast(&cols), 4); cols = swap_endian(cols); + db::DB *database_instance = db::GetDB(db_backend); + CHECK(database_instance) << "Failed to obtain database instance"; - scoped_ptr db(db::GetDB(db_backend)); + scoped_ptr db(database_instance); db->Open(db_path, db::NEW); scoped_ptr txn(db->NewTransaction()); diff --git a/examples/mnist/create_mnist.sh b/examples/mnist/create_mnist.sh index f5e2e7960c5..32ff4387f20 100755 --- a/examples/mnist/create_mnist.sh +++ b/examples/mnist/create_mnist.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# # This script converts the mnist data into lmdb/leveldb format, # depending on the value assigned to $BACKEND. 
set -e diff --git a/examples/mnist/lenet_solver.prototxt b/examples/mnist/lenet_solver.prototxt index 2dfbc834f41..fada9dd7fa7 100644 --- a/examples/mnist/lenet_solver.prototxt +++ b/examples/mnist/lenet_solver.prototxt @@ -22,4 +22,4 @@ max_iter: 10000 snapshot: 5000 snapshot_prefix: "examples/mnist/lenet" # solver mode: CPU or GPU -solver_mode: GPU +solver_mode: CPU diff --git a/examples/mnist/lenet_solver_mlsl.prototxt b/examples/mnist/lenet_solver_mlsl.prototxt new file mode 100644 index 00000000000..89fc55d2429 --- /dev/null +++ b/examples/mnist/lenet_solver_mlsl.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/mnist/lenet_train_test_mlsl.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of MNIST, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 10000 +# The base learning rate, momentum and the weight decay of the network. +base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0005 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 100 +# The maximum number of iterations +max_iter: 50 +# snapshot intermediate results +snapshot: 10000 +snapshot_prefix: "examples/mnist/lenet_mlsl" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/mnist/lenet_solver_mlsl2.prototxt b/examples/mnist/lenet_solver_mlsl2.prototxt new file mode 100644 index 00000000000..3eabfe53720 --- /dev/null +++ b/examples/mnist/lenet_solver_mlsl2.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/mnist/lenet_train_test_mlsl2.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of MNIST, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. 
+test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 500 +# The base learning rate, momentum and the weight decay of the network. +base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0005 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 10 +# The maximum number of iterations +max_iter: 500 +# snapshot intermediate results +snapshot: 5000 +snapshot_prefix: "examples/mnist/lenet" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/mnist/lenet_solver_mlsl3.prototxt b/examples/mnist/lenet_solver_mlsl3.prototxt new file mode 100644 index 00000000000..2192088b3e2 --- /dev/null +++ b/examples/mnist/lenet_solver_mlsl3.prototxt @@ -0,0 +1,25 @@ +# The train/test net protocol buffer definition +net: "examples/mnist/lenet_train_test_mlsl3.prototxt" +# test_iter specifies how many forward passes the test should carry out. +# In the case of MNIST, we have test batch size 100 and 100 test iterations, +# covering the full 10,000 testing images. +test_iter: 100 +# Carry out testing every 500 training iterations. +test_interval: 10000 +# The base learning rate, momentum and the weight decay of the network. 
+base_lr: 0.01 +momentum: 0.9 +weight_decay: 0.0005 +# The learning rate policy +lr_policy: "inv" +gamma: 0.0001 +power: 0.75 +# Display every 100 iterations +display: 2 +# The maximum number of iterations +max_iter: 10 +# snapshot intermediate results +snapshot: 10000 +snapshot_prefix: "examples/mnist/lenet_mlsl" +# solver mode: CPU or GPU +solver_mode: CPU diff --git a/examples/mnist/lenet_train_test_mlsl.prototxt b/examples/mnist/lenet_train_test_mlsl.prototxt new file mode 100644 index 00000000000..926da6ebaa7 --- /dev/null +++ b/examples/mnist/lenet_train_test_mlsl.prototxt @@ -0,0 +1,179 @@ +name: "LeNet" +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/mnist/mnist_train_lmdb" + batch_size: 64 + backend: LMDB + } +} +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/mnist/mnist_test_lmdb" + batch_size: 100 + backend: LMDB + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } +# param { +# lr_mult: 2 +# } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + engine: MKL2017 + weight_filler { + type: "xavier" + } + bias_term: false +# bias_filler { +# type: "constant" +# } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + engine: MKL2017 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } +# param { +# lr_mult: 2 +# } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + engine: MKL2017 + weight_filler { + type: "xavier" + } + bias_term: false +# bias_filler { +# type: "constant" +# } + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2" + top: "pool2" 
+ pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + engine: MKL2017 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } +# param { +# lr_mult: 2 +# } + inner_product_param { + num_output: 500 + weight_filler { + type: "xavier" + } + bias_term: false +# bias_filler { +# type: "constant" +# } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" + relu_param { + engine: MKL2017 + } +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } +# param { +# lr_mult: 2 +# } + inner_product_param { + num_output: 10 + weight_filler { + type: "xavier" + } + bias_term: false +# bias_filler { +# type: "constant" +# } + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "ip2" + bottom: "label" + top: "accuracy" + include { + phase: TEST + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "ip2" + bottom: "label" + top: "loss" +} diff --git a/examples/mnist/lenet_train_test_mlsl2.prototxt b/examples/mnist/lenet_train_test_mlsl2.prototxt new file mode 100644 index 00000000000..66eeb94faf1 --- /dev/null +++ b/examples/mnist/lenet_train_test_mlsl2.prototxt @@ -0,0 +1,183 @@ +name: "LeNet" +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/mnist/mnist_train_lmdb" + batch_size: 1 + backend: LMDB + } +} +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/mnist/mnist_test_lmdb" + batch_size: 1 #00 + backend: LMDB + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } +# param { +# lr_mult: 2 +# } + convolution_param { + num_output: 1 #20 + kernel_size: 3 #5 + stride: 1 + engine: MKL2017 + weight_filler { +# type: "xavier" 
+ type: "constant" + value: 0.1 + } + bias_term: false +# bias_filler { +# type: "constant" +# } + } +} +#layer { +# name: "pool1" +# type: "Pooling" +# bottom: "conv1" +# top: "pool1" +# pooling_param { +# pool: MAX +# kernel_size: 2 +# stride: 2 +# engine: MKL2017 +# } +#} +#layer { +# name: "conv2" +# type: "Convolution" +# bottom: "pool1" +# top: "conv2" +# param { +# lr_mult: 1 +# } +# param { +# lr_mult: 2 +# } +# convolution_param { +# num_output: 50 +# kernel_size: 5 +# stride: 1 +# engine: MKL2017 +# weight_filler { +# type: "xavier" +# } +# bias_term: false +## bias_filler { +## type: "constant" +## } +# } +#} +#layer { +# name: "pool2" +# type: "Pooling" +# bottom: "conv2" +# top: "pool2" +# pooling_param { +# pool: MAX +# kernel_size: 2 +# stride: 2 +# engine: MKL2017 +# } +#} +#layer { +# name: "ip1" +# type: "InnerProduct" +# bottom: "conv1" +# top: "ip1" +# param { +# lr_mult: 1 +# } +# param { +# lr_mult: 2 +# } +# inner_product_param { +# num_output: 500 +# weight_filler { +# type: "xavier" +# } +# bias_term: false +# bias_filler { +# type: "constant" +# } +# } +#} +#layer { +# name: "relu1" +# type: "ReLU" +# bottom: "conv1" +# top: "ip1" +# relu_param { +# engine: MKL2017 +# } +#} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "conv1" + top: "ip2" + param { + lr_mult: 1 + } +# param { +# lr_mult: 2 +# } + inner_product_param { + num_output: 10 + weight_filler { +# type: "xavier" + type: "constant" + value: 0.1 + } + bias_term: false +# bias_filler { +# type: "constant" +# } + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "ip2" + bottom: "label" + top: "accuracy" + include { + phase: TEST + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "ip2" + bottom: "label" + top: "loss" +} diff --git a/examples/mnist/lenet_train_test_mlsl3.prototxt b/examples/mnist/lenet_train_test_mlsl3.prototxt new file mode 100644 index 00000000000..06335d4b43b --- /dev/null +++ b/examples/mnist/lenet_train_test_mlsl3.prototxt @@ 
-0,0 +1,199 @@ +name: "LeNet" +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/mnist/mnist_train_lmdb" + batch_size: 64 + backend: LMDB + } +} +layer { + name: "mnist" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + scale: 0.00390625 + } + data_param { + source: "examples/mnist/mnist_test_lmdb" + batch_size: 100 + backend: LMDB + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + } +# param { +# lr_mult: 2 +# } + convolution_param { + num_output: 20 + kernel_size: 5 + stride: 1 + engine: MKL2017 + weight_filler { +# type: "xavier" + type: "constant" + value: 0.1 + } + bias_term: false +# bias_filler { +# type: "constant" +# } + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + engine: MKL2017 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + } +# param { +# lr_mult: 2 +# } + convolution_param { + num_output: 50 + kernel_size: 5 + stride: 1 + engine: MKL2017 + weight_filler { +# type: "xavier" + type: "constant" + value: 0.1 + } + bias_term: false +# bias_filler { +# type: "constant" +# } + } +} +layer { + name: "norm1" + type: "LRN" + bottom: "conv2" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + engine: MKL2017 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "norm1" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + engine: MKL2017 + } +} +layer { + name: "ip1" + type: "InnerProduct" + bottom: "pool2" + top: "ip1" + param { + lr_mult: 1 + } +# param { +# lr_mult: 2 +# } + inner_product_param { + num_output: 500 + weight_filler { +# type: "xavier" + type: "constant" + value: 0.1 + } + bias_term: false +# bias_filler { 
+# type: "constant" +# } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "ip1" + top: "ip1" + relu_param { + engine: MKL2017 + } +} +layer { + name: "ip2" + type: "InnerProduct" + bottom: "ip1" + top: "ip2" + param { + lr_mult: 1 + } +# param { +# lr_mult: 2 +# } + inner_product_param { + num_output: 10 + weight_filler { +# type: "xavier" + type: "constant" + value: 0.1 + } + bias_term: false +# bias_filler { +# type: "constant" +# } + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "ip2" + bottom: "label" + top: "accuracy" + include { + phase: TEST + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "ip2" + bottom: "label" + top: "loss" +} diff --git a/examples/mnist/train_lenet.sh b/examples/mnist/train_lenet.sh index f7f9b86198d..a9d37f30ecb 100755 --- a/examples/mnist/train_lenet.sh +++ b/examples/mnist/train_lenet.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# set -e ./build/tools/caffe train --solver=examples/mnist/lenet_solver.prototxt $@ diff --git a/examples/mnist/train_lenet_adam.sh b/examples/mnist/train_lenet_adam.sh index 7b4e905681b..ffabff3e40b 100755 --- a/examples/mnist/train_lenet_adam.sh +++ b/examples/mnist/train_lenet_adam.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# set -e ./build/tools/caffe train --solver=examples/mnist/lenet_solver_adam.prototxt $@ diff --git a/examples/mnist/train_lenet_consolidated.sh b/examples/mnist/train_lenet_consolidated.sh index c5f02666822..43ec3fc366a 100755 --- a/examples/mnist/train_lenet_consolidated.sh +++ b/examples/mnist/train_lenet_consolidated.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# set -e ./build/tools/caffe train \ diff --git a/examples/mnist/train_lenet_docker.sh b/examples/mnist/train_lenet_docker.sh index 32cf1c8e4a3..b546eaf3e13 100755 --- a/examples/mnist/train_lenet_docker.sh +++ b/examples/mnist/train_lenet_docker.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# set -e # The following example allows for the MNIST example (using LeNet) to be # trained using the caffe docker image instead of building from source. diff --git a/examples/mnist/train_lenet_rmsprop.sh b/examples/mnist/train_lenet_rmsprop.sh index adfa7ab0fca..f2058edb76f 100755 --- a/examples/mnist/train_lenet_rmsprop.sh +++ b/examples/mnist/train_lenet_rmsprop.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# set -e ./build/tools/caffe train \ diff --git a/examples/mnist/train_mnist_autoencoder.sh b/examples/mnist/train_mnist_autoencoder.sh index 724a0f14a49..1b6e14a1665 100755 --- a/examples/mnist/train_mnist_autoencoder.sh +++ b/examples/mnist/train_mnist_autoencoder.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# set -e ./build/tools/caffe train \ diff --git a/examples/mnist/train_mnist_autoencoder_adadelta.sh b/examples/mnist/train_mnist_autoencoder_adadelta.sh index a660dbb9ed2..f69354cb3b3 100755 --- a/examples/mnist/train_mnist_autoencoder_adadelta.sh +++ b/examples/mnist/train_mnist_autoencoder_adadelta.sh @@ -1,4 +1,40 @@ #!/bin/bash +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# set -e ./build/tools/caffe train \ diff --git a/examples/mnist/train_mnist_autoencoder_adagrad.sh b/examples/mnist/train_mnist_autoencoder_adagrad.sh index 4c11dfa67ac..bbe34322ed6 100755 --- a/examples/mnist/train_mnist_autoencoder_adagrad.sh +++ b/examples/mnist/train_mnist_autoencoder_adagrad.sh @@ -1,4 +1,40 @@ #!/bin/bash +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# set -e ./build/tools/caffe train \ diff --git a/examples/mnist/train_mnist_autoencoder_nesterov.sh b/examples/mnist/train_mnist_autoencoder_nesterov.sh index fd0559d2488..cf8bf256f6f 100755 --- a/examples/mnist/train_mnist_autoencoder_nesterov.sh +++ b/examples/mnist/train_mnist_autoencoder_nesterov.sh @@ -1,4 +1,40 @@ #!/bin/bash +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# set -e ./build/tools/caffe train \ diff --git a/examples/pycaffe/caffenet.py b/examples/pycaffe/caffenet.py old mode 100644 new mode 100755 index 82af2294435..42ca6a29979 --- a/examples/pycaffe/caffenet.py +++ b/examples/pycaffe/caffenet.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# from __future__ import print_function from caffe import layers as L, params as P, to_proto from caffe.proto import caffe_pb2 diff --git a/examples/pycaffe/layers/pascal_multilabel_datalayers.py b/examples/pycaffe/layers/pascal_multilabel_datalayers.py old mode 100644 new mode 100755 index 68e4fa7960a..bc6937da29e --- a/examples/pycaffe/layers/pascal_multilabel_datalayers.py +++ b/examples/pycaffe/layers/pascal_multilabel_datalayers.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# # imports import json import time diff --git a/examples/pycaffe/layers/pyloss.py b/examples/pycaffe/layers/pyloss.py old mode 100644 new mode 100755 index 6200e6bbc55..0b0c2d09927 --- a/examples/pycaffe/layers/pyloss.py +++ b/examples/pycaffe/layers/pyloss.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# import caffe import numpy as np diff --git a/examples/pycaffe/tools.py b/examples/pycaffe/tools.py old mode 100644 new mode 100755 index 88b1834af1e..3d5e67c6f30 --- a/examples/pycaffe/tools.py +++ b/examples/pycaffe/tools.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# import numpy as np diff --git a/examples/pycaffe/tune_engine.py b/examples/pycaffe/tune_engine.py new file mode 100755 index 00000000000..9a6fb123a2f --- /dev/null +++ b/examples/pycaffe/tune_engine.py @@ -0,0 +1,83 @@ +import os +import sys +import argparse +from caffe.proto import caffe_pb2 +import google.protobuf.text_format as txtf +import utils + +def selectOptimalEngine(layers): + optimal_layer = None + min_time = sys.float_info.max + for layer in layers: + if layer[2] < min_time: + min_time = layer[2] + optimal_layer = layer + + return optimal_layer + +def tuneEngine(logs, model): + if len(logs) <= 1: + print "[ERROR] Please specify two or more log files" + exit(1) + + for log in logs: + if not os.path.exists(log): + print "[ERROR] Please specify valid log file:", log + exit(1) + + layer_map = {} + net = None + for log in logs: + log_name = os.path.basename(log) + (model_str, time_lines) = utils.parseLog(log) + (net, layer_model_map) = utils.parseModelStr(model_str) + layer_time_map = utils.parseTimeLines(time_lines) + for k, v in layer_model_map.items(): + if k not in layer_map.keys(): + layer_map[k] = [(v[0], v[1], layer_time_map[k], v[2])] + else: + layer_map_v = layer_map[k] + layer_map_v.append((v[0], v[1], layer_time_map[k], v[2])) + layer_map[k] = layer_map_v + + optimal_layer_map = {} + for k, v in layer_map.items(): + optimal_layer = selectOptimalEngine(v) + assert(optimal_layer != None) + optimal_layer_map[optimal_layer[0]] 
= optimal_layer[3] + + genModel(net, model, optimal_layer_map) + +def genModel(net, model, optimal_layer_map): + net_str = "" + net_str += "name: \"" + net.name + "\"\n" + for index in range(0, len(net.layer)): + net_str += "layer {\n" + l = net.layer[index] + if l.type.endswith("Data"): + net_str += str(l) + "\n}\n" + continue + l = optimal_layer_map[index] + net_str += str(l) + "\n}\n" + with open(model, 'w') as f: + net = caffe_pb2.NetParameter() + txtf.Merge(net_str, net) + f.write(str(net)) + print "[INFO] Complete model engine tuning:", model + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + + parser.add_argument('-l', '--logs', nargs='+', help='require the caffe time logs', required=True) + + parser.add_argument('-o', '--output', action='store', dest='output', default="", + help='require the model output') + + parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') + + params = parser.parse_args() + if params.output == "": + print "Please specify the output for tuned model with -o" + sys.exit(1) + + tuneEngine(params.logs, params.output) diff --git a/examples/pycaffe/tune_model.py b/examples/pycaffe/tune_model.py new file mode 100644 index 00000000000..8305b081b35 --- /dev/null +++ b/examples/pycaffe/tune_model.py @@ -0,0 +1,95 @@ +import os +import sys +import argparse +from caffe.proto import caffe_pb2 +import google.protobuf.text_format as txtf +import copy +import utils + +def genOptimalModel(net, mkldnn_direct_time_map, mkldnn_winograd_time_map, optimal_model): + for index in range(0, len(net.layer)): + l = net.layer[index] + if l.type == "Convolution": + if mkldnn_winograd_time_map[l.name] < mkldnn_direct_time_map[l.name]: + l.convolution_param.conv_algorithm = "winograd" + else: + l.convolution_param.conv_algorithm = "direct" + + with open(optimal_model, "w") as f: + f.write(txtf.MessageToString(net, float_format=".17g")) + +def tuneModelDefinition(model_path, iteration): + working_dir = sys.path[0] + 
caffe_path = os.path.join(working_dir, "..", "..", "build", "tools", "caffe")
+    if not os.path.exists(caffe_path):
+        print "Caffe binary does not exist; please build Caffe binary first."
+        sys.exit(1)
+
+    base_model_name = os.path.basename(model_path)
+    model_dir = os.path.dirname(model_path)
+    winograd_model_name = base_model_name.split(".")[0] + "_winograd.prototxt"
+    winograd_model_path = os.path.join(model_dir, winograd_model_name)
+    direct_model_name = base_model_name.split(".")[0] + "_direct.prototxt"
+    direct_model_path = os.path.join(model_dir, direct_model_name)
+
+    base_net = caffe_pb2.NetParameter()
+    with open(model_path) as f:
+        s = f.read()
+        txtf.Merge(s, base_net)
+
+    direct_net = copy.deepcopy(base_net)
+    for index in range(0, len(direct_net.layer)):
+        l = direct_net.layer[index]
+        if l.type == "Convolution":
+            l.convolution_param.conv_algorithm = "direct"
+
+    with open(direct_model_path, "w") as f:
+        f.write(txtf.MessageToString(direct_net, float_format=".17g"))
+
+    winograd_net = copy.deepcopy(base_net)
+    for index in range(0, len(winograd_net.layer)):
+        l = winograd_net.layer[index]
+        if l.type == "Convolution":
+            l.convolution_param.conv_algorithm = "winograd"
+
+    with open(winograd_model_path, "w") as f:
+        f.write(txtf.MessageToString(winograd_net, float_format=".17g"))
+
+    mkldnn_direct_log = "mkldnn_direct.log"
+    mkldnn_winograd_log = "mkldnn_winograd.log"
+    mkldnn_direct_log_path = os.path.join(model_dir, mkldnn_direct_log)
+    mkldnn_winograd_log_path = os.path.join(model_dir, mkldnn_winograd_log)
+
+    mkldnn_direct_command = caffe_path + " time -model " + direct_model_path + " -engine MKLDNN -iterations " + str(iteration) + " >& " + mkldnn_direct_log_path
+    os.system(mkldnn_direct_command)
+    mkldnn_winograd_command = caffe_path + " time -model " + winograd_model_path + " -engine MKLDNN -iterations " + str(iteration) + " >& " + mkldnn_winograd_log_path
+    os.system(mkldnn_winograd_command)
+
+    (model_str, mkldnn_direct_time_lines) = 
utils.parseLog(mkldnn_direct_log_path) + mkldnn_direct_layer_time_map = utils.parseTimeLines(mkldnn_direct_time_lines) + (model_str, mkldnn_winograd_time_lines) = utils.parseLog(mkldnn_winograd_log_path) + mkldnn_winograd_layer_time_map = utils.parseTimeLines(mkldnn_winograd_time_lines) + + hybrid_model_name = base_model_name.split(".")[0] + "_hybrid.prototxt" + hybrid_model_path = os.path.join(model_dir, hybrid_model_name) + genOptimalModel(base_net, mkldnn_direct_layer_time_map, mkldnn_winograd_layer_time_map, hybrid_model_path) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + + parser.add_argument('-m', '--model', action='store', dest='model', default="", + help='require the model definition (prototxt)') + + parser.add_argument('-i', '--iteration', action='store', dest='iterations', type=int, default=10, + help='require iterations number to run the model') + + parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') + + params = parser.parse_args() + + model = params.model + if not os.path.exists(params.model): + print "[ERROR] Please specify the model definition file with -m" + exit(1) + + tuneModelDefinition(params.model, params.iterations) diff --git a/examples/pycaffe/utils.py b/examples/pycaffe/utils.py new file mode 100755 index 00000000000..91c32bba310 --- /dev/null +++ b/examples/pycaffe/utils.py @@ -0,0 +1,114 @@ +import os +import sys +from caffe.proto import caffe_pb2 +import google.protobuf.text_format as txtf + +def readFile(filePath): + lines = [] + file = open(filePath, 'r') + for line in file.readlines(): + lines.append(line) + file.close() + + return lines + +def writeFile(filePath, lines): + file = open(filePath, 'w+') + file.write(lines) + file.close() + +def parseLog(log): + lines = readFile(log) + model_start = False + time_start = False + model_lines = [] + time_lines = [] + for line in lines: + trim_line = line.strip() + if trim_line.endswith("Initializing net from parameters:"): + model_start = 
True + continue + if model_start: + if trim_line.find("Creating layer") <> -1: + model_start = False + continue + model_lines.append(line) + + if trim_line.endswith("Average time per layer:"): + time_start = True + continue + if time_start: + if trim_line.find("Average Forward pass") <> -1: + time_start = False + break + time_lines.append(line) + + model_lines = model_lines[1:] + model_str = "" + for line in model_lines: + model_str = model_str + line + + return (model_str, time_lines) + +def parseTimeLines(timeLines): + layer_map = {} + for line in timeLines: + trim_line = line.strip() + items = trim_line.split("\t") + layer_items = items[0].split(" ") + layer_name = layer_items[-1] + time_items = items[1].split(" ") + if layer_name not in layer_map.keys(): + layer_map[layer_name] = (float)(time_items[1]) + else: + layer_map[layer_name] = layer_map[layer_name] + (float)(time_items[1]) + + return layer_map + +def parseModelStr(modelStr): + net = caffe_pb2.NetParameter() + txtf.Merge(modelStr, net) + layer_model_map = {} + global_engine = "CAFFE" + if net.engine != "": + global_engine = net.engine + for index in range(0, len(net.layer)): + engine = global_engine + l = net.layer[index] + if l.engine != "": + engine = l.engine + param_engine = -1 + if l.type == "Convolution" or l.type == "Deconvolution": + if l.convolution_param.engine != "": + param_engine = l.convolution_param.engine + elif l.type == "BatchNorm": + if l.batch_norm_param.engine != "": + param_engine = l.batch_norm_param.engine + elif l.type == "Concat": + if l.concat_param.engine != "": + param_engine = l.concat_param.engine + elif l.type == "Eltwise": + if l.eltwise_param.engine != "": + param_engine = l.eltwise_param.engine + elif l.type == "InnerProduct": + if l.inner_product_param.engine != "": + param_engine = l.inner_product_param.engine + elif l.type == "LRN": + if l.lrn_param.engine != "": + param_engine = l.lrn_param.engine + elif l.type == "Pooling": + if l.pooling_param.engine != "": + 
param_engine = l.pooling_param.engine + elif l.type == "ReLU": + if l.relu_param.engine != "": + param_engine = l.relu_param.engine + + if param_engine == 0 or param_engine == 1: + engine = "CAFFE" + elif param_engine == 3: + engine = "MKL2017" + elif param_engine == 4: + engine = "MKLDNN" + layer_model_map[l.name] = (index, engine, l) + + return (net, layer_model_map) diff --git a/examples/siamese/convert_mnist_siamese_data.cpp b/examples/siamese/convert_mnist_siamese_data.cpp index 928b3fbf4d5..7a983e13582 100644 --- a/examples/siamese/convert_mnist_siamese_data.cpp +++ b/examples/siamese/convert_mnist_siamese_data.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + // // This script converts the MNIST dataset to the leveldb format used // by caffe to train siamese network. diff --git a/examples/siamese/create_mnist_siamese.sh b/examples/siamese/create_mnist_siamese.sh index 03adce54d9b..46ba09bfb61 100755 --- a/examples/siamese/create_mnist_siamese.sh +++ b/examples/siamese/create_mnist_siamese.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# # This script converts the mnist data into leveldb format. set -e diff --git a/examples/siamese/train_mnist_siamese.sh b/examples/siamese/train_mnist_siamese.sh index e01ac2ceefd..678da181cb4 100755 --- a/examples/siamese/train_mnist_siamese.sh +++ b/examples/siamese/train_mnist_siamese.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# set -e TOOLS=./build/tools diff --git a/examples/ssd/images.txt b/examples/ssd/images.txt new file mode 100644 index 00000000000..196f3f23754 --- /dev/null +++ b/examples/ssd/images.txt @@ -0,0 +1 @@ +examples/images/fish-bike.jpg diff --git a/examples/ssd/ssd_detect.cpp b/examples/ssd/ssd_detect.cpp new file mode 100644 index 00000000000..cc0ec5ba9a2 --- /dev/null +++ b/examples/ssd/ssd_detect.cpp @@ -0,0 +1,395 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +// This is a demo code for using a SSD model to do detection. +// The code is modified from examples/cpp_classification/classification.cpp. +// Usage: +// ssd_detect [FLAGS] model_file weights_file list_file +// +// where model_file is the .prototxt file defining the network architecture, and +// weights_file is the .caffemodel file containing the network parameters, and +// list_file contains a list of image files with the format as follows: +// folder/img1.JPEG +// folder/img2.JPEG +// list_file can also contain a list of video files with the format as follows: +// folder/video1.mp4 +// folder/video2.mp4 +// +#include +#ifdef USE_OPENCV +#include +#include +#include +#endif // USE_OPENCV +#include +#include +#include +#include +#include +#include +#include + +#ifdef USE_OPENCV +using namespace caffe; // NOLINT(build/namespaces) + +class Detector { + public: + Detector(const string& model_file, + const string& weights_file, + const string& mean_file, + const string& mean_value); + + std::vector > Detect(const cv::Mat& img); + + private: + void SetMean(const string& mean_file, const string& mean_value); + + void WrapInputLayer(std::vector* input_channels); + + void Preprocess(const cv::Mat& img, + std::vector* input_channels); + + private: + shared_ptr > net_; + cv::Size input_geometry_; + int num_channels_; + cv::Mat mean_; +}; + +Detector::Detector(const string& model_file, + const string& weights_file, + const string& mean_file, + const string& 
mean_value) { +#ifdef CPU_ONLY + Caffe::set_mode(Caffe::CPU); +#else + Caffe::set_mode(Caffe::GPU); +#endif + + /* Load the network. */ + net_.reset(new Net(model_file, TEST)); + net_->CopyTrainedLayersFrom(weights_file); + + CHECK_EQ(net_->num_inputs(), 1) << "Network should have exactly one input."; + CHECK_EQ(net_->num_outputs(), 1) << "Network should have exactly one output."; + + Blob* input_layer = net_->input_blobs()[0]; + num_channels_ = input_layer->channels(); + CHECK(num_channels_ == 3 || num_channels_ == 1) + << "Input layer should have 1 or 3 channels."; + input_geometry_ = cv::Size(input_layer->width(), input_layer->height()); + + /* Load the binaryproto mean file. */ + SetMean(mean_file, mean_value); +} + +std::vector > Detector::Detect(const cv::Mat& img) { + Blob* input_layer = net_->input_blobs()[0]; + input_layer->Reshape(1, num_channels_, + input_geometry_.height, input_geometry_.width); + /* Forward dimension change to all layers. */ + net_->Reshape(); + + std::vector input_channels; + WrapInputLayer(&input_channels); + + Preprocess(img, &input_channels); + + net_->Forward(); + + /* Copy the output layer to a std::vector */ + Blob* result_blob = net_->output_blobs()[0]; + const float* result = result_blob->cpu_data(); + const int num_det = result_blob->height(); + vector > detections; + for (int k = 0; k < num_det; ++k) { + if (result[0] == -1) { + // Skip invalid detection. + result += 7; + continue; + } + vector detection(result, result + 7); + detections.push_back(detection); + result += 7; + } + return detections; +} + +/* Load the mean file in binaryproto format. 
*/ +void Detector::SetMean(const string& mean_file, const string& mean_value) { + cv::Scalar channel_mean; + if (!mean_file.empty()) { + CHECK(mean_value.empty()) << + "Cannot specify mean_file and mean_value at the same time"; + BlobProto blob_proto; + ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto); + + /* Convert from BlobProto to Blob */ + Blob mean_blob; + mean_blob.FromProto(blob_proto); + CHECK_EQ(mean_blob.channels(), num_channels_) + << "Number of channels of mean file doesn't match input layer."; + + /* The format of the mean file is planar 32-bit float BGR or grayscale. */ + std::vector channels; + float* data = mean_blob.mutable_cpu_data(); + for (int i = 0; i < num_channels_; ++i) { + /* Extract an individual channel. */ + cv::Mat channel(mean_blob.height(), mean_blob.width(), CV_32FC1, data); + channels.push_back(channel); + data += mean_blob.height() * mean_blob.width(); + } + + /* Merge the separate channels into a single image. */ + cv::Mat mean; + cv::merge(channels, mean); + + /* Compute the global mean pixel value and create a mean image + * filled with this value. */ + channel_mean = cv::mean(mean); + mean_ = cv::Mat(input_geometry_, mean.type(), channel_mean); + } + if (!mean_value.empty()) { + CHECK(mean_file.empty()) << + "Cannot specify mean_file and mean_value at the same time"; + stringstream ss(mean_value); + vector values; + string item; + while (getline(ss, item, ',')) { + float value = std::atof(item.c_str()); + values.push_back(value); + } + CHECK(values.size() == 1 || values.size() == num_channels_) << + "Specify either 1 mean_value or as many as channels: " << num_channels_; + + std::vector channels; + for (int i = 0; i < num_channels_; ++i) { + /* Extract an individual channel. 
*/ + cv::Mat channel(input_geometry_.height, input_geometry_.width, CV_32FC1, + cv::Scalar(values[i])); + channels.push_back(channel); + } + cv::merge(channels, mean_); + } +} + +/* Wrap the input layer of the network in separate cv::Mat objects + * (one per channel). This way we save one memcpy operation and we + * don't need to rely on cudaMemcpy2D. The last preprocessing + * operation will write the separate channels directly to the input + * layer. */ +void Detector::WrapInputLayer(std::vector* input_channels) { + Blob* input_layer = net_->input_blobs()[0]; + + int width = input_layer->width(); + int height = input_layer->height(); + float* input_data = input_layer->mutable_cpu_data(); + for (int i = 0; i < input_layer->channels(); ++i) { + cv::Mat channel(height, width, CV_32FC1, input_data); + input_channels->push_back(channel); + input_data += width * height; + } +} + +void Detector::Preprocess(const cv::Mat& img, + std::vector* input_channels) { + /* Convert the input image to the input image format of the network. 
*/ + cv::Mat sample; + if (img.channels() == 3 && num_channels_ == 1) + cv::cvtColor(img, sample, cv::COLOR_BGR2GRAY); + else if (img.channels() == 4 && num_channels_ == 1) + cv::cvtColor(img, sample, cv::COLOR_BGRA2GRAY); + else if (img.channels() == 4 && num_channels_ == 3) + cv::cvtColor(img, sample, cv::COLOR_BGRA2BGR); + else if (img.channels() == 1 && num_channels_ == 3) + cv::cvtColor(img, sample, cv::COLOR_GRAY2BGR); + else + sample = img; + + cv::Mat sample_resized; + if (sample.size() != input_geometry_) + cv::resize(sample, sample_resized, input_geometry_); + else + sample_resized = sample; + + cv::Mat sample_float; + if (num_channels_ == 3) + sample_resized.convertTo(sample_float, CV_32FC3); + else + sample_resized.convertTo(sample_float, CV_32FC1); + + cv::Mat sample_normalized; + cv::subtract(sample_float, mean_, sample_normalized); + + /* This operation will write the separate BGR planes directly to the + * input layer of the network because it is wrapped by the cv::Mat + * objects in input_channels. */ + cv::split(sample_normalized, *input_channels); + + CHECK(reinterpret_cast(input_channels->at(0).data) + == net_->input_blobs()[0]->cpu_data()) + << "Input channels are not wrapping the input layer of the network."; +} + +DEFINE_string(mean_file, "", + "The mean file used to subtract from the input image."); +DEFINE_string(mean_value, "104,117,123", + "If specified, can be one value or can be same as image channels" + " - would subtract from the corresponding channel). Separated by ','." + "Either mean_file or mean_value should be provided, not both."); +DEFINE_string(file_type, "image", + "The file type in the list_file. 
Currently support image and video."); +DEFINE_string(out_file, "", + "If provided, store the detection results in the out_file."); +DEFINE_double(confidence_threshold, 0.01, + "Only store detections with score higher than the threshold."); + +int main(int argc, char** argv) { + ::google::InitGoogleLogging(argv[0]); + // Print output to stderr (while still logging) + FLAGS_alsologtostderr = 1; + +#ifndef GFLAGS_GFLAGS_H_ + namespace gflags = google; +#endif + + gflags::SetUsageMessage("Do detection using SSD mode.\n" + "Usage:\n" + " ssd_detect [FLAGS] model_file weights_file list_file\n"); + gflags::ParseCommandLineFlags(&argc, &argv, true); + + if (argc < 4) { + gflags::ShowUsageWithFlagsRestrict(argv[0], "examples/ssd/ssd_detect"); + return 1; + } + + const string& model_file = argv[1]; + const string& weights_file = argv[2]; + const string& mean_file = FLAGS_mean_file; + const string& mean_value = FLAGS_mean_value; + const string& file_type = FLAGS_file_type; + const string& out_file = FLAGS_out_file; + const float confidence_threshold = FLAGS_confidence_threshold; + + // Initialize the network. + Detector detector(model_file, weights_file, mean_file, mean_value); + + // Set the output mode. + std::streambuf* buf = std::cout.rdbuf(); + std::ofstream outfile; + if (!out_file.empty()) { + outfile.open(out_file.c_str()); + if (outfile.good()) { + buf = outfile.rdbuf(); + } + } + std::ostream out(buf); + + // Process image one by one. + std::ifstream infile(argv[3]); + std::string file; + while (infile >> file) { + if (file_type == "image") { + cv::Mat img = cv::imread(file, -1); + CHECK(!img.empty()) << "Unable to decode image " << file; + std::vector > detections = detector.Detect(img); + + /* Print the detection results. */ + for (int i = 0; i < detections.size(); ++i) { + const vector& d = detections[i]; + // Detection format: [image_id, label, score, xmin, ymin, xmax, ymax]. 
+ CHECK_EQ(d.size(), 7); + const float score = d[2]; + if (score >= confidence_threshold) { + out << file << " "; + out << static_cast(d[1]) << " "; + out << score << " "; + out << static_cast(d[3] * img.cols) << " "; + out << static_cast(d[4] * img.rows) << " "; + out << static_cast(d[5] * img.cols) << " "; + out << static_cast(d[6] * img.rows) << std::endl; + } + } + } else if (file_type == "video") { + cv::VideoCapture cap(file); + if (!cap.isOpened()) { + LOG(FATAL) << "Failed to open video: " << file; + } + cv::Mat img; + int frame_count = 0; + while (true) { + bool success = cap.read(img); + if (!success) { + LOG(INFO) << "Process " << frame_count << " frames from " << file; + break; + } + CHECK(!img.empty()) << "Error when read frame"; + std::vector > detections = detector.Detect(img); + + /* Print the detection results. */ + for (int i = 0; i < detections.size(); ++i) { + const vector& d = detections[i]; + // Detection format: [image_id, label, score, xmin, ymin, xmax, ymax]. + CHECK_EQ(d.size(), 7); + const float score = d[2]; + if (score >= confidence_threshold) { + out << file << "_"; + out << std::setfill('0') << std::setw(6) << frame_count << " "; + out << static_cast(d[1]) << " "; + out << score << " "; + out << static_cast(d[3] * img.cols) << " "; + out << static_cast(d[4] * img.rows) << " "; + out << static_cast(d[5] * img.cols) << " "; + out << static_cast(d[6] * img.rows) << std::endl; + } + } + ++frame_count; + } + if (cap.isOpened()) { + cap.release(); + } + } else { + LOG(FATAL) << "Unknown file_type: " << file_type; + } + } + return 0; +} +#else +int main(int argc, char** argv) { + LOG(FATAL) << "This example requires OpenCV; compile with USE_OPENCV."; +} +#endif // USE_OPENCV diff --git a/examples/ssd/ssdvars.sh b/examples/ssd/ssdvars.sh new file mode 100755 index 00000000000..7bc53589a3b --- /dev/null +++ b/examples/ssd/ssdvars.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +export CAFFE_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd 
)"/../.. +export PYTHONPATH=$CAFFE_ROOT/"python" + +# datapath is where you wish to store generated lmdb +# also in $DATAPATH/data directory you should have unpacked +# VOCdevkit and/or coco directories. see more in ./data/coco/README.md +# this variable is used in create_list and create_data scripts only. +export DATAPATH="/home/data/ssd/" + +echo CAFFE_ROOT is $CAFFE_ROOT +echo PYTHONPATH is $PYTHONPATH +echo DATAPATH is $DATAPATH diff --git a/examples/test_blobs/ImageDataLayerTest_TestRead_d.blob b/examples/test_blobs/ImageDataLayerTest_TestRead_d.blob new file mode 100644 index 00000000000..331bcbec550 Binary files /dev/null and b/examples/test_blobs/ImageDataLayerTest_TestRead_d.blob differ diff --git a/examples/test_blobs/ImageDataLayerTest_TestRead_f.blob b/examples/test_blobs/ImageDataLayerTest_TestRead_f.blob new file mode 100644 index 00000000000..16ee2484130 Binary files /dev/null and b/examples/test_blobs/ImageDataLayerTest_TestRead_f.blob differ diff --git a/examples/test_blobs/ImageDataLayerTest_TestResize_d.blob b/examples/test_blobs/ImageDataLayerTest_TestResize_d.blob new file mode 100644 index 00000000000..ff6459e24b7 Binary files /dev/null and b/examples/test_blobs/ImageDataLayerTest_TestResize_d.blob differ diff --git a/examples/test_blobs/ImageDataLayerTest_TestResize_f.blob b/examples/test_blobs/ImageDataLayerTest_TestResize_f.blob new file mode 100644 index 00000000000..9330be011e3 Binary files /dev/null and b/examples/test_blobs/ImageDataLayerTest_TestResize_f.blob differ diff --git a/examples/test_blobs/ImageDataLayerTest_TestShuffle_d.blob b/examples/test_blobs/ImageDataLayerTest_TestShuffle_d.blob new file mode 100644 index 00000000000..331bcbec550 Binary files /dev/null and b/examples/test_blobs/ImageDataLayerTest_TestShuffle_d.blob differ diff --git a/examples/test_blobs/ImageDataLayerTest_TestShuffle_f.blob b/examples/test_blobs/ImageDataLayerTest_TestShuffle_f.blob new file mode 100644 index 00000000000..16ee2484130 Binary files 
/dev/null and b/examples/test_blobs/ImageDataLayerTest_TestShuffle_f.blob differ diff --git a/examples/web_demo/app.py b/examples/web_demo/app.py old mode 100644 new mode 100755 index 09411f33f10..08eff944177 --- a/examples/web_demo/app.py +++ b/examples/web_demo/app.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# import os import time import cPickle diff --git a/examples/web_demo/exifutil.py b/examples/web_demo/exifutil.py old mode 100644 new mode 100755 index 01918b2a491..c6f1cc250f6 --- a/examples/web_demo/exifutil.py +++ b/examples/web_demo/exifutil.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# """ This script handles the skimage exif problem. """ diff --git a/external/mkl/prepare_mkl.sh b/external/mkl/prepare_mkl.sh new file mode 100755 index 00000000000..b68bc7aec19 --- /dev/null +++ b/external/mkl/prepare_mkl.sh @@ -0,0 +1,114 @@ +#!/bin/sh +# set -ex +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +FindLibrary() +{ +# Find all the instances of the MKL libraries present in Caffe + MKL_LIBS=`find $1 -name libmklml_intel.so` + +# Sort libraries based on build date in $MKL/include/mkl_version.h. +# Cut out everything but build date tagged with __INTEL_MKL_BUILD_DATE. +# The format of sorted lines is: "build_date:path_to_mkl_lib/include/mkl_version.h". +# Sort lines based on the first column (build_date), in reversed order (the recent on the top). +# Cut out include/mkl_version.h. 
+ RECENT_VERSION=`echo "$MKL_LIBS" \ + | sed -e 's/lib.*$/include\/mkl_version.h/' \ + | xargs grep __INTEL_MKL_BUILD_DATE /dev/null \ + | sed -e 's/\(.*\):.* \([0-9]*\)/\2:\1/' \ + | sort -rk1 -t: | head -n1 | cut -d ":" -f 2- \ + | sed -e 's/include\/mkl_version.h//'` + +# Find once again libmklml_intel.so to obtain the path with most recent MKL lib. +# TODO: obtain path from MKL_LIBS. + RECENT_MKL=`find $RECENT_VERSION -name libmklml_intel.so` + LOCALMKL=$RECENT_MKL +} + +GetVersionName() +{ +VERSION_LINE=0 +if [ $1 ]; then + VERSION_LINE=`grep __INTEL_MKL_BUILD_DATE $1/include/mkl_version.h 2>/dev/null | sed -e 's/.* //'` +fi +if [ -z $VERSION_LINE ]; then + VERSION_LINE=0 +fi +echo $VERSION_LINE # Return Version Line +} + +# MKL +DST=`dirname $0` +OMP=0 +VERSION_MATCH=20170425 +ARCHIVE_BASENAME=mklml_lnx_2018.0.20170425.tgz +MKL_CONTENT_DIR=`echo $ARCHIVE_BASENAME | rev | cut -d "." -f 2- | rev` +GITHUB_RELEASE_TAG=1.0.0 + +MKLURL="https://github.com/intel/caffe/releases/download/$GITHUB_RELEASE_TAG/$ARCHIVE_BASENAME" +# there are diffrent MKL lib to be used for GCC and for ICC +reg='^[0-9]+$' +VERSION_LINE=`GetVersionName $MKLROOT` +# Check if MKLROOT is set if positive then set one will be used.. 
+if [ -z $MKLROOT ] || [ $VERSION_LINE -lt $VERSION_MATCH ]; then + # ..if MKLROOT is not set then check if we have MKL downloaded in proper version + VERSION_LINE=`GetVersionName $DST/$MKL_CONTENT_DIR` + if [ $VERSION_LINE -lt $VERSION_MATCH ] ; then + #...If it is not then downloaded and unpacked + wget --no-check-certificate -P $DST $MKLURL -O $DST/$ARCHIVE_BASENAME + tar -xzf $DST/$ARCHIVE_BASENAME -C $DST + fi + FindLibrary $DST + MKLROOT=$PWD/`echo $LOCALMKL | sed -e 's/lib.*$//'` +fi +# Check what MKL lib we have in MKLROOT +if [ -z `find $MKLROOT -name libmkl_rt.so -print -quit` ]; then +# mkl_rt has not been found; we are dealing with MKLML + + if [ -z $LOCALMKL ] ; then +# LOCALMKL is not set, when MKLROOT was set manually and it points to MKLML in correct version + FindLibrary $MKLROOT + fi + + LIBRARIES=`basename $LOCALMKL | sed -e 's/^.*lib//' | sed -e 's/\.so.*$//'` + OMP=1 +else + LIBRARIES="mkl_rt" +fi + +# return value to calling script (Makefile,cmake) +echo $MKLROOT $LIBRARIES $OMP diff --git a/external/mlsl/prepare_mlsl.sh b/external/mlsl/prepare_mlsl.sh new file mode 100755 index 00000000000..0d56d720077 --- /dev/null +++ b/external/mlsl/prepare_mlsl.sh @@ -0,0 +1,136 @@ +#!/bin/sh +# set -ex +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# This script is used to prepare the Intel® Machine Learning Scaling Library +# +FindLibrary() +{ +# Find all the instances of the MKL libraries present in Caffe + MLSL_LIBS=`find $1 -name libmlsl.so` + #echo "[Debug][FindLibrary function] MLSL_LIBS: $MLSL_LIBS" + + LOCALMLSL=$MLSL_LIBS + #echo "[Debug][FindLibrary function] LOCALMLSL: $LOCALMLSL" +} + +GetVersionName() +{ +VERSION_LINE=0 +if [ $1 ]; then + RAW_VERSION_LINE=`echo $1 | rev | cut -d "_" -f -1 | rev` + VERSION_LINE=`echo $RAW_VERSION_LINE | sed 's/\.//g'` +fi +if [ -z $VERSION_LINE ]; then + VERSION_LINE=0 +fi +if [ -z "$(echo $VERSION_LINE | sed -n "/^[0-9]\+$/p")" ]; then + #echo "[Debug] VERSION_LINE value 
contains other string or flags, not only numbers" + VERSION_LINE=0 +fi +echo $VERSION_LINE # Return Version Line +} + +# Clean up the previous MLSL version +CleanUpPreviousMLSL2017_0_014() +{ +OLD_ARCHIVE_TARGZ=files.tar.gz +OLD_INSTALL_SHELL=install.sh +OLD_ARCHIVE_BASENAME=l_mlsl_p_2017.0.014.tgz +OLD_ARCHIVE_INSTALL_FOLDERNAME=l_mlsl_p_2017.0.014 +if [ -f $ABS_DST/$OLD_ARCHIVE_TARGZ ]; then + rm $ABS_DST/$OLD_ARCHIVE_TARGZ + #echo "[Debug] Delete old files.tar.gz!" +fi +if [ -f $ABS_DST/$OLD_INSTALL_SHELL ]; then + rm $ABS_DST/$OLD_INSTALL_SHELL + #echo "[Debug] Delete old install.sh file!" +fi +if [ -f $ABS_DST/$OLD_ARCHIVE_BASENAME ]; then + rm $ABS_DST/$OLD_ARCHIVE_BASENAME + #echo "[Debug] Delete old l_mlsl_p_2017.0.014.tgz file!" +fi +if [ -d $ABS_DST/$OLD_ARCHIVE_INSTALL_FOLDERNAME ]; then + rm -rf $ABS_DST/$OLD_ARCHIVE_INSTALL_FOLDERNAME + #echo "[Debug] Delete old l_mlsl_p_2017.0.014 folder!" +fi +} + +# MLSL +DST=`dirname $0` +#echo "[Debug] dirname: $0" +#echo "[Debug] DST value: $DST" +ABS_DST=`readlink -f $DST` +#echo "[Debug] ABS_DST value: $ABS_DST" + +if [ -z $MLSL_ROOT ]; then + CleanUpPreviousMLSL2017_0_014 +fi + +VERSION_MATCH=20171016 +ARCHIVE_BASENAME=l_mlsl_2017.1.016.tgz +ARCHIVE_INSTALL_FOLDERNAME=l_mlsl_2017.1.016 +#because the l_mlsl_2017.1.016.tgz will unpacked files.tar.gz and install.sh to the ARCHIVE_INSTALL_FOLDERNAME +#not unpacked to the DST folder (Different behavior against l_mlsl_p_2017.0.014.tgz) +ARCHIVE_INSTALL_FOLDERNAME_TEMP=l_mlsl_2017.1.016_temp +MLSL_CONTENT_DIR=`echo $ARCHIVE_BASENAME | rev | cut -d "." -f 2- | rev` +#echo "[Debug] MLSL_CONTENT_DIR value: $MLSL_CONTENT_DIR" +GITHUB_RELEASE_TAG=v2017.1-Preview + +MLSLURL="https://github.com/01org/MLSL/releases/download/$GITHUB_RELEASE_TAG/$ARCHIVE_BASENAME" +#echo "[Debug] MLSL_ROOT value: $MLSL_ROOT" +VERSION_LINE=`GetVersionName $MLSL_ROOT` +#echo "[Debug] VERSION_LINE value: $VERSION_LINE" +# Check if MLSL_ROOT is set if positive then set one will be used.. 
+if [ -z $MLSL_ROOT ] || [ $VERSION_LINE -lt $VERSION_MATCH ]; then + # ..if MLSL_ROOT is not set then check if we have MLSL unpacked and installed in proper version + FindLibrary $DST + #echo "[Debug] LOCALMLSL value inside if: $LOCALMLSL" + if [ $LOCALMLSL ]; then + #in order to return value to calling script (Makefile,cmake), cannot print other info + #echo "[Debug] Some verison of MLSL is unpacked and installed" + MLSL_PREVIOUS_CONTENT_DIR=`echo $LOCALMLSL | rev | cut -d "/" -f 4- | cut -d "/" -f -1 | rev` + #echo "[Debug] MLSL_PREVIOUS_CONTENT_DIR value: $MLSL_PREVIOUS_CONTENT_DIR" + #echo "[Debug] DST/MLSL_PREVIOUS_CONTENT_DIR value: $DST/$MLSL_PREVIOUS_CONTENT_DIR" + VERSION_LINE=`GetVersionName $DST/$MLSL_PREVIOUS_CONTENT_DIR` + fi + #echo "[Debug] VERSION_LINE value inside if: $VERSION_LINE" + + #if MLSL_ROOT is not set + if [ -z $MLSL_ROOT ]; then + #if version is not given, or the version is lower than expected version + if [ $VERSION_LINE -lt $VERSION_MATCH ]; then + #Then downloaded, unpacked and installed + wget --no-check-certificate -P $DST $MLSLURL -O $DST/$ARCHIVE_BASENAME + if [ ! -d $DST/$ARCHIVE_INSTALL_FOLDERNAME_TEMP ]; then + mkdir $DST/$ARCHIVE_INSTALL_FOLDERNAME_TEMP + #echo "[Debug] Create l_mlsl_2017.1.016_temp folder for unpacking!" + fi + tar -xzf $DST/$ARCHIVE_BASENAME -C $DST/$ARCHIVE_INSTALL_FOLDERNAME_TEMP + #echo "[Debug] PWD value: $PWD" + #install.sh did not support the relative path as the parameter + bash $DST/$ARCHIVE_INSTALL_FOLDERNAME_TEMP/$ARCHIVE_INSTALL_FOLDERNAME/install.sh -s -d $ABS_DST/$ARCHIVE_INSTALL_FOLDERNAME + rm -rf $DST/$ARCHIVE_INSTALL_FOLDERNAME_TEMP + #echo "[Debug] Remove l_mlsl_2017.1.016_temp folder for unpacking!" 
+ fi + #else: version is just our expected version, no need to donload again, but need to set the MLSL_ROOT + #do not change the value of MLSL_ROOT if MLSL_ROOT is set, but version is not given + FindLibrary $DST + #echo "[Debug] LOCALMLSL value: $LOCALMLSL" + #echo "[Debug] PWD value: $PWD" + MLSL_ROOT=$PWD/`echo $LOCALMLSL | sed -e 's/intel64.*$//'` + else + #if MLSL_ROOT is set, but version is not given, or the version is lower than expected version + #not to download our own version, and just use mlsl as the return value of LIBRARIES + LIBRARIES="mlsl" + fi + #echo "[Debug] MLSL_ROOT value: $MLSL_ROOT" +fi + +#The simplest implementation of LIBRARIES return value +LIBRARIES="mlsl" +#echo "[Debug] LIBRARIES value: $LIBRARIES" + +# return value to calling script (Makefile,cmake) +echo $MLSL_ROOT $LIBRARIES diff --git a/include/caffe/blob.hpp b/include/caffe/blob.hpp index af360ac24bd..3295f7ab15a 100644 --- a/include/caffe/blob.hpp +++ b/include/caffe/blob.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_BLOB_HPP_ #define CAFFE_BLOB_HPP_ @@ -218,7 +255,11 @@ class Blob { const Dtype* cpu_data() const; void set_cpu_data(Dtype* data); + void set_cpu_diff(Dtype* diff); + +#ifndef CPU_ONLY const int* gpu_shape() const; +#endif const Dtype* gpu_data() const; const Dtype* cpu_diff() const; const Dtype* gpu_diff() const; @@ -226,6 +267,25 @@ class Blob { Dtype* mutable_gpu_data(); Dtype* mutable_cpu_diff(); Dtype* mutable_gpu_diff(); + + size_t prv_data_count() const { + CHECK(data_); return data_->prv_descriptor_->prv_count();} + size_t prv_diff_count() const { + CHECK(diff_); return diff_->prv_descriptor_->prv_count();} + + const Dtype* prv_data() const; + const Dtype* prv_diff() const; + Dtype* mutable_prv_data(); + Dtype* mutable_prv_diff(); + + void set_prv_data_descriptor(shared_ptr descriptor, + bool same_data = false); + void set_prv_diff_descriptor(shared_ptr descriptor, + bool same_data = false); + + shared_ptr get_prv_data_descriptor(); + shared_ptr get_prv_diff_descriptor(); + void Update(); void 
FromProto(const BlobProto& proto, bool reshape = true); void ToProto(BlobProto* proto, bool write_diff = false) const; @@ -268,7 +328,9 @@ class Blob { protected: shared_ptr data_; shared_ptr diff_; +#ifndef CPU_ONLY shared_ptr shape_data_; +#endif vector shape_; int count_; int capacity_; diff --git a/include/caffe/caffe.hpp b/include/caffe/caffe.hpp index 06882096c55..7b434cbe695 100644 --- a/include/caffe/caffe.hpp +++ b/include/caffe/caffe.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + // caffe.hpp is the header file that you need to include in your code. It wraps // all the internal caffe header files into one for simpler inclusion. diff --git a/include/caffe/common.hpp b/include/caffe/common.hpp index 3c6a076ec2f..cc19f358d1b 100644 --- a/include/caffe/common.hpp +++ b/include/caffe/common.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_COMMON_HPP_ #define CAFFE_COMMON_HPP_ @@ -164,6 +201,9 @@ class Caffe { inline static bool root_solver() { return Get().root_solver_; } inline static void set_root_solver(bool val) { Get().root_solver_ = val; } + inline static int iter_size() { return Get().iter_size_; } + inline static void set_iter_size(int val) { Get().iter_size_ = val; } + protected: #ifndef CPU_ONLY cublasHandle_t cublas_handle_; @@ -174,6 +214,7 @@ class Caffe { Brew mode_; int solver_count_; bool root_solver_; + int iter_size_; private: // The private constructor to avoid duplicate instantiation. diff --git a/include/caffe/data_reader.hpp b/include/caffe/data_reader.hpp index 8ed5542cb8d..dff77199f98 100644 --- a/include/caffe/data_reader.hpp +++ b/include/caffe/data_reader.hpp @@ -1,8 +1,46 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_DATA_READER_HPP_ #define CAFFE_DATA_READER_HPP_ #include #include +#include #include #include "caffe/common.hpp" @@ -25,10 +63,10 @@ class DataReader { explicit DataReader(const LayerParameter& param); ~DataReader(); - inline BlockingQueue& free() const { + inline BlockingQueue& free() const { return queue_pair_->free_; } - inline BlockingQueue& full() const { + inline BlockingQueue& full() const { return queue_pair_->full_; } @@ -39,12 +77,45 @@ class DataReader { explicit QueuePair(int size); ~QueuePair(); - BlockingQueue free_; - BlockingQueue full_; + BlockingQueue free_; + BlockingQueue full_; DISABLE_COPY_AND_ASSIGN(QueuePair); }; + class DBWrapper { + public: + explicit DBWrapper(const LayerParameter& param); + virtual string value() = 0; + virtual void Next() = 0; + protected: + shared_ptr db; + shared_ptr cursor; + }; + + class DBShuffle: public DBWrapper { + public: + explicit DBShuffle(const LayerParameter& param); + virtual string value() { + return string(static_cast(current_image_->first), + current_image_->second); + } + virtual void Next(); + protected: + vector > image_pointers_; + vector >::iterator current_image_; + shared_ptr prefetch_rng_; + + void ShuffleImages(); + }; + + class DBSequential: public DBWrapper { + public: + explicit DBSequential(const LayerParameter& param): DBWrapper(param) {} + virtual string value() { return cursor->value(); } + virtual void Next(); + }; + // A single body is created per source class Body : public InternalThread { public: @@ -53,10 +124,12 @@ class DataReader { protected: void InternalThreadEntry(); - void read_one(db::Cursor* cursor, QueuePair* qp); + void read_one(DBWrapper* img, QueuePair* qp); + void ShuffleImages(); const LayerParameter param_; BlockingQueue > new_queue_pairs_; + bool first_read_; friend class DataReader; diff --git a/include/caffe/data_transformer.hpp b/include/caffe/data_transformer.hpp index 97b4ee6a8c4..e93599f831b 100644 --- a/include/caffe/data_transformer.hpp 
+++ b/include/caffe/data_transformer.hpp @@ -1,14 +1,120 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_DATA_TRANSFORMER_HPP #define CAFFE_DATA_TRANSFORMER_HPP +#include #include #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/proto/caffe.pb.h" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/rng.hpp" + +using google::protobuf::RepeatedPtrField; namespace caffe { +class DataReader; + +class RandNumbers { + public: + /** + * @brief Generates a random integer from Uniform({0, 1, ..., n-1}). + * + * @param n + * The upperbound (exclusive) value of the random number. + * @return + * A uniformly random integer value from ({0, 1, ..., n-1}). + */ + int operator()(int n) { + CHECK_GT(n, 0); + return GetNextNumber() % n; + } + + virtual uint32_t GetNextNumber() = 0; +}; + +class GenRandNumbers: public RandNumbers { + public: + void Init() { + const unsigned int rng_seed = caffe_rng_rand(); + rng_.reset(new Caffe::RNG(rng_seed)); + } + + void Reset() { rng_.reset(); } + bool IsEmpty() { return (rng_.get() == nullptr); } + + virtual uint32_t GetNextNumber() { + CHECK(rng_); + caffe::rng_t* rng = static_cast(rng_->generator()); + return (*rng)(); + } + private: + shared_ptr rng_; +}; + + +class PreclcRandomNumbers: public RandNumbers { + public: + void FillRandomNumbers(int num_count, RandNumbers& rand_gen) { + for (int i = 0; i < num_count; i++) + random_numbers.push(rand_gen.GetNextNumber()); + } + + virtual uint32_t GetNextNumber() { + CHECK(!random_numbers.empty()); + uint32_t num = random_numbers.front(); + random_numbers.pop(); + return num; + } + private: + std::queue random_numbers; +}; + +namespace { + // We use these type and value to distinguish when + // annotation handler should be used. + struct EmptyType { }; + EmptyType empty_value; +} + /** * @brief Applies common transformations to the input data, such as * scaling, mirroring, substracting the image mean... @@ -24,7 +130,9 @@ class DataTransformer { * transformation. 
*/ void InitRand(); + void ReinitRand(); + void GenerateRandNumbers(PreclcRandomNumbers& rn, bool sample_bboxes = false); /** * @brief Applies the transformation defined in the data layer's * transform_param block to the data. @@ -35,7 +143,15 @@ class DataTransformer { * This is destination blob. It can be part of top blob's data if * set_cpu_data() is used. See data_layer.cpp for an example. */ - void Transform(const Datum& datum, Blob* transformed_blob); + + void Transform(const Datum& datum, Blob* transformed_blob) + {Transform(datum, transformed_blob, rand_num_);} + void Transform(const Datum& datum, Blob* transformed_blob, + RandNumbers& rand_num); + + void Transform(Blob* input_blob, + Blob* transformed_blob, + RandNumbers& rand_num); /** * @brief Applies the transformation defined in the data layer's @@ -50,6 +166,82 @@ class DataTransformer { void Transform(const vector & datum_vector, Blob* transformed_blob); + /** + * @brief Applies the transformation defined in the data layer's + * transform_param block to the annotated data. + * + * @param anno_datum + * AnnotatedDatum containing the data and annotation to be transformed. + * @param transformed_blob + * This is destination blob. It can be part of top blob's data if + * set_cpu_data() is used. See annotated_data_layer.cpp for an example. + * @param transformed_anno_vec + * This is destination annotation. 
+ */ + void Transform(const AnnotatedDatum& anno_datum, + Blob* transformed_blob, + RepeatedPtrField* transformed_anno_vec); + void Transform(const AnnotatedDatum& anno_datum, + Blob* transformed_blob, + RepeatedPtrField* transformed_anno_vec, + RandNumbers& rand_num); + void Transform(const AnnotatedDatum& anno_datum, + Blob* transformed_blob, + vector* transformed_anno_vec); + void Transform(const AnnotatedDatum& anno_datum, + Blob* transformed_blob, + vector* transformed_anno_vec, + RandNumbers& rand_num); + + /** + * @brief Transform the annotation according to the transformation applied + * to the datum. + * + * @param anno_datum + * AnnotatedDatum containing the data and annotation to be transformed. + * @param do_resize + * If true, resize the annotation accordingly before crop. + * @param crop_bbox + * The cropped region applied to anno_datum.datum() + * @param do_mirror + * If true, meaning the datum has mirrored. + * @param transformed_anno_group_all + * Stores all transformed AnnotationGroup. + */ + void TransformAnnotation( + const AnnotatedDatum& anno_datum, const bool do_resize, + const NormalizedBBox& crop_bbox, const bool do_mirror, + RepeatedPtrField* transformed_anno_group_all); + + /** + * @brief Crops the datum according to bbox. + */ + void CropImage(const Datum& datum, const NormalizedBBox& bbox, + Datum* crop_datum); + + /** + * @brief Crops the datum and AnnotationGroup according to bbox. + */ + void CropImage(const AnnotatedDatum& anno_datum, const NormalizedBBox& bbox, + AnnotatedDatum* cropped_anno_datum); + + /** + * @brief Expand the datum. + */ + void ExpandImage(const Datum& datum, const float expand_ratio, + NormalizedBBox* expand_bbox, Datum* expanded_datum); + + /** + * @brief Expand the datum and adjust AnnotationGroup. + */ + void ExpandImage(const AnnotatedDatum& anno_datum, + AnnotatedDatum* expanded_anno_datum); + + /** + * @brief Apply distortion to the datum. 
+ */ + void DistortImage(const Datum& datum, Datum* distort_datum); + #ifdef USE_OPENCV /** * @brief Applies the transformation defined in the data layer's @@ -74,7 +266,36 @@ class DataTransformer { * This is destination blob. It can be part of top blob's data if * set_cpu_data() is used. See image_data_layer.cpp for an example. */ - void Transform(const cv::Mat& cv_img, Blob* transformed_blob); + template + void Transform(const cv::Mat& cv_img, + Blob* transformed_blob, + NormalizedBBox* crop_bbox, + RandNumbers& rand_num, + AnnotationHandler anno_handler = empty_value); + void Transform(const cv::Mat& cv_img, + Blob* transformed_blob, + RandNumbers& rand_num); + void Transform(const cv::Mat& cv_img, + Blob* transformed_blob) { + Transform(cv_img, transformed_blob, rand_num_); + } + + + /** + * @brief Crops img according to bbox. + */ + void CropImage(const cv::Mat& img, const NormalizedBBox& bbox, + cv::Mat* crop_img); + + /** + * @brief Expand img to include mean value as background. + */ + void ExpandImage(const cv::Mat& img, const float expand_ratio, + NormalizedBBox* expand_bbox, cv::Mat* expand_img); + + void TransformInv(const Blob* blob, vector* cv_imgs); + void TransformInv(const Dtype* data, cv::Mat* cv_img, const int height, + const int width, const int channels); #endif // USE_OPENCV /** @@ -128,25 +349,60 @@ class DataTransformer { #endif // USE_OPENCV protected: - /** - * @brief Generates a random integer from Uniform({0, 1, ..., n-1}). - * - * @param n - * The upperbound (exclusive) value of the random number. - * @return - * A uniformly random integer value from ({0, 1, ..., n-1}). + GenRandNumbers rand_num_; + + // Transform and return the transformation information. 
+ template + void Transform(const Datum& datum, Dtype* transformed_data, + NormalizedBBox* crop_bbox, RandNumbers& rand_num, + AnnotationHandler annotation_handler = empty_value); + + void Transform(const Datum& datum, Dtype* transformed_data, + RandNumbers& rand_num); + + /** + * @brief Applies the transformation defined in the data layer's + * transform_param block to the data and return transform information. */ - virtual int Rand(int n); + template + void Transform(const Datum& datum, Blob* transformed_blob, + NormalizedBBox* crop_bbox, RandNumbers& rand_num, + AnnotationHandler annotation_handler = empty_value); - void Transform(const Datum& datum, Dtype* transformed_data); // Tranformation parameters TransformationParameter param_; - - shared_ptr rng_; Phase phase_; Blob data_mean_; vector mean_values_; + + // Data reader used if any to get data + DataReader* data_reader_used; + + + private: + void Transform(const Datum& datum, Dtype* transformed_data, + NormalizedBBox* crop_bbox, RandNumbers& rand_num, + const bool do_mirror, const bool has_uint8, + const bool has_mean_file, const bool has_mean_values); + + template + void Transform(const cv::Mat& cv_img, + Blob* transformed_blob, + NormalizedBBox* crop_bbox, + RandNumbers& rand_num); + + template + void Transform(const Datum& datum, Dtype* transformed_data, + NormalizedBBox* crop_bbox, RandNumbers& rand_num); + +#ifdef USE_OPENCV + void RandomResizeImage(const Datum& datum, Datum *resized_datum); + void RandomResizeImage(const cv::Mat& img, cv::Mat *resized_img); + void RandomAlterAspectRatio(const Datum& datum, Datum *resized_datum); + void RandomAlterAspectRatio(const cv::Mat& img, cv::Mat *resized_img); +#endif }; } // namespace caffe diff --git a/include/caffe/engine_parser.hpp b/include/caffe/engine_parser.hpp new file mode 100644 index 00000000000..1fa716e9b9a --- /dev/null +++ b/include/caffe/engine_parser.hpp @@ -0,0 +1,193 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation 
+ +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CAFFE_MKLDNN_ENGINES_HPP_ +#define CAFFE_MKLDNN_ENGINES_HPP_ + +#include +#include +#include +#include + +#ifdef MKLDNN_SUPPORTED +#include "caffe/mkldnn_base.hpp" +#endif + +namespace caffe { +static const char* supportedEngines[] = + {"CAFFE", "CUDNN", "MKL2017", "MKLDNN"}; +class EngineParser { + public: + explicit EngineParser(const std::string subEngineString) { + parse(subEngineString.c_str()); + // Check for wrong engine name + validateEngine(); + } + + bool isEngine(const char* name) const { + return (engineName == name); + } + + unsigned getNumberOfSubEngines() const { + return subEngines.size(); + } + +#ifdef MKLDNN_SUPPORTED + engine& getMKLDNNSubEngine(unsigned engineIndex) const { + CHECK(engineIndex < getNumberOfSubEngines()); + const char *engineName = subEngines[engineIndex].c_str(); + + if (!strcmp(engineName, "CPU")) + return CpuEngine::Instance().get_engine(); + +#ifdef FPGA_ENABLED + if (!strcmp(engineName, "FPGA")) + return FPGAEngine::Instance().get_engine(); +#endif + +#ifdef DLA_ENABLED + if (!strcmp(engineName, "DLA")) + return DLAEngine::Instance().get_engine(); +#endif + + LOG(FATAL) << "EngineParser: Unknown subengine: " << engineName; + } +#endif + + private: + std::string engineName; + std::vector subEngines; + + bool parse(const char *subEngineString) { + // Ignore whitespaces + subEngineString = parseWhitespaces(subEngineString); + + // Extract engine identifier. 
It can be empty at this point + const char *beginOfIdentifier = subEngineString; + subEngineString = parseIdentifier(subEngineString); + engineName.assign(beginOfIdentifier, subEngineString - beginOfIdentifier); + + // Ignore whitespaces + subEngineString = parseWhitespaces(subEngineString); + + // String termination is allowed at this place + if (!*subEngineString) + return true; + + // Otherwise colon must be specified and engine identifier cannot be empty + if (!engineName.length() || (*subEngineString != ':') + || (*(subEngineString+1) == '\0')) + LOG(FATAL) << "Wrong engine specification"; + + // Process sub engines + subEngineString++; + while (true) { + // Ignore separators + subEngineString = parseSeparators(subEngineString); + + // String termination is allowed at this place + if (!*subEngineString) + return true; + + // Extract sub engine identifier + const char *beginOfIdentifier = subEngineString; + subEngineString = parseIdentifier(subEngineString); + + // Identifier can not be empty nor contain invalid characters + if (beginOfIdentifier == subEngineString) + return false; + + // Collect all valid sub engine names + std::string subEngineName; + subEngineName.assign(beginOfIdentifier, + subEngineString - beginOfIdentifier); + subEngines.push_back(subEngineName); + } + } + + void validateEngine() { +#ifndef USE_CUDNN + if (engineName == "CUDNN") + LOG(FATAL) << "Support for CUDNN is not enabled"; +#endif +#ifndef MKL2017_SUPPORTED + if (engineName == "MKL2017") + LOG(FATAL) << "Support for MKL2017 is not enabled"; +#endif +#ifndef MKLDNN_SUPPORTED + if (engineName == "MKLDNN") + LOG(FATAL) << "Support for MKLDNN is not enabled"; +#endif + for (unsigned i = 0; + i < sizeof(supportedEngines)/sizeof(supportedEngines[0]); i++ ) + if (supportedEngines[i] == engineName) { + if (subEngines.size() > 0 && engineName != "MKLDNN") + LOG(FATAL) << "Engine " << engineName + << " does not support subengines"; + return; + } + LOG(FATAL) << "Unknown engine: " << 
engineName; + } + + const char *parseWhitespaces(const char *subEngineString) const { + while (isspace(*subEngineString)) + subEngineString++; + + return subEngineString; + } + + const char *parseSeparators(const char *subEngineString) const { + while (isspace(*subEngineString) || (*subEngineString == ',') + || (*subEngineString == ';')) + subEngineString++; + + return subEngineString; + } + + const char *parseIdentifier(const char *subEngineString) const { + if (!isalpha(*subEngineString) && (*subEngineString != '_')) + return subEngineString; + + do { + subEngineString++; + } while (isalnum(*subEngineString) || (*subEngineString == '_')); + + return subEngineString; + } +}; +} // namespace caffe +#endif diff --git a/include/caffe/filler.hpp b/include/caffe/filler.hpp index dad9ad46b3b..76d5afeb396 100644 --- a/include/caffe/filler.hpp +++ b/include/caffe/filler.hpp @@ -1,3 +1,41 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to +https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + // Fillers are random number generators that fills a blob using the specified // algorithm. The expectation is that they are only going to be used during // initialization time and will not involve any GPUs. @@ -8,6 +46,7 @@ #include #include "caffe/blob.hpp" +#include "caffe/gabor.hpp" #include "caffe/proto/caffe.pb.h" #include "caffe/syncedmem.hpp" #include "caffe/util/math_functions.hpp" @@ -15,25 +54,23 @@ namespace caffe { /// @brief Fills a Blob with constant or randomly-generated data. -template -class Filler { +template class Filler { public: - explicit Filler(const FillerParameter& param) : filler_param_(param) {} + explicit Filler(const FillerParameter ¶m) : filler_param_(param) {} virtual ~Filler() {} - virtual void Fill(Blob* blob) = 0; + virtual void Fill(Blob *blob) = 0; + protected: FillerParameter filler_param_; }; // class Filler - /// @brief Fills a Blob with constant values @f$ x = 0 @f$. 
-template -class ConstantFiller : public Filler { +template class ConstantFiller : public Filler { public: - explicit ConstantFiller(const FillerParameter& param) + explicit ConstantFiller(const FillerParameter ¶m) : Filler(param) {} - virtual void Fill(Blob* blob) { - Dtype* data = blob->mutable_cpu_data(); + virtual void Fill(Blob *blob) { + Dtype *data = blob->mutable_cpu_data(); const int count = blob->count(); const Dtype value = this->filler_param_.value(); CHECK(count); @@ -41,36 +78,35 @@ class ConstantFiller : public Filler { data[i] = value; } CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; + << "Sparsity not supported by this Filler."; } }; /// @brief Fills a Blob with uniformly distributed values @f$ x\sim U(a, b) @f$. -template -class UniformFiller : public Filler { +template class UniformFiller : public Filler { public: - explicit UniformFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { + explicit UniformFiller(const FillerParameter ¶m) : Filler(param) {} + virtual void Fill(Blob *blob) { CHECK(blob->count()); caffe_rng_uniform(blob->count(), Dtype(this->filler_param_.min()), - Dtype(this->filler_param_.max()), blob->mutable_cpu_data()); + Dtype(this->filler_param_.max()), + blob->mutable_cpu_data()); CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; + << "Sparsity not supported by this Filler."; } }; /// @brief Fills a Blob with Gaussian-distributed values @f$ x = a @f$. 
-template -class GaussianFiller : public Filler { +template class GaussianFiller : public Filler { public: - explicit GaussianFiller(const FillerParameter& param) + explicit GaussianFiller(const FillerParameter ¶m) : Filler(param) {} - virtual void Fill(Blob* blob) { - Dtype* data = blob->mutable_cpu_data(); + virtual void Fill(Blob *blob) { + Dtype *data = blob->mutable_cpu_data(); CHECK(blob->count()); caffe_rng_gaussian(blob->count(), Dtype(this->filler_param_.mean()), - Dtype(this->filler_param_.std()), blob->mutable_cpu_data()); + Dtype(this->filler_param_.std()), + blob->mutable_cpu_data()); int sparse = this->filler_param_.sparse(); CHECK_GE(sparse, -1); if (sparse >= 0) { @@ -82,7 +118,7 @@ class GaussianFiller : public Filler { const int num_outputs = blob->shape(0); Dtype non_zero_probability = Dtype(sparse) / Dtype(num_outputs); rand_vec_.reset(new SyncedMemory(blob->count() * sizeof(int))); - int* mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); + int *mask = reinterpret_cast(rand_vec_->mutable_cpu_data()); caffe_rng_bernoulli(blob->count(), non_zero_probability, mask); for (int i = 0; i < blob->count(); ++i) { data[i] *= mask[i]; @@ -97,13 +133,12 @@ class GaussianFiller : public Filler { /** @brief Fills a Blob with values @f$ x \in [0, 1] @f$ * such that @f$ \forall i \sum_j x_{ij} = 1 @f$. 
*/ -template -class PositiveUnitballFiller : public Filler { +template class PositiveUnitballFiller : public Filler { public: - explicit PositiveUnitballFiller(const FillerParameter& param) + explicit PositiveUnitballFiller(const FillerParameter ¶m) : Filler(param) {} - virtual void Fill(Blob* blob) { - Dtype* data = blob->mutable_cpu_data(); + virtual void Fill(Blob *blob) { + Dtype *data = blob->mutable_cpu_data(); DCHECK(blob->count()); caffe_rng_uniform(blob->count(), 0, 1, blob->mutable_cpu_data()); // We expect the filler to not be called very frequently, so we will @@ -120,7 +155,7 @@ class PositiveUnitballFiller : public Filler { } } CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; + << "Sparsity not supported by this Filler."; } }; @@ -140,12 +175,10 @@ class PositiveUnitballFiller : public Filler { * * TODO(dox): make notation in above comment consistent with rest & use LaTeX. */ -template -class XavierFiller : public Filler { +template class XavierFiller : public Filler { public: - explicit XavierFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { + explicit XavierFiller(const FillerParameter ¶m) : Filler(param) {} + virtual void Fill(Blob *blob) { CHECK(blob->count()); int fan_in = blob->count() / blob->num(); int fan_out = blob->count() / blob->channels(); @@ -154,14 +187,14 @@ class XavierFiller : public Filler { FillerParameter_VarianceNorm_AVERAGE) { n = (fan_in + fan_out) / Dtype(2); } else if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_FAN_OUT) { + FillerParameter_VarianceNorm_FAN_OUT) { n = fan_out; } Dtype scale = sqrt(Dtype(3) / n); caffe_rng_uniform(blob->count(), -scale, scale, - blob->mutable_cpu_data()); + blob->mutable_cpu_data()); CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; + << "Sparsity not supported by this Filler."; } }; @@ -182,12 +215,10 @@ class XavierFiller : public Filler { * a, 
b, c) where a * b * c = fan_in and num * b * c = fan_out. Note that this * is currently not the case for inner product layers. */ -template -class MSRAFiller : public Filler { +template class MSRAFiller : public Filler { public: - explicit MSRAFiller(const FillerParameter& param) - : Filler(param) {} - virtual void Fill(Blob* blob) { + explicit MSRAFiller(const FillerParameter ¶m) : Filler(param) {} + virtual void Fill(Blob *blob) { CHECK(blob->count()); int fan_in = blob->count() / blob->num(); int fan_out = blob->count() / blob->channels(); @@ -196,14 +227,14 @@ class MSRAFiller : public Filler { FillerParameter_VarianceNorm_AVERAGE) { n = (fan_in + fan_out) / Dtype(2); } else if (this->filler_param_.variance_norm() == - FillerParameter_VarianceNorm_FAN_OUT) { + FillerParameter_VarianceNorm_FAN_OUT) { n = fan_out; } Dtype std = sqrt(Dtype(2) / n); caffe_rng_gaussian(blob->count(), Dtype(0), std, - blob->mutable_cpu_data()); + blob->mutable_cpu_data()); CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; + << "Sparsity not supported by this Filler."; } }; @@ -240,15 +271,14 @@ operation is equivalent to the following call in Python with Scikit.Image. out = skimage.transform.rescale(img, factor, mode='constant', cval=0) \endcode */ -template -class BilinearFiller : public Filler { +template class BilinearFiller : public Filler { public: - explicit BilinearFiller(const FillerParameter& param) + explicit BilinearFiller(const FillerParameter ¶m) : Filler(param) {} - virtual void Fill(Blob* blob) { + virtual void Fill(Blob *blob) { CHECK_EQ(blob->num_axes(), 4) << "Blob must be 4 dim."; CHECK_EQ(blob->width(), blob->height()) << "Filter must be square"; - Dtype* data = blob->mutable_cpu_data(); + Dtype *data = blob->mutable_cpu_data(); int f = ceil(blob->width() / 2.); float c = (2 * f - 1 - f % 2) / (2. 
* f); for (int i = 0; i < blob->count(); ++i) { @@ -257,10 +287,78 @@ class BilinearFiller : public Filler { data[i] = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c)); } CHECK_EQ(this->filler_param_.sparse(), -1) - << "Sparsity not supported by this Filler."; + << "Sparsity not supported by this Filler."; } }; +/*! +@brief Fills a Blob with Gabor filters. + +A common use case is with the first convolutional layer for edge detection. +\code +layer { + name: "conv1/7x7_s2" + type: "Convolution" + bottom: "data" + top: "conv1/7x7_s2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 3 + kernel_size: 7 + stride: 2 + weight_filler { + type: "gabor" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +\endcode + */ +template class GaborFiller : public Filler { + public: + explicit GaborFiller(const FillerParameter ¶m) : Filler(param) {} + + virtual void Fill(Blob *blob) { + CHECK_LE(blob->num_axes(), 4) + << "Blob must be 4 dim or less to use Gabor filler."; + CHECK_EQ(blob->width(), blob->height()) + << "Filter must be square in first two dimensions to use Gabor filler."; + CHECK_EQ(blob->channels(), 3) + << "Blob must have 3 channels to use Gabor filler"; + Dtype *data = blob->mutable_cpu_data(); + KernelGenerator kernelGenerator(blob->num(), blob->width()); + kernelGenerator.generate(); + caffe_copy(kernelGenerator.getSizeOfKernelData(), + kernelGenerator.getKernelData(), data); + + /* + static bool error = + logFilter(kernelGenerator.getSizeOfKernelData(), data); + (void) error; + */ + CHECK_EQ(this->filler_param_.sparse(), -1) + << "Sparsity not supported by this Filler."; + } + /* + bool logFilter(int numberOfElements, void* kernelData) { + FILE *outputBinaryFile = fopen("gabor_filters_dump.txt", "w+b"); + fwrite(kernelData, 1, numberOfElements, outputBinaryFile); + fclose(outputBinaryFile); + return false; + } + */ +}; /** * @brief Get a specific filler from the 
specification given in FillerParameter. * @@ -268,8 +366,8 @@ class BilinearFiller : public Filler { * this way for now. */ template -Filler* GetFiller(const FillerParameter& param) { - const std::string& type = param.type(); +Filler *GetFiller(const FillerParameter ¶m) { + const std::string &type = param.type(); if (type == "constant") { return new ConstantFiller(param); } else if (type == "gaussian") { @@ -284,10 +382,12 @@ Filler* GetFiller(const FillerParameter& param) { return new MSRAFiller(param); } else if (type == "bilinear") { return new BilinearFiller(param); + } else if (type == "gabor") { + return new GaborFiller(param); } else { CHECK(false) << "Unknown filler name: " << param.type(); } - return (Filler*)(NULL); + return (Filler *)(NULL); } } // namespace caffe diff --git a/include/caffe/gabor.hpp b/include/caffe/gabor.hpp new file mode 100644 index 00000000000..a7a25f388e0 --- /dev/null +++ b/include/caffe/gabor.hpp @@ -0,0 +1,275 @@ +namespace caffe { + +class KernelParameters { + public: + double r, g, b; + double lambda, sigma; + double omega, phi, theta; + + KernelParameters() + : r(1), g(1), b(1), lambda(1), sigma(0.6), omega(M_PI), phi(0), theta(0) { + } + + void generate(int kernelId, int numberOfKernels, int kernelSize) { + if ((numberOfKernels == 32) && (kernelSize == 5)) + generateCifarLike(kernelId, kernelSize); + else if ((numberOfKernels == 64) && (kernelSize == 7)) + generateGoogleNetLike(kernelId, kernelSize); + else if ((numberOfKernels == 64) && (kernelSize == 3)) + generateVggLike(kernelId, kernelSize); + else if ((numberOfKernels == 96) && (kernelSize == 11)) + generateAlexNetLike(kernelId, kernelSize); + else + LOG(FATAL) << "No predefined gabor filters for this topology."; + } + + void generateCifarLike(int kernelId, int kernelSize) { + lambda = 0.5; + + if (kernelId < 8) { + omega = M_PI * (kernelSize - 1) / 2 / 1; + theta = (kernelId % 8) * M_PI / 8; + } else if (kernelId < 14) { + omega = M_PI * (kernelSize - 1) / 2 / 2; + 
theta = ((kernelId - 2) % 6) * M_PI / 6 + M_PI / 12; + } else if (kernelId < 16) { + lambda = 0.5; + sigma = 0.75; + omega = M_PI * (kernelSize - 1) / 2 / 8; + phi = (kernelId % 2) * M_PI; + r = 1; + g = -1; + b = 1; + } else { + omega = M_PI * (kernelSize - 1) / 2 / 4; + theta = (kernelId % 4) * M_PI / 2 + M_PI / 4 + M_PI / 8; + phi = M_PI / 2; + } + + if (kernelId >= 30) { + theta = (kernelId % 2) * M_PI / 2 + M_PI / 4 + M_PI / 8; + r = 1; + g = 1; + b = 0; + } else if (kernelId >= 28) { + theta = (kernelId % 2) * M_PI / 2 + M_PI / 4 - M_PI / 8; + r = -1; + g = 1; + b = -1; + } else if (kernelId >= 24) { + r = -1; + g = 1; + b = 1; + } else if (kernelId >= 20) { + r = 1; + g = 0; + b = -1; + } + } + + void generateGoogleNetLike(int kernelId, int kernelSize) { + if (kernelId < 32) { + int rotation = kernelId / 8; + int frequency = kernelId % 8; + int phase = kernelId % 2; + lambda = 1 / (1 + frequency / 8.); + sigma = 0.4 + 0.2 * frequency / 8; + omega = M_PI * (kernelSize - 1) / 2 / (1 + frequency / 2.); + phi = phase * M_PI + M_PI * 12 / 32; + theta = rotation * M_PI / 4; + } else if (kernelId < 40) { + sigma = 0.45; + lambda = 0.5; + omega = M_PI * (kernelSize - 1) / 2; + theta = (kernelId % 8) * M_PI / 8; + phi = M_PI; + } else if (kernelId < 46) { + int phase = (kernelId - 1) / 3; + int size = (kernelId - 1) % 3; + lambda = 1 / (1 + size / 2.); + sigma = 1. / (2.5 - size / 2.); + omega = M_PI / 4; + phi = phase * M_PI; + r = 0.25; + g = -1; + b = 1; + } else if (kernelId < 48) { + lambda = 2. / 3; + sigma = 1; + omega = M_PI * (kernelSize - 1) / 2 / 12; + theta = (kernelId % 8) * M_PI + M_PI / 8; + phi = M_PI / 2; + r = 1; + g = 0.1; + b = -0.5; + } else if (kernelId < 56) { + lambda = 2. 
/ 3; + sigma = 1; + omega = M_PI * (kernelSize - 1) / 2 / 12; + theta = (kernelId % 8) * M_PI / 4 + M_PI / 32; + phi = M_PI / 2; + r = -0.5; + g = 0.1; + b = 1; + } else if (kernelId < 60) { + lambda = 1; + sigma = 1; + omega = M_PI * (kernelSize - 1) / 2 / 12; + theta = (kernelId % 8) * M_PI / 2 + M_PI / 8; + phi = M_PI / 2; + r = 0.25; + g = -1; + b = 1; + } else { + lambda = 2. / 3; + sigma = 1; + omega = M_PI * (kernelSize - 1) / 2 / 12; + theta = (kernelId % 8) * M_PI / 2 + M_PI / 8; + phi = M_PI / 2; + r = -1; + g = -1; + b = 1; + } + } + + void generateVggLike(int kernelId, int kernelSize) { + generateGoogleNetLike(kernelId, kernelSize); + sigma = 1; + } + + void generateAlexNetLike(int kernelId, int kernelSize) { + lambda = 1. / 3; + + if (kernelId < 48) { + int rotation = kernelId / 8; + int frequency = kernelId % 8; + int phase = kernelId % 2; + lambda /= (1 + frequency / 8.); + sigma = 0.5 + 0.2 * frequency / 8; + omega = M_PI * (kernelSize - 1) / 2 / (1 + frequency / 2.); + phi = phase * M_PI + M_PI * 12 / 32; + theta = rotation * M_PI / 6; + } else if (kernelId < 56) { + int phase = kernelId / 4; + int size = kernelId % 4; + lambda /= (1 + size / 2.); + sigma = 1. 
/ (2.5 - size / 2.); + omega = M_PI / 4; + phi = phase * M_PI; + r = 0.25; + g = -1; + b = 1; + } else if (kernelId < 60) { + lambda /= 1.5; + sigma = 0.75; + omega = M_PI * (kernelSize - 1) / 2 / 8; + theta = (kernelId % 4) * M_PI / 2 + M_PI / 8; + phi = M_PI / 2; + r = -1; + g = 1; + b = -0.5; + } else if (kernelId < 64) { + lambda /= 3; + sigma = 2; + omega = M_PI * (kernelSize - 1) / 2 / 8; + theta = (kernelId % 4) * M_PI / 2 + M_PI / 8; + phi = M_PI / 2; + r = 1; + g = -0.5; + b = -0.75; + } else if (kernelId < 72) { + lambda /= 1.5; + sigma = 0.75; + omega = M_PI * (kernelSize - 1) / 2 / 4; + theta = (kernelId % 8) * M_PI / 4 + M_PI / 32; + phi = M_PI / 2; + r = 1; + g = 0.1; + b = -0.75; + } else if (kernelId < 80) { + lambda /= 1.5; + sigma = 1; + omega = M_PI * (kernelSize - 1) / 2 / 12; + theta = (kernelId % 8) * M_PI / 4 + M_PI / 32; + phi = M_PI / 2; + r = -0.5; + g = 0.1; + b = 1; + } else if (kernelId < 88) { + lambda /= 2.5; + sigma = 1; + omega = M_PI * (kernelSize - 1) / 2 / 8; + theta = (kernelId % 8) * M_PI / 4 + M_PI / 32; + phi = M_PI / 2; + r = -1; + g = -1; + b = 1; + } else if (kernelId < 92) { + omega = M_PI * (kernelSize - 1) / 2 / 16; + theta = (kernelId % 4) * M_PI / 2 + M_PI / 16; + phi = M_PI / 2; + } else { + lambda /= 4; + sigma = 0.75; + omega = M_PI * (kernelSize - 1) / 2 / 4; + theta = (kernelId % 8) * M_PI / 4 + M_PI / 32; + phi = M_PI / 2; + r = -1; + g = -1; + b = 1; + } + } +}; + +template class KernelGenerator { + public: + KernelGenerator(int numberOfKernels, int kernelSize) + : numberOfKernels(numberOfKernels), kernelSize(kernelSize), + kernels(new Dtype[getNumberOfElements()]) {} + + ~KernelGenerator() { delete[] kernels; } + + void generate() { + for (int kernelId = 0; kernelId < numberOfKernels; kernelId++) + generateKernel(kernelId); + } + + const Dtype *getKernelData() const { return kernels; } + + int getSizeOfKernelData() const { + return getNumberOfElements(); + } + + private: + int numberOfKernels; + int 
kernelSize; + Dtype *kernels; + + int getNumberOfElements() const { + return numberOfKernels * 3 * kernelSize * kernelSize; + } + + void generateKernel(int kernelId) { + KernelParameters param; + param.generate(kernelId, numberOfKernels, kernelSize); + + for (int ky = 0; ky < kernelSize; ky++) + for (int kx = 0; kx < kernelSize; kx++) { + double x = 2. * kx / (kernelSize - 1) - 1; + double y = 2. * ky / (kernelSize - 1) - 1; + + double dis = exp(-(x * x + y * y) / (2 * param.sigma * param.sigma)); + double arg = x * cos(param.theta) - y * sin(param.theta); + double per = cos(arg * param.omega + param.phi); + double val = param.lambda * dis * per; + + kernels[kx + kernelSize * (ky + kernelSize * (0 + 3 * kernelId))] = + (Dtype)(param.r * val); + kernels[kx + kernelSize * (ky + kernelSize * (1 + 3 * kernelId))] = + (Dtype)(param.g * val); + kernels[kx + kernelSize * (ky + kernelSize * (2 + 3 * kernelId))] = + (Dtype)(param.b * val); + } + } +}; +}; // namespace caffe diff --git a/include/caffe/internal_thread.hpp b/include/caffe/internal_thread.hpp index 6a8c5a02892..7f115efa7b4 100644 --- a/include/caffe/internal_thread.hpp +++ b/include/caffe/internal_thread.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_INTERNAL_THREAD_HPP_ #define CAFFE_INTERNAL_THREAD_HPP_ diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp index 10f353f94f9..45d65c7994d 100644 --- a/include/caffe/layer.hpp +++ b/include/caffe/layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_LAYER_H_ #define CAFFE_LAYER_H_ @@ -11,6 +48,52 @@ #include "caffe/proto/caffe.pb.h" #include "caffe/util/math_functions.hpp" +#include "caffe/multinode/mlsl.hpp" + +#define MAX_ELEMS_TO_LOG 16 +#define LOG_LAYER(layer) DLOG(INFO) << layer->type() << ": " +#define LOG_BLOB(layer, blob, part, blob_id, description) \ + do \ + { \ + int elems_to_log = std::min(MAX_ELEMS_TO_LOG, blob->count()); \ + for (int idx = 0; idx < elems_to_log; idx++) \ + { \ + LOG_LAYER(layer) << description \ + << ", blob_id " << blob_id \ + << ", idx " << idx \ + << ", value " << blob->cpu_##part()[idx]; \ + } \ + } while (0) + +#define LOG_PARAM_BLOB(blob, part, blob_id, description) \ + do \ + { \ + int elems_to_log = std::min(MAX_ELEMS_TO_LOG, blob->count()); \ + for (int idx = 0; idx < elems_to_log; idx++) \ + { \ + DLOG(INFO) << description \ + << ", blob_id " << blob_id \ + << ", idx " << idx \ + << ", value " << blob->cpu_##part()[idx]; \ + } \ + } while (0) + +#define LOG_BUFFER(layer, buffer, buffer_id, description) \ + do \ + { \ + if (!buffer) { \ + /*LOG(WARNING) << "skip NULL buffer";*/ \ + break; \ + } \ + for (int idx = 0; idx < MAX_ELEMS_TO_LOG; idx++) \ + { \ + LOG_LAYER(layer) << description \ + << ", buffer_id " << buffer_id \ + << ", idx " << idx \ + << ", value " << buffer[idx]; \ + } \ + } while (0) + /** Forward declare boost::thread instead of including boost/thread.hpp to avoid a boost/NVCC issues (#1009, #1010) on OSX. @@ -31,6 +114,23 @@ namespace caffe { */ template class Layer { + +#ifdef USE_MLSL + +public: + MLSL::Operation *layerOp{ nullptr }; + mn::Distribution &GetDistribution(); + virtual bool ParamNeedReduce(int param_id) { return true; } + +protected: + virtual bool Bypass(const vector*>& bottom, + const vector*>& top); + + virtual void MultinodeSetUp(const vector*>& bottom, + const vector*>& top); + +#endif /* USE_MLSL */ + public: /** * You should not implement your own constructor. 
Any set up code should go @@ -49,6 +149,7 @@ class Layer { } } } + virtual ~Layer() {} /** @@ -71,6 +172,9 @@ class Layer { LayerSetUp(bottom, top); Reshape(bottom, top); SetLossWeights(top); +#ifdef USE_MLSL + MultinodeSetUp(bottom, top); +#endif } /** @@ -454,6 +558,12 @@ inline Dtype Layer::Forward(const vector*>& bottom, Lock(); Dtype loss = 0; Reshape(bottom, top); +#ifdef USE_MLSL + if (Bypass(bottom, top)) { + Unlock(); + return loss; + } +#endif switch (Caffe::mode()) { case Caffe::CPU: Forward_cpu(bottom, top); @@ -490,6 +600,9 @@ template inline void Layer::Backward(const vector*>& top, const vector& propagate_down, const vector*>& bottom) { +#ifdef USE_MLSL + if (Bypass(bottom, top)) return; +#endif switch (Caffe::mode()) { case Caffe::CPU: Backward_cpu(top, propagate_down, bottom); diff --git a/include/caffe/layer_factory.hpp b/include/caffe/layer_factory.hpp index f385afccfee..d189bd8bbc0 100644 --- a/include/caffe/layer_factory.hpp +++ b/include/caffe/layer_factory.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + /** * @brief A layer factory that allows one to register layers. * During runtime, registered layers could be called by passing a LayerParameter diff --git a/include/caffe/layers/absval_layer.hpp b/include/caffe/layers/absval_layer.hpp index 9b5305dceb4..e625d142782 100644 --- a/include/caffe/layers/absval_layer.hpp +++ b/include/caffe/layers/absval_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_ABSVAL_LAYER_HPP_ #define CAFFE_ABSVAL_LAYER_HPP_ diff --git a/include/caffe/layers/accuracy_layer.hpp b/include/caffe/layers/accuracy_layer.hpp index fe2adb939e4..c61255bd349 100644 --- a/include/caffe/layers/accuracy_layer.hpp +++ b/include/caffe/layers/accuracy_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_ACCURACY_LAYER_HPP_ #define CAFFE_ACCURACY_LAYER_HPP_ @@ -39,7 +76,7 @@ class AccuracyLayer : public Layer { // If there are two top blobs, then the second blob will contain // accuracies per class. virtual inline int MinTopBlobs() const { return 1; } - virtual inline int MaxTopBlos() const { return 2; } + virtual inline int MaxTopBlobs() const { return 2; } protected: /** diff --git a/include/caffe/layers/annotated_data_layer.hpp b/include/caffe/layers/annotated_data_layer.hpp new file mode 100644 index 00000000000..5d7c2fa50c0 --- /dev/null +++ b/include/caffe/layers/annotated_data_layer.hpp @@ -0,0 +1,80 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CAFFE_DATA_LAYER_HPP_ +#define CAFFE_DATA_LAYER_HPP_ + +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/data_reader.hpp" +#include "caffe/data_transformer.hpp" +#include "caffe/internal_thread.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/base_data_layer.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/db.hpp" + +namespace caffe { + +template +class AnnotatedDataLayer : public BasePrefetchingDataLayer { + public: + explicit AnnotatedDataLayer(const LayerParameter& param); + virtual ~AnnotatedDataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + // AnnotatedDataLayer uses DataReader instead for sharing for parallelism + virtual inline bool ShareInParallel() const { return false; } + virtual inline const char* type() const { return "AnnotatedData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void load_batch(Batch* batch); + + DataReader reader_; + bool has_anno_type_; + AnnotatedDatum_AnnotationType anno_type_; + vector batch_samplers_; + string label_map_file_; +}; + +} // namespace caffe + +#endif // CAFFE_DATA_LAYER_HPP_ diff --git a/include/caffe/layers/argmax_layer.hpp b/include/caffe/layers/argmax_layer.hpp index 4fef363e850..a18c100b995 100644 --- a/include/caffe/layers/argmax_layer.hpp +++ b/include/caffe/layers/argmax_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_ARGMAX_LAYER_HPP_ #define CAFFE_ARGMAX_LAYER_HPP_ diff --git a/include/caffe/layers/base_conv_layer.hpp b/include/caffe/layers/base_conv_layer.hpp old mode 100644 new mode 100755 index 0160a833dd2..00a819920a5 --- a/include/caffe/layers/base_conv_layer.hpp +++ b/include/caffe/layers/base_conv_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_BASE_CONVOLUTION_LAYER_HPP_ #define CAFFE_BASE_CONVOLUTION_LAYER_HPP_ @@ -29,6 +66,13 @@ class BaseConvolutionLayer : public Layer<Dtype> { virtual inline bool EqualNumBottomTopBlobs() const { return true; } protected: + // Split Reshape into two parts + // Part 1 for normal blob reshape + // Part 2 for openmp optimization for CAFFE engine (only) + void DoReshape(const vector<Blob<Dtype>*>& bottom, + const vector<Blob<Dtype>*>& top); + void ReshapeForMKL(const vector<Blob<Dtype>*>& bottom, + const vector<Blob<Dtype>*>& top); // Helper functions that abstract away the column buffer and gemm arguments. // The last argument in forward_cpu_gemm is so that we can skip the im2col if // we just called weight_cpu_gemm with the same input. @@ -41,6 +85,9 @@ class BaseConvolutionLayer : public Layer<Dtype> { weights); void backward_cpu_bias(Dtype* bias, const Dtype* input); + void clear_weight_mt(void); + void sum_weight_mt(Dtype* weight_diff); + #ifndef CPU_ONLY void forward_gpu_gemm(const Dtype* col_input, const Dtype* weights, Dtype* output, bool skip_im2col = false); @@ -93,6 +140,9 @@ class BaseConvolutionLayer : public Layer<Dtype> { bool is_1x1_; bool force_nd_im2col_; + int num_of_threads_; // Number of threads to be used for + // batch based parallelization eg.
+ // min(batch,omp_get_num_threads()) private: // wrap im2col/col2im so we don't have to remember the (long) argument lists inline void conv_im2col_cpu(const Dtype* data, Dtype* col_buff) { @@ -103,6 +153,13 @@ class BaseConvolutionLayer : public Layer { pad_.cpu_data()[0], pad_.cpu_data()[1], stride_.cpu_data()[0], stride_.cpu_data()[1], dilation_.cpu_data()[0], dilation_.cpu_data()[1], col_buff); + } else if (!force_nd_im2col_ && num_spatial_axes_ == 3) { + im3d2col_cpu(data, conv_in_channels_, + conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], conv_input_shape_.cpu_data()[3], + kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], kernel_shape_.cpu_data()[2], + pad_.cpu_data()[0], pad_.cpu_data()[1], pad_.cpu_data()[2], + stride_.cpu_data()[0], stride_.cpu_data()[1], stride_.cpu_data()[2], + dilation_.cpu_data()[0], dilation_.cpu_data()[1], dilation_.cpu_data()[2], col_buff); } else { im2col_nd_cpu(data, num_spatial_axes_, conv_input_shape_.cpu_data(), col_buffer_shape_.data(), kernel_shape_.cpu_data(), @@ -117,6 +174,13 @@ class BaseConvolutionLayer : public Layer { pad_.cpu_data()[0], pad_.cpu_data()[1], stride_.cpu_data()[0], stride_.cpu_data()[1], dilation_.cpu_data()[0], dilation_.cpu_data()[1], data); + } else if (!force_nd_im2col_ && num_spatial_axes_ == 3) { + col2im3d_cpu(col_buff, conv_in_channels_, + conv_input_shape_.cpu_data()[1], conv_input_shape_.cpu_data()[2], conv_input_shape_.cpu_data()[3], + kernel_shape_.cpu_data()[0], kernel_shape_.cpu_data()[1], kernel_shape_.cpu_data()[2], + pad_.cpu_data()[0], pad_.cpu_data()[1], pad_.cpu_data()[2], + stride_.cpu_data()[0], stride_.cpu_data()[1], stride_.cpu_data()[2], + dilation_.cpu_data()[0], dilation_.cpu_data()[1], dilation_.cpu_data()[2], data); } else { col2im_nd_cpu(col_buff, num_spatial_axes_, conv_input_shape_.cpu_data(), col_buffer_shape_.data(), kernel_shape_.cpu_data(), @@ -162,11 +226,16 @@ class BaseConvolutionLayer : public Layer { int conv_in_channels_; int 
conv_out_spatial_dim_; int kernel_dim_; - int col_offset_; - int output_offset_; + size_t col_offset_; + size_t output_offset_; Blob<Dtype> col_buffer_; Blob<Dtype> bias_multiplier_; + + size_t col_buffer_mt_size; // openmp + size_t weight_diff_mt_size; // openmp + std::vector<Dtype> col_buffer_mt_; // openmp + std::vector<Dtype> weight_diff_mt_; // openmp }; } // namespace caffe diff --git a/include/caffe/layers/base_data_layer.hpp b/include/caffe/layers/base_data_layer.hpp index 2c49b73184b..0a428232ae2 100644 --- a/include/caffe/layers/base_data_layer.hpp +++ b/include/caffe/layers/base_data_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_DATA_LAYERS_HPP_ #define CAFFE_DATA_LAYERS_HPP_ @@ -38,7 +75,7 @@ class BaseDataLayer : public Layer<Dtype> { const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {} virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {} - + protected: TransformationParameter transform_param_; shared_ptr<DataTransformer<Dtype> > data_transformer_; @@ -74,6 +111,8 @@ class BasePrefetchingDataLayer : virtual void InternalThreadEntry(); virtual void load_batch(Batch<Dtype>* batch) = 0; + virtual void GetBatch(); + Batch<Dtype> prefetch_[PREFETCH_COUNT]; BlockingQueue<Batch<Dtype>*> prefetch_free_; BlockingQueue<Batch<Dtype>*> prefetch_full_; diff --git a/include/caffe/layers/batch_norm_layer.hpp b/include/caffe/layers/batch_norm_layer.hpp index 9b2d5126efb..e83bab95343 100644 --- a/include/caffe/layers/batch_norm_layer.hpp +++ b/include/caffe/layers/batch_norm_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved.
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_BATCHNORM_LAYER_HPP_ #define CAFFE_BATCHNORM_LAYER_HPP_ @@ -52,6 +89,9 @@ class BatchNormLayer : public Layer<Dtype> { virtual inline const char* type() const { return "BatchNorm"; } virtual inline int ExactNumBottomBlobs() const { return 1; } virtual inline int ExactNumTopBlobs() const { return 1; } +#ifdef USE_MLSL + virtual bool ParamNeedReduce(int param_id) { return false; } +#endif protected: virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom, @@ -63,6 +103,20 @@ class BatchNormLayer : public Layer<Dtype> { virtual void Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom); + void replicate(Dtype* buffer_to_write, + int num_batches, + unsigned int batch_offset_incr, + unsigned int channel_offset_incr, + const Dtype* data_to_be_replicated); + + template <typename FuncTy> + void replicate_to_op(Dtype* buffer_to_write, + int num_batches, + unsigned int batch_offset_incr, + unsigned int channel_offset_incr, + const Dtype* data_to_be_replicated, + FuncTy op_func); + Blob<Dtype> mean_, variance_, temp_, x_norm_; bool use_global_stats_; Dtype moving_average_fraction_; diff --git a/include/caffe/layers/batch_reindex_layer.hpp b/include/caffe/layers/batch_reindex_layer.hpp index ebb3a567bc4..2238a955c3b 100644 --- a/include/caffe/layers/batch_reindex_layer.hpp +++ b/include/caffe/layers/batch_reindex_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved.
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_BATCHREINDEX_LAYER_HPP_ #define CAFFE_BATCHREINDEX_LAYER_HPP_ diff --git a/include/caffe/layers/bias_layer.hpp b/include/caffe/layers/bias_layer.hpp index eedc3aaa351..244886e2fd2 100644 --- a/include/caffe/layers/bias_layer.hpp +++ b/include/caffe/layers/bias_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_BIAS_LAYER_HPP_ #define CAFFE_BIAS_LAYER_HPP_ diff --git a/include/caffe/layers/bnll_layer.hpp b/include/caffe/layers/bnll_layer.hpp index be07c748364..5c1bf6599ac 100644 --- a/include/caffe/layers/bnll_layer.hpp +++ b/include/caffe/layers/bnll_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_BNLL_LAYER_HPP_ #define CAFFE_BNLL_LAYER_HPP_ diff --git a/include/caffe/layers/concat_layer.hpp b/include/caffe/layers/concat_layer.hpp index a1570249197..1acc4259713 100644 --- a/include/caffe/layers/concat_layer.hpp +++ b/include/caffe/layers/concat_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_CONCAT_LAYER_HPP_ #define CAFFE_CONCAT_LAYER_HPP_ diff --git a/include/caffe/layers/contrastive_loss_layer.hpp b/include/caffe/layers/contrastive_loss_layer.hpp index e890afb8207..c745810af55 100644 --- a/include/caffe/layers/contrastive_loss_layer.hpp +++ b/include/caffe/layers/contrastive_loss_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_CONTRASTIVE_LOSS_LAYER_HPP_ #define CAFFE_CONTRASTIVE_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/conv_layer.hpp b/include/caffe/layers/conv_layer.hpp index 93a618ddd72..11b460a8e8b 100644 --- a/include/caffe/layers/conv_layer.hpp +++ b/include/caffe/layers/conv_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_CONV_LAYER_HPP_ #define CAFFE_CONV_LAYER_HPP_ diff --git a/include/caffe/layers/crop_layer.hpp b/include/caffe/layers/crop_layer.hpp index c4fda1220c3..619160dad9e 100644 --- a/include/caffe/layers/crop_layer.hpp +++ b/include/caffe/layers/crop_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_CROP_LAYER_HPP_ #define CAFFE_CROP_LAYER_HPP_ @@ -44,17 +81,14 @@ class CropLayer : public Layer { vector offsets; private: - // Recursive copy function. + template void crop_copy(const vector*>& bottom, const vector*>& top, const vector& offsets, - vector indices, - int cur_dim, const Dtype* src_data, - Dtype* dest_data, - bool is_forward); + Dtype* dest_data); - // Recursive copy function: this is similar to crop_copy() but loops over all + // Copy function: this is similar to crop_copy() but loops over all // but the last two dimensions to allow for ND cropping while still relying on // a CUDA kernel for the innermost two dimensions for performance reasons. 
An // alterantive implementation could rely on the kernel more by passing diff --git a/include/caffe/layers/cudnn_conv_layer.hpp b/include/caffe/layers/cudnn_conv_layer.hpp index 31fe49a71fa..8f32ea7d4d7 100644 --- a/include/caffe/layers/cudnn_conv_layer.hpp +++ b/include/caffe/layers/cudnn_conv_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_CUDNN_CONV_LAYER_HPP_ #define CAFFE_CUDNN_CONV_LAYER_HPP_ diff --git a/include/caffe/layers/cudnn_lcn_layer.hpp b/include/caffe/layers/cudnn_lcn_layer.hpp index 74cf4775e51..7f7d29f292a 100644 --- a/include/caffe/layers/cudnn_lcn_layer.hpp +++ b/include/caffe/layers/cudnn_lcn_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_CUDNN_LCN_LAYER_HPP_ #define CAFFE_CUDNN_LCN_LAYER_HPP_ diff --git a/include/caffe/layers/cudnn_lrn_layer.hpp b/include/caffe/layers/cudnn_lrn_layer.hpp index 000ccc36507..4fd090000d4 100644 --- a/include/caffe/layers/cudnn_lrn_layer.hpp +++ b/include/caffe/layers/cudnn_lrn_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_CUDNN_LRN_LAYER_HPP_ #define CAFFE_CUDNN_LRN_LAYER_HPP_ diff --git a/include/caffe/layers/cudnn_pooling_layer.hpp b/include/caffe/layers/cudnn_pooling_layer.hpp index 6d0db47d660..073e9bdad49 100644 --- a/include/caffe/layers/cudnn_pooling_layer.hpp +++ b/include/caffe/layers/cudnn_pooling_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_CUDNN_POOLING_LAYER_HPP_ #define CAFFE_CUDNN_POOLING_LAYER_HPP_ diff --git a/include/caffe/layers/cudnn_relu_layer.hpp b/include/caffe/layers/cudnn_relu_layer.hpp index a1cb29e7c5f..27b3182185e 100644 --- a/include/caffe/layers/cudnn_relu_layer.hpp +++ b/include/caffe/layers/cudnn_relu_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_CUDNN_RELU_LAYER_HPP_ #define CAFFE_CUDNN_RELU_LAYER_HPP_ diff --git a/include/caffe/layers/cudnn_sigmoid_layer.hpp b/include/caffe/layers/cudnn_sigmoid_layer.hpp index 7b3486f8a7e..e17c511996e 100644 --- a/include/caffe/layers/cudnn_sigmoid_layer.hpp +++ b/include/caffe/layers/cudnn_sigmoid_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_CUDNN_SIGMOID_LAYER_HPP_ #define CAFFE_CUDNN_SIGMOID_LAYER_HPP_ diff --git a/include/caffe/layers/cudnn_softmax_layer.hpp b/include/caffe/layers/cudnn_softmax_layer.hpp index 174368e413d..568f43d6d36 100644 --- a/include/caffe/layers/cudnn_softmax_layer.hpp +++ b/include/caffe/layers/cudnn_softmax_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ #define CAFFE_CUDNN_SOFTMAX_LAYER_HPP_ diff --git a/include/caffe/layers/cudnn_tanh_layer.hpp b/include/caffe/layers/cudnn_tanh_layer.hpp index 59e758d7031..e435cca98b7 100644 --- a/include/caffe/layers/cudnn_tanh_layer.hpp +++ b/include/caffe/layers/cudnn_tanh_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_CUDNN_TANH_LAYER_HPP_ #define CAFFE_CUDNN_TANH_LAYER_HPP_ diff --git a/include/caffe/layers/data_layer.hpp b/include/caffe/layers/data_layer.hpp index 6c361791a0c..afbf08a8302 100644 --- a/include/caffe/layers/data_layer.hpp +++ b/include/caffe/layers/data_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_DATA_LAYER_HPP_ #define CAFFE_DATA_LAYER_HPP_ diff --git a/include/caffe/layers/deconv_layer.hpp b/include/caffe/layers/deconv_layer.hpp index 23ae887e61e..62ef2e2b1ac 100644 --- a/include/caffe/layers/deconv_layer.hpp +++ b/include/caffe/layers/deconv_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_DECONV_LAYER_HPP_ #define CAFFE_DECONV_LAYER_HPP_ diff --git a/include/caffe/layers/detection_evaluate_layer.hpp b/include/caffe/layers/detection_evaluate_layer.hpp new file mode 100644 index 00000000000..c35f0032960 --- /dev/null +++ b/include/caffe/layers/detection_evaluate_layer.hpp @@ -0,0 +1,108 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CAFFE_DETECTION_EVALUATE_LAYER_HPP_ +#define CAFFE_DETECTION_EVALUATE_LAYER_HPP_ + +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Generate the detection evaluation based on DetectionOutputLayer and + * ground truth bounding box labels. + * + * Intended for use with MultiBox detection method. + * + * NOTE: does not implement Backwards operation. 
+ */ +template +class DetectionEvaluateLayer : public Layer { + public: + explicit DetectionEvaluateLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "DetectionEvaluate"; } + virtual inline int ExactBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + /** + * @brief Evaluate the detection output. + * + * @param bottom input Blob vector (exact 2) + * -# @f$ (1 \times 1 \times N \times 7) @f$ + * N detection results. + * -# @f$ (1 \times 1 \times M \times 7) @f$ + * M ground truth. + * @param top Blob vector (length 1) + * -# @f$ (1 \times 1 \times N \times 4) @f$ + * N is the number of detections, and each row is: + * [image_id, label, confidence, true_pos, false_pos] + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + /// @brief Not implemented + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + NOT_IMPLEMENTED; + } + + int num_classes_; + int background_label_id_; + float overlap_threshold_; + bool evaluate_difficult_gt_; + vector > sizes_; + int count_; + bool use_normalized_bbox_; + + bool has_resize_; + ResizeParameter resize_param_; +}; + +} // namespace caffe + +#endif // CAFFE_DETECTION_EVALUATE_LAYER_HPP_ diff --git a/include/caffe/layers/detection_output_layer.hpp b/include/caffe/layers/detection_output_layer.hpp new file mode 100644 index 00000000000..46bc659cb84 --- /dev/null +++ b/include/caffe/layers/detection_output_layer.hpp @@ -0,0 +1,155 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. 
+ +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CAFFE_DETECTION_OUTPUT_LAYER_HPP_ +#define CAFFE_DETECTION_OUTPUT_LAYER_HPP_ + +#include +#include +#include + +#include +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/data_transformer.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/bbox_util.hpp" + +using namespace boost::property_tree; // NOLINT(build/namespaces) + +namespace caffe { + +/** + * @brief Generate the detection output based on location and confidence + * predictions by doing non maximum suppression. + * + * Intended for use with MultiBox detection method. + * + * NOTE: does not implement Backwards operation. + */ +template +class DetectionOutputLayer : public Layer { + public: + explicit DetectionOutputLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "DetectionOutput"; } + virtual inline int MinBottomBlobs() const { return 3; } + virtual inline int MaxBottomBlobs() const { return 4; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + /** + * @brief Do non maximum suppression (nms) on prediction results. + * + * @param bottom input Blob vector (at least 2) + * -# @f$ (N \times C1 \times 1 \times 1) @f$ + * the location predictions with C1 predictions. + * -# @f$ (N \times C2 \times 1 \times 1) @f$ + * the confidence predictions with C2 predictions. + * -# @f$ (N \times 2 \times C3 \times 1) @f$ + * the prior bounding boxes with C3 values. 
+ * @param top output Blob vector (length 1) + * -# @f$ (1 \times 1 \times N \times 7) @f$ + * N is the number of detections after nms, and each row is: + * [image_id, label, confidence, xmin, ymin, xmax, ymax] + */ + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + /// @brief Not implemented + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + NOT_IMPLEMENTED; + } + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + NOT_IMPLEMENTED; + } + + int num_classes_; + bool share_location_; + int num_loc_classes_; + int background_label_id_; + CodeType code_type_; + bool variance_encoded_in_target_; + int keep_top_k_; + float confidence_threshold_; + + int num_; + int num_priors_; + + float nms_threshold_; + int top_k_; + float eta_; + + bool need_save_; + string output_directory_; + string output_name_prefix_; + string output_format_; + map label_to_name_; + map label_to_display_name_; + vector names_; + vector > sizes_; + int num_test_image_; + int name_count_; + bool has_resize_; + ResizeParameter resize_param_; + + ptree detections_; + + bool visualize_; + float visualize_threshold_; + shared_ptr > data_transformer_; + string save_file_; + Blob bbox_preds_; + Blob bbox_permute_; + Blob conf_permute_; +}; + +} // namespace caffe + +#endif // CAFFE_DETECTION_OUTPUT_LAYER_HPP_ diff --git a/include/caffe/layers/dropout_layer.hpp b/include/caffe/layers/dropout_layer.hpp index e83143bc3cc..5e35e13a328 100644 --- a/include/caffe/layers/dropout_layer.hpp +++ b/include/caffe/layers/dropout_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. 
+ +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_DROPOUT_LAYER_HPP_ #define CAFFE_DROPOUT_LAYER_HPP_ diff --git a/include/caffe/layers/dummy_data_layer.hpp b/include/caffe/layers/dummy_data_layer.hpp index 4180f1d01e4..fb058726c2a 100644 --- a/include/caffe/layers/dummy_data_layer.hpp +++ b/include/caffe/layers/dummy_data_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_DUMMY_DATA_LAYER_HPP_ #define CAFFE_DUMMY_DATA_LAYER_HPP_ diff --git a/include/caffe/layers/eltwise_layer.hpp b/include/caffe/layers/eltwise_layer.hpp index 091de834362..cddf12107c4 100644 --- a/include/caffe/layers/eltwise_layer.hpp +++ b/include/caffe/layers/eltwise_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_ELTWISE_LAYER_HPP_ #define CAFFE_ELTWISE_LAYER_HPP_ diff --git a/include/caffe/layers/elu_layer.hpp b/include/caffe/layers/elu_layer.hpp index 0796e898007..92c494d2053 100644 --- a/include/caffe/layers/elu_layer.hpp +++ b/include/caffe/layers/elu_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_ELU_LAYER_HPP_ #define CAFFE_ELU_LAYER_HPP_ diff --git a/include/caffe/layers/embed_layer.hpp b/include/caffe/layers/embed_layer.hpp index 36137a625b6..81b41006155 100644 --- a/include/caffe/layers/embed_layer.hpp +++ b/include/caffe/layers/embed_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_EMBED_LAYER_HPP_ #define CAFFE_EMBED_LAYER_HPP_ diff --git a/include/caffe/layers/euclidean_loss_layer.hpp b/include/caffe/layers/euclidean_loss_layer.hpp index f564569e27a..ff89bb4e39c 100644 --- a/include/caffe/layers/euclidean_loss_layer.hpp +++ b/include/caffe/layers/euclidean_loss_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_EUCLIDEAN_LOSS_LAYER_HPP_ #define CAFFE_EUCLIDEAN_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/exp_layer.hpp b/include/caffe/layers/exp_layer.hpp index 9fc8c396a74..76ce317bef8 100644 --- a/include/caffe/layers/exp_layer.hpp +++ b/include/caffe/layers/exp_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_EXP_LAYER_HPP_ #define CAFFE_EXP_LAYER_HPP_ diff --git a/include/caffe/layers/filter_layer.hpp b/include/caffe/layers/filter_layer.hpp index e040e66612b..fdd3edba787 100644 --- a/include/caffe/layers/filter_layer.hpp +++ b/include/caffe/layers/filter_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_FILTER_LAYER_HPP_ #define CAFFE_FILTER_LAYER_HPP_ diff --git a/include/caffe/layers/flatten_layer.hpp b/include/caffe/layers/flatten_layer.hpp index e494bbb588f..6367d6cca8a 100644 --- a/include/caffe/layers/flatten_layer.hpp +++ b/include/caffe/layers/flatten_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_FLATTEN_LAYER_HPP_ #define CAFFE_FLATTEN_LAYER_HPP_ diff --git a/include/caffe/layers/hdf5_data_layer.hpp b/include/caffe/layers/hdf5_data_layer.hpp index b04cf8e1940..d4a5ad87321 100644 --- a/include/caffe/layers/hdf5_data_layer.hpp +++ b/include/caffe/layers/hdf5_data_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_HDF5_DATA_LAYER_HPP_ #define CAFFE_HDF5_DATA_LAYER_HPP_ diff --git a/include/caffe/layers/hdf5_output_layer.hpp b/include/caffe/layers/hdf5_output_layer.hpp index 487d08fc06c..ecd859a4854 100644 --- a/include/caffe/layers/hdf5_output_layer.hpp +++ b/include/caffe/layers/hdf5_output_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_HDF5_OUTPUT_LAYER_HPP_ #define CAFFE_HDF5_OUTPUT_LAYER_HPP_ diff --git a/include/caffe/layers/hinge_loss_layer.hpp b/include/caffe/layers/hinge_loss_layer.hpp index 54e42bd44da..84594ac15af 100644 --- a/include/caffe/layers/hinge_loss_layer.hpp +++ b/include/caffe/layers/hinge_loss_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_HINGE_LOSS_LAYER_HPP_ #define CAFFE_HINGE_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/im2col_layer.hpp b/include/caffe/layers/im2col_layer.hpp index 71e32f7427f..fa0f0003ba5 100644 --- a/include/caffe/layers/im2col_layer.hpp +++ b/include/caffe/layers/im2col_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_IM2COL_LAYER_HPP_ #define CAFFE_IM2COL_LAYER_HPP_ diff --git a/include/caffe/layers/image_data_layer.hpp b/include/caffe/layers/image_data_layer.hpp index a0d3384e4c9..ccf597c23e7 100644 --- a/include/caffe/layers/image_data_layer.hpp +++ b/include/caffe/layers/image_data_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_IMAGE_DATA_LAYER_HPP_ #define CAFFE_IMAGE_DATA_LAYER_HPP_ diff --git a/include/caffe/layers/infogain_loss_layer.hpp b/include/caffe/layers/infogain_loss_layer.hpp index 633f339a28e..367ef4246b1 100644 --- a/include/caffe/layers/infogain_loss_layer.hpp +++ b/include/caffe/layers/infogain_loss_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_INFOGAIN_LOSS_LAYER_HPP_ #define CAFFE_INFOGAIN_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/inner_product_layer.hpp b/include/caffe/layers/inner_product_layer.hpp index 18d0d6192eb..14d7791a912 100644 --- a/include/caffe/layers/inner_product_layer.hpp +++ b/include/caffe/layers/inner_product_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_INNER_PRODUCT_LAYER_HPP_ #define CAFFE_INNER_PRODUCT_LAYER_HPP_ diff --git a/include/caffe/layers/input_layer.hpp b/include/caffe/layers/input_layer.hpp index f4472678c69..b9f5f17ed9c 100644 --- a/include/caffe/layers/input_layer.hpp +++ b/include/caffe/layers/input_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_INPUT_LAYER_HPP_ #define CAFFE_INPUT_LAYER_HPP_ diff --git a/include/caffe/layers/log_layer.hpp b/include/caffe/layers/log_layer.hpp index 7d037d2bdca..12787269db4 100644 --- a/include/caffe/layers/log_layer.hpp +++ b/include/caffe/layers/log_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_LOG_LAYER_HPP_ #define CAFFE_LOG_LAYER_HPP_ diff --git a/include/caffe/layers/loss_layer.hpp b/include/caffe/layers/loss_layer.hpp index dbdf612c062..9fa911d359c 100644 --- a/include/caffe/layers/loss_layer.hpp +++ b/include/caffe/layers/loss_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_LOSS_LAYER_HPP_ #define CAFFE_LOSS_LAYER_HPP_ @@ -29,6 +66,16 @@ class LossLayer : public Layer { virtual void Reshape( const vector*>& bottom, const vector*>& top); + /** + * Read the normalization mode parameter and compute the normalizer based + * on the blob size. If normalization_mode is VALID, the count of valid + * outputs will be read from valid_count, unless it is -1 in which case + * all outputs are assumed to be valid. 
+ */ + Dtype GetNormalizer( + const LossParameter_NormalizationMode normalization_mode, + const int outer_num, const int inner_num, const int valid_count); + virtual inline int ExactNumBottomBlobs() const { return 2; } /** diff --git a/include/caffe/layers/lrn_layer.hpp b/include/caffe/layers/lrn_layer.hpp index 06cf71a94cb..e485bffc0f6 100644 --- a/include/caffe/layers/lrn_layer.hpp +++ b/include/caffe/layers/lrn_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_LRN_LAYER_HPP_ #define CAFFE_LRN_LAYER_HPP_ @@ -69,6 +106,12 @@ class LRNLayer : public Layer { // Fields used for normalization ACROSS_CHANNELS // scale_ stores the intermediate summing results Blob scale_; + Blob padded_ratio_; // buffer for backward + Blob accum_ratio_; // buffer for backward + + int num_of_threads_; // Number of threads to be used for + // batch based parallelization eg. + // min(batch,omp_get_num_threads()) // Fields used for normalization WITHIN_CHANNEL shared_ptr > split_layer_; diff --git a/include/caffe/layers/lstm_layer.hpp b/include/caffe/layers/lstm_layer.hpp index a0e67c9d432..1c6483be2fa 100644 --- a/include/caffe/layers/lstm_layer.hpp +++ b/include/caffe/layers/lstm_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_LSTM_LAYER_HPP_ #define CAFFE_LSTM_LAYER_HPP_ diff --git a/include/caffe/layers/memory_data_layer.hpp b/include/caffe/layers/memory_data_layer.hpp index 8abcc8c1b68..0fdaf6344ed 100644 --- a/include/caffe/layers/memory_data_layer.hpp +++ b/include/caffe/layers/memory_data_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_MEMORY_DATA_LAYER_HPP_ #define CAFFE_MEMORY_DATA_LAYER_HPP_ diff --git a/include/caffe/layers/mkl_layers.hpp b/include/caffe/layers/mkl_layers.hpp new file mode 100644 index 00000000000..0d5d6641671 --- /dev/null +++ b/include/caffe/layers/mkl_layers.hpp @@ -0,0 +1,647 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CAFFE_MKL2017_LAYERS_HPP_ +#define CAFFE_MKL2017_LAYERS_HPP_ + +#include +#include + +#include "boost/enable_shared_from_this.hpp" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/layers/base_conv_layer.hpp" +#include "caffe/layers/conv_layer.hpp" +#include "caffe/layers/deconv_layer.hpp" +#include "caffe/layers/neuron_layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/mkl_memory.hpp" +#include "mkl_dnn_cppwrapper.h" + +#include "caffe/util/performance.hpp" + +namespace caffe { + +template +class MKLConvolutionLayer : public ConvolutionLayer { + public: + explicit MKLConvolutionLayer(const LayerParameter& param); + + virtual ~MKLConvolutionLayer(); + + virtual inline const char* type() const { return "MklConvolution"; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + // Customized methods + void Init(const vector*>& bottom, + const vector*>& top); + + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void compute_output_shape(); + + void Reshape(const vector*>& bottom, + const vector*>& top); + + private: + /* Fwd step */ + shared_ptr > 
fwd_bottom_data, fwd_top_data, fwd_filter_data, + fwd_bias_data; + dnnPrimitive_t convolutionFwd; + + /* Bwd data step */ + shared_ptr > bwdd_top_diff, bwdd_bottom_diff; + shared_ptr > bwdd_filter_data; + dnnPrimitive_t convolutionBwdData; + + /* Bwd filter step */ + shared_ptr > bwdf_top_diff, bwdf_filter_diff; + shared_ptr > bwdf2fwd_filter_diff; + shared_ptr > bwdf_bottom_data; + dnnPrimitive_t convolutionBwdFilter; + + /* Bwd bias step */ + shared_ptr > bwdb_top_diff, bwdb_bias_diff; + dnnPrimitive_t convolutionBwdBias; + + /* In case of (iter_size > 1) we need additional buffers */ + shared_ptr > bwdf_filter_diff_iter, + bwdb_bias_diff_iter; + + // TODO: temp. compatibility vs. older cafe + size_t width_, + height_, + width_out_, + height_out_, + kernel_w_, + kernel_h_, + stride_w_, + stride_h_; + int pad_w_, + pad_h_; + + bool bprop_unpack_called; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_prop_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_diff_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_bias_); +}; + +template +class MKLDeconvolutionLayer : public DeconvolutionLayer { + public: + explicit MKLDeconvolutionLayer(const LayerParameter& param); + + virtual ~MKLDeconvolutionLayer(); + + virtual inline const char* type() const { return "MklDeconvolution"; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + // Customized methods + void Init(const vector*>& bottom, + const vector*>& top); + + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void compute_output_shape(); + + void Reshape(const vector*>& bottom, + const vector*>& top); 
+ + private: + /* Fwd step */ + shared_ptr > fwd_bottom_data, fwd_top_data, fwd_filter_data, + fwd_bias_data; + dnnPrimitive_t convolutionFwd; + + /* Bwd data step */ + shared_ptr > bwdd_top_diff, bwdd_bottom_diff; + shared_ptr > bwdd_filter_data; + dnnPrimitive_t convolutionBwdData; + + /* Bwd filter step */ + shared_ptr > bwdf_top_diff, bwdf_filter_diff; + shared_ptr > bwdf2fwd_filter_diff; + shared_ptr > bwdf_bottom_data; + dnnPrimitive_t convolutionBwdFilter; + + /* Bwd bias step */ + shared_ptr > bwdb_top_diff, bwdb_bias_diff; + dnnPrimitive_t convolutionBwdBias; + + /* In case of (iter_size > 1) we need additional buffers */ + shared_ptr > bwdf_filter_diff_iter, + bwdb_bias_diff_iter; + + // TODO: temp. compatibility vs. older cafe + size_t width_, + height_, + width_out_, + height_out_, + kernel_w_, + kernel_h_, + stride_w_, + stride_h_; + int pad_w_, + pad_h_; + + bool bprop_unpack_called; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_prop_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_diff_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_bias_); +}; +/** + * @brief Normalize the input in a local region across feature maps. 
+ */ + +template +class MKLLRNLayer : public Layer { + public: + explicit MKLLRNLayer(const LayerParameter& param) + : Layer(param), + lrnFwd(static_cast(NULL)), + lrnBwd(static_cast(NULL)), + fwd_top_data (new MKLData()), + fwd_bottom_data (new MKLData()), + bwd_top_diff (new MKLDiff()), + bwd_bottom_diff (new MKLDiff()), + lrn_buffer_(static_cast(NULL)) { + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_); + } + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual ~MKLLRNLayer(); + + virtual inline const char* type() const { return "LRN"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + virtual void CrossChannelForward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void CrossChannelBackward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + virtual void CrossChannelForward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void CrossChannelBackward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + void Init(const vector*>& bottom, + const vector*>& top); + + int size_; + int pre_pad_; + Dtype alpha_; + Dtype beta_; + Dtype k_; + int num_; + int channels_; + int height_; + int width_; + // Fields used for normalization ACROSS_CHANNELS + // scale_ stores the intermediate summing results + private: + dnnPrimitive_t lrnFwd, lrnBwd; + shared_ptr > fwd_top_data, fwd_bottom_data; + shared_ptr 
> bwd_top_diff, bwd_bottom_diff; + Dtype *lrn_buffer_; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_); +}; + + + +template +class MKLPoolingLayer : public Layer { + public: + explicit MKLPoolingLayer(const LayerParameter& param) + : Layer(param), + fwd_top_data (new MKLData()), + fwd_bottom_data (new MKLData()), + bwd_top_diff (new MKLDiff()), + bwd_bottom_diff (new MKLDiff()), + poolingFwd(NULL), poolingBwd(NULL) { + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_); + } + ~MKLPoolingLayer(); + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + void Init(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Pooling"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + // MAX POOL layers can output an extra top blob for the mask; + // others can only output the pooled inputs. + virtual inline int MaxTopBlobs() const { + return (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX) ? 
2 : 1; + } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + + int kernel_h_, kernel_w_; + int stride_h_, stride_w_; + int pad_h_, pad_w_; + int channels_, num_; + int height_, width_; + int pooled_height_, pooled_width_; + bool global_pooling_; + dnnAlgorithm_t algorithm; + Blob rand_idx_; + Blob max_idx_; + + private: + size_t kernel_size[2], + kernel_stride[4]; + int src_offset[2]; + shared_ptr > fwd_top_data, fwd_bottom_data; + shared_ptr > bwd_top_diff, bwd_bottom_diff; + + dnnPrimitive_t poolingFwd, poolingBwd; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_); +}; + +template +class MKLReLULayer : public NeuronLayer { + public: + /** + * @param param provides ReLUParameter relu_param, + * with ReLULayer options: + * - negative_slope (\b optional, default 0). + * the value @f$ \nu @f$ by which negative values are multiplied. 
+ */ + explicit MKLReLULayer(const LayerParameter& param) + : NeuronLayer(param), + fwd_top_data_ (new MKLData()), + fwd_bottom_data_ (new MKLData()), + bwd_top_diff_ (new MKLDiff()), + bwd_bottom_diff_ (new MKLDiff()), + reluFwd_(NULL), + reluBwd_(NULL) { + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_); + } + + ~MKLReLULayer(); + + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + void Init(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "ReLU"; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + + private: + shared_ptr > fwd_top_data_; + shared_ptr > fwd_bottom_data_; + shared_ptr > bwd_top_diff_; + shared_ptr > bwd_bottom_diff_; + dnnPrimitive_t reluFwd_, reluBwd_; + vector sizes_; + vector strides_; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_); +}; + +template +class MKLConcatLayer : public Layer { + public: + explicit MKLConcatLayer(const LayerParameter& param) + : Layer(param), + concatFwd_(static_cast(NULL)), + concatBwd_(static_cast(NULL)), + fwd_top_data_(new MKLData()), + bwd_top_diff_(new MKLDiff()), + split_channels_(NULL) { + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_); + } + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + virtual inline const char* type() const { return "Concat"; } + ~MKLConcatLayer(); + + protected: + virtual void Forward_cpu(const vector*>& 
bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + + void Init(const vector*>& bottom, + const vector*>& top); + + private: + dnnPrimitive_t concatFwd_; + dnnPrimitive_t concatBwd_; + shared_ptr > fwd_top_data_; + vector > > fwd_bottom_data_; + shared_ptr > bwd_top_diff_; + vector > > bwd_bottom_diff_; + size_t *split_channels_; + + size_t width_; + size_t height_; + size_t channels_; + size_t num_; + size_t num_concats_; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_); +}; + +template +class MKLBatchNormLayer : public Layer { + public: + explicit MKLBatchNormLayer(const LayerParameter& param) + : Layer(param), + fwd_top_data(new MKLData()), + fwd_bottom_data(new MKLData()), + bwd_top_diff(new MKLDiff()), + bwd_bottom_diff(new MKLDiff()), + batchNormFwd(static_cast(NULL)), + batchNormFwdInference(static_cast(NULL)), + batchNormBwd(static_cast(NULL)), + mean_buffer_(static_cast(NULL)), + variance_buffer_(static_cast(NULL)), + scaleShift_buffer_(static_cast(NULL)), + diffScaleShift_buffer_(static_cast(NULL)), + layout_usr_(static_cast(NULL)), + use_global_stats_(false) + { + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_); + } + + virtual ~MKLBatchNormLayer(); + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "BatchNorm"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } +#ifdef USE_MLSL + virtual bool ParamNeedReduce(int param_id) { return param_id >= 3; } +#endif + + protected: + virtual void 
Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + void Init(const vector*>& bottom, + const vector*>& top); + + Dtype moving_average_fraction_; + Dtype eps_; + bool use_weight_bias_; + bool bias_term_; + int num_; + int channels_; + int height_; + int width_; + + private: + shared_ptr > fwd_top_data; + shared_ptr > fwd_bottom_data; + shared_ptr > bwd_top_diff; + shared_ptr > bwd_bottom_diff; + Blob temp_; + dnnPrimitive_t batchNormFwd, batchNormFwdInference, batchNormBwd; + Dtype *mean_buffer_; + Dtype *variance_buffer_; + Dtype *scaleShift_buffer_; + Dtype *diffScaleShift_buffer_; + dnnLayout_t layout_usr_; + bool use_global_stats_; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_); +}; + +template +class MKLSplitLayer : public Layer { + public: + explicit MKLSplitLayer(const LayerParameter& param) + : Layer(param), + bwd_bottom_diff (new MKLDiff()), + sumPrimitive(static_cast(NULL)) { + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + } + + virtual ~MKLSplitLayer(); + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + void Init(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Split"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void 
Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + private: + shared_ptr > bwd_bottom_diff; + vector > > bwd_top_diff; + vector coeffs_; + size_t num_tops; + vector sizes_src_; + vector strides_src_; + dnnPrimitive_t sumPrimitive; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); +}; + +template +class MKLEltwiseLayer : public Layer { + public: + explicit MKLEltwiseLayer(const LayerParameter& param) + : Layer(param), + fwd_top_data (new MKLData()), + sumPrimitive(static_cast(NULL)) { + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + } + + virtual ~MKLEltwiseLayer(); + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + void Init(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Eltwise"; } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + private: + shared_ptr > fwd_top_data; + vector > > fwd_bottom_data; + vector > > bwd_bottom_diff; + + dnnPrimitive_t sumPrimitive; + dnnPrimitive_t convertPrimitive; + + EltwiseParameter_EltwiseOp op_; + vector coeffs_; + Blob max_idx_; + size_t num_bottoms; + int channels_, num_; + int height_, width_; + + bool stable_prod_grad_; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); +}; + +} // namespace caffe +#endif // #ifndef CAFFE_MKL2017_LAYERS_HPP_ diff --git a/include/caffe/layers/mkldnn_layers.hpp b/include/caffe/layers/mkldnn_layers.hpp new file mode 100644 index 00000000000..f63301e2a6f --- /dev/null +++ 
b/include/caffe/layers/mkldnn_layers.hpp @@ -0,0 +1,525 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CAFFE_MKLDNN_LAYERS_HPP_ +#define CAFFE_MKLDNN_LAYERS_HPP_ + +#include +#include + +#include "boost/enable_shared_from_this.hpp" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/engine_parser.hpp" +#include "caffe/layers/base_conv_layer.hpp" +#include "caffe/layers/conv_layer.hpp" +#include "caffe/layers/inner_product_layer.hpp" +#include "caffe/layers/neuron_layer.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/mkldnn_memory.hpp" +#include "mkldnn.hpp" + +#include "caffe/util/performance.hpp" + +using namespace mkldnn; + +namespace caffe { + +// ===== MKLDNNBatchNormLayer ======================================= +template +class MKLDNNBatchNormLayer : public MKLDNNLayer, public Layer { +public: + explicit MKLDNNBatchNormLayer(const LayerParameter& param) + : Layer(param) + , fwd_top_data(), fwd_bottom_data() + , bwd_top_diff(), bwd_bottom_diff() + , BatchNormFwd_pd(), BatchNormBwd_pd() + , mean_memory(), variance_memory() + , scaleshift_memory(), bwd_scaleshift_diff_memory() + , output_memory(), bwd_bottom_diff_memory() + , input_primitive(), bwd_top_diff_primitive() + { + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_); + } + ~MKLDNNBatchNormLayer() {} +#ifdef USE_MLSL + virtual bool ParamNeedReduce(int param_id) { return param_id >= 3; } +#endif + +protected: + virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); + virtual void Reshape(const vector*>& bottom, const vector*>& top); + virtual inline const char* type() const { return "BatchNorm"; } + virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); + virtual void Backward_cpu(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); +private: + void InitBatchNorm(const vector*>& bottom, const 
vector*>& top); + void InitBatchNormBwd(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom); + shared_ptr > fwd_top_data, fwd_bottom_data; + shared_ptr > bwd_top_diff, bwd_bottom_diff; + shared_ptr BatchNormFwd_pd; + shared_ptr BatchNormBwd_pd; + + MKLDNNPrimitive BatchNormFwd, BatchNormBwd; + shared_ptr mean_memory, variance_memory; + + shared_ptr scaleshift_memory, bwd_scaleshift_diff_memory; + shared_ptr output_memory, bwd_bottom_diff_memory; + + shared_ptr input_primitive, bwd_top_diff_primitive; + + int32_t num_, width_, height_, channels_; + Dtype eps_, moving_average_fraction_; + bool use_weight_bias_, bias_term_, use_global_stats_; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_); +}; + +// ===== MKLDNNConvolutionLayer ======================================= +template +class MKLDNNConvolutionLayer : public MKLDNNLayer , public ConvolutionLayer { +public: + explicit MKLDNNConvolutionLayer(const LayerParameter& param); + virtual ~MKLDNNConvolutionLayer() {} + + //For test the parameters of kernel/stride/pad + int GetKernelWidth() { return kernel_w_; } + int GetKernelHeight() { return kernel_h_; } + int GetStrideWidth() { return stride_w_; } + int GetStrideHeight() { return stride_h_; } + int GetPadWidth() { return pad_w_; } + int GetPadHeight() { return pad_h_; } +protected: + virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); + virtual void Backward_cpu(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); + // Customized methods + virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); + void Reshape(const vector*>& bottom, const vector*>& top); +private: + virtual void compute_output_shape(); + virtual void init_properties(const vector*>& bottom, const 
vector*>& top); + void InitConvolutionFwd(const vector*>& bottom, const vector*>& top); + void InitConvolutionBwd(const vector*>& top + , const vector& propagate_down + , const vector*>& bottom); + + shared_ptr > fwd_bottom_data, fwd_top_data, fwd_weights_data, fwd_bias_data + , bwdd_weights_data, bwdw_bottom_data; + shared_ptr > bwdd_bottom_diff, bwdd_top_diff + , bwdw_top_diff, bwdw_weights_diff, bwdw_bias_diff; + shared_ptr convFwd_pd; + shared_ptr convBwdData_pd; + shared_ptr convBwdWeights_pd; + MKLDNNPrimitive convFwd, convBwdData, convBwdWeights; + shared_ptr fwd_top_data_memory, bwdd_bottom_diff_memory + , bwdw_weights_diff_memory, bwdw_bias_diff_memory; + shared_ptr fwd_bottom_data_primitive, fwd_weights_data_primitive, fwd_bias_data_primitive + , bwdd_top_diff_primitive, bwdd_weights_data_primitive + , bwdw_top_diff_primitive, bwdw_bottom_data_primitive; + int32_t width_, height_, width_out_, height_out_, kernel_w_, kernel_h_, stride_w_, stride_h_; + int pad_w_, pad_h_; + mkldnn::algorithm conv_algorithm; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_weights_); +}; + +// ===== MKLDNNInnerProductLayer ======================================= +template +class MKLDNNInnerProductLayer : public MKLDNNLayer , public InnerProductLayer { +public: + explicit MKLDNNInnerProductLayer(const LayerParameter& param); + virtual ~MKLDNNInnerProductLayer(); +protected: + virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); + virtual void Backward_cpu(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); + // Customized methods + virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); + void Reshape(const vector*>& bottom, const vector*>& top); +private: + void 
InitInnerProductFwd(const vector*>& bottom, const vector*>& top); + void InitInnerProductBwd(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); + + shared_ptr > fwd_bottom_data, fwd_top_data, fwd_weights_data, fwd_bias_data + , bwdd_weights_data, bwdw_bottom_data; + shared_ptr > bwdd_bottom_diff, bwdd_top_diff + , bwdw_top_diff, bwdw_weights_diff, bwdw_bias_diff; + shared_ptr ipFwd_pd; + shared_ptr ipBwdData_pd; + shared_ptr ipBwdWeights_pd; + + MKLDNNPrimitive ipFwd, ipBwdData, ipBwdWeights; + shared_ptr fwd_top_data_memory, bwdd_bottom_diff_memory + , bwdw_weights_diff_memory, bwdw_bias_diff_memory; + shared_ptr fwd_bottom_data_primitive, fwd_weights_data_primitive, fwd_bias_data_primitive + , bwdd_top_diff_primitive, bwdd_weights_data_primitive + , bwdw_top_diff_primitive, bwdw_bottom_data_primitive; + int32_t w_, h_; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_weights_); +}; + + +/** + * @brief Normalize the input in a local region across feature maps. 
+ */ + +// ===== MKLDNNLRNLayer ======================================= +template +class MKLDNNLRNLayer : public MKLDNNLayer , public Layer { +public: + explicit MKLDNNLRNLayer(const LayerParameter& param); + virtual ~MKLDNNLRNLayer() {} +protected: + virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); + virtual void Reshape(const vector*>& bottom, const vector*>& top); + virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); + virtual void Backward_cpu(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); + virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); + virtual void Backward_gpu(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); + + virtual inline const char* type() const { return "LRN"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } +private: + void InitLRNFwd(const vector*>& bottom, const vector*>& top); + void InitLRNBwd(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); + + shared_ptr > fwd_top_data, fwd_bottom_data; + shared_ptr > bwd_top_diff, bwd_bottom_diff; + shared_ptr lrnFwd_pd; + shared_ptr lrnBwd_pd; + MKLDNNPrimitive lrnFwd; + MKLDNNPrimitive lrnBwd; + shared_ptr bottom_md; + shared_ptr fwd_top_data_memory, bwd_bottom_diff_memory, scratch_memory; + shared_ptr fwd_bottom_data_primitive, bwd_top_diff_primitive; + Dtype alpha_, beta_, k_; + int size_, num_, width_, height_, channels_; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_); +}; + +// ===== MKLDNNPoolingLayer ======================================= +template +class MKLDNNPoolingLayer : public MKLDNNLayer, public Layer { +public: + explicit MKLDNNPoolingLayer(const LayerParameter& param) + : MKLDNNLayer(), Layer(param) + , fwd_bottom_data(), fwd_top_data() + , bwd_top_diff(), bwd_bottom_diff() + , poolingFwd_pd() + , poolingBwd_pd() + , 
indices_pd() + , indices_memory(), fwd_top_data_memory(), bwd_bottom_diff_memory() + , fwd_bottom_data_primitive(), bwd_top_diff_primitive() + , num_(0), channels_(0), width_(0), height_(0), width_out_(0), height_out_(0) + , kernel_w_(0), kernel_h_(0), stride_w_(0), stride_h_(0) + , pad_t_(0),pad_b_(0), pad_l_(0), pad_r_(0) + , global_pooling_(false) + , force_exclude_padding_flag_(false) + { + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_); + } + ~MKLDNNPoolingLayer() {} +protected: + virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); + virtual void Reshape(const vector*>& bottom, const vector*>& top); + + virtual inline const char* type() const { return "Pooling"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int MinTopBlobs() const { return 1; } + // MAX POOL layers can output an extra top blob for the mask; + // others can only output the pooled inputs. + virtual inline int MaxTopBlobs() const { + return (this->layer_param_.pooling_param().pool() == PoolingParameter_PoolMethod_MAX) ? 
2 : 1; + } +protected: + virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); + virtual void Backward_cpu(const vector*>& top,const vector& propagate_down + ,const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, const vector& propagate_down + ,const vector*>& bottom); + +private: + void InitPoolingFwd(const vector*>& bottom, const vector*>& top); + void InitPoolingBwd(const vector*>& bottom + , const vector& propagate_down + , const vector*>& top); + + shared_ptr> fwd_bottom_data, fwd_top_data; + shared_ptr> bwd_top_diff, bwd_bottom_diff; + shared_ptr poolingFwd_pd; + shared_ptr poolingBwd_pd; + MKLDNNPrimitive poolingFwd, poolingBwd; + shared_ptr indices_pd; + shared_ptr indices_memory, fwd_top_data_memory, bwd_bottom_diff_memory; + shared_ptr fwd_bottom_data_primitive, bwd_top_diff_primitive; + int32_t num_, channels_, width_, height_, width_out_, height_out_; + int32_t kernel_w_, kernel_h_, stride_w_, stride_h_; + int32_t pad_t_, pad_b_, pad_l_, pad_r_; + Blob max_idx_; + bool global_pooling_; + bool force_exclude_padding_flag_; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_); +}; + +// ===== MKLDNNReLULayer ======================================= +template +class MKLDNNReLULayer : public MKLDNNLayer , public NeuronLayer { +public: + /** + * @param param provides ReLUParameter relu_param, + * with ReLULayer options: + * - negative_slope (\b optional, default 0). + * the value @f$ \nu @f$ by which negative values are multiplied. 
+ */ + explicit MKLDNNReLULayer(const LayerParameter& param) + : MKLDNNLayer(), NeuronLayer(param) + , fwd_top_data(), fwd_bottom_data() + , bwd_top_diff(), bwd_bottom_diff() + , reluFwd_pd(), reluBwd_pd() + , fwd_top_data_memory(), bwd_bottom_diff_memory() + , fwd_bottom_data_primitive(), bwd_top_diff_primitive() + , num_(0), width_(0), height_(0), channels_(0) + { + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_); + } + ~MKLDNNReLULayer() {} + +protected: + virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); + virtual void Reshape(const vector*>& bottom, const vector*>& top); + virtual inline const char* type() const { return "ReLU"; } + virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); + virtual void Backward_cpu(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); +private: + void InitReLUFwd(const vector*>& bottom, const vector*>& top); + void InitReLUBwd(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); + + shared_ptr > fwd_top_data, fwd_bottom_data; + shared_ptr > bwd_top_diff, bwd_bottom_diff; + shared_ptr reluFwd_pd; + shared_ptr reluBwd_pd; + MKLDNNPrimitive reluFwd, reluBwd; + shared_ptr fwd_top_data_memory, bwd_bottom_diff_memory; + shared_ptr fwd_bottom_data_primitive, bwd_top_diff_primitive; + int32_t num_, width_, height_, channels_; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_); +}; + +// ===== MKLDNNConcatLayer ====================================== +template +class MKLDNNConcatLayer : public MKLDNNLayer , public Layer { +public: + explicit MKLDNNConcatLayer(const LayerParameter& param) + : MKLDNNLayer(), Layer(param), + concatFwd_pd(), fwd_output_memory(), + bwd_reorder_input_memory(), 
bwd_reorder_output_memory(), + fwd_top_data(), fwd_bottom_data(), split_channels() { + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_); + } +protected: + virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); + virtual void Reshape(const vector*>& bottom, const vector*>& top); + virtual inline const char* type() const { return "Concat"; } + virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); + virtual void Backward_cpu(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); +private: + void InitConcatFwd(const vector*>& bottom, const vector*>& top); + void InitConcatBwd(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); + + shared_ptr concatFwd_pd; + shared_ptr fwd_output_memory; + shared_ptr bwd_reorder_input_memory; + vector> bwd_reorder_output_memory; + vector> bwd_bottom_memory_; + vector> fwd_input_primitives_; + vector fwd_input_primitives_at_; + MKLDNNPrimitive concatFwd; + shared_ptr > fwd_top_data; + vector > > fwd_bottom_data; + shared_ptr > bwd_top_diff; + vector > > bwd_bottom_diff; + vector > reorders; + vector split_channels; + + int32_t num_, width_, height_, channels_, num_concats_; + int concat_dimension; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_); +}; + +// ===== MKLDNNSplitLayer ====================================== +template +class MKLDNNSplitLayer : public MKLDNNLayer , public Layer { +public: + explicit MKLDNNSplitLayer(const LayerParameter& param) + : MKLDNNLayer(), Layer(param), + splitBwd_pd_(), bwd_bottom_diff_memory_() + { + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_); + } + ~MKLDNNSplitLayer(); + +protected: + virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); + virtual void 
Reshape(const vector*>& bottom, const vector*>& top); + virtual inline const char* type() const { return "Split"; } + virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); + virtual void Backward_cpu(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); +private: + void InitSplitFwd(const vector*>& bottom, const vector*>& top); + void InitSplitBwd(const vector*>& top, const vector*>& bottom); + + private: + vector sizes_src_; + vector strides_src_; + MKLDNNPrimitive splitBwd_; + shared_ptr splitBwd_pd_; + shared_ptr bwd_bottom_diff_memory_; + shared_ptr > bwd_bottom_diff_; + vector> bwd_top_diff_primitives_; + vector bwd_top_diffs_primitives_at_; + vector > > bwd_top_diffs_; + + PERFORMANCE_EVENT_ID_DECL(perf_id_bw_); +}; + +// ===== MKLDNNEltwiseLayer ======================================= +template +class MKLDNNEltwiseLayer : public MKLDNNLayer , public Layer { +public: + explicit MKLDNNEltwiseLayer(const LayerParameter& param) + : MKLDNNLayer(), Layer(param) + , fwd_top_data(), fwd_bottom_data() + , eltwiseFwd_pd() + , fwd_top_data_memory() + , fwd_bottom_data_primitives_() + , num_(0), width_(0), height_(0), channels_(0) + , num_bottoms_(0) + { + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + } + ~MKLDNNEltwiseLayer() {} + +protected: + virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); + virtual void Reshape(const vector*>& bottom, const vector*>& top); + virtual inline const char* type() const { return "Eltwise"; } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int ExactNumTopBlobs() const { return 1; } + virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, const vector*>& top); + virtual void Backward_cpu(const vector*>& top, 
const vector& propagate_down + , const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); +private: + void InitEltwiseFwd(const vector*>& bottom, const vector*>& top); + void InitEltwiseBwd(const vector*>& top, const vector& propagate_down + , const vector*>& bottom); + + shared_ptr > fwd_top_data; + vector > > fwd_bottom_data; + shared_ptr eltwiseFwd_pd; + MKLDNNPrimitive eltwiseFwd; + + shared_ptr fwd_top_data_memory; + vector> fwd_bottom_data_primitives_; + vector fwd_bottom_data_primitives_at_; + + EltwiseParameter_EltwiseOp op_; + vector coeffs_; + Blob max_idx_; + int32_t num_, width_, height_, channels_; + int32_t num_bottoms_; + bool stable_prod_grad_; + + PERFORMANCE_EVENT_ID_DECL(perf_id_fw_); +}; + + +} // namespace caffe +#endif // #ifndef CAFFE_MKLDNN_LAYERS_HPP_ diff --git a/include/caffe/layers/multibox_loss_layer.hpp b/include/caffe/layers/multibox_loss_layer.hpp new file mode 100644 index 00000000000..e675ea70364 --- /dev/null +++ b/include/caffe/layers/multibox_loss_layer.hpp @@ -0,0 +1,149 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CAFFE_MULTIBOX_LOSS_LAYER_HPP_ +#define CAFFE_MULTIBOX_LOSS_LAYER_HPP_ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/bbox_util.hpp" + +#include "caffe/layers/loss_layer.hpp" + +namespace caffe { + +/** + * @brief Perform MultiBox operations. Including the following: + * + * - decode the predictions. + * - perform matching between priors/predictions and ground truth. + * - use matched boxes and confidences to compute loss. 
+ * + */ +template +class MultiBoxLossLayer : public LossLayer { + public: + explicit MultiBoxLossLayer(const LayerParameter& param) + : LossLayer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "MultiBoxLoss"; } + // bottom[0] stores the location predictions. + // bottom[1] stores the confidence predictions. + // bottom[2] stores the prior bounding boxes. + // bottom[3] stores the ground truth bounding boxes. + virtual inline int ExactNumBottomBlobs() const { return 4; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + // The internal localization loss layer. + shared_ptr > loc_loss_layer_; + LocLossType loc_loss_type_; + float loc_weight_; + // bottom vector holder used in Forward function. + vector*> loc_bottom_vec_; + // top vector holder used in Forward function. + vector*> loc_top_vec_; + // blob which stores the matched location prediction. + Blob loc_pred_; + // blob which stores the corresponding matched ground truth. + Blob loc_gt_; + // localization loss. + Blob loc_loss_; + + // The internal confidence loss layer. + shared_ptr > conf_loss_layer_; + ConfLossType conf_loss_type_; + // bottom vector holder used in Forward function. + vector*> conf_bottom_vec_; + // top vector holder used in Forward function. + vector*> conf_top_vec_; + // blob which stores the confidence prediction. + Blob conf_pred_; + // blob which stores the corresponding ground truth label. + Blob conf_gt_; + // confidence loss. 
+ Blob conf_loss_; + + MultiBoxLossParameter multibox_loss_param_; + int num_classes_; + bool share_location_; + MatchType match_type_; + float overlap_threshold_; + bool use_prior_for_matching_; + int background_label_id_; + bool use_difficult_gt_; + bool do_neg_mining_; + float neg_pos_ratio_; + float neg_overlap_; + CodeType code_type_; + bool encode_variance_in_target_; + bool map_object_to_agnostic_; + bool ignore_cross_boundary_bbox_; + bool bp_inside_; + MiningType mining_type_; + + int loc_classes_; + int num_gt_; + int num_; + int num_priors_; + + int num_matches_; + int num_conf_; + vector > > all_match_indices_; + vector > all_neg_indices_; + + // How to normalize the loss. + LossParameter_NormalizationMode normalization_; +}; + +} // namespace caffe + +#endif // CAFFE_MULTIBOX_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/multinomial_logistic_loss_layer.hpp b/include/caffe/layers/multinomial_logistic_loss_layer.hpp index 3977cf9ea57..3aea5b0beb4 100644 --- a/include/caffe/layers/multinomial_logistic_loss_layer.hpp +++ b/include/caffe/layers/multinomial_logistic_loss_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_MULTINOMIAL_LOGISTIC_LOSS_LAYER_HPP_ #define CAFFE_MULTINOMIAL_LOGISTIC_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/mvn_layer.hpp b/include/caffe/layers/mvn_layer.hpp index 3a235ceca64..50edcd82e82 100644 --- a/include/caffe/layers/mvn_layer.hpp +++ b/include/caffe/layers/mvn_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_MVN_LAYER_HPP_ #define CAFFE_MVN_LAYER_HPP_ diff --git a/include/caffe/layers/neuron_layer.hpp b/include/caffe/layers/neuron_layer.hpp index 10c108ce682..223aa4b42bd 100644 --- a/include/caffe/layers/neuron_layer.hpp +++ b/include/caffe/layers/neuron_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_NEURON_LAYER_HPP_ #define CAFFE_NEURON_LAYER_HPP_ diff --git a/include/caffe/layers/normalize_layer.hpp b/include/caffe/layers/normalize_layer.hpp new file mode 100644 index 00000000000..723fc846d4a --- /dev/null +++ b/include/caffe/layers/normalize_layer.hpp @@ -0,0 +1,88 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CAFFE_NORMALIZE_LAYER_HPP_ +#define CAFFE_NORMALIZE_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Normalizes the input to have L_p norm of 1 with scale learnable. + * + * TODO(weiliu89): thorough documentation for Forward, Backward, and proto params. 
+ */ +template +class NormalizeLayer : public Layer { + public: + explicit NormalizeLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Normalize"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob norm_; + Blob sum_channel_multiplier_, sum_spatial_multiplier_; + Blob buffer_, buffer_channel_, buffer_spatial_; + bool across_spatial_; + bool channel_shared_; + Dtype eps_; +}; + +} // namespace caffe + +#endif // CAFFE_NORMALIZE_LAYER_HPP_ diff --git a/include/caffe/layers/parameter_layer.hpp b/include/caffe/layers/parameter_layer.hpp index 188b92acbe2..176117c4a61 100644 --- a/include/caffe/layers/parameter_layer.hpp +++ b/include/caffe/layers/parameter_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_PARAMETER_LAYER_HPP_ #define CAFFE_PARAMETER_LAYER_HPP_ diff --git a/include/caffe/layers/permute_layer.hpp b/include/caffe/layers/permute_layer.hpp new file mode 100644 index 00000000000..cac6e278f1c --- /dev/null +++ b/include/caffe/layers/permute_layer.hpp @@ -0,0 +1,96 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CAFFE_PERMUTE_LAYER_HPP_ +#define CAFFE_PERMUTE_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Permute the input blob by changing the memory order of the data. + * + * TODO(weiliu89): thorough documentation for Forward, Backward, and proto params. + */ + +// The main function which does the permute. +template +void Permute(const int count, Dtype* bottom_data, const bool forward, + const int* permute_order, const int* old_steps, const int* new_steps, + const int num_axes, Dtype* top_data); + +template +class PermuteLayer : public Layer { + public: + explicit PermuteLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "Permute"; } + virtual inline int ExactNumBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + int num_axes_; + 
bool need_permute_; + + // Use Blob because it is convenient to be accessible in .cu file. + Blob permute_order_; + Blob old_steps_; + Blob new_steps_; +}; + +} // namespace caffe + +#endif // CAFFE_PERMUTE_LAYER_HPP_ diff --git a/include/caffe/layers/pooling_layer.hpp b/include/caffe/layers/pooling_layer.hpp index f4d6803ba8e..a3173cc8e4b 100644 --- a/include/caffe/layers/pooling_layer.hpp +++ b/include/caffe/layers/pooling_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_POOLING_LAYER_HPP_ #define CAFFE_POOLING_LAYER_HPP_ @@ -7,6 +44,8 @@ #include "caffe/layer.hpp" #include "caffe/proto/caffe.pb.h" +#include "caffe/layers/pooling_layer_impl.hpp" + namespace caffe { /** @@ -16,6 +55,13 @@ namespace caffe { */ template class PoolingLayer : public Layer { + // Private code generators. + friend class PoolingCodeGeneratorForward; + friend class PoolingCodeGeneratorBackward; + PoolingCodeGeneratorForward Forward_code_generator; + PoolingCodeGeneratorBackward Backward_code_generator; + + public: explicit PoolingLayer(const LayerParameter& param) : Layer(param) {} diff --git a/include/caffe/layers/pooling_layer_impl.hpp b/include/caffe/layers/pooling_layer_impl.hpp new file mode 100644 index 00000000000..f119ace35e0 --- /dev/null +++ b/include/caffe/layers/pooling_layer_impl.hpp @@ -0,0 +1,149 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CAFFE_CODE_GENERATORS_POOLING_H_ +#define CAFFE_CODE_GENERATORS_POOLING_H_ + +#include +#include + +#if defined __x86_64__ || defined _M_X64 +# define XBYAK_NO_OP_NAMES +# define XBYAK_USE_MMAP_ALLOCATOR +# include "../xbyak/xbyak_util.h" +#endif + +#include "caffe/proto/caffe.pb.h" + +namespace caffe { +// Declarations of CodeGenerator classes. 
+ +template +class PoolingLayer; + +template +class Blob; + +template +class PoolingCodeGeneratorForward +#if defined __x86_64__ || defined _M_X64 + : public ::Xbyak::CodeGenerator +#endif +{ + public: + PoolingCodeGeneratorForward(); + ~PoolingCodeGeneratorForward(); + + typedef void (Callback_t)( + const Dtype* bottom_data, + Dtype* top_data, + int top_count, + int batch_start, + int batch_end, + void* mask, + int64_t channel_start, + int64_t channel_end, + PoolingLayer* layer, + bool use_top_mask); + + Callback_t* Get_callback( + PoolingLayer* layer, + Blob* top, + bool use_top_mask); + + private: + void Create_callback(PoolingLayer* layer); + + static void Naive( + const Dtype* bottom_data, + Dtype* top_data, + int top_count, + int batch_start, + int batch_end, + void* mask, + int64_t channel_start, + int64_t channel_end, + PoolingLayer* layer, + bool use_top_mask); + Callback_t* Callback; + std::vector Layer_output_shape_signature; + bool Use_top_mask; + PoolingParameter_PoolMethod Method; +}; + +template +class PoolingCodeGeneratorBackward +#if defined __x86_64__ || defined _M_X64 + : public ::Xbyak::CodeGenerator +#endif +{ + public: + PoolingCodeGeneratorBackward(); + ~PoolingCodeGeneratorBackward(); + + typedef void (Callback_t)( + const Dtype* top_diff, + Dtype* bottom_diff, + int batch_start, + int batch_end, + int64_t channel_start, + int64_t channel_end, + bool use_top_mask, + const void* mask, + PoolingLayer* layer); + + Callback_t* Get_callback(PoolingLayer* layer, Blob* top); + + private: + void Create_callback(PoolingLayer* layer); + + static void Naive( + const Dtype* top_diff, + Dtype* bottom_diff, + int batch_start, + int batch_end, + int64_t channel_start, + int64_t channel_end, + bool use_top_mask, + const void* mask, + PoolingLayer* layer); + Callback_t* Callback; + std::vector layer_output_shape_signature; +}; +} // namespace caffe + +#endif // CAFFE_CODE_GENERATORS_POOLING_H_ diff --git a/include/caffe/layers/power_layer.hpp 
b/include/caffe/layers/power_layer.hpp index 6ecbafcaca8..c22cdd70a9e 100644 --- a/include/caffe/layers/power_layer.hpp +++ b/include/caffe/layers/power_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_POWER_LAYER_HPP_ #define CAFFE_POWER_LAYER_HPP_ diff --git a/include/caffe/layers/prelu_layer.hpp b/include/caffe/layers/prelu_layer.hpp index 3ddfb484b66..93a327cb5e7 100644 --- a/include/caffe/layers/prelu_layer.hpp +++ b/include/caffe/layers/prelu_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_PRELU_LAYER_HPP_ #define CAFFE_PRELU_LAYER_HPP_ diff --git a/include/caffe/layers/prior_box_layer.hpp b/include/caffe/layers/prior_box_layer.hpp new file mode 100644 index 00000000000..4adea9f338b --- /dev/null +++ b/include/caffe/layers/prior_box_layer.hpp @@ -0,0 +1,121 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CAFFE_PRIORBOX_LAYER_HPP_ +#define CAFFE_PRIORBOX_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +/** + * @brief Generate the prior boxes of designated sizes and aspect ratios across + * all dimensions @f$ (H \times W) @f$. + * + * Intended for use with MultiBox detection method to generate prior (template). + * + * NOTE: does not implement Backwards operation. + */ +template +class PriorBoxLayer : public Layer { + public: + /** + * @param param provides PriorBoxParameter prior_box_param, + * with PriorBoxLayer options: + * - min_size (\b minimum box size in pixels. can be multiple. required!). + * - max_size (\b maximum box size in pixels. can be ignored or same as the + * # of min_size.). + * - aspect_ratio (\b optional aspect ratios of the boxes. can be multiple). + * - flip (\b optional bool, default true). + * if set, flip the aspect ratio. 
+ */
+  explicit PriorBoxLayer(const LayerParameter& param)
+      : Layer<Dtype>(param) {}
+  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+
+  virtual inline const char* type() const { return "PriorBox"; }
+  virtual inline int ExactNumBottomBlobs() const { return 2; }
+  virtual inline int ExactNumTopBlobs() const { return 1; }
+
+ protected:
+  /**
+   * @brief Generates prior boxes for a layer with specified parameters.
+   *
+   * @param bottom input Blob vector (at least 2)
+   *   -# @f$ (N \times C \times H_i \times W_i) @f$
+   *      the input layer @f$ x_i @f$
+   *   -# @f$ (N \times C \times H_0 \times W_0) @f$
+   *      the data layer @f$ x_0 @f$
+   * @param top output Blob vector (length 1)
+   *   -# @f$ (N \times 2 \times K*4) @f$ where @f$ K @f$ is the prior numbers
+   *   By default, a box of aspect ratio 1 and min_size and a box of aspect
+   *   ratio 1 and sqrt(min_size * max_size) are created.
+   */
+  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
+      const vector<Blob<Dtype>*>& top);
+  /// @brief Not implemented
+  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
+      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
+    return;
+  }
+
+  vector<float> min_sizes_;
+  vector<float> max_sizes_;
+  vector<float> aspect_ratios_;
+  bool flip_;
+  int num_priors_;
+  bool clip_;
+  vector<float> variance_;
+
+  int img_w_;
+  int img_h_;
+  float step_w_;
+  float step_h_;
+
+  float offset_;
+};
+
+}  // namespace caffe
+
+#endif  // CAFFE_PRIORBOX_LAYER_HPP_
diff --git a/include/caffe/layers/python_layer.hpp b/include/caffe/layers/python_layer.hpp
index 66dbbdf13b8..e80f8d0f3cf 100644
--- a/include/caffe/layers/python_layer.hpp
+++ b/include/caffe/layers/python_layer.hpp
@@ -1,3 +1,40 @@
+/*
+All modification made by Intel Corporation: © 2016 Intel Corporation
+
+All contributions by the University of California:
+Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
+All rights reserved.
+ +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_PYTHON_LAYER_HPP_ #define CAFFE_PYTHON_LAYER_HPP_ diff --git a/include/caffe/layers/recurrent_layer.hpp b/include/caffe/layers/recurrent_layer.hpp index ca17371b994..ad8a0c9f169 100644 --- a/include/caffe/layers/recurrent_layer.hpp +++ b/include/caffe/layers/recurrent_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_RECURRENT_LAYER_HPP_ #define CAFFE_RECURRENT_LAYER_HPP_ diff --git a/include/caffe/layers/reduction_layer.hpp b/include/caffe/layers/reduction_layer.hpp index 804a495b11c..48a1902975e 100644 --- a/include/caffe/layers/reduction_layer.hpp +++ b/include/caffe/layers/reduction_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_REDUCTION_LAYER_HPP_ #define CAFFE_REDUCTION_LAYER_HPP_ diff --git a/include/caffe/layers/relu_layer.hpp b/include/caffe/layers/relu_layer.hpp index d7a73f7a8d1..4138fc3189d 100644 --- a/include/caffe/layers/relu_layer.hpp +++ b/include/caffe/layers/relu_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_RELU_LAYER_HPP_ #define CAFFE_RELU_LAYER_HPP_ diff --git a/include/caffe/layers/reshape_layer.hpp b/include/caffe/layers/reshape_layer.hpp index d11e06384ce..16ef36f8e76 100644 --- a/include/caffe/layers/reshape_layer.hpp +++ b/include/caffe/layers/reshape_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_XXX_LAYER_HPP_ #define CAFFE_XXX_LAYER_HPP_ diff --git a/include/caffe/layers/rnn_layer.hpp b/include/caffe/layers/rnn_layer.hpp index 6dce238ae17..9c4230c8ad0 100644 --- a/include/caffe/layers/rnn_layer.hpp +++ b/include/caffe/layers/rnn_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_RNN_LAYER_HPP_ #define CAFFE_RNN_LAYER_HPP_ diff --git a/include/caffe/layers/scale_layer.hpp b/include/caffe/layers/scale_layer.hpp index 924df2e51ab..ff644aa70ff 100644 --- a/include/caffe/layers/scale_layer.hpp +++ b/include/caffe/layers/scale_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_SCALE_LAYER_HPP_ #define CAFFE_SCALE_LAYER_HPP_ diff --git a/include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp b/include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp index 598dca5ff2c..02eecc14c45 100644 --- a/include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp +++ b/include/caffe/layers/sigmoid_cross_entropy_loss_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_SIGMOID_CROSS_ENTROPY_LOSS_LAYER_HPP_ #define CAFFE_SIGMOID_CROSS_ENTROPY_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/sigmoid_layer.hpp b/include/caffe/layers/sigmoid_layer.hpp index ac0f6927feb..b5d782d03d9 100644 --- a/include/caffe/layers/sigmoid_layer.hpp +++ b/include/caffe/layers/sigmoid_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_SIGMOID_LAYER_HPP_ #define CAFFE_SIGMOID_LAYER_HPP_ diff --git a/include/caffe/layers/silence_layer.hpp b/include/caffe/layers/silence_layer.hpp index fba087fcef0..42f76ff5bf3 100644 --- a/include/caffe/layers/silence_layer.hpp +++ b/include/caffe/layers/silence_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_SILENCE_LAYER_HPP_ #define CAFFE_SILENCE_LAYER_HPP_ diff --git a/include/caffe/layers/slice_layer.hpp b/include/caffe/layers/slice_layer.hpp index 10a0abb6eeb..207e21147f3 100644 --- a/include/caffe/layers/slice_layer.hpp +++ b/include/caffe/layers/slice_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_SLICE_LAYER_HPP_ #define CAFFE_SLICE_LAYER_HPP_ diff --git a/include/caffe/layers/smooth_L1_loss_layer.hpp b/include/caffe/layers/smooth_L1_loss_layer.hpp new file mode 100644 index 00000000000..28bd55928fb --- /dev/null +++ b/include/caffe/layers/smooth_L1_loss_layer.hpp @@ -0,0 +1,105 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +// ------------------------------------------------------------------ +// Fast R-CNN +// copyright (c) 2015 Microsoft +// Licensed under The MIT License [see fast-rcnn/LICENSE for details] +// Written by Ross Girshick +// Modified by Wei Liu +// ------------------------------------------------------------------ + +#ifndef CAFFE_SMOOTH_L1_LOSS_LAYER_HPP_ +#define CAFFE_SMOOTH_L1_LOSS_LAYER_HPP_ + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +#include "caffe/layers/loss_layer.hpp" + +namespace caffe { + +/** + * @brief Computes the SmoothL1 loss as introduced in:@f$ + * Fast R-CNN, Ross Girshick, ICCV 2015. 
+ */ +template +class SmoothL1LossLayer : public LossLayer { + public: + explicit SmoothL1LossLayer(const LayerParameter& param) + : LossLayer(param), diff_() {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "SmoothL1Loss"; } + + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int MaxBottomBlobs() const { return 3; } + + /** + * Unlike most loss layers, in the SmoothL1LossLayer we can backpropagate + * to both inputs -- override to return true and always allow force_backward. + */ + virtual inline bool AllowForceBackward(const int bottom_index) const { + return true; + } + + protected: + /// @copydoc SmoothL1LossLayer + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + Blob diff_; + Blob errors_; + bool has_weights_; +}; + +} // namespace caffe + +#endif // CAFFE_SMOOTH_L1_LOSS_LAYER_HPP_ diff --git a/include/caffe/layers/softmax_layer.hpp b/include/caffe/layers/softmax_layer.hpp index c65b8703e43..b3ede713c05 100644 --- a/include/caffe/layers/softmax_layer.hpp +++ b/include/caffe/layers/softmax_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_SOFTMAX_LAYER_HPP_ #define CAFFE_SOFTMAX_LAYER_HPP_ @@ -35,6 +72,8 @@ class SoftmaxLayer : public Layer { const vector& propagate_down, const vector*>& bottom); virtual void Backward_gpu(const vector*>& top, const vector& propagate_down, const vector*>& bottom); + virtual void Forward_cpu_fast_case(const vector*>& bottom, + const vector*>& top); int outer_num_; int inner_num_; diff --git a/include/caffe/layers/softmax_loss_layer.hpp b/include/caffe/layers/softmax_loss_layer.hpp index f07e8a02cf1..741d31f1d69 100644 --- a/include/caffe/layers/softmax_loss_layer.hpp +++ b/include/caffe/layers/softmax_loss_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_SOFTMAX_WITH_LOSS_LAYER_HPP_ #define CAFFE_SOFTMAX_WITH_LOSS_LAYER_HPP_ @@ -63,6 +100,9 @@ class SoftmaxWithLossLayer : public LossLayer { virtual inline int MinTopBlobs() const { return 1; } virtual inline int MaxTopBlobs() const { return 2; } + virtual inline int ExactNumBottomBlobs() const { return -1; } + virtual inline int MinBottomBlobs() const { return 2; } + virtual inline int MaxBottomBlobs() const { return 3; } protected: virtual void Forward_cpu(const vector*>& bottom, const vector*>& top); diff --git a/include/caffe/layers/split_layer.hpp b/include/caffe/layers/split_layer.hpp index 8140dfc7c40..b44b0f605be 100644 --- a/include/caffe/layers/split_layer.hpp +++ b/include/caffe/layers/split_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_SPLIT_LAYER_HPP_ #define CAFFE_SPLIT_LAYER_HPP_ diff --git a/include/caffe/layers/spp_layer.hpp b/include/caffe/layers/spp_layer.hpp index 9f145cc77e3..37fdbcff927 100644 --- a/include/caffe/layers/spp_layer.hpp +++ b/include/caffe/layers/spp_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_SPP_LAYER_HPP_ #define CAFFE_SPP_LAYER_HPP_ @@ -20,6 +57,7 @@ class SPPLayer : public Layer { public: explicit SPPLayer(const LayerParameter& param) : Layer(param) {} + ~SPPLayer(); virtual void LayerSetUp(const vector*>& bottom, const vector*>& top); virtual void Reshape(const vector*>& bottom, @@ -52,19 +90,19 @@ class SPPLayer : public Layer { /// top vector holder used in call to the underlying SplitLayer::Forward vector*> split_top_vec_; /// bottom vector holder used in call to the underlying PoolingLayer::Forward - vector*>*> pooling_bottom_vecs_; + vector*> > > pooling_bottom_vecs_; /// the internal Pooling layers of different kernel sizes vector > > pooling_layers_; /// top vector holders used in call to the underlying PoolingLayer::Forward - vector*>*> pooling_top_vecs_; + vector*> > > pooling_top_vecs_; /// pooling_outputs stores the outputs of the PoolingLayers - vector*> pooling_outputs_; + vector > > pooling_outputs_; /// the internal Flatten layers that the Pooling layers feed into - vector*> flatten_layers_; + vector > > flatten_layers_; /// top vector holders used in call to the underlying FlattenLayer::Forward - vector*>*> flatten_top_vecs_; + vector*> > > flatten_top_vecs_; /// flatten_outputs stores the outputs of the FlattenLayers - vector*> flatten_outputs_; + vector > > flatten_outputs_; /// bottom vector holder used in call to the underlying ConcatLayer::Forward vector*> concat_bottom_vec_; /// the internal 
Concat layers that the Flatten layers feed into diff --git a/include/caffe/layers/tanh_layer.hpp b/include/caffe/layers/tanh_layer.hpp index 8f95e9322d9..febe59421c7 100644 --- a/include/caffe/layers/tanh_layer.hpp +++ b/include/caffe/layers/tanh_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_TANH_LAYER_HPP_ #define CAFFE_TANH_LAYER_HPP_ diff --git a/include/caffe/layers/threshold_layer.hpp b/include/caffe/layers/threshold_layer.hpp index 3bf4db63e5c..63894a6021f 100644 --- a/include/caffe/layers/threshold_layer.hpp +++ b/include/caffe/layers/threshold_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_THRESHOLD_LAYER_HPP_ #define CAFFE_THRESHOLD_LAYER_HPP_ diff --git a/include/caffe/layers/tile_layer.hpp b/include/caffe/layers/tile_layer.hpp index fbdbe2f0c53..8eb331e7f16 100644 --- a/include/caffe/layers/tile_layer.hpp +++ b/include/caffe/layers/tile_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_TILE_LAYER_HPP_ #define CAFFE_TILE_LAYER_HPP_ diff --git a/include/caffe/layers/video_data_layer.hpp b/include/caffe/layers/video_data_layer.hpp new file mode 100644 index 00000000000..7d24bb8edfb --- /dev/null +++ b/include/caffe/layers/video_data_layer.hpp @@ -0,0 +1,94 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CAFFE_VIDEO_DATA_LAYER_HPP_ +#define CAFFE_VIDEO_DATA_LAYER_HPP_ + +#ifdef USE_OPENCV +#if OPENCV_VERSION == 3 +#include +#else +#include +#endif // OPENCV_VERSION == 3 + +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/data_transformer.hpp" +#include "caffe/internal_thread.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/base_data_layer.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/db.hpp" + +namespace caffe { + +/** + * @brief Provides data to the Net from webcam or video files. + * + * TODO(weiliu89): thorough documentation for Forward and proto params. 
+ */ +template +class VideoDataLayer : public BasePrefetchingDataLayer { + public: + explicit VideoDataLayer(const LayerParameter& param); + virtual ~VideoDataLayer(); + virtual void DataLayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual inline bool ShareInParallel() const { return false; } + virtual inline const char* type() const { return "VideoData"; } + virtual inline int ExactNumBottomBlobs() const { return 0; } + virtual inline int MinTopBlobs() const { return 1; } + + protected: + virtual void load_batch(Batch* batch); + + VideoDataParameter_VideoType video_type_; + cv::VideoCapture cap_; + + int skip_frames_; + + int total_frames_; + int processed_frames_; + vector top_shape_; +}; + +} // namespace caffe +#endif // USE_OPENCV + +#endif // CAFFE_VIDEO_DATA_LAYER_HPP_ diff --git a/include/caffe/layers/window_data_layer.hpp b/include/caffe/layers/window_data_layer.hpp index 35f41b80e63..b30ade733b9 100644 --- a/include/caffe/layers/window_data_layer.hpp +++ b/include/caffe/layers/window_data_layer.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_WINDOW_DATA_LAYER_HPP_ #define CAFFE_WINDOW_DATA_LAYER_HPP_ diff --git a/include/caffe/mkl_memory.hpp b/include/caffe/mkl_memory.hpp new file mode 100644 index 00000000000..3d9de83735f --- /dev/null +++ b/include/caffe/mkl_memory.hpp @@ -0,0 +1,166 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CAFFE_MKL_MEMORY_HPP_ +#define CAFFE_MKL_MEMORY_HPP_ + +#include +#include + +#include "boost/enable_shared_from_this.hpp" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/util/math_functions.hpp" +#include "mkl_dnn_cppwrapper.h" + +namespace caffe { +template +struct MKLMemoryDescriptorBase : PrvMemDescr, + boost::enable_shared_from_this > { + MKLMemoryDescriptorBase() : layout_usr(NULL), layout_int(NULL), + convert_to_int(NULL), convert_from_int(NULL), convert_prv2prv(NULL), + name("UNKNOWN"), internal_ptr(NULL) {} + ~MKLMemoryDescriptorBase() { + dnnLayoutDelete(layout_usr); + dnnLayoutDelete(layout_int); + +#ifdef USE_MLSL + if (mn::is_multinode()) { + if (internal_ptr != NULL) { + mn::free((void*)internal_ptr); + internal_ptr = NULL; + } + } else { +#endif /* !USE_MLSL */ + dnnReleaseBuffer(internal_ptr); +#ifdef USE_MLSL + } +#endif /* USE_MLSL */ + + dnnDelete(convert_to_int); + dnnDelete(convert_from_int); + dnnDelete(convert_prv2prv); + } + + shared_ptr > get_shared_ptr() { + return this->shared_from_this(); + } + + dnnLayout_t layout_usr; + dnnLayout_t layout_int; + dnnPrimitive_t convert_to_int; + dnnPrimitive_t convert_from_int; + dnnPrimitive_t convert_prv2prv; + shared_ptr > descr_prv2prv_conversion; + + std::string name; // for debugging purposes + void allocate() { + if (internal_ptr == NULL) { + +#ifdef USE_MLSL + if (mn::is_multinode()) { + internal_ptr = (Dtype*)mn::alloc(prv_size(), 64); + if (internal_ptr == NULL) + LOG(FATAL) << "internal_ptr is NULL after MLSL::Alloc"; + } else { +#endif /* !USE_MLSL */ + int status = dnnAllocateBuffer( + reinterpret_cast(&internal_ptr), layout_int); + CHECK_EQ(status, E_SUCCESS) + << "Failed internal_ptr memory allocation with status " + << status << "\n"; +#ifdef USE_MLSL + } +#endif /* USE_MLSL */ + + caffe_set(prv_count(), Dtype(0), internal_ptr); + } + } + virtual void* prv_ptr() { + if (internal_ptr == NULL) + allocate(); + return internal_ptr; + } + inline bool 
conversion_needed() { return (convert_to_int != NULL);} + void create_conversions(); + void create_internal_layout(const dnnPrimitive_t primitive, + dnnResourceType_t type); + void create_user_layout(size_t dimension, const size_t size[], + const size_t strides[], + bool create_conversion_if_possible = true); + void create_layouts( + const dnnPrimitive_t primitive, dnnResourceType_t type, + size_t dimension, const size_t size[], const size_t strides[]); + + void remove_internal_layout(); + void remove_user_layout(); + + virtual PrvDescrType get_descr_type() {return PRV_DESCR_MKL2017;} + virtual size_t prv_size() { + return dnnLayoutGetMemorySize(layout_int); + } + virtual size_t prv_count() { + return dnnLayoutGetMemorySize(layout_int) / sizeof(Dtype); + } + virtual void convert_from_prv(void* cpu_ptr); + virtual void convert_to_prv(void* cpu_ptr); + virtual bool layout_compare(shared_ptr other); + virtual void convert_from_other(shared_ptr other); + protected: + void remove_conversions(); + protected: + Dtype* internal_ptr; +}; + +template +struct MKLMemoryDescriptor : MKLMemoryDescriptorBase { + // The last get_converted_prv() argument is a hack for reusing + // in backward a conversion done already in the forward direction. + Dtype* get_converted_prv(Blob * blob, bool set_prv_ptr, + MKLMemoryDescriptor* converted_in_fwd = NULL); +}; + +template +struct MKLData : MKLMemoryDescriptor +{}; + +template +struct MKLDiff : MKLMemoryDescriptor +{}; + +} // namespace caffe +#endif // #ifndef CAFFE_MKL_MEMORY_HPP_ diff --git a/include/caffe/mkldnn_base.hpp b/include/caffe/mkldnn_base.hpp new file mode 100644 index 00000000000..f68d590d413 --- /dev/null +++ b/include/caffe/mkldnn_base.hpp @@ -0,0 +1,220 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. 
+ +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CAFFE_MKLDNN_BASE_HPP_ +#define CAFFE_MKLDNN_BASE_HPP_ + +#include +#include + +#include "boost/enable_shared_from_this.hpp" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/util/math_functions.hpp" +#include "mkldnn.hpp" + +using namespace mkldnn; + +namespace caffe { + +// ===== CpuEngine ======================================= +// cpu_engine singleton +class CpuEngine +{ +public: + static CpuEngine & Instance() + { + // I's thread-safe in C++11. + static CpuEngine myInstance; + return myInstance; + } + CpuEngine(CpuEngine const&) = delete; // Copy construct + CpuEngine(CpuEngine&&) = delete; // Move construct + CpuEngine& operator=(CpuEngine const&) = delete; // Copy assign + CpuEngine& operator=(CpuEngine &&) = delete; // Move assign + + engine & get_engine() { return _cpu_engine; } +protected: + CpuEngine() : _cpu_engine(engine::cpu, 0) {} +// CpuEngine() : _cpu_engine(engine::cpu_lazy, 0) {} + ~CpuEngine() {} +private: + engine _cpu_engine; +}; + +#ifdef FPGA_ENABLED +// ===== FPGAEngine ======================================= +// fpga_engine singleton +class FPGAEngine +{ +public: + static FPGAEngine & Instance() + { + // I's thread-safe in C++11. + static FPGAEngine myInstance; + return myInstance; + } + FPGAEngine(FPGAEngine const&) = delete; // Copy construct + FPGAEngine(FPGAEngine&&) = delete; // Move construct + FPGAEngine& operator=(FPGAEngine const&) = delete; // Copy assign + FPGAEngine& operator=(FPGAEngine &&) = delete; // Move assign + + engine & get_engine() { return _fpga_engine; } +protected: + FPGAEngine() : _fpga_engine(engine::fpga, 0) {} + ~FPGAEngine() {} +private: + engine _fpga_engine; +}; +#endif // #ifdef FPGA_ENABLED + +#ifdef DLA_ENABLED +// ===== Deep Learning Accelerator ======================================= +class DLAEngine +{ +public: + static DLAEngine & Instance() + { + // I's thread-safe in C++11. 
+ static DLAEngine myInstance; + return myInstance; + } + DLAEngine(DLAEngine const&) = delete; // Copy construct + DLAEngine(DLAEngine&&) = delete; // Move construct + DLAEngine& operator=(DLAEngine const&) = delete; // Copy assign + DLAEngine& operator=(DLAEngine &&) = delete; // Move assign + + engine & get_engine() { return _dla_engine; } +protected: + DLAEngine() : _dla_engine(engine::dla, 0) {} + ~DLAEngine() {} +private: + engine _dla_engine; +}; + + +#endif // #ifdef DLA_ENABLED + +// ===== MKLDNNStream ======================================= +class MKLDNNStream { +public: + explicit MKLDNNStream():_ready(false) { prepare(); } + virtual ~MKLDNNStream() {} + MKLDNNStream &submit(std::vector primitives) { _stream->submit(primitives); return *this; } + bool wait(bool block = true) { + VLOG(1) << typeid(*this).name()<< " : " << __FUNCTION__ << " : wait stream "; + _ready = false; + bool res = _stream->wait(block); + VLOG(1) << typeid(*this).name()<< " : " << __FUNCTION__ << " : end of stream waiting "; + return res; + } + bool ready() { return _ready; } + void prepare() { + if(_ready == false) { + // stream just created or already executed + // !! TODO: change below if stream will have method to reset its state + VLOG(1) << typeid(*this).name()<< " : " << __FUNCTION__ << " : create new stream"; +// _stream.reset(new stream(stream::kind::any)); + _stream.reset(new stream(stream::kind::eager)); + // TODO: Enable when Unit tests work for this one + //_stream.reset(new stream(stream::kind::lazy)); + } + _ready = true; + } +protected: +private: + bool _ready; + shared_ptr _stream; +}; + +// ===== StreamHolder ======================================= +// singleton +class StreamHolder +{ +public: + static StreamHolder & Instance() + { + // I's thread-safe in C++11. 
+ static StreamHolder myInstance; + return myInstance; + } + StreamHolder(StreamHolder const&) = delete; // Copy construct + StreamHolder(StreamHolder&&) = delete; // Move construct + StreamHolder& operator=(StreamHolder const&) = delete; // Copy assign + StreamHolder& operator=(StreamHolder &&) = delete; // Move assign + + shared_ptr get_stream(); + shared_ptr current_stream() { return _current_stream; } + void prepare_mkldnn_stream(shared_ptr mkldnn_stream) { + _current_stream = mkldnn_stream; + _current_stream->prepare(); + } +protected: + StreamHolder() : _current_stream() {} + ~StreamHolder() {} +private: + shared_ptr _current_stream; +}; + +// ===== MKLDNNLayer ======================================= +template +class MKLDNNLayer { +public: + explicit MKLDNNLayer() {} + virtual ~MKLDNNLayer() {} +}; + +// ===== MKLDNNPrimitive ======================================= +template +class MKLDNNPrimitive { +public: + explicit MKLDNNPrimitive():aprimitive(), mkldnn_stream() {} + + //API for initializing with shared_ptr + MKLDNNPrimitive(shared_ptr aprimitive_input) {this->aprimitive = aprimitive_input;} + + virtual ~MKLDNNPrimitive() {} + void reset(primitive* pprimitive) { this->aprimitive.reset(pprimitive);} + shared_ptr aprimitive; + shared_ptr mkldnn_stream; + shared_ptr get_mkldnn_stream(); + shared_ptr submit(); +private: +}; + +} // namespace caffe +#endif // #ifndef CAFFE_MKLDNN_BASE_HPP_ diff --git a/include/caffe/mkldnn_memory.hpp b/include/caffe/mkldnn_memory.hpp new file mode 100644 index 00000000000..a59ce6e12d9 --- /dev/null +++ b/include/caffe/mkldnn_memory.hpp @@ -0,0 +1,236 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CAFFE_MKLDNN_MEMORY_HPP_ +#define CAFFE_MKLDNN_MEMORY_HPP_ + +#include +#include + +#include "boost/enable_shared_from_this.hpp" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/util/math_functions.hpp" +#include "mkldnn.hpp" +#include "mkldnn_base.hpp" + +using namespace mkldnn; + +namespace caffe { + +// ===== MKLDNNMemoryDescriptorBase ======================================= +template +class MKLDNNMemoryDescriptorBase : public PrvMemDescr + , public boost::enable_shared_from_this > +{ +public: + MKLDNNMemoryDescriptorBase(shared_ptr usr_memory_pd + , shared_ptr prv_memory_pd + , Blob* blob, MKLDNNLayer* mkldnn_layer); + + ~MKLDNNMemoryDescriptorBase() {} + // ---- PrvMemDescr virtual functions ----- + virtual void convert_from_other(shared_ptr other); + virtual bool layout_compare(shared_ptr other); + virtual PrvDescrType get_descr_type() {return PRV_DESCR_MKLDNN;} + + // TODO: assuming size/sizeof = count may be not correct + virtual size_t prv_count() { return prv_size()/sizeof(Dtype); } + + virtual size_t prv_size() { return _prv_memory_pd->get_size(); } + // --------------------------------------- + shared_ptr > get_shared_ptr() { + return this->shared_from_this(); + } + shared_ptr prv_memory_pd() const { + return _prv_memory_pd; + } + shared_ptr usr_memory_pd() const { + return _usr_memory_pd; + } + inline bool conversion_needed() const { return (_reorder_usr2prv_pd != NULL || _reorder_extprv2prv_pd != NULL); } + virtual void* prv_ptr() { return _internal_ptr; } + + shared_ptr get_prv_memory() + { + if (_prv_memory == NULL) allocate(); + return _prv_memory; + } + Dtype* get_prv_ptr() { + if (_prv_memory == NULL) allocate(); + return _internal_ptr; + } + shared_ptr reorder_usr2prv() { return _reorder_usr2prv.aprimitive; } + shared_ptr reorder_prv2usr() { return _reorder_prv2usr.aprimitive; } + shared_ptr reorder_extprv2prv() { return _reorder_extprv2prv.aprimitive; } + + void set_mkldnn_layer(MKLDNNLayer* layer) { 
_mkldnn_layer = layer; } + MKLDNNLayer* mkldnn_layer() const { return _mkldnn_layer; } + + std::string name; // for debugging purposes +protected: + void check_usr_with_prv_descriptors(); + void set_prv_memory(shared_ptr memory) + { + _prv_memory = memory; + _internal_ptr = (Dtype *)(_prv_memory->get_data_handle()); + } + + void allocate() { + if (_prv_memory == NULL) { +#ifdef USE_MLSL + if (mn::is_multinode()) { + auto mlsl_free = [](char* p) { mn::free((void*)p); }; + _mlsl_memory.reset( + (char*)mn::alloc(_prv_memory_pd->get_size(), 64), mlsl_free); + _prv_memory = shared_ptr( + new memory(*_prv_memory_pd, (void*)_mlsl_memory.get())); + } else { +#endif + _prv_memory = shared_ptr(new memory(*_prv_memory_pd)); +#ifdef USE_MLSL + } +#endif + _internal_ptr = (Dtype *)(_prv_memory->get_data_handle()); + // TODO: may need initialize memory by 0 + } + } + void set_prv_memory_pd(shared_ptr memory_pd) { + _prv_memory_pd = memory_pd; + if (_prv_memory_pd && _usr_memory_pd) { + check_usr_with_prv_descriptors(); + this->create_reorder_descriptors(); + } + } + void set_extprv_memory_pd(shared_ptr memory_pd) { + _extprv_memory_pd = memory_pd; + if (_prv_memory_pd && _usr_memory_pd) { + check_usr_with_prv_descriptors(); + this->create_reorder_descriptors(); + } + } + void set_usr_memory_pd(shared_ptr memory_pd) { + _usr_memory_pd = memory_pd; + if (_prv_memory_pd && _usr_memory_pd) { + check_usr_with_prv_descriptors(); + this->create_reorder_descriptors(); + } + } + void create_reorder_descriptors(); + + shared_ptr _usr_memory_pd; + shared_ptr _prv_memory_pd; + shared_ptr _extprv_memory_pd; + shared_ptr _reorder_usr2prv_pd; + shared_ptr _reorder_prv2usr_pd; + shared_ptr _reorder_extprv2prv_pd; + MKLDNNPrimitive _reorder_usr2prv; + MKLDNNPrimitive _reorder_prv2usr; + MKLDNNPrimitive _reorder_extprv2prv; + shared_ptr _prv_memory; + Dtype* _internal_ptr; + shared_ptr _usr_memory; + void* _cpu_ptr; + + MKLDNNLayer* _mkldnn_layer; + Blob* _blob; +#ifdef USE_MLSL + shared_ptr 
_mlsl_memory; +#endif +}; + +template +class MKLDNNMemoryDescriptor : public MKLDNNMemoryDescriptorBase { +public: + MKLDNNMemoryDescriptor(shared_ptr usr_memory_pd + , shared_ptr prv_memory_pd + , Blob* blob, MKLDNNLayer* mkldnn_layer); + + virtual void convert_from_prv(void* cpu_ptr); + virtual void convert_to_prv(void* cpu_ptr); + virtual void convert_from_extprv(shared_ptr aprimitive); + virtual bool on_to_cpu(); + + virtual void create_reorder_from_prv(void* cpu_ptr); + virtual void create_reorder_to_prv(void* cpu_ptr); + virtual void create_reorder_from_extprv(shared_ptr aprimitive); + + // The last get_blob_data_ptr() argument is a hack for reusing + // in backward a conversion done already in the forward direction. + shared_ptr get_blob_prv_primitive(Blob * blob, bool set_prv_ptr, bool convert = true, + MKLDNNMemoryDescriptor* converted_in_fwd = NULL); + + void sync_before_read(); + void sync_before_write(bool inplace = false); + + shared_ptr create_input(Blob * blob, bool set_prv_ptr); + shared_ptr create_output_memory(Blob * blob, bool inplace = false); + shared_ptr create_input(bool set_prv_ptr); + shared_ptr create_output_memory(bool inplace = false); + + void set_mkldnn_primitive(MKLDNNPrimitive& mprimitive) { CHECK(mprimitive.aprimitive); _mkldnn_primitive = mprimitive; } + MKLDNNPrimitive& mkldnn_primitive() { return _mkldnn_primitive; } + shared_ptr aprimitive() const { return _mkldnn_primitive.aprimitive; } +private: + MKLDNNPrimitive _mkldnn_primitive; +}; + +template +class MKLDNNData : public MKLDNNMemoryDescriptor +{ +public: + MKLDNNData(shared_ptr usr_memory_pd + , shared_ptr prv_memory_pd + , Blob* blob, MKLDNNLayer* mkldnn_layer) + : MKLDNNMemoryDescriptor(usr_memory_pd, prv_memory_pd, blob, mkldnn_layer) {} +}; + +template +class MKLDNNDiff : public MKLDNNMemoryDescriptor +{ +public: + MKLDNNDiff(shared_ptr usr_memory_pd + , shared_ptr prv_memory_pd + , Blob* blob, MKLDNNLayer* mkldnn_layer) + : MKLDNNMemoryDescriptor(usr_memory_pd, 
prv_memory_pd, blob, mkldnn_layer ) {} +}; + +template +shared_ptr > get_mkldnn_prv_descriptor(Blob* blob); + +} // namespace caffe +#endif // #ifndef CAFFE_MKLDNN_MEMORY_HPP_ diff --git a/include/caffe/multinode/apply_mn_param.hpp b/include/caffe/multinode/apply_mn_param.hpp new file mode 100644 index 00000000000..1613909de37 --- /dev/null +++ b/include/caffe/multinode/apply_mn_param.hpp @@ -0,0 +1,70 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef _CAFFE_MULTINODE_APPLY_MN_PARAM_HPP_ +#define _CAFFE_MULTINODE_APPLY_MN_PARAM_HPP_ + +#ifdef USE_MLSL + +#include "caffe/proto/caffe.pb.h" +#include "caffe/net.hpp" + +namespace caffe { +/** + * @brief Apply the multinode parameters to the NetParameter + * inserting mn_activation layer if needed. + */ +template +void ApplyMultinodeParams(const NetParameter& param, + NetParameter* param_with_mn); + +/** + * @brief Copy per-layer parameters from a Net object. + */ +template +void CopyMultinodeParamsFromNet(const Net *net, NetParameter *param); + +/** + * @brief Revert all the multinode changes from NetParameter + */ +template +void RevertMultinodeParams(NetParameter* param, bool write_diff = false); +} + +#endif // USE_MLSL + +#endif // _CAFFE_MULTINODE_APPLY_MN_PARAM_HPP_ diff --git a/include/caffe/multinode/mlsl.hpp b/include/caffe/multinode/mlsl.hpp new file mode 100644 index 00000000000..b0d3d13d611 --- /dev/null +++ b/include/caffe/multinode/mlsl.hpp @@ -0,0 +1,414 @@ +/* + * All modification made by Intel Corporation: © 2016 Intel Corporation + * + * All contributions by the University of California: + * Copyright (c) 2014, 2015, The Regents of the University of California (Regents) + * All rights reserved. + * + * All other contributions: + * Copyright (c) 2014, 2015, the respective contributors + * All rights reserved. 
+ * For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + * + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef CAFFE_MLSL_HPP_ +#define CAFFE_MLSL_HPP_ + +#ifdef USE_MLSL + +#include +#include "caffe/common.hpp" + +namespace caffe { + namespace mn { + +#define MLSL_DEFAULT_COLOR -1 + + void init(int* argc, char** argv[]); + + inline void free(void *addr) { + return MLSL::Environment::GetEnv().Free(addr); + } + + inline void* alloc(size_t size, int alignment) { + return MLSL::Environment::GetEnv().Alloc(size, alignment); + } + + inline int get_node_id() { + return MLSL::Environment::GetEnv().GetProcessIdx(); + } + + inline int get_nodes_count() { + return MLSL::Environment::GetEnv().GetProcessCount(); + } + + inline int get_group_id(int data_parts, int model_parts) { + int node_id = get_node_id(); + int num_nodes = get_nodes_count(); + return (node_id % (num_nodes / data_parts)) / model_parts; + } + + inline bool is_multinode() { + static bool multinode{ get_nodes_count() > 1 }; + return multinode; + } + + inline bool is_root() { + return mn::get_node_id() == 0; + } + + namespace detail { + template + inline MLSL::DataType dtype(); + + template <> + inline MLSL::DataType dtype() { + return MLSL::DataType::DT_DOUBLE; + } + template <> + inline MLSL::DataType dtype() { + return MLSL::DataType::DT_FLOAT; + } + template <> + inline MLSL::DataType dtype() { + return MLSL::DataType::DT_DOUBLE; + } + } + + class Distribution { + public: + Distribution() = delete; + Distribution & operator = (const Distribution &) = delete; + Distribution(const Distribution &) = delete; + + Distribution(int dataParts, int modelParts, int dataColor = MLSL_DEFAULT_COLOR, int modelColor = MLSL_DEFAULT_COLOR, + int dataColorMax = MLSL_DEFAULT_COLOR, int modelColorMax = MLSL_DEFAULT_COLOR) : + data_parts_(dataParts), model_parts_(modelParts), + data_color_(dataColor), model_color_(modelColor), + data_color_max_(dataColorMax), model_color_max_(modelColorMax) { + if (dataColor == MLSL_DEFAULT_COLOR || modelColor == MLSL_DEFAULT_COLOR) { + distrib_ = 
MLSL::Environment::GetEnv().CreateDistribution(dataParts, modelParts); + } else { + distrib_ = MLSL::Environment::GetEnv().CreateDistributionWithColors(dataColor, modelColor); + } + } + ~Distribution() { + MLSL::Environment::GetEnv().DeleteDistribution(distrib_); + } + operator MLSL::Distribution * () { + return distrib_; + } + template + void reduce(Dtype *sendBuffer, Dtype *recvBuffer, size_t count, size_t rootIdx = 0) { + if (skip_comm(Gtype)) return; + MLSL::CommReq *rqts = distrib_->Reduce((void *)sendBuffer, (void*)recvBuffer, count, detail::dtype(), Rtype, rootIdx, Gtype); + MLSL::Environment::GetEnv().Wait(rqts); + } + template + void bcast(Dtype *buffer, size_t count, int rootId = 0) { + if (skip_comm(Gtype)) return; + MLSL::CommReq *rqts = distrib_->Bcast((void *)buffer, count, detail::dtype(), rootId, Gtype); + MLSL::Environment::GetEnv().Wait(rqts); + } + template + void allreduce(Dtype *sendBuffer, Dtype *recvBuffer, size_t count) { + if (skip_comm(Gtype)) return; + MLSL::CommReq *rqts = distrib_->AllReduce((void *)sendBuffer, (void *)recvBuffer, count, detail::dtype(), Rtype, Gtype); + MLSL::Environment::GetEnv().Wait(rqts); + } + template + void allreduce(Dtype *buffer, size_t count) { + if (skip_comm(Gtype)) return; + MLSL::CommReq *rqts = distrib_->AllReduce((void *)buffer, (void *)buffer, count, detail::dtype(), Rtype, Gtype); + MLSL::Environment::GetEnv().Wait(rqts); + } + template + void gather(const Dtype *sendBuffer, size_t count, Dtype *recvBuffer, size_t rootIdx = 0) { + if (skip_comm(Gtype)) return; + MLSL::CommReq *rqts = distrib_->Gather((void *)sendBuffer, count, (void *)recvBuffer, detail::dtype(), rootIdx, Gtype); + MLSL::Environment::GetEnv().Wait(rqts); + } + template + void scatter(Dtype *sendBuffer, Dtype *recvBuffer, size_t count, size_t rootIdx = 0) { + if (skip_comm(Gtype)) return; + MLSL::CommReq *rqts = distrib_->Scatter((void *)sendBuffer, (void *)recvBuffer, count, detail::dtype(), rootIdx, Gtype); + 
MLSL::Environment::GetEnv().Wait(rqts); + } + template + void reducescatter(Dtype *sendBuffer, Dtype *recvBuffer, size_t count) { + if (skip_comm(Gtype)) return; + MLSL::CommReq *rqts = distrib_->ReduceScatter(sendBuffer, recvBuffer, count, detail::dtype(), Rtype, Gtype); + MLSL::Environment::GetEnv().Wait(rqts); + } + template + void allgather(Dtype *sendBuffer, size_t count, Dtype *recvBuffer) { + if (skip_comm(Gtype)) return; + // TODO: support allgather from MLSL + gather(sendBuffer, count, recvBuffer); + size_t bcast_count = count; + switch (Gtype) { + case MLSL::GT_MODEL: + bcast_count *= model_parts_; + break; + case MLSL::GT_DATA: + bcast_count *= data_parts_; + break; + case MLSL::GT_GLOBAL: + bcast_count *= model_parts_ * data_parts_; + break; + default: + NOT_IMPLEMENTED; + } + bcast(recvBuffer, bcast_count); + } + template + void barrier() { + if (skip_comm(Gtype)) return; + distrib_->Barrier(Gtype); + } + inline int get_data_parts() { + return data_parts_; + } + inline int get_model_parts() { + return model_parts_; + } + inline int get_group_id() { + return mn::get_group_id(data_parts_, model_parts_); + } + private: + inline bool skip_comm(MLSL::GroupType Gtype) { + if (Gtype == MLSL::GT_DATA && data_color_max_ != MLSL_DEFAULT_COLOR) { + return data_color_ > data_color_max_; + } else if (Gtype == MLSL::GT_MODEL && model_color_max_ != MLSL_DEFAULT_COLOR) { + return model_color_ > model_color_max_; + } else return get_group_id() > 0; + } + + MLSL::Distribution *distrib_{ nullptr }; + int data_parts_; + int model_parts_; + int data_color_; + int model_color_; + int data_color_max_; + int model_color_max_; + }; + + inline void GetCanonicalMnParam(int &num_nodes, int &model_parts) { + if (num_nodes == 0) num_nodes = mn::get_nodes_count(); + if (model_parts == 0 || model_parts > num_nodes) model_parts = num_nodes; + } + + shared_ptr create_distrib( + int dataParts, int modelParts, int dataColor = MLSL_DEFAULT_COLOR, int modelColor = MLSL_DEFAULT_COLOR, + int 
dataColorMax = MLSL_DEFAULT_COLOR, int modelColorMax = MLSL_DEFAULT_COLOR); + Distribution * get_distrib(int dataParts, int modelParts); + Distribution * get_distrib(); + + template + inline void allreduce(Dtype *sendBuffer, Dtype *recvBuffer, size_t count) { + get_distrib()->allreduce(sendBuffer, recvBuffer, count); + } + template + inline void allreduce(Dtype *buffer, size_t count) { + get_distrib()->allreduce(buffer, count); + } + template + inline void reduce(Dtype *buffer, size_t count, int rootId = 0) { + get_distrib()->reduce(buffer, count, rootId); + } + template + void bcast(Dtype *buffer, size_t count, int rootId = 0) { + get_distrib()->bcast(buffer, count, rootId); + } + template + inline void gather(const Dtype *sendBuffer, size_t count, Dtype *recvBuffer, int rootId = 0) { + get_distrib()->gather(sendBuffer, count, recvBuffer, rootId); + } + template + inline void scatter(Dtype *sendBuffer, Dtype *recvBuffer, size_t count, int rootId = 0) { + get_distrib()->scatter(sendBuffer, recvBuffer, count, rootId); + } + + /* */ + class Session { + public: + Session(MLSL::PhaseType phaseType) + : session_{ MLSL::Environment::GetEnv().CreateSession(phaseType) } { + } + ~Session() { + session_->RemoveOperations(); + MLSL::Environment::GetEnv().DeleteSession(session_); + } + operator MLSL::Session * () { + return session_; + } + void commit() { + session_->Commit(); + } + void set_global_minibatch_size(int global_minibatch_size) { + session_->SetGlobalMinibatchSize(global_minibatch_size); + } + int get_global_minibatch_size() { + return session_->GetGlobalMinibatchSize(); + } + MLSL::Operation * add_operation(MLSL::OperationRegInfo *opRegInfo, MLSL::Distribution *distrib = nullptr) { + return session_->GetOperation(session_->AddOperation(opRegInfo, distrib)); + } + void delete_operation_reg_info(MLSL::OperationRegInfo *opRegInfo) { + session_->DeleteOperationRegInfo(opRegInfo); + } + MLSL::OperationRegInfo * create_operation_reg_info(MLSL::OpType opType) { + return 
session_->CreateOperationRegInfo(opType); + } + size_t get_operation_count() { + return session_->GetOperationCount(); + } + const char* get_operation_name(size_t idx) { + return session_->GetOperation(idx)->GetName(); + } + MLSL::Statistics * get_stats() { + return session_->GetStats(); + } + private: + MLSL::Session *session_{ nullptr }; + }; + + namespace train { + + inline Session & get_session() { + static Session session{ MLSL::PT_TRAIN }; + return session; + } + + inline MLSL::Operation * add_operation(MLSL::OperationRegInfo* opRegInfo, MLSL::Distribution* distrib = *get_distrib()) { + return get_session().add_operation(opRegInfo, distrib); + } + + inline int get_global_minibatch_size() { + return get_session().get_global_minibatch_size(); + } + + inline void set_global_minibatch_size(int global_minibatch_size) { + get_session().set_global_minibatch_size(global_minibatch_size); + } + + inline void commit() { + get_session().commit(); + } + + namespace stats { + inline void stop() { + get_session().get_stats()->Stop(); + } + inline void print() { + get_session().get_stats()->Print(); + } + inline void reset() { + get_session().get_stats()->Reset(); + } + inline void start() { + get_session().get_stats()->Start(); + } + inline bool is_started() { + return get_session().get_stats()->IsStarted(); + } + inline unsigned long long get_isolation_comm_time(size_t idx) { + return get_session().get_stats()->GetIsolationCommCycles(idx); + } + inline size_t get_comm_size(size_t idx) { + return get_session().get_stats()->GetCommSize(idx); + } + inline unsigned long long get_comm_time(size_t idx) { + return get_session().get_stats()->GetCommCycles(idx); + } + inline unsigned long long get_compute_time(size_t idx) { + return get_session().get_stats()->GetComputeCycles(idx); + } + inline unsigned long long get_total_isolation_comm_time() { + return get_session().get_stats()->GetTotalIsolationCommCycles(); + } + inline size_t get_total_comm_size() { + return 
get_session().get_stats()->GetTotalCommSize(); + } + inline unsigned long long get_total_comm_time() { + return get_session().get_stats()->GetTotalCommCycles(); + } + inline unsigned long long get_total_compute_time() { + return get_session().get_stats()->GetTotalComputeCycles(); + } + + } + } + + class OpRegInfo { + public: + OpRegInfo() = delete; + OpRegInfo & operator = (const OpRegInfo &) = delete; + OpRegInfo(const OpRegInfo &) = delete; + + OpRegInfo(OpRegInfo &&) = default; + OpRegInfo & operator = (OpRegInfo &&) = default; + + explicit OpRegInfo(Session& session, MLSL::OpType opType) + : opRegInfo_{ session.create_operation_reg_info(opType) }, + session_(session) { + } + ~OpRegInfo() { + session_.delete_operation_reg_info(opRegInfo_); + } + operator MLSL::OperationRegInfo * () { + return opRegInfo_; + } + void set_name(std::string name) { + opRegInfo_->SetName(name.c_str()); + } + template + void add_input(int featureMapCount, int featureMapSize) { + opRegInfo_->AddInput(featureMapCount, featureMapSize, detail::dtype()); + } + template + void add_output(int featureMapCount, int featureMapSize) { + opRegInfo_->AddOutput(featureMapCount, featureMapSize, detail::dtype()); + } + template + void add_parameter_set(int kernelCount, int kernelSize, + bool distributedUpdate = false) + { + opRegInfo_->AddParameterSet(kernelCount, kernelSize, detail::dtype(), + distributedUpdate); + } + private: + MLSL::OperationRegInfo *opRegInfo_{ nullptr }; + Session &session_; + }; + + } // namespace mn +} // namespace caffe + +#endif // USE_MLSL + +#endif // CAFFE_MLSL_HPP_ diff --git a/include/caffe/multinode/mn_activation_layer.hpp b/include/caffe/multinode/mn_activation_layer.hpp new file mode 100644 index 00000000000..5ab278186be --- /dev/null +++ b/include/caffe/multinode/mn_activation_layer.hpp @@ -0,0 +1,103 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The 
Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CAFFE_MN_ACTIVATION_LAYER_HPP_ +#define CAFFE_MN_ACTIVATION_LAYER_HPP_ + +#ifdef USE_MLSL + +#include + +#include "caffe/blob.hpp" +#include "caffe/layer.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +template +class MnActivationLayer : public Layer { + public: + explicit MnActivationLayer(const LayerParameter& param) + : Layer(param) {} + virtual void LayerSetUp(const vector*>& bottom, + const vector*>& top); + virtual void Reshape(const vector*>& bottom, + const vector*>& top); + + virtual inline const char* type() const { return "MnActivation"; } + virtual inline int MinBottomBlobs() const { return 1; } + virtual inline int ExactNumTopBlobs() const { return 1; } + + protected: + virtual void Forward_cpu(const vector*>& bottom, + const vector*>& top); + virtual void Forward_gpu(const vector*>& bottom, + const vector*>& top); + + virtual void Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + virtual void Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom); + + virtual bool Bypass(const vector*>& bottom, + const vector*>& top); + + private: + void Pack(const Dtype *src, Dtype *dst, int N, int C, int HW, int numC); + void Unpack(const Dtype *src, int N, int C, int HW, int numC, Dtype *dst); + bool Backward_cpu_fast(const vector*>& top, + const vector*>& bottom); + + int num_nodes_in_; + int num_nodes_out_; + int model_parts_in_; + int model_parts_out_; + int data_parts_in_; + int data_parts_out_; + mn::Distribution *distrib_in_; + mn::Distribution *distrib_out_; + shared_ptr distrib_data_in_out_; + Blob top_reduce_buf_; + Blob bottom_gather_buf_; + Blob bottom_gather_work_buf_; +}; + +} // namespace caffe + +#endif + +#endif // CAFFE_MN_ACTIVATION_LAYER_HPP_ diff --git a/include/caffe/multinode/multi_solver.hpp b/include/caffe/multinode/multi_solver.hpp new file mode 100644 index 00000000000..5d2082821ca --- /dev/null +++ 
b/include/caffe/multinode/multi_solver.hpp @@ -0,0 +1,131 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CAFFE_MLSLSOLVER_HPP_ +#define CAFFE_MLSLSOLVER_HPP_ + +#ifdef USE_MLSL + +#include +#include +#include +#include +#include + +#include "caffe/net.hpp" +#include "caffe/solver.hpp" +#include "caffe/solver_factory.hpp" + +namespace caffe { + +template +class MultiSolver { + public: + explicit MultiSolver(boost::shared_ptr> root_solver) + : root_solver_(root_solver), + iter_size(root_solver_->param().iter_size()) { + root_solver_->set_forward_backward( + boost::bind(&MultiSolver::ForwardBackward, this)); +#ifdef FW_OVERLAP_OPT + Net& net = *root_solver_->net(); + const std::vector>> & layers{ net.layers() }; + layer_finished_flags_.resize(layers.size()); + std::fill(layer_finished_flags_.begin(), layer_finished_flags_.end(), true); +#endif + } + + + // Invoked at specific points during an iteration + class Callback : public Solver::Callback { + protected: + virtual ~Callback() { + } + virtual void on_iter_finished(int layer_id) = 0; + + virtual void on_delwt_wait(int layer_id) = 0; + virtual void apply_updates(int layer_id) = 0; + + template + friend class MultiSolver; + }; + + void add_callback(Callback* value) { + root_solver_->add_callback(value); + callbacks_.push_back(value); + } + + Dtype ForwardBackward(); + + void Solve() { + root_solver_->Solve(); + } + + Net& net() { + return *root_solver_->net(); + } + + const SolverParameter& param() const { + return root_solver_->param(); + } + + boost::shared_ptr> root_solver() { + return root_solver_; + } +#ifdef FW_OVERLAP_OPT + void set_layer_finished_flag(int layer_id, bool flag) { + layer_finished_flags_[layer_id] = flag; + } +#endif + private: + virtual Dtype ForwardBackwardImpl(bool first, bool last); + bool IsSkipWaitGradient(int layer_id); + void WaitAndUpdateGradient(int layer_id); + + protected: + boost::shared_ptr> root_solver_; + int iter_size; + vector callbacks_; +#ifdef FW_OVERLAP_OPT + vector layer_finished_flags_; +#endif +}; + +} // namespace caffe + +#endif // USE_MLSL + +#endif // 
CAFFE_MLSLSOLVER_HPP_ diff --git a/include/caffe/multinode/multi_sync.hpp b/include/caffe/multinode/multi_sync.hpp new file mode 100644 index 00000000000..6300c487672 --- /dev/null +++ b/include/caffe/multinode/multi_sync.hpp @@ -0,0 +1,391 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CAFFE_MULTISYNC_HPP_ +#define CAFFE_MULTISYNC_HPP_ + +#ifdef USE_MLSL + +#include +#include "caffe/solver.hpp" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "caffe/caffe.hpp" +#include "caffe/multinode/mlsl.hpp" +#include "caffe/multinode/multi_solver.hpp" + +namespace caffe { + +#define CAN_USE_PRV_DATA(param) (param->prv_data() && (param->prv_data_count() == param->count())) +#define CAN_USE_PRV_DIFF(param) (param->prv_diff() && (param->prv_diff_count() == param->count())) + + template + class MultiSync : public MultiSolver::Callback { + + boost::shared_ptr> solver; + + vector>> layers; + shared_ptr> net; + const vector *> &net_params; + vector> layer_param_ids; +#ifdef FW_OVERLAP_OPT + vector> param_ids_finished_flags; +#endif + + // layer_id -> blob_id -> cached blob to restore + // statistics + vector>>> cached_stats; + +#ifdef PERFORMANCE_MONITORING + #define STATS_OUTPUT_FILE "mlsl_stats.txt" + + struct StatsIterResult { + unsigned long long isolationCommTime; + unsigned long long commTime; + unsigned long long computeTime; + size_t commSize; + }; + + // Operations[Iteration] + vector> statsIterResult; + + unsigned long long totalIsolationCommTime; + unsigned long long totalCommTime; + unsigned long long totalComputeTime; + size_t totalCommSize; +#endif + + public: + + MultiSync(shared_ptr >); + + virtual ~MultiSync() { + } + + void 
synchronize_parameters() { + LOG(INFO) << "synchronize_params: bcast"; + for (int i = 0; i < layers.size(); i++) { + mn::Distribution &distrib = layers[i]->GetDistribution(); + for (int j = 0; j < layer_param_ids[i].size(); j++) { + int layer_param_id = layer_param_ids[i][j]; + if (CAN_USE_PRV_DATA(net_params[layer_param_id])) { + distrib.bcast( + net_params[layer_param_id]->mutable_prv_data(), + net_params[layer_param_id]->prv_data_count()); + } else { + distrib.bcast( + net_params[layer_param_id]->mutable_cpu_data(), + net_params[layer_param_id]->count()); + } + } + } + } + + void synchronize_statistics() { + cached_stats.resize(layers.size()); + for (int i = 0; i < layers.size(); i++) { + if (string(layers[i]->type()) == "BatchNorm" && + !layers[i]->layer_param().batch_norm_param().use_global_stats()) { + vector>> cached_blobs; + // 3 blobs: mean, variance and scaling factor + for (int j = 0; j < layer_param_ids[i].size() && j < 3; j++) { + shared_ptr> b = shared_ptr>(new Blob()); + Blob *net_param = net_params[layer_param_ids[i][j]]; + b->ReshapeLike(*net_param); + b->CopyFrom(*net_param); + cached_blobs.push_back(b); + mn::Distribution &distrib = layers[i]->GetDistribution(); + distrib.allreduce( + net_param->mutable_cpu_data(), net_param->mutable_cpu_data(), + net_param->count()); + caffe_scal(net_param->count(), 1./distrib.get_data_parts(), + net_param->mutable_cpu_data()); + } + cached_stats[i] = cached_blobs; + } + } + } + + void restore_statistics() { + for (int i = 0; i < layers.size(); i++) { + if (string(layers[i]->type()) == "BatchNorm" && + !layers[i]->layer_param().batch_norm_param().use_global_stats()) { + // 3 blobs: mean, variance and scaling factor + for (int j = 0; j < layer_param_ids[i].size() && j < 3; j++) { + Blob *net_param = net_params[layer_param_ids[i][j]]; + net_param->CopyFrom(*cached_stats[i][j]); + } + } + } + } + + void run() { + LOG(WARNING) << "RUN: " + << "PER LAYER TIMINGS ARE" +#ifdef CAFFE_PER_LAYER_TIMINGS + << " ENABLED" 
+#else + << " DISABLED" +#endif + << ", FORWARD OVERLAP OPTIMIZATION IS" +#ifdef FW_OVERLAP_OPT + << " ENABLED" +#else + << " DISABLED" +#endif + << ", SINGLE DB SPLITTING IS" +#ifdef CAFFE_MLSL_SHUFFLE + << " ENABLED"; +#else + << " DISABLED"; +#endif + + synchronize_parameters(); + mn::train::commit(); + +#ifdef PERFORMANCE_MONITORING + statsIterResult.resize(caffe::mn::train::get_session().get_operation_count()); + caffe::mn::train::stats::start(); +#endif + + solver->add_callback(this); + solver->Solve(); + +#ifdef PERFORMANCE_MONITORING + dump_stats_to_file(); +#endif + } + + void apply_updates(int layer_id) { + std::vector ¶m_ids = layer_param_ids[layer_id]; + for (int i = 0; i < param_ids.size(); ++i) { + solver->root_solver()->ApplyUpdate(param_ids[i]); + } + } + + void on_start() { + DLOG(INFO) << "started iteration " << solver->root_solver()->iter(); + } + + void on_iter_finished(int layer_id) { +#ifdef FW_OVERLAP_OPT + solver->set_layer_finished_flag(layer_id, false); +#endif + + boost::shared_ptr> &layer = layers[layer_id]; + if (layer->layerOp == nullptr) { + return; + } + +#ifdef FW_OVERLAP_OPT + std::fill(param_ids_finished_flags[layer_id].begin(), + param_ids_finished_flags[layer_id].end(), + false); +#endif + + std::vector ¶m_ids = layer_param_ids[layer_id]; + for (int i = 0; i < param_ids.size(); ++i) { + if (!layer->ParamNeedReduce(i)) continue; + if (CAN_USE_PRV_DIFF(net_params[param_ids[i]])) { + layer->layerOp->GetParameterSet(i)->StartGradientComm((void *) net_params[param_ids[i]]->mutable_prv_diff()); + } else { + layer->layerOp->GetParameterSet(i)->StartGradientComm((void *) net_params[param_ids[i]]->mutable_cpu_diff()); + } + } + } + + void on_delwt_wait(int layer_id) { + boost::shared_ptr> &layer = layers[layer_id]; + if (layer->layerOp == nullptr) { +#ifdef FW_OVERLAP_OPT + solver->set_layer_finished_flag(layer_id, true); +#endif + return; + } + + std::vector ¶m_ids = layer_param_ids[layer_id]; + for (int i=0; iParamNeedReduce(i) +#ifdef 
FW_OVERLAP_OPT + || (param_ids_finished_flags[layer_id][i] == true)) { + param_ids_finished_flags[layer_id][i] = true; +#else + ) { +#endif + continue; + } + +#ifdef FW_OVERLAP_OPT + bool is_completed = false; + Dtype *delwt_buf{(Dtype *) layer->layerOp->GetParameterSet(i)->TestGradientComm(&is_completed)}; +#else + Dtype *delwt_buf{(Dtype *) layer->layerOp->GetParameterSet(i)->WaitGradientComm()}; +#endif + if (delwt_buf) { +#ifdef FW_OVERLAP_OPT + assert(is_completed); + param_ids_finished_flags[layer_id][i] = true; +#endif + if (CAN_USE_PRV_DIFF(net_params[param_ids[i]])) { + if (delwt_buf != net_params[param_ids[i]]->prv_diff()) + caffe_copy(net_params[param_ids[i]]->count(), + delwt_buf, + net_params[param_ids[i]]->mutable_prv_diff()); + } else if (delwt_buf != net_params[param_ids[i]]->cpu_diff()) + caffe_copy(net_params[param_ids[i]]->count(), + delwt_buf, + net_params[param_ids[i]]->mutable_cpu_diff()); + } + } + +#ifdef FW_OVERLAP_OPT + int finished_count = std::count(param_ids_finished_flags[layer_id].begin(), + param_ids_finished_flags[layer_id].end(), true); + if (finished_count == param_ids.size()) { + solver->set_layer_finished_flag(layer_id, true); + } +#endif + } + + void on_gradients_ready() { + DLOG(INFO) << "finished iteration " << solver->root_solver()->iter(); + +#ifdef PERFORMANCE_MONITORING + caffe::mn::train::stats::stop(); + + size_t opCount = caffe::mn::train::get_session().get_operation_count(); + + for (size_t opIdx = 0; opIdx < opCount; ++opIdx) { + StatsIterResult iterResult; + + iterResult.isolationCommTime = caffe::mn::train::stats::get_isolation_comm_time(opIdx); + iterResult.commTime = caffe::mn::train::stats::get_comm_time(opIdx); + iterResult.computeTime = caffe::mn::train::stats::get_compute_time(opIdx); + iterResult.commSize = caffe::mn::train::stats::get_comm_size(opIdx); + + statsIterResult[opIdx].push_back(iterResult); + + // Save total values before reset statistics + totalIsolationCommTime = 
caffe::mn::train::stats::get_total_isolation_comm_time(); + totalCommTime = caffe::mn::train::stats::get_total_comm_time(); + totalComputeTime = caffe::mn::train::stats::get_total_compute_time(); + totalCommSize = caffe::mn::train::stats::get_total_comm_size(); + } + + caffe::mn::train::stats::reset(); + caffe::mn::train::stats::start(); +#endif //PERFORMANCE_MONITORING + } + + void on_before_test() { + synchronize_statistics(); + synchronize_parameters(); + } + + void on_after_test() { + restore_statistics(); + } + + void on_before_snapshot() { + synchronize_statistics(); + } + + void on_after_snapshot() { + restore_statistics(); + } + +#ifdef PERFORMANCE_MONITORING + void dump_stats_to_file() { + FILE* outputFile = fopen(STATS_OUTPUT_FILE, "w"); + if(outputFile == NULL) { + LOG(ERROR) << "unable to create file " << STATS_OUTPUT_FILE; + return; + } + + size_t opCount = caffe::mn::train::get_session().get_operation_count(); + + // Write file header + fprintf(outputFile, " MLSL common communication statistics\n\n"); + + fprintf(outputFile, "Total IsolationCommTime: %12llu\n", totalIsolationCommTime); + fprintf(outputFile, "Total CommTime: %12llu\n", totalCommTime); + fprintf(outputFile, "Total ComputeTime: %12llu\n", totalComputeTime); + fprintf(outputFile, "Total CommSize: %12zu\n", totalCommSize); + fprintf(outputFile, "Num operations: %12zu\n\n", opCount); + + fprintf(outputFile, " MLSL detailed communication statistics\n\n"); + + fprintf(outputFile, "Format:\n"); + fprintf(outputFile, " OperationName\n"); + fprintf(outputFile, " Iteration, IsolationCommTime (kCycles), CommTime (kCycles), ComputeTime (kCycles), CommSize (KB)\n"); + + // Write all iteratons for each layer + for (size_t opIdx = 0; opIdx < opCount; ++opIdx) { + fprintf(outputFile, "\n%s\n\n", caffe::mn::train::get_session().get_operation_name(opIdx)); + for (size_t iter = 0; iter < statsIterResult[opIdx].size(); ++iter) { + fprintf(outputFile, "%6zu %11llu %11llu %11llu %8zu\n", + iter+1, + 
statsIterResult[opIdx][iter].isolationCommTime, + statsIterResult[opIdx][iter].commTime, + statsIterResult[opIdx][iter].computeTime, + statsIterResult[opIdx][iter].commSize); + } + } + + fclose(outputFile); + } +#endif //PERFORMANCE_MONITORING + }; + +} // namespace caffe + +#endif /* USE_MLSL */ + +#endif // CAFFE_MULTISYNC_HPP_ diff --git a/include/caffe/net.hpp b/include/caffe/net.hpp index 493bdf294e2..ba47be9867a 100644 --- a/include/caffe/net.hpp +++ b/include/caffe/net.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_NET_HPP_ #define CAFFE_NET_HPP_ @@ -26,7 +63,7 @@ class Net { explicit Net(const NetParameter& param, const Net* root_net = NULL); explicit Net(const string& param_file, Phase phase, const int level = 0, const vector* stages = NULL, - const Net* root_net = NULL); + const Net* root_net = NULL, std::string engine = ""); virtual ~Net() {} /// @brief Initialize a network with a NetParameter. @@ -63,6 +100,7 @@ class Net { * @brief Zeroes out the diffs of all net parameters. * Should be run before Backward. */ + void ClearParamDiffs(int learnable_param_id); void ClearParamDiffs(); /** @@ -178,6 +216,9 @@ class Net { inline const vector*>& learnable_params() const { return learnable_params_; } + + vector get_layer_learnable_param_ids(int layer_id) const; + /// @brief returns the learnable parameter learning rate multipliers inline const vector& params_lr() const { return params_lr_; } inline const vector& has_params_lr() const { return has_params_lr_; } @@ -224,6 +265,51 @@ class Net { */ static void FilterNet(const NetParameter& param, NetParameter* param_filtered); + + /** + * @brief Remove or Replace layers that the user specified should be excluded to increase + * computational performance. 
+ */ + static void CompileNet(const NetParameter& param, + NetParameter* param_compiled); + + /** + * @brief This is rule that analyze layer if it is of type Scale and if that is the case + * and previous layer which serves as input layer to Scale Layer is MKLBatchNorm + * then scale layer can be dropped + */ + // TODO: Make it decent C++ anonymous function etc. + static void CompilationRuleOne(const NetParameter& param, + NetParameter* param_compiled); + /** + * @brief This is rule that analyze layer if it is of type MKLDNNReLU and if that is the case + * and previous layer which serves as input layer to MKLDNNReLU Layer is MKLDNNConvolution + * then MKLDNNReLU layer can be dropped + */ + + + static void CompilationRuleTwo(const NetParameter& param, + NetParameter* param_compiled); + + /** + * @brief This is rule analyze if layer is of type MKLBatchNorm + * and is to perform in place computation + * if positive then make it doing out-ofplace computation + */ + static void CompilationRuleThree(const NetParameter& param, + NetParameter* param_compiled); + + + /** + * @brief If find "Conv--BN--Scale" in current network, merge BN and Scale layer into Convolution + * layers, this optimization only works in caffe TEST phase now. + */ + + static void GetBlobConsumers(std::vector &cnsmer_blobs, + const string& blob_name_to_find, + const NetParameter& param, + int layer_id); + /// @brief return whether NetState state meets NetStateRule rule static bool StateMeetsRule(const NetState& state, const NetStateRule& rule, const string& layer_name); @@ -248,9 +334,13 @@ class Net { void BackwardDebugInfo(const int layer_id); /// @brief Helper for displaying debug info in Update. 
void UpdateDebugInfo(const int param_id); - + bool bn_scale_remove_; + bool bn_scale_merge_; + vector kept_bn_layers_; /// @brief The network name string name_; + /// @brief The engine name + string engine_name_; /// @brief The phase: TRAIN or TEST Phase phase_; /// @brief Individual layers in the net diff --git a/include/caffe/parallel.hpp b/include/caffe/parallel.hpp index 6c496c884e3..1c70b6538f9 100644 --- a/include/caffe/parallel.hpp +++ b/include/caffe/parallel.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_PARALLEL_HPP_ #define CAFFE_PARALLEL_HPP_ @@ -96,7 +133,7 @@ class P2PSync : public GPUParams, public Solver::Callback, void Run(const vector& gpus); void Prepare(const vector& gpus, vector > >* syncs); - inline const int initial_iter() const { return initial_iter_; } + inline int initial_iter() const { return initial_iter_; } protected: void on_start(); diff --git a/include/caffe/serialization/ProtoSerialize.hpp b/include/caffe/serialization/ProtoSerialize.hpp new file mode 100644 index 00000000000..13ba997e0fc --- /dev/null +++ b/include/caffe/serialization/ProtoSerialize.hpp @@ -0,0 +1,59 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CAFFE_SERIALIZATION_PROTOSERIALIZE_HPP_ +#define CAFFE_SERIALIZATION_PROTOSERIALIZE_HPP_ + +#include +#include +#include +#include "caffe/blob.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +bool deserialize(const char* data, + size_t size, + ::google::protobuf::Message* msg); + +string serialize(const ::google::protobuf::Message& msg); + + +} // namespace caffe + +#endif // CAFFE_SERIALIZATION_PROTOSERIALIZE_HPP_ + diff --git a/include/caffe/sgd_solvers.hpp b/include/caffe/sgd_solvers.hpp index 1fc52d87137..a11da89de77 100644 --- a/include/caffe/sgd_solvers.hpp +++ b/include/caffe/sgd_solvers.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_SGD_SOLVERS_HPP_ #define CAFFE_SGD_SOLVERS_HPP_ @@ -25,8 +62,10 @@ class SGDSolver : public Solver { protected: void PreSolve(); + Dtype GetWarmUpLR(int cur_iter, int warmup_iter, Dtype warmup_start_lr); Dtype GetLearningRate(); virtual void ApplyUpdate(); + virtual void ApplyUpdate(int param_id); virtual void Normalize(int param_id); virtual void Regularize(int param_id); virtual void ComputeUpdateValue(int param_id, Dtype rate); @@ -42,6 +81,10 @@ class SGDSolver : public Solver { // of gradients/updates and is not needed in snapshots vector > > history_, update_, temp_; + // loss history for 'plateau' LR policy (should be stored in snapshots) + Dtype minimum_loss_; + int iter_last_event_; + DISABLE_COPY_AND_ASSIGN(SGDSolver); }; diff --git a/include/caffe/solver.hpp b/include/caffe/solver.hpp index 38259edad9f..9b97c3c0b32 100644 --- a/include/caffe/solver.hpp +++ b/include/caffe/solver.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. 
+ +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_SOLVER_HPP_ #define CAFFE_SOLVER_HPP_ #include @@ -7,6 +44,8 @@ #include "caffe/net.hpp" #include "caffe/solver_factory.hpp" +#include "caffe/util/benchmark.hpp" + namespace caffe { /** @@ -19,6 +58,7 @@ namespace caffe { */ namespace SolverAction { enum Enum { + UNKNOWN = -1, NONE = 0, // Take no special action. 
STOP = 1, // Stop training. snapshot_after_train controls whether a // snapshot is created. @@ -57,22 +97,22 @@ class Solver { virtual void Solve(const char* resume_file = NULL); inline void Solve(const string resume_file) { Solve(resume_file.c_str()); } void Step(int iters); + + virtual Dtype ForwardBackward(); + // The Restore method simply dispatches to one of the // RestoreSolverStateFrom___ protected methods. You should implement these // methods to restore the state from the appropriate snapshot type. void Restore(const char* resume_file); - // The Solver::Snapshot function implements the basic snapshotting utility - // that stores the learned net. You should implement the SnapshotSolverState() - // function that produces a SolverState protocol buffer that needs to be - // written to disk together with the learned net. - void Snapshot(); virtual ~Solver() {} inline const SolverParameter& param() const { return param_; } + inline SolverParameter& param() { return param_; } inline shared_ptr > net() { return net_; } inline const vector > >& test_nets() { return test_nets_; } int iter() { return iter_; } + void set_iter(int value) { iter_ = value; } // Invoked at specific points during an iteration class Callback { @@ -80,6 +120,13 @@ class Solver { virtual void on_start() = 0; virtual void on_gradients_ready() = 0; +#ifdef USE_MLSL + virtual void on_before_test() {} + virtual void on_after_test() {} + virtual void on_before_snapshot() {} + virtual void on_after_snapshot() {} +#endif + template friend class Solver; }; @@ -88,21 +135,62 @@ class Solver { callbacks_.push_back(value); } + typedef boost::function ForwardBackwardFunc; + void set_forward_backward(ForwardBackwardFunc func) { + forward_backward_ = func; + } + void CheckSnapshotWritePermissions(); /** * @brief Returns the solver type. 
*/ virtual inline const char* type() const { return ""; } - protected: + // The Solver::Snapshot function implements the basic snapshotting utility + // that stores the learned net. You should implement the SnapshotSolverState() + // function that produces a SolverState protocol buffer that needs to be + // written to disk together with the learned net. + void Snapshot(); + // Make and apply the update value for the current iteration. virtual void ApplyUpdate() = 0; + virtual void ApplyUpdate(int param_id) = 0; + + void TestAll(); + + +#ifdef CAFFE_PER_LAYER_TIMINGS + /* Timers for performance measurements */ + Timer timer; + std::vector forward_time_per_layer; + std::vector backward_time_per_layer; + std::vector update_time_per_layer; +#ifdef USE_MLSL + std::vector startcomm_time_per_layer; + std::vector waitcomm_time_per_layer; +#endif + + std::vector forward_time_per_layer_total; + std::vector backward_time_per_layer_total; + std::vector update_time_per_layer_total; +#ifdef USE_MLSL + std::vector startcomm_time_per_layer_total; + std::vector waitcomm_time_per_layer_total; +#endif + + void InitTimers(); + void ResetTimers(); + void PrintTimers(bool printTotal); +#endif /* CAFFE_PER_LAYER_TIMINGS */ + + protected: string SnapshotFilename(const string extension); string SnapshotToBinaryProto(); string SnapshotToHDF5(); // The test routine - void TestAll(); void Test(const int test_net_id = 0); + void TestClassification(const int test_net_id = 0); + void TestDetection(const int test_net_id = 0); virtual void SnapshotSolverState(const string& model_filename) = 0; virtual void RestoreSolverStateFromHDF5(const string& state_file) = 0; virtual void RestoreSolverStateFromBinaryProto(const string& state_file) = 0; @@ -129,6 +217,8 @@ class Solver { // True iff a request to stop early was received. 
bool requested_early_exit_; + ForwardBackwardFunc forward_backward_; + DISABLE_COPY_AND_ASSIGN(Solver); }; @@ -144,7 +234,8 @@ class WorkerSolver : public Solver { : Solver(param, root_solver) {} protected: - void ApplyUpdate() {} + void ApplyUpdate() { } + void ApplyUpdate(int param_id) { } void SnapshotSolverState(const string& model_filename) { LOG(FATAL) << "Should not be called on worker solver."; } diff --git a/include/caffe/solver_factory.hpp b/include/caffe/solver_factory.hpp index cfff721af40..3010a4b6b3e 100644 --- a/include/caffe/solver_factory.hpp +++ b/include/caffe/solver_factory.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + /** * @brief A solver factory that allows one to register solvers, similar to * layer factory. During runtime, registered solvers could be called by passing diff --git a/include/caffe/syncedmem.hpp b/include/caffe/syncedmem.hpp index 38ee4664028..13c3791dee6 100644 --- a/include/caffe/syncedmem.hpp +++ b/include/caffe/syncedmem.hpp @@ -1,10 +1,55 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_SYNCEDMEM_HPP_ #define CAFFE_SYNCEDMEM_HPP_ #include +#ifdef USE_MKL + #include + #include +#endif + +#include "boost/thread/mutex.hpp" #include "caffe/common.hpp" +#include "caffe/multinode/mlsl.hpp" + namespace caffe { // If CUDA is available and in GPU mode, host memory will be allocated pinned, @@ -20,7 +65,23 @@ inline void CaffeMallocHost(void** ptr, size_t size, bool* use_cuda) { return; } #endif - *ptr = malloc(size); + +#ifdef USE_MLSL + if (mn::is_multinode()) { + *ptr = mn::alloc(size ? size : 1, 64); + } else { +#endif /* !USE_MLSL */ + +#ifdef USE_MKL + *ptr = mkl_malloc(size ? 
size : 1, 64); +#else + *ptr = malloc(size); +#endif + +#ifdef USE_MLSL + } +#endif /* USE_MLSL */ + *use_cuda = false; CHECK(*ptr) << "host allocation of size " << size << " failed"; } @@ -32,9 +93,44 @@ inline void CaffeFreeHost(void* ptr, bool use_cuda) { return; } #endif - free(ptr); + +#ifdef USE_MLSL + if (mn::is_multinode()) { + mn::free(ptr); + } else { +#endif /* !USE_MLSL */ + +#ifdef USE_MKL + mkl_free(ptr); +#else + free(ptr); +#endif + +#ifdef USE_MLSL + } +#endif /* USE_MLSL */ + } +// Base class +struct PrvMemDescr { + virtual ~PrvMemDescr() {} + virtual void convert_from_prv(void* cpu_ptr) = 0; + virtual void convert_to_prv(void* cpu_ptr) = 0; + virtual void convert_from_other(shared_ptr other) = 0; + virtual bool on_to_cpu() { return false; } + virtual void* prv_ptr() = 0; + // returns true for matching layouts + virtual bool layout_compare(shared_ptr other) = 0; + virtual size_t prv_count() = 0; + virtual size_t prv_size() = 0; // TODO: do we need both count() and size()? 
+ // This might help using prv_ptr_ by different accelerators/engines + enum PrvDescrType { + PRV_DESCR_MKL2017, + PRV_DESCR_MKLDNN + }; + virtual PrvDescrType get_descr_type() = 0; +}; /** * @brief Manages memory allocation and synchronization between the host (CPU) @@ -45,12 +141,14 @@ inline void CaffeFreeHost(void* ptr, bool use_cuda) { class SyncedMemory { public: SyncedMemory() - : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(0), head_(UNINITIALIZED), - own_cpu_data_(false), cpu_malloc_use_cuda_(false), own_gpu_data_(false), + : cpu_ptr_(NULL), gpu_ptr_(NULL), + size_(0), head_(UNINITIALIZED), own_cpu_data_(false), + cpu_malloc_use_cuda_(false), own_gpu_data_(false), own_prv_data_(false), gpu_device_(-1) {} explicit SyncedMemory(size_t size) - : cpu_ptr_(NULL), gpu_ptr_(NULL), size_(size), head_(UNINITIALIZED), - own_cpu_data_(false), cpu_malloc_use_cuda_(false), own_gpu_data_(false), + : cpu_ptr_(NULL), gpu_ptr_(NULL), + size_(size), head_(UNINITIALIZED), own_cpu_data_(false), + cpu_malloc_use_cuda_(false), own_gpu_data_(false), own_prv_data_(false), gpu_device_(-1) {} ~SyncedMemory(); const void* cpu_data(); @@ -59,7 +157,15 @@ class SyncedMemory { void set_gpu_data(void* data); void* mutable_cpu_data(); void* mutable_gpu_data(); - enum SyncedHead { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED }; + + const void* cpu_ptr() const { return cpu_ptr_; } + + shared_ptr prv_descriptor_; + void set_prv_descriptor(shared_ptr descriptor, bool same_data); + const void* prv_data(); + void* mutable_prv_data(); + enum SyncedHead { UNINITIALIZED, HEAD_AT_CPU, HEAD_AT_GPU, SYNCED, + HEAD_AT_PRV, SYNCED_PRV}; SyncedHead head() { return head_; } size_t size() { return size_; } @@ -72,12 +178,14 @@ class SyncedMemory { void to_gpu(); void* cpu_ptr_; void* gpu_ptr_; - size_t size_; + const size_t size_; SyncedHead head_; bool own_cpu_data_; bool cpu_malloc_use_cuda_; bool own_gpu_data_; + bool own_prv_data_; int gpu_device_; + boost::mutex mtx; 
DISABLE_COPY_AND_ASSIGN(SyncedMemory); }; // class SyncedMemory diff --git a/include/caffe/test/test_caffe_main.hpp b/include/caffe/test/test_caffe_main.hpp index fc156091476..ecb81768d83 100644 --- a/include/caffe/test/test_caffe_main.hpp +++ b/include/caffe/test/test_caffe_main.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + // The main caffe test code. Your test cpp code should include this hpp // to allow a main function to be compiled into the binary. #ifndef CAFFE_TEST_TEST_CAFFE_MAIN_HPP_ diff --git a/include/caffe/test/test_gradient_check_util.hpp b/include/caffe/test/test_gradient_check_util.hpp index b25a84875ef..2c3753905d0 100644 --- a/include/caffe/test/test_gradient_check_util.hpp +++ b/include/caffe/test/test_gradient_check_util.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_TEST_GRADIENT_CHECK_UTIL_H_ #define CAFFE_TEST_GRADIENT_CHECK_UTIL_H_ diff --git a/include/caffe/training_utils.hpp b/include/caffe/training_utils.hpp new file mode 100644 index 00000000000..e414865e344 --- /dev/null +++ b/include/caffe/training_utils.hpp @@ -0,0 +1,141 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include <string> +#include <vector> + +using caffe::Solver; +using caffe::shared_ptr; +using caffe::string; +using caffe::vector; + +vector<string> get_stages_from_flags(const std::string& stages_flag) { + vector<string> stages; + boost::split(stages, stages_flag, boost::is_any_of(",")); + return stages; +} + +void use_flags(caffe::SolverParameter* solver_param, + const std::string& flag_solver, + const std::string& flag_engine, + const int& flag_level, + const std::string& stages_flag) { + caffe::UpgradeSolverAsNeeded(flag_solver, solver_param); + vector<string> stages = get_stages_from_flags(stages_flag); + + // Override engine if provided in cmd line + if (flag_engine != "") { + solver_param->set_engine(flag_engine); + } + + solver_param->mutable_train_state()->set_level(flag_level); + for (int i = 0; i < stages.size(); i++) { + solver_param->mutable_train_state()->add_stage(stages[i]); + } +} + +int multiphase_train(caffe::MultiPhaseSolverParameter* multi_solver_params, + const std::string& flag_solver, + const std::string& flag_engine, + const int& flag_level, + const std::string& stages_flag) { + LOG(INFO) << "Running multiphase solver."; + caffe::NetParameter solver_phase_net_param; + caffe::NetParameter topology_net_param; + caffe::SolverParameter solver_param; + CHECK(multi_solver_params->params_pair(0).has_solver_params()) + << "Solver parameters should be provided in at least first params pair"; + CHECK(caffe::ReadProtoFromTextFile( + multi_solver_params->params_pair(0).solver_params().net(), + &topology_net_param)) + << "Could not read from net parameter of solver proto file"; + string snapshot_prefix = multi_solver_params-> + params_pair(0).solver_params().snapshot_prefix() + "_phase_"; + + for (int j = 0; j < multi_solver_params->params_pair_size(); j++) { + if (multi_solver_params->params_pair(j).has_solver_params()) { + solver_param = multi_solver_params->params_pair(j).solver_params(); + + if (solver_param.solver_mode() != + caffe::SolverParameter_SolverMode_CPU) { + LOG(ERROR) <<
"CPU mode supported only"; + return -1; + } + } + + if (multi_solver_params->params_pair(j).has_batch_size()) { + for (int i = 0; i < topology_net_param.layer_size(); i++) { + if (topology_net_param.layer(i).type() == "Data") { + topology_net_param.mutable_layer(i)->mutable_data_param()-> + set_batch_size(multi_solver_params->params_pair(j).batch_size()); + break; + } + } + } + + solver_param.set_snapshot_prefix(snapshot_prefix + + boost::lexical_cast<string>(j)); + + solver_param.set_allocated_net_param(&topology_net_param); + solver_param.clear_net(); + + use_flags( + &solver_param, + flag_solver, + flag_engine, + flag_level, + stages_flag); + + shared_ptr<Solver<float> > + solver(caffe::SolverRegistry<float>::CreateSolver(solver_param)); + + topology_net_param = *solver_param.release_net_param(); + + solver->net()->CopyTrainedLayersFrom(solver_phase_net_param); + for (int i = 0; i < solver->test_nets().size(); ++i) { + solver->test_nets()[i]->CopyTrainedLayersFrom(solver_phase_net_param); + } + + solver->Solve(); + solver->net()->ToProto( + &solver_phase_net_param, + solver->param().snapshot_diff()); + } + + LOG(INFO) << "Optimization Done."; + return 0; +} diff --git a/include/caffe/util/bbox_util.hpp b/include/caffe/util/bbox_util.hpp new file mode 100644 index 00000000000..e33e88f6bfd --- /dev/null +++ b/include/caffe/util/bbox_util.hpp @@ -0,0 +1,561 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved.
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef USE_OPENCV +#include <opencv2/core/core.hpp> +#include <opencv2/highgui/highgui.hpp> +#include <opencv2/imgproc/imgproc.hpp> +#endif // USE_OPENCV + +#ifndef CAFFE_UTIL_BBOX_UTIL_H_ +#define CAFFE_UTIL_BBOX_UTIL_H_ + +#include <stdint.h> +#include <cmath> // for std::fabs and std::signbit +#include <map> +#include <string> +#include <utility> +#include <vector> + +#include "glog/logging.h" + +#include "caffe/caffe.hpp" + +namespace caffe { + +typedef EmitConstraint_EmitType EmitType; +typedef PriorBoxParameter_CodeType CodeType; +typedef MultiBoxLossParameter_MatchType MatchType; +typedef MultiBoxLossParameter_LocLossType LocLossType; +typedef MultiBoxLossParameter_ConfLossType ConfLossType; +typedef MultiBoxLossParameter_MiningType MiningType; + +typedef map<int, vector<NormalizedBBox> > LabelBBox; + +// Function used to sort NormalizedBBox, stored in STL container (e.g. vector), +// in ascend order based on the score value. +bool SortBBoxAscend(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2); + +// Function used to sort NormalizedBBox, stored in STL container (e.g. vector), +// in descend order based on the score value. +bool SortBBoxDescend(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2); + +// Function used to sort pair<float, T>, stored in STL container (e.g. vector) +// in ascend order based on the score (first) value. +template <typename T> +bool SortScorePairAscend(const pair<float, T>& pair1, + const pair<float, T>& pair2); + +// Function used to sort pair<float, T>, stored in STL container (e.g. vector) +// in descend order based on the score (first) value. +template <typename T> +bool SortScorePairDescend(const pair<float, T>& pair1, + const pair<float, T>& pair2); + +// Generate unit bbox [0, 0, 1, 1] +NormalizedBBox UnitBBox(); + +// Check if a bbox is cross boundary or not. +bool IsCrossBoundaryBBox(const NormalizedBBox& bbox); + +// Compute the intersection between two bboxes. +void IntersectBBox(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2, + NormalizedBBox* intersect_bbox); + +// Compute bbox size.
+float BBoxSize(const NormalizedBBox& bbox, const bool normalized = true); + +template +Dtype BBoxSize(const Dtype* bbox, const bool normalized = true); + +// Clip the NormalizedBBox such that the range for each corner is [0, 1]. +void ClipBBox(const NormalizedBBox& bbox, NormalizedBBox* clip_bbox); + +// Clip the bbox such that the bbox is within [0, 0; width, height]. +void ClipBBox(const NormalizedBBox& bbox, const float height, const float width, + NormalizedBBox* clip_bbox); + +// Scale the NormalizedBBox w.r.t. height and width. +void ScaleBBox(const NormalizedBBox& bbox, const int height, const int width, + NormalizedBBox* scale_bbox); + +// Output predicted bbox on the actual image. +void OutputBBox(const NormalizedBBox& bbox, const pair& img_size, + const bool has_resize, const ResizeParameter& resize_param, + NormalizedBBox* out_bbox); + +// Locate bbox in the coordinate system that src_bbox sits. +void LocateBBox(const NormalizedBBox& src_bbox, const NormalizedBBox& bbox, + NormalizedBBox* loc_bbox); + +// Project bbox onto the coordinate system defined by src_bbox. +bool ProjectBBox(const NormalizedBBox& src_bbox, const NormalizedBBox& bbox, + NormalizedBBox* proj_bbox); + +// Extrapolate the transformed bbox if height_scale and width_scale is +// explicitly provided, and it is only effective for FIT_SMALL_SIZE case. +void ExtrapolateBBox(const ResizeParameter& param, const int height, + const int width, const NormalizedBBox& crop_bbox, NormalizedBBox* bbox); + +// Compute the jaccard (intersection over union IoU) overlap between two bboxes. +float JaccardOverlap(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2, + const bool normalized = true); + +template +Dtype JaccardOverlap(const Dtype* bbox1, const Dtype* bbox2); + +// Compute the coverage of bbox1 by bbox2. +float BBoxCoverage(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2); + +// Encode a bbox according to a prior bbox. 
+void EncodeBBox(const NormalizedBBox& prior_bbox, + const vector& prior_variance, const CodeType code_type, + const bool encode_variance_in_target, const NormalizedBBox& bbox, + NormalizedBBox* encode_bbox); + +// Check if a bbox meet emit constraint w.r.t. src_bbox. +bool MeetEmitConstraint(const NormalizedBBox& src_bbox, + const NormalizedBBox& bbox, const EmitConstraint& emit_constraint); + +// Decode a bbox according to a prior bbox. +void DecodeBBox(const NormalizedBBox& prior_bbox, + const vector& prior_variance, const CodeType code_type, + const bool variance_encoded_in_target, const bool clip_bbox, + const NormalizedBBox& bbox, NormalizedBBox* decode_bbox); + +// Decode a set of bboxes according to a set of prior bboxes. +void DecodeBBoxes(const vector& prior_bboxes, + const vector >& prior_variances, + const CodeType code_type, const bool variance_encoded_in_target, + const bool clip_bbox, const vector& bboxes, + vector* decode_bboxes); + +// Decode all bboxes in a batch. +void DecodeBBoxesAll(const vector& all_loc_pred, + const vector& prior_bboxes, + const vector >& prior_variances, + const int num, const bool share_location, + const int num_loc_classes, const int background_label_id, + const CodeType code_type, const bool variance_encoded_in_target, + const bool clip, vector* all_decode_bboxes); + +// Match prediction bboxes with ground truth bboxes. +void MatchBBox(const vector& gt, + const vector& pred_bboxes, const int label, + const MatchType match_type, const float overlap_threshold, + const bool ignore_cross_boundary_bbox, + vector* match_indices, vector* match_overlaps); + +// Find matches between prediction bboxes and ground truth bboxes. +// all_loc_preds: stores the location prediction, where each item contains +// location prediction for an image. +// all_gt_bboxes: stores ground truth bboxes for the batch. +// prior_bboxes: stores all the prior bboxes in the format of NormalizedBBox. 
+// prior_variances: stores all the variances needed by prior bboxes. +// multibox_loss_param: stores the parameters for MultiBoxLossLayer. +// all_match_overlaps: stores jaccard overlaps between predictions and gt. +// all_match_indices: stores mapping between predictions and ground truth. +void FindMatches(const vector& all_loc_preds, + const map >& all_gt_bboxes, + const vector& prior_bboxes, + const vector >& prior_variances, + const MultiBoxLossParameter& multibox_loss_param, + vector > >* all_match_overlaps, + vector > >* all_match_indices); + +// Count the number of matches from the match indices. +int CountNumMatches(const vector > >& all_match_indices, + const int num); + +// Mine the hard examples from the batch. +// conf_blob: stores the confidence prediction. +// all_loc_preds: stores the location prediction, where each item contains +// location prediction for an image. +// all_gt_bboxes: stores ground truth bboxes for the batch. +// prior_bboxes: stores all the prior bboxes in the format of NormalizedBBox. +// prior_variances: stores all the variances needed by prior bboxes. +// all_match_overlaps: stores jaccard overlap between predictions and gt. +// multibox_loss_param: stores the parameters for MultiBoxLossLayer. +// all_match_indices: stores mapping between predictions and ground truth. +// all_loc_loss: stores the confidence loss per location for each image. +template +void MineHardExamples(const Blob& conf_blob, + const vector& all_loc_preds, + const map >& all_gt_bboxes, + const vector& prior_bboxes, + const vector >& prior_variances, + const vector > >& all_match_overlaps, + const MultiBoxLossParameter& multibox_loss_param, + int* num_matches, int* num_negs, + vector > >* all_match_indices, + vector >* all_neg_indices); + +// Retrieve bounding box ground truth from gt_data. +// gt_data: 1 x 1 x num_gt x 7 blob. +// num_gt: the number of ground truth. 
+// background_label_id: the label for background class which is used to do +// santity check so that no ground truth contains it. +// all_gt_bboxes: stores ground truth for each image. Label of each bbox is +// stored in NormalizedBBox. +template +void GetGroundTruth(const Dtype* gt_data, const int num_gt, + const int background_label_id, const bool use_difficult_gt, + map >* all_gt_bboxes); +// Store ground truth bboxes of same label in a group. +template +void GetGroundTruth(const Dtype* gt_data, const int num_gt, + const int background_label_id, const bool use_difficult_gt, + map* all_gt_bboxes); + +// Get location predictions from loc_data. +// loc_data: num x num_preds_per_class * num_loc_classes * 4 blob. +// num: the number of images. +// num_preds_per_class: number of predictions per class. +// num_loc_classes: number of location classes. It is 1 if share_location is +// true; and is equal to number of classes needed to predict otherwise. +// share_location: if true, all classes share the same location prediction. +// loc_preds: stores the location prediction, where each item contains +// location prediction for an image. +template +void GetLocPredictions(const Dtype* loc_data, const int num, + const int num_preds_per_class, const int num_loc_classes, + const bool share_location, vector* loc_preds); + +// Encode the localization prediction and ground truth for each matched prior. +// all_loc_preds: stores the location prediction, where each item contains +// location prediction for an image. +// all_gt_bboxes: stores ground truth bboxes for the batch. +// all_match_indices: stores mapping between predictions and ground truth. +// prior_bboxes: stores all the prior bboxes in the format of NormalizedBBox. +// prior_variances: stores all the variances needed by prior bboxes. +// multibox_loss_param: stores the parameters for MultiBoxLossLayer. +// loc_pred_data: stores the location prediction results. +// loc_gt_data: stores the encoded location ground truth. 
+template +void EncodeLocPrediction(const vector& all_loc_preds, + const map >& all_gt_bboxes, + const vector > >& all_match_indices, + const vector& prior_bboxes, + const vector >& prior_variances, + const MultiBoxLossParameter& multibox_loss_param, + Dtype* loc_pred_data, Dtype* loc_gt_data); + +// Compute the localization loss per matched prior. +// loc_pred: stores the location prediction results. +// loc_gt: stores the encoded location ground truth. +// all_match_indices: stores mapping between predictions and ground truth. +// num: number of images in the batch. +// num_priors: total number of priors. +// loc_loss_type: type of localization loss, Smooth_L1 or L2. +// all_loc_loss: stores the localization loss for all priors in a batch. +template +void ComputeLocLoss(const Blob& loc_pred, const Blob& loc_gt, + const vector > >& all_match_indices, + const int num, const int num_priors, const LocLossType loc_loss_type, + vector >* all_loc_loss); + +// Get confidence predictions from conf_data. +// conf_data: num x num_preds_per_class * num_classes blob. +// num: the number of images. +// num_preds_per_class: number of predictions per class. +// num_classes: number of classes. +// conf_preds: stores the confidence prediction, where each item contains +// confidence prediction for an image. +template +void GetConfidenceScores(const Dtype* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + vector > >* conf_scores); + +// Get confidence predictions from conf_data. +// conf_data: num x num_preds_per_class * num_classes blob. +// num: the number of images. +// num_preds_per_class: number of predictions per class. +// num_classes: number of classes. +// class_major: if true, data layout is +// num x num_classes x num_preds_per_class; otherwise, data layerout is +// num x num_preds_per_class * num_classes. +// conf_preds: stores the confidence prediction, where each item contains +// confidence prediction for an image. 
+template +void GetConfidenceScores(const Dtype* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + const bool class_major, vector > >* conf_scores); + +// Compute the confidence loss for each prior from conf_data. +// conf_data: num x num_preds_per_class * num_classes blob. +// num: the number of images. +// num_preds_per_class: number of predictions per class. +// num_classes: number of classes. +// background_label_id: it is used to skip selecting max scores from +// background class. +// loss_type: compute the confidence loss according to the loss type. +// all_match_indices: stores mapping between predictions and ground truth. +// all_gt_bboxes: stores ground truth bboxes from the batch. +// all_conf_loss: stores the confidence loss per location for each image. +template +void ComputeConfLoss(const Dtype* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + const int background_label_id, const ConfLossType loss_type, + const vector > >& all_match_indices, + const map >& all_gt_bboxes, + vector >* all_conf_loss); + +// Compute the negative confidence loss for each prior from conf_data. +// conf_data: num x num_preds_per_class * num_classes blob. +// num: the number of images. +// num_preds_per_class: number of predictions per class. +// num_classes: number of classes. +// background_label_id: it is used to skip selecting max scores from +// background class. +// loss_type: compute the confidence loss according to the loss type. +// all_conf_loss: stores the confidence loss per location for each image. +template +void ComputeConfLoss(const Dtype* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + const int background_label_id, const ConfLossType loss_type, + vector >* all_conf_loss); + +// Encode the confidence predictions and ground truth for each matched prior. +// conf_data: num x num_priors * num_classes blob. +// num: number of images. 
+// num_priors: number of priors (predictions) per image. +// multibox_loss_param: stores the parameters for MultiBoxLossLayer. +// all_match_indices: stores mapping between predictions and ground truth. +// all_neg_indices: stores the indices for negative samples. +// all_gt_bboxes: stores ground truth bboxes for the batch. +// conf_pred_data: stores the confidence prediction results. +// conf_gt_data: stores the confidence ground truth. +template +void EncodeConfPrediction(const Dtype* conf_data, const int num, + const int num_priors, const MultiBoxLossParameter& multibox_loss_param, + const vector > >& all_match_indices, + const vector >& all_neg_indices, + const map >& all_gt_bboxes, + Dtype* conf_pred_data, Dtype* conf_gt_data); + +// Get prior bounding boxes from prior_data. +// prior_data: 1 x 2 x num_priors * 4 x 1 blob. +// num_priors: number of priors. +// prior_bboxes: stores all the prior bboxes in the format of NormalizedBBox. +// prior_variances: stores all the variances needed by prior bboxes. +template +void GetPriorBBoxes(const Dtype* prior_data, const int num_priors, + vector* prior_bboxes, + vector >* prior_variances); + +// Get detection results from det_data. +// det_data: 1 x 1 x num_det x 7 blob. +// num_det: the number of detections. +// background_label_id: the label for background class which is used to do +// santity check so that no detection contains it. +// all_detections: stores detection results for each class from each image. +template +void GetDetectionResults(const Dtype* det_data, const int num_det, + const int background_label_id, + map* all_detections); + +// Get top_k scores with corresponding indices. +// scores: a set of scores. +// indices: a set of corresponding indices. +// top_k: if -1, keep all; otherwise, keep at most top_k. +// score_index_vec: store the sorted (score, index) pair. 
+void GetTopKScoreIndex(const vector& scores, const vector& indices, + const int top_k, vector >* score_index_vec); + +// Get max scores with corresponding indices. +// scores: a set of scores. +// threshold: only consider scores higher than the threshold. +// top_k: if -1, keep all; otherwise, keep at most top_k. +// score_index_vec: store the sorted (score, index) pair. +void GetMaxScoreIndex(const vector& scores, const float threshold, + const int top_k, vector >* score_index_vec); + +// Get max scores with corresponding indices. +// scores: an array of scores. +// num: number of total scores in the array. +// threshold: only consider scores higher than the threshold. +// top_k: if -1, keep all; otherwise, keep at most top_k. +// score_index_vec: store the sorted (score, index) pair. +template +void GetMaxScoreIndex(const Dtype* scores, const int num, const float threshold, + const int top_k, vector >* score_index_vec); + +// Get max scores with corresponding indices. +// scores: a set of scores. +// threshold: only consider scores higher than the threshold. +// top_k: if -1, keep all; otherwise, keep at most top_k. +// score_index_vec: store the sorted (score, index) pair. +void GetMaxScoreIndex(const vector& scores, const float threshold, + const int top_k, vector >* score_index_vec); + +// Do non maximum suppression given bboxes and scores. +// bboxes: a set of bounding boxes. +// scores: a set of corresponding confidences. +// threshold: the threshold used in non maximum suppression. +// top_k: if not -1, keep at most top_k picked indices. +// reuse_overlaps: if true, use and update overlaps; otherwise, always +// compute overlap. +// overlaps: a temp place to optionally store the overlaps between pairs of +// bboxes if reuse_overlaps is true. +// indices: the kept indices of bboxes after nms. 
+void ApplyNMS(const vector& bboxes, const vector& scores, + const float threshold, const int top_k, const bool reuse_overlaps, + map >* overlaps, vector* indices); + +void ApplyNMS(const vector& bboxes, const vector& scores, + const float threshold, const int top_k, vector* indices); + +void ApplyNMS(const bool* overlapped, const int num, vector* indices); + +// Do non maximum suppression given bboxes and scores. +// Inspired by Piotr Dollar's NMS implementation in EdgeBox. +// https://goo.gl/jV3JYS +// bboxes: a set of bounding boxes. +// scores: a set of corresponding confidences. +// score_threshold: a threshold used to filter detection results. +// nms_threshold: a threshold used in non maximum suppression. +// eta: adaptation rate for nms threshold (see Piotr's paper). +// top_k: if not -1, keep at most top_k picked indices. +// indices: the kept indices of bboxes after nms. +void ApplyNMSFast(const vector& bboxes, + const vector& scores, const float score_threshold, + const float nms_threshold, const float eta, const int top_k, + vector* indices); + +// Do non maximum suppression based on raw bboxes and scores data. +// Inspired by Piotr Dollar's NMS implementation in EdgeBox. +// https://goo.gl/jV3JYS +// bboxes: an array of bounding boxes. +// scores: an array of corresponding confidences. +// num: number of total boxes/confidences in the array. +// score_threshold: a threshold used to filter detection results. +// nms_threshold: a threshold used in non maximum suppression. +// eta: adaptation rate for nms threshold (see Piotr's paper). +// top_k: if not -1, keep at most top_k picked indices. +// indices: the kept indices of bboxes after nms. +template +void ApplyNMSFast(const Dtype* bboxes, const Dtype* scores, const int num, + const float score_threshold, const float nms_threshold, + const float eta, const int top_k, vector* indices); + +// Compute cumsum of a set of pairs. 
+void CumSum(const vector >& pairs, vector* cumsum); + +// Compute average precision given true positive and false positive vectors. +// tp: contains pairs of scores and true positive. +// num_pos: number of positives. +// fp: contains pairs of scores and false positive. +// ap_version: different ways of computing Average Precision. +// Check https://sanchom.wordpress.com/tag/average-precision/ for details. +// 11point: the 11-point interpolated average precision. Used in VOC2007. +// MaxIntegral: maximally interpolated AP. Used in VOC2012/ILSVRC. +// Integral: the natural integral of the precision-recall curve. +// prec: stores the computed precisions. +// rec: stores the computed recalls. +// ap: the computed Average Precision. +void ComputeAP(const vector >& tp, const int num_pos, + const vector >& fp, const string ap_version, + vector* prec, vector* rec, float* ap); + +#ifndef CPU_ONLY // GPU +template +__host__ __device__ Dtype BBoxSizeGPU(const Dtype* bbox, + const bool normalized = true); + +template +__host__ __device__ Dtype JaccardOverlapGPU(const Dtype* bbox1, + const Dtype* bbox2); + +template +void DecodeBBoxesGPU(const int nthreads, + const Dtype* loc_data, const Dtype* prior_data, + const CodeType code_type, const bool variance_encoded_in_target, + const int num_priors, const bool share_location, + const int num_loc_classes, const int background_label_id, + const bool clip_bbox, Dtype* bbox_data); + +template +void PermuteDataGPU(const int nthreads, + const Dtype* data, const int num_classes, const int num_data, + const int num_dim, Dtype* new_data); + +template +void SoftMaxGPU(const Dtype* data, const int outer_num, const int channels, + const int inner_num, Dtype* prob); + +template +void ComputeOverlappedGPU(const int nthreads, + const Dtype* bbox_data, const int num_bboxes, const int num_classes, + const Dtype overlap_threshold, bool* overlapped_data); + +template +void ComputeOverlappedByIdxGPU(const int nthreads, + const Dtype* bbox_data, 
const Dtype overlap_threshold, + const int* idx, const int num_idx, bool* overlapped_data); + +template +void ApplyNMSGPU(const Dtype* bbox_data, const Dtype* conf_data, + const int num_bboxes, const float confidence_threshold, + const int top_k, const float nms_threshold, vector* indices); + +template +void GetDetectionsGPU(const Dtype* bbox_data, const Dtype* conf_data, + const int image_id, const int label, const vector& indices, + const bool clip_bbox, Blob* detection_blob); + +template + void ComputeConfLossGPU(const Blob& conf_blob, const int num, + const int num_preds_per_class, const int num_classes, + const int background_label_id, const ConfLossType loss_type, + const vector > >& all_match_indices, + const map >& all_gt_bboxes, + vector >* all_conf_loss); +#endif // !CPU_ONLY + +#ifdef USE_OPENCV +vector GetColors(const int n); + +template +void VisualizeBBox(const vector& images, const Blob* detections, + const float threshold, const vector& colors, + const map& label_to_display_name, + const string& save_file); +#endif // USE_OPENCV + +} // namespace caffe + +#endif // CAFFE_UTIL_BBOX_UTIL_H_ diff --git a/include/caffe/util/benchmark.hpp b/include/caffe/util/benchmark.hpp index d63582776ee..cc00416438f 100644 --- a/include/caffe/util/benchmark.hpp +++ b/include/caffe/util/benchmark.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_UTIL_BENCHMARK_H_ #define CAFFE_UTIL_BENCHMARK_H_ diff --git a/include/caffe/util/blocking_queue.hpp b/include/caffe/util/blocking_queue.hpp index d3de2e59b80..7be3d7104e6 100644 --- a/include/caffe/util/blocking_queue.hpp +++ b/include/caffe/util/blocking_queue.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_UTIL_BLOCKING_QUEUE_HPP_ #define CAFFE_UTIL_BLOCKING_QUEUE_HPP_ @@ -6,6 +43,18 @@ namespace caffe { +class Element { + public: + template <typename T> + inline T* cast() { + T* ret = dynamic_cast<T*>(this); + CHECK(ret != NULL); + return ret; + } + + virtual inline ~Element() {} +}; + template<typename T> class BlockingQueue { public: diff --git a/include/caffe/util/compareToolUtilities.h b/include/caffe/util/compareToolUtilities.h new file mode 100644 index 00000000000..754890b0d8c --- /dev/null +++ b/include/caffe/util/compareToolUtilities.h @@ -0,0 +1,466 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef INCLUDE_CAFFE_UTIL_COMPARETOOLUTILITIES_H_ +#define INCLUDE_CAFFE_UTIL_COMPARETOOLUTILITIES_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "float_compare.hpp" + +template +class Data { + int dataSize; + DataType *dataPointer; + + Data(const Data &data); + Data &operator =(const Data &data); + + public: + Data() : + dataSize(0), + dataPointer(NULL) { + } + + ~Data() { + clear(); + } + + int getDataSize() const { + return dataSize; + } + + const DataType *getDataPointer() const { + return dataPointer; + } + + void clear() { + delete [] dataPointer; + dataPointer = NULL; + dataSize = 0; + } + + bool loadFromFile(const char *fileName) { + boost::filesystem::path filePath(fileName); + if (!boost::filesystem::exists(filePath)) { + return false; + } + + if (boost::filesystem::is_empty(filePath)) { + return false; + } + + FILE *file = fopen(fileName, "rb"); + if (!file) + return false; + + int64_t fileSize = boost::filesystem::file_size(filePath); + DataType *fileDataPointer = new DataType[fileSize]; + size_t bytesRead = fread(fileDataPointer, 1, fileSize, file); + fclose(file); + if (bytesRead != fileSize) { + delete [] fileDataPointer; + return false; + } + + clear(); + + dataPointer = fileDataPointer; + dataSize = fileSize / sizeof(DataType); + return true; + } +}; + +class Log { + FILE *logFile; + + Log() { + logFile = fopen("log.txt", "w+b"); + CHECK(logFile != NULL) << "Could not open log.txt file"; + } + + public: + ~Log() { + if (logFile) + fclose(logFile); + } + + static void log(const char *format, ...) 
{ + va_list args; + + static Log log; + + va_start(args, format); + vfprintf(log.logFile, format, args); + + va_start(args, format); + vprintf(format, args); + + va_end(args); + } +}; + +void getFileName(char *file_name, bool is_target, const char *name, int id) { + snprintf(file_name, FILENAME_MAX, "%s%04i.bin", name, id); +} + +void getBinFilePath(char *file_path, const char *name) { + snprintf(file_path, FILENAME_MAX, "%s/%s", + FLAGS_collect_dir.c_str(), name); +} + +bool saveToFile(const char *prefix, + int id, const float *data, unsigned count) { + char file_name[FILENAME_MAX]; + getFileName(file_name, false, prefix, id); + + FILE *file = fopen((FLAGS_collect_dir + "/" + file_name).c_str(), "w+b"); + if (!file) { + LOG(ERROR) << "Failed to create file '" << FLAGS_collect_dir << "'."; + return false; + } + + size_t bytesToWrite = count * sizeof(data[0]); + size_t bytesWritten = fwrite(data, 1, bytesToWrite, file); + fclose(file); + + if (bytesWritten != bytesToWrite) { + LOG(ERROR) << "Failed to write data to '" << FLAGS_collect_dir + << "' file."; + return false; + } + + return true; +} + +bool loadFromFile(const char *file_path, float *data, unsigned count) { + FILE *file = fopen(file_path, "rb"); + if (!file) { + LOG(ERROR) << "Failed to open file '" << file_path << "' for read."; + return false; + } + + size_t bytesToRead = count * sizeof(data[0]); + size_t bytesRead = fread(data, 1, bytesToRead, file); + fclose(file); + + if (bytesRead != bytesToRead) { + LOG(ERROR) << "Failed to read data from '" << file_path << "' file."; + return false; + } + + return true; +} + +bool compareDataWithFileData(const char *referenceFileName, + const float *targetDataPointer, double *maxDiff, + unsigned *diffCounter, const char *outputDir) { + typedef uint32_t CastType; + const char *format = "%i;%08X;%08X;%g;%g;%g\n"; + const float epsilon = static_cast(FLAGS_epsilon); + bool is_nan_filler = + std::isnan(static_cast(FLAGS_buffer_filler)); + + Data referenceData; + char 
file_path[FILENAME_MAX]; + getBinFilePath(file_path, referenceFileName); + if (!referenceData.loadFromFile(file_path)) { + Log::log("Failed to load reference data file '%s'.\n", + referenceFileName); + return false; + } + + char diffFileName[FILENAME_MAX]; + snprintf(diffFileName, FILENAME_MAX, "./%s/OUT%s", outputDir, + referenceFileName); + FILE *file = fopen(diffFileName, "w+t"); + if (!file) { + return false; + } + + *maxDiff = -1; + *diffCounter = 0; + int dataSize = referenceData.getDataSize(); + const float *referenceDataPointer = referenceData.getDataPointer(); + for (int i = 0; i < dataSize; i++) { + float a = referenceDataPointer[i]; + float b = targetDataPointer[i]; + if (std::isnan(a) && std::isnan(b) && is_nan_filler){ + continue; + } + + float diff = caffe::floatDiff(a, b, epsilon); + if (diff != FP_ZERO) { + fprintf(file, format, i,(CastType)a, (CastType)b, diff, a, b); + (*diffCounter)++; + } + + if (*maxDiff < diff) { + *maxDiff = diff; + } + + if (FLAGS_fast_compare && (*diffCounter) >= FLAGS_fast_compare_max) { + break; + } + } + + if (file) + fclose(file); + + return true; +} + +void checkData(const char *referenceFileName, const float *targetDataPointer, + const char *layerName, const char *outputDir, + std::unordered_set *erronousLayers) { + double maxDiff; + unsigned diffCounter; + bool success = compareDataWithFileData(referenceFileName, + targetDataPointer, &maxDiff, &diffCounter, outputDir); + + if (!success) { + Log::log("%-18s %-20s : failed\n", referenceFileName, layerName); + } else if (!diffCounter) { + Log::log("%-18s %-20s : success\n", referenceFileName, layerName); + } else { + Log::log("%-18s %-20s : %g %u\n", referenceFileName, layerName, + maxDiff, diffCounter); + (*erronousLayers).insert(layerName); + } +} + +void checkAllNans(const float *targetDataPointer, unsigned count, + const char *bufferName, const char *layerName, + std::unordered_set *erronousLayers) { + float buffer_filler = static_cast(FLAGS_buffer_filler); + float 
epsilon = static_cast(FLAGS_epsilon); + if (std::isnan(buffer_filler)){ + for (int i = 0; i < count; i++) { + if (!std::isnan(targetDataPointer[i])) { + Log::log("Not all elements in %s are NaNs\n", bufferName); + (*erronousLayers).insert(layerName); + return; + } + } + } else { + for (int i = 0; i < count; i++) { + if (caffe::floatDiff(targetDataPointer[i], buffer_filler, epsilon) + != FP_ZERO) { + Log::log("Not all elements in %s are %.1f\n", + bufferName, buffer_filler); + (*erronousLayers).insert(layerName); + return; + } + } + } +} + +int collectAndCheckLayerData(bool collect_step, + bool use_gpu, const char *output_dir) { + Net caffe_net(FLAGS_model, caffe::TRAIN, FLAGS_level, + NULL, NULL, FLAGS_engine); + const vector > >& layers = caffe_net.layers(); + const vector > >& params = caffe_net.params(); + const vector*> >& bottom_vecs = caffe_net.bottom_vecs(); + const vector*> >& top_vecs = caffe_net.top_vecs(); + const vector >& bottom_need_backward = + caffe_net.bottom_need_backward(); + + std::unordered_set erronous_layers; + FILE *infoFile = fopen(use_gpu ? + (FLAGS_collect_dir + "/" + "GPUInfo.txt").c_str() : + (FLAGS_collect_dir + "/" + "CPUInfo.txt").c_str(), "w+t"); + CHECK(infoFile != NULL) << "Could not open info file"; + char file_name[FILENAME_MAX]; + char file_path[FILENAME_MAX]; + string message_prefix = collect_step ? 
"Collecting" : "Comparing"; + float buffer_filler = static_cast(FLAGS_buffer_filler); + LOG(INFO) << message_prefix << " weights"; + for (int i = 0; i < params.size(); i++) { + if (collect_step) { + saveToFile("Wght", i, + params[i]->cpu_data(), params[i]->count()); + } else { + getFileName(file_name, false, "Wght", i); + checkData(file_name, params[i]->cpu_data(), + layers[i]->type(), output_dir, + &erronous_layers); + } + + caffe::caffe_set(params[i]->count(), buffer_filler, + params[i]->mutable_cpu_diff()); + } + + LOG(INFO) << message_prefix << " FW Layers"; + for (int i = 0; i < layers.size(); ++i) { + fprintf(infoFile, "Fwrd%04i %s\n", i, layers[i]->type()); + + if (bottom_need_backward[i].size() > 0 && bottom_need_backward[i][0]) { + if (collect_step) { + saveToFile("FwrdBtmDat", i, bottom_vecs[i][0]->cpu_data(), + bottom_vecs[i][0]->count()); + } else { + getFileName(file_name, false, "FwrdBtmDat", i); + getBinFilePath(file_path, file_name); + loadFromFile(file_path, bottom_vecs[i][0]->mutable_cpu_data(), + bottom_vecs[i][0]->count()); + } + } + + for (int j = 0; j < bottom_vecs[i].size(); j++) { + caffe::caffe_set(bottom_vecs[i][j]->count(), buffer_filler, + bottom_vecs[i][j]->mutable_cpu_diff()); + } + + for (int j = 0; j < top_vecs[i].size(); j++) { + caffe::caffe_set(top_vecs[i][j]->count(), buffer_filler, + top_vecs[i][j]->mutable_cpu_diff()); + } + + layers[i]->Forward(bottom_vecs[i], top_vecs[i]); + + if (collect_step) { + saveToFile("FwrdTopDat", i, top_vecs[i][0]->cpu_data(), + top_vecs[i][0]->count()); + } else { + getFileName(file_name, false, "FwrdTopDat", i); + checkData(file_name, top_vecs[i][0]->cpu_data(), + layers[i]->type(), output_dir, + &erronous_layers); + } + + if (bottom_need_backward[i].size() > 0 && bottom_need_backward[i][0]) { + // We check data only for out-of-place computations + if (bottom_vecs[i][0] != top_vecs[i][0]) { + getFileName(file_name, false, "FwrdBtmDat", i); + checkData(file_name, bottom_vecs[i][0]->cpu_data(), + 
layers[i]->type(), output_dir, + &erronous_layers); + } + checkAllNans(bottom_vecs[i][0]->cpu_diff(), + bottom_vecs[i][0]->count(), "bottom.diff", + layers[i]->type(), &erronous_layers); + } + + checkAllNans(top_vecs[i][0]->cpu_diff(), + top_vecs[i][0]->count(), "top.diff", + layers[i]->type(), &erronous_layers); + } + + LOG(INFO) << message_prefix + << " weights again"; + for (int i = 0; i < params.size(); i++) { + getFileName(file_name, false, "Wght", i); + checkData(file_name, params[i]->cpu_data(), + layers[i]->type(), output_dir, + &erronous_layers); + checkAllNans(params[i]->cpu_diff(), params[i]->count(), "param.diff", + layers[i]->type(), &erronous_layers); + } + + LOG(INFO) << message_prefix << " BW Layers"; + for (int i = layers.size() - 1; i >= 0; --i) { + fprintf(infoFile, "Bwrd%04i %s\n", i, layers[i]->type()); + + layers[i]->Backward(top_vecs[i], + bottom_need_backward[i], bottom_vecs[i]); + + if (collect_step) { + saveToFile("BwrdTopDif", i, + top_vecs[i][0]->cpu_diff(), top_vecs[i][0]->count()); + + if (bottom_need_backward[i].size() > 0 && + bottom_need_backward[i][0]) { + saveToFile("BwrdBtmDif", i, + bottom_vecs[i][0]->cpu_diff(), bottom_vecs[i][0]->count()); + } + } else { + getFileName(file_name, false, "BwrdTopDif", i); + checkData(file_name, top_vecs[i][0]->cpu_diff(), + layers[i]->type(), output_dir, + &erronous_layers); + + if (bottom_need_backward[i].size() > 0 && + bottom_need_backward[i][0]) { + getFileName(file_name, false, "BwrdBtmDif", i); + checkData(file_name, bottom_vecs[i][0]->cpu_diff(), + layers[i]->type(), output_dir, + &erronous_layers); + } + } + } + + LOG(INFO) << message_prefix + << " weights and gradients"; + for (int i = 0; i < params.size(); i++) { + getFileName(file_name, false, "Wght", i); + checkData(file_name, params[i]->cpu_data(), + layers[i]->type(), output_dir, + &erronous_layers); + + if (collect_step) { + saveToFile("Grad", i, + params[i]->cpu_diff(), params[i]->count()); + } else { + getFileName(file_name, 
false, "Grad", i); + checkData(file_name, params[i]->cpu_diff(), + layers[i]->type(), output_dir, + &erronous_layers); + } + } + + fclose(infoFile); + + if (erronous_layers.size() > 0) { + LOG(INFO) << "Invalid layer behaviour detected on: "; + for (const std::string& layer_name : erronous_layers) { + LOG(WARNING) << "\t" << layer_name; + } + } + + return 0; +} + +#endif // INCLUDE_CAFFE_UTIL_COMPARETOOLUTILITIES_H_ diff --git a/include/caffe/util/cpu_info.hpp b/include/caffe/util/cpu_info.hpp new file mode 100644 index 00000000000..758db2faa43 --- /dev/null +++ b/include/caffe/util/cpu_info.hpp @@ -0,0 +1,182 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CAFFE_UTIL_CPU_INFO_HPP +#define CAFFE_UTIL_CPU_INFO_HPP + +#include +#include +#include +#include +#include +#include +#include + + +namespace caffe { +namespace cpu { + +struct Processor { + unsigned processor; + unsigned physicalId; + unsigned siblings; + unsigned coreId; + unsigned cpuCores; + unsigned speedMHz; + + Processor(); +}; + +class CpuInfoInterface { + public: + virtual ~CpuInfoInterface() {} + virtual const char *getFirstLine() = 0; + virtual const char *getNextLine() = 0; +}; + +class CpuInfo : public CpuInfoInterface { + public: + CpuInfo(); + explicit CpuInfo(const char *content); + virtual ~CpuInfo(); + + virtual const char *getFirstLine(); + virtual const char *getNextLine(); + + private: + const char *fileContentBegin; + const char *fileContentEnd; + const char *currentLine; + + void loadContentFromFile(const char *fileName); + void loadContent(const char *content); + void parseLines(char *content); +}; + +class CollectionInterface { + public: + virtual ~CollectionInterface() {} + virtual unsigned getProcessorSpeedMHz() = 0; + virtual unsigned getTotalNumberOfSockets() = 0; + virtual unsigned getTotalNumberOfCpuCores() = 0; + virtual unsigned getNumberOfProcessors() = 0; + virtual const Processor &getProcessor(unsigned processorId) = 0; +}; + +class Collection : public CollectionInterface { + public: + explicit Collection(CpuInfoInterface *cpuInfo); + + virtual unsigned getProcessorSpeedMHz(); + virtual unsigned 
getTotalNumberOfSockets(); + virtual unsigned getTotalNumberOfCpuCores(); + virtual unsigned getNumberOfProcessors(); + virtual const Processor &getProcessor(unsigned processorId); + + private: + CpuInfoInterface &cpuInfo; + unsigned totalNumberOfSockets; + unsigned totalNumberOfCpuCores; + std::vector processors; + Processor *currentProcessor; + + Collection(const Collection &collection); + Collection &operator =(const Collection &collection); + + void parseCpuInfo(); + void parseCpuInfoLine(const char *cpuInfoLine); + void parseValue(const char *fieldName, const char *valueString); + void appendNewProcessor(); + bool beginsWith(const char *lineBuffer, const char *text) const; + unsigned parseInteger(const char *text) const; + unsigned extractSpeedFromModelName(const char *text) const; + + void collectBasicCpuInformation(); + void updateCpuInformation(const Processor &processor, + unsigned numberOfUniquePhysicalId); +}; + +#ifdef _OPENMP + +class OpenMpManager { + public: + static void setGpuEnabled(); + static void setGpuDisabled(); + + static void bindCurrentThreadToNonPrimaryCoreIfPossible(); + + static void bindOpenMpThreads(); + static void printVerboseInformation(); + + static bool isMajorThread(boost::thread::id currentThread); + static unsigned getProcessorSpeedMHz(); + + private: + boost::thread::id mainThreadId; + Collection &collection; + + bool isGpuEnabled; + bool isAnyOpenMpEnvVarSpecified; + cpu_set_t currentCpuSet; + cpu_set_t currentCoreSet; + + explicit OpenMpManager(Collection *collection); + OpenMpManager(const OpenMpManager &openMpManager); + OpenMpManager &operator =(const OpenMpManager &openMpManager); + static OpenMpManager &getInstance(); + + void getOpenMpEnvVars(); + void getCurrentCpuSet(); + void getDefaultCpuSet(cpu_set_t *defaultCpuSet); + void getCurrentCoreSet(); + + void selectAllCoreCpus(cpu_set_t *set, unsigned physicalCoreId); + unsigned getPhysicalCoreId(unsigned logicalCoreId); + + bool isThreadsBindAllowed(); + void 
setOpenMpThreadNumberLimit(); + void bindCurrentThreadToLogicalCoreCpu(unsigned logicalCoreId); + void bindCurrentThreadToLogicalCoreCpus(unsigned logicalCoreId); +}; + +#endif // _OPENMP + +} // namespace cpu + +} // namespace caffe + +#endif // CAFFE_UTIL_CPU_INFO_HPP diff --git a/include/caffe/util/cudnn.hpp b/include/caffe/util/cudnn.hpp index a7d8dbbad4c..f6a1f327c55 100644 --- a/include/caffe/util/cudnn.hpp +++ b/include/caffe/util/cudnn.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_UTIL_CUDNN_H_ #define CAFFE_UTIL_CUDNN_H_ #ifdef USE_CUDNN diff --git a/include/caffe/util/db.hpp b/include/caffe/util/db.hpp index 59ec3d390ba..7dfc921fbbe 100644 --- a/include/caffe/util/db.hpp +++ b/include/caffe/util/db.hpp @@ -1,7 +1,45 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_UTIL_DB_HPP #define CAFFE_UTIL_DB_HPP #include +#include #include "caffe/common.hpp" #include "caffe/proto/caffe.pb.h" @@ -18,6 +56,7 @@ class Cursor { virtual void Next() = 0; virtual string key() = 0; virtual string value() = 0; + virtual std::pair valuePointer() = 0; virtual bool valid() = 0; DISABLE_COPY_AND_ASSIGN(Cursor); diff --git a/include/caffe/util/db_leveldb.hpp b/include/caffe/util/db_leveldb.hpp index e9fa0d32b66..53deaebf82e 100644 --- a/include/caffe/util/db_leveldb.hpp +++ b/include/caffe/util/db_leveldb.hpp @@ -1,8 +1,46 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifdef USE_LEVELDB #ifndef CAFFE_UTIL_DB_LEVELDB_HPP #define CAFFE_UTIL_DB_LEVELDB_HPP #include +#include #include "leveldb/db.h" #include "leveldb/write_batch.h" @@ -20,6 +58,10 @@ class LevelDBCursor : public Cursor { virtual void Next() { iter_->Next(); } virtual string key() { return iter_->key().ToString(); } virtual string value() { return iter_->value().ToString(); } + virtual std::pair valuePointer() { + CHECK(false) << "Function valuePointer not implemented in LevelDBCursor"; + return std::pair{}; + } virtual bool valid() { return iter_->Valid(); } private: diff --git a/include/caffe/util/db_lmdb.hpp b/include/caffe/util/db_lmdb.hpp index ee370322383..acb9da55f60 100644 --- a/include/caffe/util/db_lmdb.hpp +++ b/include/caffe/util/db_lmdb.hpp @@ -1,8 +1,46 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifdef USE_LMDB #ifndef CAFFE_UTIL_DB_LMDB_HPP #define CAFFE_UTIL_DB_LMDB_HPP #include +#include #include #include "lmdb.h" @@ -34,6 +72,10 @@ class LMDBCursor : public Cursor { return string(static_cast(mdb_value_.mv_data), mdb_value_.mv_size); } + virtual std::pair valuePointer() { + return std::make_pair(mdb_value_.mv_data, mdb_value_.mv_size); + } + virtual bool valid() { return valid_; } private: diff --git a/include/caffe/util/device_alternate.hpp b/include/caffe/util/device_alternate.hpp index e3fe4fe29fd..bdf769b8049 100644 --- a/include/caffe/util/device_alternate.hpp +++ b/include/caffe/util/device_alternate.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_UTIL_DEVICE_ALTERNATE_H_ #define CAFFE_UTIL_DEVICE_ALTERNATE_H_ diff --git a/include/caffe/util/float_compare.hpp b/include/caffe/util/float_compare.hpp new file mode 100644 index 00000000000..4db0fad637f --- /dev/null +++ b/include/caffe/util/float_compare.hpp @@ -0,0 +1,61 @@ +/* +All modification made by Intel Corporation: 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +* Neither the name of Intel Corporation nor the names of its contributors +may be used to endorse or promote products derived from this software +without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef FloatCompareH +#define FloatCompareH + +#include +#include + +namespace caffe { + float floatDiff(const float &a, const float &b, const float &epsilon) { + float diff = fabs(a - b); + if (!std::isfinite(diff)) { + static float nan_float = std::nanf(""); + return nan_float; + } + + if (diff <= epsilon || + fabs(boost::math::float_distance(a, b)) <= 1.f) { + return FP_ZERO; + } + + return diff; + } +} + +#endif // ifndef FloatCompareH \ No newline at end of file diff --git a/include/caffe/util/format.hpp b/include/caffe/util/format.hpp index 925ad2e0479..af581e515c4 100644 --- a/include/caffe/util/format.hpp +++ b/include/caffe/util/format.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_UTIL_FORMAT_H_ #define CAFFE_UTIL_FORMAT_H_ diff --git a/include/caffe/util/hdf5.hpp b/include/caffe/util/hdf5.hpp index ce568c5eb0d..9c4140c199d 100644 --- a/include/caffe/util/hdf5.hpp +++ b/include/caffe/util/hdf5.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_UTIL_HDF5_H_ #define CAFFE_UTIL_HDF5_H_ @@ -27,6 +64,12 @@ void hdf5_save_nd_dataset( int hdf5_load_int(hid_t loc_id, const string& dataset_name); void hdf5_save_int(hid_t loc_id, const string& dataset_name, int i); + +template <typename Dtype> +Dtype hdf5_load_float(hid_t loc_id, const string& dataset_name); +template <typename Dtype> +void hdf5_save_float(hid_t loc_id, const string& dataset_name, Dtype f); + string hdf5_load_string(hid_t loc_id, const string& dataset_name); void hdf5_save_string(hid_t loc_id, const string& dataset_name, const string& s); diff --git a/include/caffe/util/im2col.hpp b/include/caffe/util/im2col.hpp old mode 100644 new mode 100755 index a35bc6e0b1c..1f2b567e3b3 --- a/include/caffe/util/im2col.hpp +++ b/include/caffe/util/im2col.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission.
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef _CAFFE_UTIL_IM2COL_HPP_ #define _CAFFE_UTIL_IM2COL_HPP_ @@ -17,6 +54,13 @@ void im2col_cpu(const Dtype* data_im, const int channels, Dtype* data_col); template +void im3d2col_cpu(const Dtype* data_im, const int channels, + const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, + const int pad_d, const int pad_h, const int pad_w, const int stride_d, const int stride_h, + const int stride_w, const int dilation_d, const int dilation_h, const int dilation_w, + Dtype* data_col); + +template void col2im_nd_cpu(const Dtype* data_col, const int num_spatial_axes, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, @@ -30,6 +74,13 @@ void col2im_cpu(const Dtype* data_col, const int channels, Dtype* data_im); template +void col2im3d_cpu(const Dtype* data_col, const int channels, + const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, + const int pad_d, const int pad_h, const int pad_w, const int stride_d, const int stride_h, + const int stride_w, const int dilation_d, const int dilation_h, const int dilation_w, + Dtype* data_im); + +template void 
im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, const int col_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, diff --git a/include/caffe/util/im_transforms.hpp b/include/caffe/util/im_transforms.hpp new file mode 100644 index 00000000000..dd4fd705f52 --- /dev/null +++ b/include/caffe/util/im_transforms.hpp @@ -0,0 +1,127 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef IM_TRANSFORMS_HPP +#define IM_TRANSFORMS_HPP + +#ifdef USE_OPENCV +#include <opencv2/core/core.hpp> +#include <opencv2/imgproc/imgproc.hpp> +#endif // USE_OPENCV + +#include <vector> + +#include "caffe/common.hpp" +#include "caffe/proto/caffe.pb.h" + +namespace caffe { + +// Generate random number given the probabilities for each number. +int roll_weighted_die(const std::vector<float>& probabilities); + +void UpdateBBoxByResizePolicy(const ResizeParameter& param, + const int old_width, const int old_height, + NormalizedBBox* bbox); + +void InferNewSize(const ResizeParameter& resize_param, + const int old_width, const int old_height, + int* new_width, int* new_height); + +#ifdef USE_OPENCV +template <typename T> +bool is_border(const cv::Mat& edge, T color); + +// Auto cropping image.
+template +cv::Rect CropMask(const cv::Mat& src, T point, int padding = 2); + +cv::Mat colorReduce(const cv::Mat& image, int div = 64); + +void fillEdgeImage(const cv::Mat& edgesIn, cv::Mat* filledEdgesOut); + +void CenterObjectAndFillBg(const cv::Mat& in_img, const bool fill_bg, + cv::Mat* out_img); + +cv::Mat AspectKeepingResizeAndPad(const cv::Mat& in_img, + const int new_width, const int new_height, + const int pad_type = cv::BORDER_CONSTANT, + const cv::Scalar pad = cv::Scalar(0, 0, 0), + const int interp_mode = cv::INTER_LINEAR); + +cv::Mat AspectKeepingResizeBySmall(const cv::Mat& in_img, + const int new_width, const int new_height, + const int interp_mode = cv::INTER_LINEAR); + +void constantNoise(const int n, const vector& val, cv::Mat* image); + +cv::Mat ApplyResize(const cv::Mat& in_img, const ResizeParameter& param); + +cv::Mat ApplyNoise(const cv::Mat& in_img, const NoiseParameter& param); + + +void RandomBrightness(const cv::Mat& in_img, cv::Mat* out_img, + const float brightness_prob, const float brightness_delta); + +void AdjustBrightness(const cv::Mat& in_img, const float delta, + cv::Mat* out_img); + +void RandomContrast(const cv::Mat& in_img, cv::Mat* out_img, + const float contrast_prob, const float lower, const float upper); + +void AdjustContrast(const cv::Mat& in_img, const float delta, + cv::Mat* out_img); + +void RandomSaturation(const cv::Mat& in_img, cv::Mat* out_img, + const float saturation_prob, const float lower, const float upper); + +void AdjustSaturation(const cv::Mat& in_img, const float delta, + cv::Mat* out_img); + +void RandomHue(const cv::Mat& in_img, cv::Mat* out_img, + const float hue_prob, const float hue_delta); + +void AdjustHue(const cv::Mat& in_img, const float delta, cv::Mat* out_img); + +void RandomOrderChannels(const cv::Mat& in_img, cv::Mat* out_img, + const float random_order_prob); + +cv::Mat ApplyDistort(const cv::Mat& in_img, const DistortionParameter& param); +#endif // USE_OPENCV + +} // namespace caffe + 
+#endif // IM_TRANSFORMS_HPP diff --git a/include/caffe/util/insert_splits.hpp b/include/caffe/util/insert_splits.hpp index 446abb817be..1b8c13d5f78 100644 --- a/include/caffe/util/insert_splits.hpp +++ b/include/caffe/util/insert_splits.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef _CAFFE_UTIL_INSERT_SPLITS_HPP_ #define _CAFFE_UTIL_INSERT_SPLITS_HPP_ diff --git a/include/caffe/util/io.hpp b/include/caffe/util/io.hpp index 1a599883ca3..2b66a207f5d 100644 --- a/include/caffe/util/io.hpp +++ b/include/caffe/util/io.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_UTIL_IO_H_ #define CAFFE_UTIL_IO_H_ @@ -49,6 +86,38 @@ inline void MakeTempFilename(string* temp_filename) { (temp_files_subpath/caffe::format_int(next_temp_file++, 9)).string(); } +inline void GetTempDirname(string* temp_dirname) { + temp_dirname->clear(); + const path& model = + boost::filesystem::temp_directory_path()/"caffe_test.%%%%-%%%%"; + for ( int i = 0; i < CAFFE_TMP_DIR_RETRIES; i++ ) { + const path& dir = boost::filesystem::unique_path(model).string(); + bool done = boost::filesystem::create_directory(dir); + if ( done ) { + bool remove_done = boost::filesystem::remove(dir); + if (remove_done) { + *temp_dirname = dir.string(); + return; + } + LOG(FATAL) << "Failed to remove a temporary directory."; + } + } + LOG(FATAL) << "Failed to create a temporary directory."; +} + +inline void GetTempFilename(string* temp_filename) { + static path temp_files_subpath; + static uint64_t next_temp_file = 0; + temp_filename->clear(); + if ( temp_files_subpath.empty() ) { + string path_string=""; + GetTempDirname(&path_string); + temp_files_subpath = path_string; + } + *temp_filename = + (temp_files_subpath/caffe::format_int(next_temp_file++, 9)).string(); +} + bool ReadProtoFromTextFile(const 
char* filename, Message* proto); inline bool ReadProtoFromTextFile(const string& filename, Message* proto) { @@ -129,6 +198,73 @@ inline bool ReadImageToDatum(const string& filename, const int label, bool DecodeDatumNative(Datum* datum); bool DecodeDatum(Datum* datum, bool is_color); + +void GetImageSize(const string& filename, int* height, int* width); + +bool ReadRichImageToAnnotatedDatum(const string& filename, + const string& labelname, const int height, const int width, + const int min_dim, const int max_dim, const bool is_color, + const std::string& encoding, const AnnotatedDatum_AnnotationType type, + const string& labeltype, const std::map& name_to_label, + AnnotatedDatum* anno_datum); + +inline bool ReadRichImageToAnnotatedDatum(const string& filename, + const string& labelname, const int height, const int width, + const bool is_color, const std::string & encoding, + const AnnotatedDatum_AnnotationType type, const string& labeltype, + const std::map& name_to_label, AnnotatedDatum* anno_datum) { + return ReadRichImageToAnnotatedDatum(filename, labelname, height, width, 0, 0, + is_color, encoding, type, labeltype, name_to_label, + anno_datum); +} + +bool ReadXMLToAnnotatedDatum(const string& labelname, const int img_height, + const int img_width, const std::map& name_to_label, + AnnotatedDatum* anno_datum); + +bool ReadJSONToAnnotatedDatum(const string& labelname, const int img_height, + const int img_width, const std::map& name_to_label, + AnnotatedDatum* anno_datum); + +bool ReadTxtToAnnotatedDatum(const string& labelname, const int height, + const int width, AnnotatedDatum* anno_datum); + +bool ReadLabelFileToLabelMap(const string& filename, bool include_background, + const string& delimiter, LabelMap* map); + +inline bool ReadLabelFileToLabelMap(const string& filename, + bool include_background, LabelMap* map) { + return ReadLabelFileToLabelMap(filename, include_background, " ", map); +} + +inline bool ReadLabelFileToLabelMap(const string& filename, 
LabelMap* map) { + return ReadLabelFileToLabelMap(filename, true, map); +} + +bool MapNameToLabel(const LabelMap& map, const bool strict_check, + std::map* name_to_label); + +inline bool MapNameToLabel(const LabelMap& map, + std::map* name_to_label) { + return MapNameToLabel(map, true, name_to_label); +} + +bool MapLabelToName(const LabelMap& map, const bool strict_check, + std::map* label_to_name); + +inline bool MapLabelToName(const LabelMap& map, + std::map* label_to_name) { + return MapLabelToName(map, true, label_to_name); +} + +bool MapLabelToDisplayName(const LabelMap& map, const bool strict_check, + std::map* label_to_display_name); + +inline bool MapLabelToDisplayName(const LabelMap& map, + std::map* label_to_display_name) { + return MapLabelToDisplayName(map, true, label_to_display_name); +} + #ifdef USE_OPENCV cv::Mat ReadImageToCVMat(const string& filename, const int height, const int width, const bool is_color); @@ -144,6 +280,9 @@ cv::Mat ReadImageToCVMat(const string& filename); cv::Mat DecodeDatumToCVMatNative(const Datum& datum); cv::Mat DecodeDatumToCVMat(const Datum& datum, bool is_color); +void EncodeCVMatToDatum(const cv::Mat& cv_img, const string& encoding, + Datum* datum); + void CVMatToDatum(const cv::Mat& cv_img, Datum* datum); #endif // USE_OPENCV diff --git a/include/caffe/util/math_functions.hpp b/include/caffe/util/math_functions.hpp index 6f6d3feeae2..47328eced52 100644 --- a/include/caffe/util/math_functions.hpp +++ b/include/caffe/util/math_functions.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_UTIL_MATH_FUNCTIONS_H_ #define CAFFE_UTIL_MATH_FUNCTIONS_H_ @@ -26,46 +63,49 @@ void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, const int N, Dtype* y); template -void caffe_axpy(const int N, const Dtype alpha, const Dtype* X, +void caffe_axpy(const long N, const Dtype alpha, const Dtype* X, Dtype* Y); template -void caffe_cpu_axpby(const int N, const Dtype alpha, const Dtype* X, +void caffe_cpu_axpby(const long N, const Dtype alpha, const Dtype* X, const Dtype beta, Dtype* Y); template -void caffe_copy(const int N, const Dtype *X, Dtype *Y); +void caffe_copy(const size_t N, const Dtype *X, Dtype *Y); + +template +void caffe_cpu_copy(const size_t N, const Dtype* X, Dtype* Y); template -void caffe_set(const int N, const Dtype alpha, Dtype *X); +void caffe_set(const size_t N, const Dtype alpha, Dtype *X); inline void caffe_memset(const size_t N, const int alpha, void* X) { memset(X, alpha, N); // NOLINT(caffe/alt_fn) } template -void caffe_add_scalar(const int N, const Dtype alpha, Dtype *X); +void caffe_add_scalar(const long N, const Dtype alpha, Dtype *X); template -void caffe_scal(const int N, const Dtype alpha, Dtype *X); +void caffe_scal(const long N, const Dtype alpha, Dtype *X); template -void caffe_sqr(const int N, const Dtype* a, Dtype* y); +void caffe_sqr(const long N, const Dtype* a, Dtype* y); template -void caffe_add(const int N, const Dtype* a, const Dtype* b, Dtype* y); +void caffe_add(const long N, const Dtype* a, const Dtype* b, Dtype* y); template -void caffe_sub(const int N, const Dtype* a, const Dtype* b, Dtype* y); +void caffe_sub(const long N, const Dtype* a, const Dtype* b, Dtype* y); template -void caffe_mul(const int N, const Dtype* a, const Dtype* b, Dtype* y); +void caffe_mul(const long N, const Dtype* a, const Dtype* b, Dtype* y); template -void caffe_div(const int N, const Dtype* a, const Dtype* b, Dtype* y); +void caffe_div(const long N, const Dtype* a, const Dtype* b, Dtype* y); template -void 
caffe_powx(const int n, const Dtype* a, const Dtype b, Dtype* y); +void caffe_powx(const long n, const Dtype* a, const Dtype b, Dtype* y); unsigned int caffe_rng_rand(); @@ -73,37 +113,37 @@ template Dtype caffe_nextafter(const Dtype b); template -void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r); +void caffe_rng_uniform(const long n, const Dtype a, const Dtype b, Dtype* r); template -void caffe_rng_gaussian(const int n, const Dtype mu, const Dtype sigma, +void caffe_rng_gaussian(const long n, const Dtype mu, const Dtype sigma, Dtype* r); template -void caffe_rng_bernoulli(const int n, const Dtype p, int* r); +void caffe_rng_bernoulli(const long n, const Dtype p, int* r); template -void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r); +void caffe_rng_bernoulli(const long n, const Dtype p, unsigned int* r); template -void caffe_exp(const int n, const Dtype* a, Dtype* y); +void caffe_exp(const long n, const Dtype* a, Dtype* y); template -void caffe_log(const int n, const Dtype* a, Dtype* y); +void caffe_log(const long n, const Dtype* a, Dtype* y); template -void caffe_abs(const int n, const Dtype* a, Dtype* y); +void caffe_abs(const long n, const Dtype* a, Dtype* y); template -Dtype caffe_cpu_dot(const int n, const Dtype* x, const Dtype* y); +Dtype caffe_cpu_dot(const long n, const Dtype* x, const Dtype* y); template -Dtype caffe_cpu_strided_dot(const int n, const Dtype* x, const int incx, +Dtype caffe_cpu_strided_dot(const long n, const Dtype* x, const int incx, const Dtype* y, const int incy); // Returns the sum of the absolute values of the elements of vector x template -Dtype caffe_cpu_asum(const int n, const Dtype* x); +Dtype caffe_cpu_asum(const long n, const Dtype* x); // the branchless, type-safe version from // http://stackoverflow.com/questions/1903954/is-there-a-standard-sign-function-signum-sgn-in-c-c @@ -140,7 +180,7 @@ DEFINE_CAFFE_CPU_UNARY_FUNC(sgnbit, \ DEFINE_CAFFE_CPU_UNARY_FUNC(fabs, y[i] = 
std::fabs(x[i])); template -void caffe_cpu_scale(const int n, const Dtype alpha, const Dtype *x, Dtype* y); +void caffe_cpu_scale(const long n, const Dtype alpha, const Dtype *x, Dtype* y); #ifndef CPU_ONLY // GPU diff --git a/include/caffe/util/mkl_alternate.hpp b/include/caffe/util/mkl_alternate.hpp index 3355b6658a3..7a7d0e443d1 100644 --- a/include/caffe/util/mkl_alternate.hpp +++ b/include/caffe/util/mkl_alternate.hpp @@ -1,9 +1,46 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_UTIL_MKL_ALTERNATE_H_ #define CAFFE_UTIL_MKL_ALTERNATE_H_ #ifdef USE_MKL -#include +#include #else // If use MKL, simply include the MKL header @@ -36,25 +73,6 @@ DEFINE_VSL_UNARY_FUNC(Exp, y[i] = exp(a[i])); DEFINE_VSL_UNARY_FUNC(Ln, y[i] = log(a[i])); DEFINE_VSL_UNARY_FUNC(Abs, y[i] = fabs(a[i])); -// A simple way to define the vsl unary functions with singular parameter b. -// The operation should be in the form e.g. y[i] = pow(a[i], b) -#define DEFINE_VSL_UNARY_FUNC_WITH_PARAM(name, operation) \ - template \ - void v##name(const int n, const Dtype* a, const Dtype b, Dtype* y) { \ - CHECK_GT(n, 0); CHECK(a); CHECK(y); \ - for (int i = 0; i < n; ++i) { operation; } \ - } \ - inline void vs##name( \ - const int n, const float* a, const float b, float* y) { \ - v##name(n, a, b, y); \ - } \ - inline void vd##name( \ - const int n, const double* a, const float b, double* y) { \ - v##name(n, a, b, y); \ - } - -DEFINE_VSL_UNARY_FUNC_WITH_PARAM(Powx, y[i] = pow(a[i], b)); - // A simple way to define the vsl binary functions. The operation should // be in the form e.g. 
y[i] = a[i] + b[i] #define DEFINE_VSL_BINARY_FUNC(name, operation) \ @@ -77,6 +95,20 @@ DEFINE_VSL_BINARY_FUNC(Sub, y[i] = a[i] - b[i]); DEFINE_VSL_BINARY_FUNC(Mul, y[i] = a[i] * b[i]); DEFINE_VSL_BINARY_FUNC(Div, y[i] = a[i] / b[i]); +// Power function +template +inline void vPowx(const int n, const Dtype* a, Dtype b, Dtype* y) { +#ifdef _OPENMP + #pragma omp parallel for +#endif + for (int i = 0; i < n; ++i) { + y[i] = std::pow(a[i], b); + } +} + +#define vsPowx vPowx +#define vdPowx vPowx + // In addition, MKL comes with an additional function axpby that is not present // in standard blas. We will simply use a two-step (inefficient, of course) way // to mimic that. diff --git a/include/caffe/util/performance.hpp b/include/caffe/util/performance.hpp new file mode 100644 index 00000000000..9b9bd065787 --- /dev/null +++ b/include/caffe/util/performance.hpp @@ -0,0 +1,635 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifndef PerformanceH +#define PerformanceH + +#ifdef PERFORMANCE_MONITORING + +#define PERFORMANCE_EVENT_ID_UNSET (-1) + +#define PERFORMANCE_EVENT_ID_DECL(id_name) \ + int id_name + +#define PERFORMANCE_EVENT_ID_RESET(id_name) \ + id_name = PERFORMANCE_EVENT_ID_UNSET + +#define PERFORMANCE_EVENT_ID_INIT(id_name, event_name) \ + if ((id_name) == PERFORMANCE_EVENT_ID_UNSET) \ + id_name = performance::monitor.GetEventIdByName(event_name) + +#define PERFORMANCE_MEASUREMENT_BEGIN() \ + performance::Measurement m_MACRO; \ + m_MACRO.Start(); + +#define PERFORMANCE_MEASUREMENT_END(name) \ + m_MACRO.Stop(); \ + int id_MACRO = performance::monitor.GetEventIdByName(name); \ + performance::monitor.UpdateEventById(id_MACRO, m_MACRO); + +#define PERFORMANCE_MEASUREMENT_END_STATIC(name) \ + m_MACRO.Stop(); \ + static int id_MACRO = performance::monitor.GetEventIdByName(name); \ + performance::monitor.UpdateEventById(id_MACRO, m_MACRO); + +#define PERFORMANCE_MEASUREMENT_END_ID(id_name) \ + m_MACRO.Stop(); \ + 
performance::monitor.UpdateEventById(id_name, m_MACRO); + +#define PERFORMANCE_CREATE_MONITOR() \ + namespace performance { \ + Monitor monitor; }; + +#define PERFORMANCE_INIT_MONITOR() \ + performance::monitor.EnableMeasurements(); \ + performance::monitor.MarkAsInitialized(); + +#define PERFORMANCE_MEASUREMENT_END_MKL(prefix) \ + do { \ + static char name[256]; \ + snprintf(name, sizeof(name), "%s_mkl_%s", prefix, \ + this->layer_param_.name().c_str()); \ + PERFORMANCE_MEASUREMENT_END(name); \ + } while(0) + +#define PERFORMANCE_MEASUREMENT_END_MKL_DETAILED(prefix, suffix) \ + do { \ + static char name[256]; \ + snprintf(name, sizeof(name), "%s_mkl_%s%s", prefix, \ + this->layer_param_.name().c_str(), suffix); \ + PERFORMANCE_MEASUREMENT_END(name); \ + } while(0) + +#define PERFORMANCE_MKL_NAME_DETAILED(prefix, suffix) \ + (std::string(prefix) + "_mkl_" + this->layer_param_.name() \ + + std::string(suffix)).c_str() + +#define PERFORMANCE_MKL_NAME(prefix) \ + (std::string(prefix) + "_mkl_" + this->layer_param_.name()).c_str() + +#define PERFORMANCE_MKLDNN_NAME_DETAILED(prefix, suffix) \ + (std::string(prefix) + "_mkldnn_" + this->layer_param_.name() \ + + std::string(suffix)).c_str() + +#define PERFORMANCE_MKLDNN_NAME(prefix) \ + (std::string(prefix) + "_mkldnn_" + this->layer_param_.name()).c_str() + +#else +#define PERFORMANCE_EVENT_ID_DECL(id_name) +#define PERFORMANCE_EVENT_ID_RESET(id_name) +#define PERFORMANCE_EVENT_ID_INIT(id_name, event_name) +#define PERFORMANCE_MEASUREMENT_BEGIN() +#define PERFORMANCE_MEASUREMENT_END(name) +#define PERFORMANCE_MEASUREMENT_END_STATIC(name) +#define PERFORMANCE_MEASUREMENT_END_ID(id_name) +#define PERFORMANCE_CREATE_MONITOR() +#define PERFORMANCE_INIT_MONITOR() +#define PERFORMANCE_MEASUREMENT_END_MKL(prefix) +#define PERFORMANCE_MEASUREMENT_END_MKL_DETAILED(prefix, suffix) +#define PERFORMANCE_MKL_NAME_DETAILED(prefix, suffix) +#define PERFORMANCE_MKL_NAME(prefix) +#define PERFORMANCE_MKLDNN_NAME_DETAILED(prefix, suffix) 
+#define PERFORMANCE_MKLDNN_NAME(prefix) +#endif + +#ifdef PERFORMANCE_MONITORING + +#include +#include +#include +#ifdef PERFORMANCE_MONITORING_USE_TSC +#include +#endif +#include +#include +#include +#include + +namespace performance { + + class PreciseTime { + static const uint64_t clocks_per_second_ = 1000000000; + + uint64_t time_stamp_; + +#ifdef PERFORMANCE_MONITORING_USE_TSC + static double GetTSCFreq() { + static double tsc_freq = 0; + if (!tsc_freq) { + // Calibrate the frequency + const int usleep_one_second = 1000000; + uint64_t tsc0 = GetTSC(); + usleep(usleep_one_second); + uint64_t tsc1 = GetTSC(); + uint64_t tsc_diff = tsc1 - tsc0; + tsc_freq = (double)tsc_diff / clocks_per_second_; + } + return tsc_freq; + } + + static uint64_t GetTSC() { + uint32_t lo, hi; + __asm__ volatile("rdtscp" : "=a"(lo), "=d"(hi) : : "%ecx"); + return (uint64_t)lo | ((uint64_t)hi << 32); + } + + static PreciseTime GetTimeStamp(clockid_t clock_id) { + return PreciseTime((uint64_t)(GetTSC() / GetTSCFreq())); + } +#else + static PreciseTime GetTimeStamp(clockid_t clock_id) { + timespec current_time; + clock_gettime(clock_id, ¤t_time); + + return PreciseTime(clocks_per_second_ * ((uint64_t)current_time.tv_sec) + + ((uint64_t)current_time.tv_nsec)); + } +#endif + + public: + PreciseTime() { + } + + static void Calibrate() { +#ifdef PERFORMANCE_MONITORING_USE_TSC + GetTSCFreq(); +#endif + } + + explicit PreciseTime(uint64_t time_stamp) : time_stamp_(time_stamp) { + } + + operator uint64_t() const { + return time_stamp_; + } + + PreciseTime& operator=(const uint64_t& time) { + this->time_stamp_ = time; + return *this; + } + + friend PreciseTime operator+(PreciseTime lhs, const PreciseTime& rhs) { + lhs.time_stamp_ += rhs.time_stamp_; + return lhs; + } + + friend PreciseTime operator-(PreciseTime lhs, const PreciseTime& rhs) { + lhs.time_stamp_ -= rhs.time_stamp_; + return lhs; + } + + static PreciseTime GetClocksPerSecond() { + return PreciseTime(clocks_per_second_); + } + + 
static PreciseTime GetMonotonicTime() { + return GetTimeStamp(CLOCK_MONOTONIC); + } + + static PreciseTime GetProcessTime() { + return GetTimeStamp(CLOCK_THREAD_CPUTIME_ID); + } + }; + + class Measurement { + PreciseTime process_accumulator_; + PreciseTime process_time_stamp_; + PreciseTime monotonic_accumulator_; + PreciseTime monotonic_time_stamp_; + Measurement* next_; + + static Measurement*& GetStack() { + static Measurement* stack = NULL; + return stack; + } + + void Suspend() { + process_accumulator_ = process_accumulator_ + + PreciseTime::GetProcessTime() - process_time_stamp_; + monotonic_accumulator_ = monotonic_accumulator_ + + PreciseTime::GetMonotonicTime() - monotonic_time_stamp_; + } + + void Resume() { + monotonic_time_stamp_ = PreciseTime::GetMonotonicTime(); + process_time_stamp_ = PreciseTime::GetProcessTime(); + } + + public: + Measurement() { + } + + void Start() { + static Measurement*& stack = GetStack(); + + if (stack) + stack->Suspend(); + + next_ = stack; + stack = this; + + monotonic_accumulator_ = 0; + process_accumulator_ = 0; + monotonic_time_stamp_ = PreciseTime::GetMonotonicTime(); + process_time_stamp_ = PreciseTime::GetProcessTime(); + } + + void Stop() { + process_accumulator_ = process_accumulator_ + + PreciseTime::GetProcessTime() - process_time_stamp_; + monotonic_accumulator_ = monotonic_accumulator_ + + PreciseTime::GetMonotonicTime() - monotonic_time_stamp_; + + static Measurement*& stack = GetStack(); + + stack = next_; + + if (stack) + stack->Resume(); + } + + const PreciseTime &GetProcessTimeStamp() const { + return process_accumulator_; + } + + const PreciseTime &GetMonotonicTimeStamp() const { + return monotonic_accumulator_; + } + }; + + class Event { + unsigned number_of_calls_; + PreciseTime total_process_time_; + PreciseTime minimal_process_time_; + PreciseTime maximal_process_time_; + + PreciseTime total_monotonic_time_; + PreciseTime minimal_monotonic_time_; + PreciseTime maximal_monotonic_time_; + + public: + 
Event() : number_of_calls_(0), + total_process_time_(0), + minimal_process_time_(0), + maximal_process_time_(0), + total_monotonic_time_(0), + minimal_monotonic_time_(0), + maximal_monotonic_time_(0) { + } + + void Update(const Measurement &measurement) { + const PreciseTime &process_time_stamp = measurement.GetProcessTimeStamp(); + const PreciseTime &monotonic_time_stamp = + measurement.GetMonotonicTimeStamp(); + + total_process_time_ = total_process_time_ + process_time_stamp; + total_monotonic_time_ = total_monotonic_time_ + monotonic_time_stamp; + + if (minimal_process_time_ > process_time_stamp || !number_of_calls_) + minimal_process_time_ = process_time_stamp; + + if (maximal_process_time_ < process_time_stamp || !number_of_calls_) + maximal_process_time_ = process_time_stamp; + + if (minimal_monotonic_time_ > monotonic_time_stamp || !number_of_calls_) + minimal_monotonic_time_ = monotonic_time_stamp; + + if (maximal_monotonic_time_ < monotonic_time_stamp || !number_of_calls_) + maximal_monotonic_time_ = monotonic_time_stamp; + + number_of_calls_++; + } + + PreciseTime GetTotalProcessTime() const { + return total_process_time_; + } + + PreciseTime GetAverageMonotonicTime() const { + return number_of_calls_ + ? PreciseTime(total_monotonic_time_ / number_of_calls_) + : PreciseTime(0); + } + + PreciseTime GetMinimalMonotonicTime() const { + return minimal_monotonic_time_; + } + + PreciseTime GetMaximalMonotonicTime() const { + return maximal_monotonic_time_; + } + + PreciseTime GetAverageProcessTime() const { + return number_of_calls_ + ? 
PreciseTime(total_process_time_ / number_of_calls_) + : PreciseTime(0); + } + + PreciseTime GetMinimalProcessTime() const { + return minimal_process_time_; + } + + PreciseTime GetMaximalProcessTime() const { + return maximal_process_time_; + } + + PreciseTime GetNumberOfCalls() const { + return PreciseTime(number_of_calls_); + } + }; + + class Log { + public: + static void WriteLine() { + printf("\n"); + } + + static void WriteLine(const char* string) { + printf("%31s\n", string); + } + + static void WriteHeaders() { + printf("%10s %16s %16s %16s %16s %16s %16s : %s\n\n", + "Calls", + "Avg(total)", "Min(total)", "Max(total)", + "Avg(proc)", "Min(proc)", "Max(proc)", + "Layer"); + } + + static void WriteNoSpacing(const char* string, const PreciseTime& time) { + printf("%18lu : %s\n", (uint64_t)time, string); + } + + static void Write(const char* string, const PreciseTime& time) { + printf("%18lu %10c %s\n", (uint64_t)time, ':', string); + } + + static void Write(const char* string, const PreciseTime& time, + double percentage) { + printf("%18lu %6.2f %% : %s\n", (uint64_t)time, percentage, string); + } + + static void Write(const char *string, const Event &event) { + printf("%10lu %16lu %16lu %16lu %16lu %16lu %16lu : %s \n", + (uint64_t)event.GetNumberOfCalls(), + (uint64_t)event.GetAverageMonotonicTime(), + (uint64_t)event.GetMinimalMonotonicTime(), + (uint64_t)event.GetMaximalMonotonicTime(), + (uint64_t)event.GetAverageProcessTime(), + (uint64_t)event.GetMinimalProcessTime(), + (uint64_t)event.GetMaximalProcessTime(), + string); + } + }; + + class Monitor { + typedef std::vector NameVector; + typedef std::vector EventVector; + typedef std::pair Pair; + typedef std::map Map; + typedef Map::iterator Iterator; + typedef std::pair Status; + + EventVector events_; + Map event_name_id_map_; + + bool are_measurements_enabled_; + + NameVector event_names_; + PreciseTime total_non_mkl_time_; + PreciseTime total_mkl_time_; + PreciseTime total_mkl_conversions_time_; + 
PreciseTime total_data_layer_time_; + PreciseTime total_weights_update_time_; + PreciseTime total_monotonic_time_; + PreciseTime total_init_time_; + PreciseTime total_process_time_; + + void DumpStatistics() { + if (events_.size()) + DumpEventsLog(); + + DumpGeneralLog(); + } + + void DumpEventsLog() { + ObtainEventNames(); + ObtainTotalMklConversionTime(); + ObtainTotalWeightsUpdateTime(); + ObtainTotalDataLayerTime(); + ObtainTotalMklTime(); + + Log::WriteLine(); + Log::WriteLine("Detailed event information"); + Log::WriteLine(); + Log::WriteHeaders(); + DumpDetailedEventInformation(); + } + + void DumpGeneralLog() { + Log::WriteLine(); + Log::WriteLine(); + Log::WriteLine("Total event execution time"); + Log::WriteLine(); + DumpEventTimings(); + + Log::WriteLine(); + Log::WriteLine(); + Log::WriteLine("Summarized information"); + Log::WriteLine(); + DumpGeneralTimings(); + + Log::WriteLine(); + } + + void DumpGeneralTimings() { + const PreciseTime framework_time = total_process_time_ - + total_non_mkl_time_ - total_mkl_time_ - + total_mkl_conversions_time_ - total_data_layer_time_ - + total_weights_update_time_ - total_init_time_; + const PreciseTime system_time = total_monotonic_time_ - + total_process_time_; + + Log::Write("Data layer", total_data_layer_time_, + GetTimePercentage(total_data_layer_time_)); + Log::Write("Weight update", total_weights_update_time_, + GetTimePercentage(total_weights_update_time_)); + Log::Write("Non-MKL(DNN) events", total_non_mkl_time_, + GetTimePercentage(total_non_mkl_time_)); + Log::Write("MKL(DNN) conversions", total_mkl_conversions_time_, + GetTimePercentage(total_mkl_conversions_time_)); + Log::Write("MKL(DNN) events", total_mkl_time_, + GetTimePercentage(total_mkl_time_)); + Log::Write("Framework", framework_time, + GetTimePercentage(framework_time)); + Log::Write("System", system_time, GetTimePercentage(system_time)); + Log::Write("Initialization", total_init_time_); + Log::Write("Process", total_process_time_); + 
Log::Write("Total", total_monotonic_time_); + } + + void DumpEventTimings() { + for (unsigned i = 0; i < events_.size(); i++) { + Log::WriteNoSpacing(event_names_[i].c_str(), + events_[i].GetTotalProcessTime()); + } + } + + void DumpDetailedEventInformation() { + for (unsigned i = 0; i < events_.size(); i++) { + Log::Write(event_names_[i].c_str(), events_[i]); + } + } + + void ObtainTotalMklTime() { + total_non_mkl_time_ = 0; + total_mkl_time_ = 0; + + Iterator iterator = event_name_id_map_.begin(); + for (; iterator != event_name_id_map_.end(); iterator++) { + if (iterator->first.find("mkl") != std::string::npos) + total_mkl_time_ = total_mkl_time_ + + events_[iterator->second].GetTotalProcessTime(); + else + total_non_mkl_time_ = total_non_mkl_time_ + + events_[iterator->second].GetTotalProcessTime(); + } + total_non_mkl_time_ = total_non_mkl_time_ - + total_weights_update_time_ - total_data_layer_time_; + total_mkl_time_ = total_mkl_time_ - total_mkl_conversions_time_; + } + + void ObtainTotalMklConversionTime() { + if (event_name_id_map_.count("mkl_conversion") > 0) { + unsigned mkl_conv_id = event_name_id_map_["mkl_conversion"]; + total_mkl_conversions_time_ = + events_[mkl_conv_id].GetTotalProcessTime(); + } else if (event_name_id_map_.count("mkldnn_conversion") > 0) { + unsigned mkldnn_conv_id = event_name_id_map_["mkldnn_conversion"]; + total_mkl_conversions_time_ = + events_[mkldnn_conv_id].GetTotalProcessTime(); + } else { + total_mkl_conversions_time_ = 0; + } + } + + void ObtainTotalDataLayerTime() { + for (unsigned i = 0; i < event_names_.size(); i++) { + if (event_names_[i].find("W_") != std::string::npos) { + total_data_layer_time_ = events_[i].GetTotalProcessTime(); + break; + } + } + } + + void ObtainTotalWeightsUpdateTime() { + if (event_name_id_map_.count("weights_update") > 0) { + unsigned weights_update_id = event_name_id_map_["weights_update"]; + total_weights_update_time_ = + events_[weights_update_id].GetTotalProcessTime(); + } else { + 
total_weights_update_time_ = 0; + } + } + + void ObtainEventNames() { + event_names_.resize(event_name_id_map_.size()); + + Iterator iterator = event_name_id_map_.begin(); + for (; iterator != event_name_id_map_.end(); iterator++) + event_names_[iterator->second] = iterator->first; + } + + double GetTimePercentage(const PreciseTime& time) { + return (100.0 * static_cast(time)) / + static_cast(total_monotonic_time_ - total_init_time_); + } + + public: + Monitor() { + events_.reserve(64); + + PreciseTime::Calibrate(); + + are_measurements_enabled_ = false; + + total_monotonic_time_ = PreciseTime::GetMonotonicTime(); + total_process_time_ = PreciseTime::GetProcessTime(); + total_init_time_ = 0; + } + + ~Monitor() { + total_process_time_ = PreciseTime::GetProcessTime() - total_process_time_; + total_monotonic_time_ = PreciseTime::GetMonotonicTime() - + total_monotonic_time_; + + if (are_measurements_enabled_) + DumpStatistics(); + } + + void EnableMeasurements() { + are_measurements_enabled_ = true; + } + + void MarkAsInitialized() { + total_init_time_ = PreciseTime::GetProcessTime() - total_process_time_; + } + + unsigned GetEventIdByName(const char *event_name) { + if (!are_measurements_enabled_) + return PERFORMANCE_EVENT_ID_UNSET; + + Pair pair(event_name, events_.size()); + Status status = event_name_id_map_.insert(pair); + + // If insertion succeeded + if (status.second) + events_.push_back(Event()); + + return status.first->second; + } + + void UpdateEventById(unsigned event_id, const Measurement &measurement) { + if (are_measurements_enabled_) + events_[event_id].Update(measurement); + } + }; + + extern Monitor monitor; + +} // namespace performance + +#endif // ifdef PERFORMANCE_MONITORING +#endif // ifndef PerformanceH diff --git a/include/caffe/util/remove_batch_norm.hpp b/include/caffe/util/remove_batch_norm.hpp new file mode 100644 index 00000000000..316a4c0222f --- /dev/null +++ b/include/caffe/util/remove_batch_norm.hpp @@ -0,0 +1,75 @@ + +/* +All 
modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ +#ifndef COMPILE_NET_UTIL_HPP_ +#define COMPILE_NET_UTIL_HPP_ +#include "caffe/proto/caffe.pb.h" + +namespace caffe { +/** + * @brief If CompileNet's compilation rule one does work, some scale layer's weights and bias blobs + * may be merged into batch norm layer. RecoverScaleFromBN will recover the merged scale layer's info. + * Currently, we only care about the weights and bias info. + */ +template +void RecoverScaleFromBN(const LayerParameter& bn_layer_param, LayerParameter& scale_layer_param, Dtype default_scale_weights, Dtype default_scale_bias); +/** + * @brief rename layer1's top to layer2's + */ +void MergeLayer(LayerParameter &layer1, const LayerParameter &layer2); + +/** + * @brief After removing the batch norm and scale layer after a convolution layer, to make the inference + * result correct, we must adjust convolution layer's weights and bias blobs + */ + +template +void AdjustConvLayer(LayerParameter &conv_layer, + const LayerParameter &batch_norm_layer, + const LayerParameter &scale_layer, bool is_net_init); + +/** + * @brief The batch norm and scale layer may be merged due to compilation rule one's effect, RecoverBNScaleMergedNet + * is used to recover the scale layer + */ +template +void RecoverBNScaleMergedNet(NetParameter * net_param, NetParameter* recovered_net_param); + +template +void RemoveBNScale(const NetParameter& param, NetParameter* param_compiled); +} +#endif diff --git a/include/caffe/util/rng.hpp b/include/caffe/util/rng.hpp index 8f1cf0d17c2..255c8d605dc 100644 --- a/include/caffe/util/rng.hpp +++ b/include/caffe/util/rng.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CAFFE_RNG_CPP_HPP_ #define CAFFE_RNG_CPP_HPP_ diff --git a/include/caffe/util/sampler.hpp b/include/caffe/util/sampler.hpp new file mode 100644 index 00000000000..8d02f35cfff --- /dev/null +++ b/include/caffe/util/sampler.hpp @@ -0,0 +1,76 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CAFFE_UTIL_SAMPLER_H_ +#define CAFFE_UTIL_SAMPLER_H_ + +#include + +#include "glog/logging.h" + +#include "caffe/caffe.hpp" + +namespace caffe { + +// Find all annotated NormalizedBBox. +void GroupObjectBBoxes(const AnnotatedDatum& anno_datum, + vector* object_bboxes); + +// Check if a sampled bbox satisfy the constraints with all object bboxes. +bool SatisfySampleConstraint(const NormalizedBBox& sampled_bbox, + const vector& object_bboxes, + const SampleConstraint& sample_constraint); + +// Sample a NormalizedBBox given the specifictions. +void SampleBBox(const Sampler& sampler, NormalizedBBox* sampled_bbox); + +// Generate samples from NormalizedBBox using the BatchSampler. +void GenerateSamples(const NormalizedBBox& source_bbox, + const vector& object_bboxes, + const BatchSampler& batch_sampler, + vector* sampled_bboxes); + +// Generate samples from AnnotatedDatum using the BatchSampler. +// All sampled bboxes which satisfy the constraints defined in BatchSampler +// is stored in sampled_bboxes. 
+void GenerateBatchSamples(const AnnotatedDatum& anno_datum, + const vector& batch_samplers, + vector* sampled_bboxes); + +} // namespace caffe + +#endif // CAFFE_UTIL_SAMPLER_H_ diff --git a/include/caffe/util/signal_handler.h b/include/caffe/util/signal_handler.h index fb84c65bd2e..a1e37723d1f 100644 --- a/include/caffe/util/signal_handler.h +++ b/include/caffe/util/signal_handler.h @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_ #define INCLUDE_CAFFE_UTIL_SIGNAL_HANDLER_H_ diff --git a/include/caffe/util/upgrade_proto.hpp b/include/caffe/util/upgrade_proto.hpp index 14e1936a8c2..138bf32fab6 100644 --- a/include/caffe/util/upgrade_proto.hpp +++ b/include/caffe/util/upgrade_proto.hpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CAFFE_UTIL_UPGRADE_PROTO_H_ #define CAFFE_UTIL_UPGRADE_PROTO_H_ @@ -77,6 +114,11 @@ bool UpgradeSolverAsNeeded(const string& param_file, SolverParameter* param); void ReadSolverParamsFromTextFileOrDie(const string& param_file, SolverParameter* param); +#ifdef USE_MLSL +void ReplaceMultinodeSolverParams(SolverParameter* param); + +void ReplaceMultinodeNetParams(NetParameter* sparam); +#endif } // namespace caffe #endif // CAFFE_UTIL_UPGRADE_PROTO_H_ diff --git a/include/mkl_dnn_cppwrapper.h b/include/mkl_dnn_cppwrapper.h new file mode 100644 index 00000000000..8010137186a --- /dev/null +++ b/include/mkl_dnn_cppwrapper.h @@ -0,0 +1,827 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + + #ifndef _MKL_DNN_CPPWRAPPER_H + #define _MKL_DNN_CPPWRAPPER_H + + #include + #include + + #include "mkl_dnn_types.h" + #include "mkl_dnn.h" + #include "mkl_version.h" + + #define TEMPLATE_PREFIX template inline + #define SPEC_PREFIX template <> inline + + #if (__INTEL_MKL__ < 2017) || (__INTEL_MKL_BUILD_DATE <= 20160311) + #error: To use the new MKL DNN API, you must install Intel(R) MKL 2017 Beta Update 1 or higher. 
+ #endif + + + + TEMPLATE_PREFIX dnnError_t dnnLayoutCreate( + dnnLayout_t *pLayout, size_t dimension, const size_t size[], const size_t strides[]); + SPEC_PREFIX dnnError_t dnnLayoutCreate<float>( + dnnLayout_t *pLayout, size_t dimension, const size_t size[], const size_t strides[]) + {return dnnLayoutCreate_F32(pLayout, dimension, size, strides);} + SPEC_PREFIX dnnError_t dnnLayoutCreate<double>( + dnnLayout_t *pLayout, size_t dimension, const size_t size[], const size_t strides[]) + {return dnnLayoutCreate_F64(pLayout, dimension, size, strides);} + + TEMPLATE_PREFIX dnnError_t dnnLayoutCreateFromPrimitive( + dnnLayout_t *pLayout, const dnnPrimitive_t primitive, dnnResourceType_t type); + SPEC_PREFIX dnnError_t dnnLayoutCreateFromPrimitive<float>( + dnnLayout_t *pLayout, const dnnPrimitive_t primitive, dnnResourceType_t type) + {return dnnLayoutCreateFromPrimitive_F32(pLayout, primitive, type);} + SPEC_PREFIX dnnError_t dnnLayoutCreateFromPrimitive<double>( + dnnLayout_t *pLayout, const dnnPrimitive_t primitive, dnnResourceType_t type) + {return dnnLayoutCreateFromPrimitive_F64(pLayout, primitive, type);} + + TEMPLATE_PREFIX size_t dnnLayoutGetMemorySize( + const dnnLayout_t layout); + SPEC_PREFIX size_t dnnLayoutGetMemorySize<float>( + const dnnLayout_t layout) + {return dnnLayoutGetMemorySize_F32(layout);} + SPEC_PREFIX size_t dnnLayoutGetMemorySize<double>( + const dnnLayout_t layout) + {return dnnLayoutGetMemorySize_F64(layout);} + + TEMPLATE_PREFIX int dnnLayoutCompare( + const dnnLayout_t l1, const dnnLayout_t l2); + SPEC_PREFIX int dnnLayoutCompare<float>( + const dnnLayout_t l1, const dnnLayout_t l2) + {return dnnLayoutCompare_F32(l1, l2);} + SPEC_PREFIX int dnnLayoutCompare<double>( + const dnnLayout_t l1, const dnnLayout_t l2) + {return dnnLayoutCompare_F64(l1, l2);} + + + TEMPLATE_PREFIX dnnError_t dnnAllocateBuffer( + void **pPtr, dnnLayout_t layout); + SPEC_PREFIX dnnError_t dnnAllocateBuffer<float>( + void **pPtr, dnnLayout_t layout) + {return dnnAllocateBuffer_F32(pPtr, layout);} + SPEC_PREFIX dnnError_t
dnnAllocateBuffer<double>( + void **pPtr, dnnLayout_t layout) + {return dnnAllocateBuffer_F64(pPtr, layout);} + + TEMPLATE_PREFIX dnnError_t dnnReleaseBuffer( + void* ptr); + SPEC_PREFIX dnnError_t dnnReleaseBuffer<float>( + void* ptr) { + dnnError_t status = E_SUCCESS; + if( ptr != NULL) { + status = dnnReleaseBuffer_F32(ptr); + } + return status; + } + SPEC_PREFIX dnnError_t dnnReleaseBuffer<double>( + void* ptr) { + dnnError_t status = E_SUCCESS; + if( ptr != NULL) { + status = dnnReleaseBuffer_F64(ptr); + } + return status; + } + + TEMPLATE_PREFIX dnnError_t dnnLayoutDelete( + dnnLayout_t& layout); + SPEC_PREFIX dnnError_t dnnLayoutDelete<float>( + dnnLayout_t& layout) { + dnnError_t status = E_SUCCESS; + if( layout != NULL) { + status = dnnLayoutDelete_F32(layout); + layout = NULL; + } + return status; + } + SPEC_PREFIX dnnError_t dnnLayoutDelete<double>( + dnnLayout_t& layout) { + dnnError_t status = E_SUCCESS; + if( layout != NULL) { + status = dnnLayoutDelete_F64(layout); + layout = NULL; + } + return status; + } + +TEMPLATE_PREFIX dnnError_t dnnPrimitiveAttributesCreate( + dnnPrimitiveAttributes_t *attributes); +SPEC_PREFIX dnnError_t dnnPrimitiveAttributesCreate<float>( + dnnPrimitiveAttributes_t *attributes) + {return dnnPrimitiveAttributesCreate_F32 (attributes);} +SPEC_PREFIX dnnError_t dnnPrimitiveAttributesCreate<double>( + dnnPrimitiveAttributes_t *attributes) + {return dnnPrimitiveAttributesCreate_F64 (attributes);} + + +TEMPLATE_PREFIX dnnError_t dnnPrimitiveAttributesDestroy( + dnnPrimitiveAttributes_t attributes); +SPEC_PREFIX dnnError_t dnnPrimitiveAttributesDestroy<float>( + dnnPrimitiveAttributes_t attributes) + {return dnnPrimitiveAttributesDestroy_F32(attributes);} +SPEC_PREFIX dnnError_t dnnPrimitiveAttributesDestroy<double>( + dnnPrimitiveAttributes_t attributes) + {return dnnPrimitiveAttributesDestroy_F64(attributes);} + +TEMPLATE_PREFIX dnnError_t dnnPrimitiveGetAttributes( + dnnPrimitive_t primitive, + dnnPrimitiveAttributes_t *attributes); +SPEC_PREFIX dnnError_t dnnPrimitiveGetAttributes<float>( + 
dnnPrimitive_t primitive, + dnnPrimitiveAttributes_t *attributes) + {return dnnPrimitiveGetAttributes_F32(primitive, attributes);} +SPEC_PREFIX dnnError_t dnnPrimitiveGetAttributes<double>( + dnnPrimitive_t primitive, + dnnPrimitiveAttributes_t *attributes) + {return dnnPrimitiveGetAttributes_F64(primitive, attributes);} + +TEMPLATE_PREFIX dnnError_t dnnExecute( + dnnPrimitive_t primitive, void *resources[]); +SPEC_PREFIX dnnError_t dnnExecute<float>( + dnnPrimitive_t primitive, void *resources[]) + {return dnnExecute_F32(primitive, resources);} +SPEC_PREFIX dnnError_t dnnExecute<double>( + dnnPrimitive_t primitive, void *resources[]) + {return dnnExecute_F64(primitive, resources);} + +TEMPLATE_PREFIX dnnError_t dnnExecuteAsync( + dnnPrimitive_t primitive, void *resources[]); +SPEC_PREFIX dnnError_t dnnExecuteAsync<float>( + dnnPrimitive_t primitive, void *resources[]) + {return dnnExecuteAsync_F32(primitive, resources);} +SPEC_PREFIX dnnError_t dnnExecuteAsync<double>( + dnnPrimitive_t primitive, void *resources[]) + {return dnnExecuteAsync_F64(primitive, resources);} + +TEMPLATE_PREFIX dnnError_t dnnWaitFor( + dnnPrimitive_t primitive); +SPEC_PREFIX dnnError_t dnnWaitFor<float>( + dnnPrimitive_t primitive) + {return dnnWaitFor_F32(primitive);} +SPEC_PREFIX dnnError_t dnnWaitFor<double>( + dnnPrimitive_t primitive) + {return dnnWaitFor_F64(primitive);} + +TEMPLATE_PREFIX dnnError_t dnnDelete( + dnnPrimitive_t& primitive); +SPEC_PREFIX dnnError_t dnnDelete<float>( + dnnPrimitive_t& primitive) { + dnnError_t status = E_SUCCESS; + if (primitive != NULL) { + status = dnnDelete_F32(primitive); + primitive = NULL; + } + return status; +} +SPEC_PREFIX dnnError_t dnnDelete<double>( + dnnPrimitive_t& primitive) { + dnnError_t status = E_SUCCESS; + if (primitive != NULL) { + status = dnnDelete_F64(primitive); + primitive = NULL; + } + return status; +} + +TEMPLATE_PREFIX dnnError_t dnnConversionCreate( + dnnPrimitive_t* pConversion, const dnnLayout_t from, const dnnLayout_t to); +SPEC_PREFIX dnnError_t dnnConversionCreate<float>( + dnnPrimitive_t* 
pConversion, const dnnLayout_t from, const dnnLayout_t to) + {return dnnConversionCreate_F32(pConversion, from, to);} +SPEC_PREFIX dnnError_t dnnConversionCreate( + dnnPrimitive_t* pConversion, const dnnLayout_t from, const dnnLayout_t to) + {return dnnConversionCreate_F64(pConversion, from, to);} + + +TEMPLATE_PREFIX dnnError_t dnnConversionExecute( + dnnPrimitive_t conversion, void *from, void *to); +SPEC_PREFIX dnnError_t dnnConversionExecute( + dnnPrimitive_t conversion, void *from, void *to) + {return dnnConversionExecute_F32(conversion, from, to);} +SPEC_PREFIX dnnError_t dnnConversionExecute( + dnnPrimitive_t conversion, void *from, void *to) + {return dnnConversionExecute_F64(conversion, from, to);} + + +TEMPLATE_PREFIX dnnError_t dnnConvolutionCreateForward( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type); +SPEC_PREFIX dnnError_t dnnConvolutionCreateForward( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) + {return dnnConvolutionCreateForward_F32( + pConvolution, + attributes, + algorithm, + dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} + +SPEC_PREFIX dnnError_t dnnConvolutionCreateForward( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) + {return dnnConvolutionCreateForward_F64( + pConvolution, + attributes, 
+ algorithm, + dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} + + +TEMPLATE_PREFIX dnnError_t dnnConvolutionCreateForwardBias( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type); +SPEC_PREFIX dnnError_t dnnConvolutionCreateForwardBias( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) + {return dnnConvolutionCreateForwardBias_F32( + pConvolution, + attributes, + algorithm, + dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} +SPEC_PREFIX dnnError_t dnnConvolutionCreateForwardBias( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) + {return dnnConvolutionCreateForwardBias_F64( + pConvolution, + attributes, + algorithm, + dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} + + +TEMPLATE_PREFIX dnnError_t dnnConvolutionCreateBackwardData( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type); +SPEC_PREFIX dnnError_t dnnConvolutionCreateBackwardData( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + 
dnnAlgorithm_t algorithm, + size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) +{return dnnConvolutionCreateBackwardData_F32( + pConvolution, + attributes, + algorithm, + dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} +SPEC_PREFIX dnnError_t dnnConvolutionCreateBackwardData( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) +{return dnnConvolutionCreateBackwardData_F64( + pConvolution, + attributes, + algorithm, + dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} + +TEMPLATE_PREFIX dnnError_t dnnConvolutionCreateBackwardFilter( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type); +SPEC_PREFIX dnnError_t dnnConvolutionCreateBackwardFilter( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) +{return dnnConvolutionCreateBackwardFilter_F32( + pConvolution, + attributes, + algorithm, + dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} +SPEC_PREFIX dnnError_t dnnConvolutionCreateBackwardFilter( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t dimension, const 
size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) +{return dnnConvolutionCreateBackwardFilter_F64( + pConvolution, + attributes, + algorithm, + dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} + +TEMPLATE_PREFIX dnnError_t dnnConvolutionCreateBackwardBias( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t dimension, const size_t dstSize[]); +SPEC_PREFIX dnnError_t dnnConvolutionCreateBackwardBias( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t dimension, const size_t dstSize[]) +{return dnnConvolutionCreateBackwardBias_F32( + pConvolution, + attributes, + algorithm, + dimension, dstSize);} +SPEC_PREFIX dnnError_t dnnConvolutionCreateBackwardBias( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t dimension, const size_t dstSize[]) +{return dnnConvolutionCreateBackwardBias_F64( + pConvolution, + attributes, + algorithm, + dimension, dstSize);} + +TEMPLATE_PREFIX dnnError_t dnnGroupsConvolutionCreateForward( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t groups, size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type); +SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateForward( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t groups, size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) +{return dnnGroupsConvolutionCreateForward_F32( + pConvolution, + attributes, + 
algorithm, + groups, dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} +SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateForward( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t groups, size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) +{return dnnGroupsConvolutionCreateForward_F64( + pConvolution, + attributes, + algorithm, + groups, dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} + +TEMPLATE_PREFIX dnnError_t dnnGroupsConvolutionCreateForwardBias( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t groups, size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type); +SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateForwardBias( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t groups, size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) +{return dnnGroupsConvolutionCreateForwardBias_F32( + pConvolution, + attributes, + algorithm, + groups, dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} +SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateForwardBias( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t groups, size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) +{return 
dnnGroupsConvolutionCreateForwardBias_F64( + pConvolution, + attributes, + algorithm, + groups, dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} + +TEMPLATE_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardData( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t groups, size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type); +SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardData( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t groups, size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) +{return dnnGroupsConvolutionCreateBackwardData_F32( + pConvolution, + attributes, + algorithm, + groups, dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} +SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardData( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t groups, size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) +{return dnnGroupsConvolutionCreateBackwardData_F64( + pConvolution, + attributes, + algorithm, + groups, dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} + + +TEMPLATE_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardFilter( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t groups, size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t 
convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type); +SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardFilter( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t groups, size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) +{return dnnGroupsConvolutionCreateBackwardFilter_F32( + pConvolution, + attributes, + algorithm, + groups, dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} +SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardFilter( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t groups, size_t dimension, const size_t srcSize[], const size_t dstSize[], const size_t filterSize[], + const size_t convolutionStrides[], const int inputOffset[], const dnnBorder_t border_type) +{return dnnGroupsConvolutionCreateBackwardFilter_F64( + pConvolution, + attributes, + algorithm, + groups, dimension, srcSize, dstSize, filterSize, + convolutionStrides, inputOffset, border_type);} + +TEMPLATE_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardBias( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t groups, size_t dimension, const size_t dstSize[]); +SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardBias( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t groups, size_t dimension, const size_t dstSize[]) +{return dnnGroupsConvolutionCreateBackwardBias_F32( + pConvolution, + attributes, + algorithm, + groups, dimension, dstSize);} +SPEC_PREFIX dnnError_t dnnGroupsConvolutionCreateBackwardBias( + dnnPrimitive_t* pConvolution, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t algorithm, + size_t groups, size_t dimension, const 
size_t dstSize[]) +{return dnnGroupsConvolutionCreateBackwardBias_F64( + pConvolution, + attributes, + algorithm, + groups, dimension, dstSize);} + +TEMPLATE_PREFIX dnnError_t dnnReLUCreateForward( + dnnPrimitive_t* pRelu, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t dataLayout, float negativeSlope); +SPEC_PREFIX dnnError_t dnnReLUCreateForward( + dnnPrimitive_t* pRelu, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t dataLayout, float negativeSlope) +{return dnnReLUCreateForward_F32( + pRelu, + attributes, + dataLayout, negativeSlope);} +SPEC_PREFIX dnnError_t dnnReLUCreateForward( + dnnPrimitive_t* pRelu, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t dataLayout, float negativeSlope) +{return dnnReLUCreateForward_F64( + pRelu, + attributes, + dataLayout, negativeSlope);} + +TEMPLATE_PREFIX dnnError_t dnnReLUCreateBackward( + dnnPrimitive_t* pRelu, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t diffLayout, const dnnLayout_t dataLayout, float negativeSlope); +SPEC_PREFIX dnnError_t dnnReLUCreateBackward( + dnnPrimitive_t* pRelu, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t diffLayout, const dnnLayout_t dataLayout, float negativeSlope) +{return dnnReLUCreateBackward_F32( + pRelu, + attributes, + diffLayout, dataLayout, negativeSlope);} +SPEC_PREFIX dnnError_t dnnReLUCreateBackward( + dnnPrimitive_t* pRelu, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t diffLayout, const dnnLayout_t dataLayout, float negativeSlope) +{return dnnReLUCreateBackward_F64( + pRelu, + attributes, + diffLayout, dataLayout, negativeSlope);} + +TEMPLATE_PREFIX dnnError_t dnnLRNCreateForward( + dnnPrimitive_t* pLrn, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t dataLayout, size_t kernel_size, float alpha, float beta, float k); +SPEC_PREFIX dnnError_t dnnLRNCreateForward( + dnnPrimitive_t* pLrn, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t dataLayout, size_t kernel_size, float alpha, float beta, 
float k) +{return dnnLRNCreateForward_F32( + pLrn, + attributes, + dataLayout, kernel_size, alpha, beta, k);} +SPEC_PREFIX dnnError_t dnnLRNCreateForward( + dnnPrimitive_t* pLrn, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t dataLayout, size_t kernel_size, float alpha, float beta, float k) +{return dnnLRNCreateForward_F64( + pLrn, + attributes, + dataLayout, kernel_size, alpha, beta, k);} + + +TEMPLATE_PREFIX dnnError_t dnnLRNCreateBackward( + dnnPrimitive_t* pLrn, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t diffLayout, const dnnLayout_t dataLayout, size_t kernel_size, float alpha, float beta, float k); +SPEC_PREFIX dnnError_t dnnLRNCreateBackward( + dnnPrimitive_t* pLrn, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t diffLayout, const dnnLayout_t dataLayout, size_t kernel_size, float alpha, float beta, float k) +{return dnnLRNCreateBackward_F32( + pLrn, + attributes, + diffLayout, dataLayout, kernel_size, alpha, beta, k);} +SPEC_PREFIX dnnError_t dnnLRNCreateBackward( + dnnPrimitive_t* pLrn, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t diffLayout, const dnnLayout_t dataLayout, size_t kernel_size, float alpha, float beta, float k) +{return dnnLRNCreateBackward_F64( + pLrn, + attributes, + diffLayout, dataLayout, kernel_size, alpha, beta, k);} + + +TEMPLATE_PREFIX dnnError_t dnnPoolingCreateForward( + dnnPrimitive_t* pPooling, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t op, + const dnnLayout_t srcLayout, + const size_t kernelSize[], const size_t kernelStride[], + const int inputOffset[], const dnnBorder_t border_type); +SPEC_PREFIX dnnError_t dnnPoolingCreateForward( + dnnPrimitive_t* pPooling, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t op, + const dnnLayout_t srcLayout, + const size_t kernelSize[], const size_t kernelStride[], + const int inputOffset[], const dnnBorder_t border_type) +{return dnnPoolingCreateForward_F32( + pPooling, + attributes, + op, + srcLayout, + kernelSize, 
kernelStride, + inputOffset, border_type);} +SPEC_PREFIX dnnError_t dnnPoolingCreateForward( + dnnPrimitive_t* pPooling, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t op, + const dnnLayout_t srcLayout, + const size_t kernelSize[], const size_t kernelStride[], + const int inputOffset[], const dnnBorder_t border_type) +{return dnnPoolingCreateForward_F64( + pPooling, + attributes, + op, + srcLayout, + kernelSize, kernelStride, + inputOffset, border_type);} + + +TEMPLATE_PREFIX dnnError_t dnnPoolingCreateBackward( + dnnPrimitive_t* pPooling, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t op, + const dnnLayout_t srcLayout, + const size_t kernelSize[], const size_t kernelStride[], + const int inputOffset[], const dnnBorder_t border_type); +SPEC_PREFIX dnnError_t dnnPoolingCreateBackward( + dnnPrimitive_t* pPooling, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t op, + const dnnLayout_t srcLayout, + const size_t kernelSize[], const size_t kernelStride[], + const int inputOffset[], const dnnBorder_t border_type) +{return dnnPoolingCreateBackward_F32( + pPooling, + attributes, + op, + srcLayout, + kernelSize, kernelStride, + inputOffset,border_type);} +SPEC_PREFIX dnnError_t dnnPoolingCreateBackward( + dnnPrimitive_t* pPooling, + dnnPrimitiveAttributes_t attributes, + dnnAlgorithm_t op, + const dnnLayout_t srcLayout, + const size_t kernelSize[], const size_t kernelStride[], + const int inputOffset[], const dnnBorder_t border_type) +{return dnnPoolingCreateBackward_F64( + pPooling, + attributes, + op, + srcLayout, + kernelSize, kernelStride, + inputOffset,border_type);} + +TEMPLATE_PREFIX dnnError_t dnnConcatCreate( + dnnPrimitive_t *pConcat, + dnnPrimitiveAttributes_t attributes, + const size_t N, + dnnLayout_t src[]); +SPEC_PREFIX dnnError_t dnnConcatCreate( + dnnPrimitive_t *pConcat, + dnnPrimitiveAttributes_t attributes, + const size_t N, + dnnLayout_t src[]) +{return dnnConcatCreate_F32( + pConcat, + attributes, + N, + src);} +SPEC_PREFIX 
dnnError_t dnnConcatCreate( + dnnPrimitive_t *pConcat, + dnnPrimitiveAttributes_t attributes, + const size_t N, + dnnLayout_t src[]) +{return dnnConcatCreate_F64( + pConcat, + attributes, + N, + src);} + + +TEMPLATE_PREFIX dnnError_t dnnSplitCreate( + dnnPrimitive_t *pSplit, + dnnPrimitiveAttributes_t attributes, + const size_t N, + dnnLayout_t src, + size_t dst[]); +SPEC_PREFIX dnnError_t dnnSplitCreate( + dnnPrimitive_t *pSplit, + dnnPrimitiveAttributes_t attributes, + const size_t N, + dnnLayout_t src, + size_t dst[]) +{return dnnSplitCreate_F32( + pSplit, + attributes, + N, + src, + dst);} +SPEC_PREFIX dnnError_t dnnSplitCreate( + dnnPrimitive_t *pSplit, + dnnPrimitiveAttributes_t attributes, + const size_t N, + dnnLayout_t src, + size_t dst[]) +{return dnnSplitCreate_F64( + pSplit, + attributes, + N, + src, + dst);} + +TEMPLATE_PREFIX dnnError_t dnnSumCreate( + dnnPrimitive_t *pSum, + dnnPrimitiveAttributes_t attributes, + const size_t nSummands, dnnLayout_t layout, Dtype *coefficients); +SPEC_PREFIX dnnError_t dnnSumCreate( + dnnPrimitive_t *pSum, + dnnPrimitiveAttributes_t attributes, + const size_t nSummands, dnnLayout_t layout, float *coefficients) +{return dnnSumCreate_F32( + pSum, + attributes, + nSummands, + layout, coefficients);} +SPEC_PREFIX dnnError_t dnnSumCreate( + dnnPrimitive_t *pSum, + dnnPrimitiveAttributes_t attributes, + const size_t nSummands, dnnLayout_t layout, double *coefficients) +{return dnnSumCreate_F64( + pSum, + attributes, + nSummands, + layout, coefficients);} + +TEMPLATE_PREFIX dnnError_t dnnBatchNormalizationCreateForward( + dnnPrimitive_t* pBatchNormalization, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t dataLayout, float eps, unsigned int flags); +SPEC_PREFIX dnnError_t dnnBatchNormalizationCreateForward( + dnnPrimitive_t* pBatchNormalization, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t dataLayout, float eps, unsigned int flags) +{return dnnBatchNormalizationCreateForward_v2_F32( + 
pBatchNormalization, + attributes, + dataLayout, eps, flags); } +SPEC_PREFIX dnnError_t dnnBatchNormalizationCreateForward( + dnnPrimitive_t* pBatchNormalization, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t dataLayout, float eps, unsigned int flags) +{return dnnBatchNormalizationCreateForward_v2_F64( + pBatchNormalization, + attributes, + dataLayout, eps, flags); } + +TEMPLATE_PREFIX dnnError_t dnnBatchNormalizationCreateBackward( + dnnPrimitive_t* pBatchNormalization, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t dataLayout, float eps, unsigned int flags); +SPEC_PREFIX dnnError_t dnnBatchNormalizationCreateBackward( + dnnPrimitive_t* pBatchNormalization, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t dataLayout, float eps, unsigned int flags) +{return dnnBatchNormalizationCreateBackward_v2_F32( + pBatchNormalization, + attributes, + dataLayout, eps, flags); } +SPEC_PREFIX dnnError_t dnnBatchNormalizationCreateBackward( + dnnPrimitive_t* pBatchNormalization, + dnnPrimitiveAttributes_t attributes, + const dnnLayout_t dataLayout, float eps, unsigned int flags) +{return dnnBatchNormalizationCreateBackward_v2_F64( + pBatchNormalization, + attributes, + dataLayout, eps, flags); } +#endif diff --git a/matlab/+caffe/+test/test_io.m b/matlab/+caffe/+test/test_io.m index 2c34bd1e938..193dad34cf4 100644 --- a/matlab/+caffe/+test/test_io.m +++ b/matlab/+caffe/+test/test_io.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. 
+% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+% classdef test_io < matlab.unittest.TestCase methods (Test) function test_read_write_mean(self) diff --git a/matlab/+caffe/+test/test_net.m b/matlab/+caffe/+test/test_net.m index 3dabe84d111..1ea2231dbf2 100644 --- a/matlab/+caffe/+test/test_net.m +++ b/matlab/+caffe/+test/test_net.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% classdef test_net < matlab.unittest.TestCase properties diff --git a/matlab/+caffe/+test/test_solver.m b/matlab/+caffe/+test/test_solver.m index 739258b0e85..546b51b0a5e 100644 --- a/matlab/+caffe/+test/test_solver.m +++ b/matlab/+caffe/+test/test_solver.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. 
+% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% classdef test_solver < matlab.unittest.TestCase properties diff --git a/matlab/+caffe/Blob.m b/matlab/+caffe/Blob.m index e39f7ee3f20..55f5d4dcdce 100644 --- a/matlab/+caffe/Blob.m +++ b/matlab/+caffe/Blob.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. 
+% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% classdef Blob < handle % Wrapper class of caffe::Blob in matlab diff --git a/matlab/+caffe/Layer.m b/matlab/+caffe/Layer.m index 4c2023101a5..00848c84d7f 100644 --- a/matlab/+caffe/Layer.m +++ b/matlab/+caffe/Layer.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. 
+% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% classdef Layer < handle % Wrapper class of caffe::Layer in matlab diff --git a/matlab/+caffe/Net.m b/matlab/+caffe/Net.m index e6295bba1a4..f72733f75c1 100644 --- a/matlab/+caffe/Net.m +++ b/matlab/+caffe/Net.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. 
+% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+% classdef Net < handle % Wrapper class of caffe::Net in matlab diff --git a/matlab/+caffe/Solver.m b/matlab/+caffe/Solver.m index f8bdc4e22b2..7dcea74a1a5 100644 --- a/matlab/+caffe/Solver.m +++ b/matlab/+caffe/Solver.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% classdef Solver < handle % Wrapper class of caffe::SGDSolver in matlab diff --git a/matlab/+caffe/get_net.m b/matlab/+caffe/get_net.m index 4b5683eb82e..8fe7a5335f0 100644 --- a/matlab/+caffe/get_net.m +++ b/matlab/+caffe/get_net.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. 
+% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% function net = get_net(varargin) % net = get_net(model_file, phase_name) or % net = get_net(model_file, weights_file, phase_name) diff --git a/matlab/+caffe/get_solver.m b/matlab/+caffe/get_solver.m index 74d576eb31b..2773fc5b825 100644 --- a/matlab/+caffe/get_solver.m +++ b/matlab/+caffe/get_solver.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. 
+% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% function solver = get_solver(solver_file) % solver = get_solver(solver_file) % Construct a Solver object from solver_file diff --git a/matlab/+caffe/io.m b/matlab/+caffe/io.m index 4b072fecdab..9945e847389 100644 --- a/matlab/+caffe/io.m +++ b/matlab/+caffe/io.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. 
+% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+% classdef io % a class for input and output functions diff --git a/matlab/+caffe/private/CHECK.m b/matlab/+caffe/private/CHECK.m index 21706549cfa..5cf783b4f64 100644 --- a/matlab/+caffe/private/CHECK.m +++ b/matlab/+caffe/private/CHECK.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% function CHECK(expr, error_msg) if ~expr diff --git a/matlab/+caffe/private/CHECK_FILE_EXIST.m b/matlab/+caffe/private/CHECK_FILE_EXIST.m index 8c80fb8094f..4f179ac0a98 100644 --- a/matlab/+caffe/private/CHECK_FILE_EXIST.m +++ b/matlab/+caffe/private/CHECK_FILE_EXIST.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. 
+% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% function CHECK_FILE_EXIST(filename) if exist(filename, 'file') == 0 diff --git a/matlab/+caffe/private/caffe_.cpp b/matlab/+caffe/private/caffe_.cpp index 1b1b2bff861..de1f0c85fbf 100644 --- a/matlab/+caffe/private/caffe_.cpp +++ b/matlab/+caffe/private/caffe_.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + // // caffe_.cpp provides wrappers of the caffe::Solver class, caffe::Net class, // caffe::Layer class and caffe::Blob class and some caffe::Caffe functions, diff --git a/matlab/+caffe/private/is_valid_handle.m b/matlab/+caffe/private/is_valid_handle.m index a0648ecdf61..286347aaa06 100644 --- a/matlab/+caffe/private/is_valid_handle.m +++ b/matlab/+caffe/private/is_valid_handle.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. 
+% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+% function valid = is_valid_handle(hObj) % valid = is_valid_handle(hObj) or is_valid_handle('get_new_init_key') % Check if a handle is valid (has the right data type and init_key matches) diff --git a/matlab/+caffe/reset_all.m b/matlab/+caffe/reset_all.m index a8b33dee8d5..3f700a783c5 100644 --- a/matlab/+caffe/reset_all.m +++ b/matlab/+caffe/reset_all.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% function reset_all() % reset_all() % clear all solvers and stand-alone nets and reset Caffe to initial status diff --git a/matlab/+caffe/run_tests.m b/matlab/+caffe/run_tests.m index 6dbf6b23151..5467205ac0c 100644 --- a/matlab/+caffe/run_tests.m +++ b/matlab/+caffe/run_tests.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. 
+% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% function results = run_tests() % results = run_tests() % run all tests in this caffe matlab wrapper package diff --git a/matlab/+caffe/set_device.m b/matlab/+caffe/set_device.m index f94068cbe98..3c154c86249 100644 --- a/matlab/+caffe/set_device.m +++ b/matlab/+caffe/set_device.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. 
+% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% function set_device(device_id) % set_device(device_id) % set Caffe's GPU device ID diff --git a/matlab/+caffe/set_mode_cpu.m b/matlab/+caffe/set_mode_cpu.m index a87e0e2852b..99b9563fc12 100644 --- a/matlab/+caffe/set_mode_cpu.m +++ b/matlab/+caffe/set_mode_cpu.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. 
+% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+% function set_mode_cpu() % set_mode_cpu() % set Caffe to CPU mode diff --git a/matlab/+caffe/set_mode_gpu.m b/matlab/+caffe/set_mode_gpu.m index 78e5f6773a1..6967f2cbfc9 100644 --- a/matlab/+caffe/set_mode_gpu.m +++ b/matlab/+caffe/set_mode_gpu.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% function set_mode_gpu() % set_mode_gpu() % set Caffe to GPU mode diff --git a/matlab/+caffe/version.m b/matlab/+caffe/version.m index 61cae4f76dc..b530c1b7f18 100644 --- a/matlab/+caffe/version.m +++ b/matlab/+caffe/version.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. 
+% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% function version_str = version() % version() % show Caffe's version. diff --git a/matlab/demo/classification_demo.m b/matlab/demo/classification_demo.m index 2b60332970b..a1f45222559 100644 --- a/matlab/demo/classification_demo.m +++ b/matlab/demo/classification_demo.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. 
+% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% function [scores, maxlabel] = classification_demo(im, use_gpu) % [scores, maxlabel] = classification_demo(im, use_gpu) % diff --git a/matlab/hdf5creation/demo.m b/matlab/hdf5creation/demo.m index 4f9f7b5a454..1cf1e3aa0aa 100644 --- a/matlab/hdf5creation/demo.m +++ b/matlab/hdf5creation/demo.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. 
+% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+% %% WRITING TO HDF5 filename='trial.h5'; diff --git a/matlab/hdf5creation/store2hdf5.m b/matlab/hdf5creation/store2hdf5.m index 4e8c81d9de8..2c1f23c11eb 100644 --- a/matlab/hdf5creation/store2hdf5.m +++ b/matlab/hdf5creation/store2hdf5.m @@ -1,3 +1,39 @@ +% +% All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +% +% All contributions by the University of California: +% Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +% All rights reserved. +% +% All other contributions: +% Copyright (c) 2014, 2015, the respective contributors +% All rights reserved. +% For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +% +% +% Redistribution and use in source and binary forms, with or without +% modification, are permitted provided that the following conditions are met: +% +% * Redistributions of source code must retain the above copyright notice, +% this list of conditions and the following disclaimer. +% * Redistributions in binary form must reproduce the above copyright +% notice, this list of conditions and the following disclaimer in the +% documentation and/or other materials provided with the distribution. +% * Neither the name of Intel Corporation nor the names of its contributors +% may be used to endorse or promote products derived from this software +% without specific prior written permission. +% +% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +% DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +% FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +% DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +% SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +% CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +% OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +% function [curr_dat_sz, curr_lab_sz] = store2hdf5(filename, data, labels, create, startloc, chunksz) % *data* is W*H*C*N matrix of images should be normalized (e.g. to lie between 0 and 1) beforehand % *label* is D*N matrix of labels (D labels per sample) diff --git a/mkldnn.commit b/mkldnn.commit new file mode 100644 index 00000000000..7eb0167ed19 --- /dev/null +++ b/mkldnn.commit @@ -0,0 +1 @@ +171572a205c71f5bbb08657de5660c9d06cf2d8f diff --git a/models/bvlc_alexnet/solver.prototxt b/models/bvlc_alexnet/solver.prototxt index 129265e679b..0a9f5b186a7 100644 --- a/models/bvlc_alexnet/solver.prototxt +++ b/models/bvlc_alexnet/solver.prototxt @@ -11,4 +11,4 @@ momentum: 0.9 weight_decay: 0.0005 snapshot: 10000 snapshot_prefix: "models/bvlc_alexnet/caffe_alexnet_train" -solver_mode: GPU +solver_mode: CPU diff --git a/models/bvlc_alexnet/solver_client.prototxt b/models/bvlc_alexnet/solver_client.prototxt new file mode 100644 index 00000000000..af6f2bee873 --- /dev/null +++ b/models/bvlc_alexnet/solver_client.prototxt @@ -0,0 +1,10 @@ +train_net: "models/bvlc_alexnet/train_val_client.prototxt" +base_lr: 0.01 +lr_policy: "step" +gamma: 0.1 +stepsize: 100000 +display: 1 +max_iter: 450000 +momentum: 0.9 +weight_decay: 0.0005 +solver_mode: CPU diff --git a/models/bvlc_alexnet/solver_param_server.prototxt b/models/bvlc_alexnet/solver_param_server.prototxt new file mode 100644 index 00000000000..2d341043870 --- /dev/null +++ 
b/models/bvlc_alexnet/solver_param_server.prototxt @@ -0,0 +1,14 @@ +net: "models/bvlc_alexnet/train_val_param_server.prototxt" +test_iter: 1000 +test_interval: 1000 +base_lr: 0.01 +lr_policy: "step" +gamma: 0.1 +stepsize: 100000 +display: 20 +max_iter: 450000 +momentum: 0.9 +weight_decay: 0.0005 +snapshot: 10000 +snapshot_prefix: "models/bvlc_alexnet/caffe_alexnet_train" +solver_mode: CPU diff --git a/models/bvlc_alexnet/train_val_client.prototxt b/models/bvlc_alexnet/train_val_client.prototxt new file mode 100644 index 00000000000..6f4b5529830 --- /dev/null +++ b/models/bvlc_alexnet/train_val_client.prototxt @@ -0,0 +1,367 @@ +name: "AlexNet" +layer { + name: "data" + type: "ImageData" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + image_data_param { + source: "data/ilsvrc12/train.txt" + batch_size: 64 + new_width: 227 + new_height: 227 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 11 + stride: 4 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "conv1" + top: "conv1" +} +layer { + name: "norm1" + type: "LRN" + bottom: "conv1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "norm1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 2 + kernel_size: 5 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + 
value: 0.1 + } + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "conv2" + top: "conv2" +} +layer { + name: "norm2" + type: "LRN" + bottom: "conv2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "norm2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "conv3" + type: "Convolution" + bottom: "pool2" + top: "conv3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "conv3" + top: "conv3" +} +layer { + name: "conv4" + type: "Convolution" + bottom: "conv3" + top: "conv4" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu4" + type: "ReLU" + bottom: "conv4" + top: "conv4" +} +layer { + name: "conv5" + type: "Convolution" + bottom: "conv4" + top: "conv5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu5" + type: "ReLU" + bottom: "conv5" + top: "conv5" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "fc6" + type: "InnerProduct" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + 
num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.005 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "drop6" + type: "Dropout" + bottom: "fc6" + top: "fc6" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc7" + type: "InnerProduct" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.005 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + name: "drop7" + type: "Dropout" + bottom: "fc7" + top: "fc7" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc8" + type: "InnerProduct" + bottom: "fc7" + top: "fc8" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "fc8" + bottom: "label" + top: "accuracy" + include { + phase: TEST + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "fc8" + bottom: "label" + top: "loss" +} diff --git a/models/bvlc_alexnet/train_val_param_server.prototxt b/models/bvlc_alexnet/train_val_param_server.prototxt new file mode 100644 index 00000000000..eb0f15c9d0a --- /dev/null +++ b/models/bvlc_alexnet/train_val_param_server.prototxt @@ -0,0 +1,388 @@ +name: "AlexNet" +layer { + name: "data" + type: "ImageData" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + image_data_param { + source: "data/ilsvrc12/train.txt" + batch_size: 64 + new_width: 227 + new_height: 227 + } +} +layer { + name: "data" + type: 
"ImageData" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mirror: false + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + image_data_param { + source: "data/ilsvrc12/val.txt" + batch_size: 50 + new_width: 227 + new_height: 227 + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 11 + stride: 4 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "conv1" + top: "conv1" +} +layer { + name: "norm1" + type: "LRN" + bottom: "conv1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "norm1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 2 + kernel_size: 5 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "conv2" + top: "conv2" +} +layer { + name: "norm2" + type: "LRN" + bottom: "conv2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "norm2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "conv3" + type: "Convolution" + bottom: "pool2" + top: "conv3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: 
"constant" + value: 0 + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "conv3" + top: "conv3" +} +layer { + name: "conv4" + type: "Convolution" + bottom: "conv3" + top: "conv4" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu4" + type: "ReLU" + bottom: "conv4" + top: "conv4" +} +layer { + name: "conv5" + type: "Convolution" + bottom: "conv4" + top: "conv5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu5" + type: "ReLU" + bottom: "conv5" + top: "conv5" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "fc6" + type: "InnerProduct" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.005 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "drop6" + type: "Dropout" + bottom: "fc6" + top: "fc6" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc7" + type: "InnerProduct" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.005 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7" + 
top: "fc7" +} +layer { + name: "drop7" + type: "Dropout" + bottom: "fc7" + top: "fc7" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc8" + type: "InnerProduct" + bottom: "fc7" + top: "fc8" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "fc8" + bottom: "label" + top: "accuracy" + include { + phase: TEST + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "fc8" + bottom: "label" + top: "loss" +} diff --git a/models/bvlc_googlenet/solver_client.prototxt b/models/bvlc_googlenet/solver_client.prototxt new file mode 100644 index 00000000000..850cf90fbcf --- /dev/null +++ b/models/bvlc_googlenet/solver_client.prototxt @@ -0,0 +1,12 @@ +train_net: "models/bvlc_googlenet/train_val_client.prototxt" +display: 40 +average_loss: 40 +base_lr: 0.06 +lr_policy: "poly" +power: 0.5 +max_iter: 91000 +momentum: 0.9 +weight_decay: 0.0002 +solver_mode: CPU +snapshot: 10000 +snapshot_prefix: "multinode_googlenet_91k" diff --git a/models/bvlc_googlenet/solver_param_server.prototxt b/models/bvlc_googlenet/solver_param_server.prototxt new file mode 100644 index 00000000000..f0617f48ac9 --- /dev/null +++ b/models/bvlc_googlenet/solver_param_server.prototxt @@ -0,0 +1,15 @@ +net: "models/bvlc_googlenet/train_val_param_server.prototxt" +test_iter: 1000 +test_interval: 4000 +test_initialization: false +display: 40 +average_loss: 40 +base_lr: 0.08 +lr_policy: "poly" +power: 0.5 +max_iter: 2400000 +momentum: 0.9 +weight_decay: 0.0002 +snapshot: 40000 +snapshot_prefix: "models/bvlc_googlenet/bvlc_googlenet_multinode" +solver_mode: CPU diff --git a/models/bvlc_googlenet/train_val_client.prototxt b/models/bvlc_googlenet/train_val_client.prototxt new file mode 100644 index 00000000000..358e7126556 --- /dev/null +++ 
b/models/bvlc_googlenet/train_val_client.prototxt @@ -0,0 +1,2433 @@ +name: "GoogleNet" +layer { + name: "data" + type: "ImageData" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + image_data_param { + source: "data/ilsvrc12/train.txt" + batch_size: 512 + shuffle: true + } +} +layer { + name: "data" + type: "ImageData" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + image_data_param { + source: "data/ilsvrc12/val.txt" + batch_size: 50 + new_width: 256 + new_height: 256 + } +} +layer { + name: "conv1/7x7_s2" + type: "Convolution" + bottom: "data" + top: "conv1/7x7_s2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 3 + kernel_size: 7 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "conv1/relu_7x7" + type: "ReLU" + bottom: "conv1/7x7_s2" + top: "conv1/7x7_s2" +} +layer { + name: "pool1/3x3_s2" + type: "Pooling" + bottom: "conv1/7x7_s2" + top: "pool1/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "pool1/norm1" + type: "LRN" + bottom: "pool1/3x3_s2" + top: "pool1/norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv2/3x3_reduce" + type: "Convolution" + bottom: "pool1/norm1" + top: "conv2/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "conv2/relu_3x3_reduce" + type: "ReLU" + bottom: "conv2/3x3_reduce" + top: "conv2/3x3_reduce" +} +layer { + name: "conv2/3x3" + type: "Convolution" + 
bottom: "conv2/3x3_reduce" + top: "conv2/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "conv2/relu_3x3" + type: "ReLU" + bottom: "conv2/3x3" + top: "conv2/3x3" +} +layer { + name: "conv2/norm2" + type: "LRN" + bottom: "conv2/3x3" + top: "conv2/norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "pool2/3x3_s2" + type: "Pooling" + bottom: "conv2/norm2" + top: "pool2/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_3a/1x1" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_1x1" + type: "ReLU" + bottom: "inception_3a/1x1" + top: "inception_3a/1x1" +} +layer { + name: "inception_3a/3x3_reduce" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_3a/3x3_reduce" + top: "inception_3a/3x3_reduce" +} +layer { + name: "inception_3a/3x3" + type: "Convolution" + bottom: "inception_3a/3x3_reduce" + top: "inception_3a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + 
type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_3x3" + type: "ReLU" + bottom: "inception_3a/3x3" + top: "inception_3a/3x3" +} +layer { + name: "inception_3a/5x5_reduce" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_3a/5x5_reduce" + top: "inception_3a/5x5_reduce" +} +layer { + name: "inception_3a/5x5" + type: "Convolution" + bottom: "inception_3a/5x5_reduce" + top: "inception_3a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_5x5" + type: "ReLU" + bottom: "inception_3a/5x5" + top: "inception_3a/5x5" +} +layer { + name: "inception_3a/pool" + type: "Pooling" + bottom: "pool2/3x3_s2" + top: "inception_3a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_3a/pool_proj" + type: "Convolution" + bottom: "inception_3a/pool" + top: "inception_3a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_pool_proj" + type: "ReLU" + bottom: "inception_3a/pool_proj" + top: "inception_3a/pool_proj" +} +layer { + name: "inception_3a/output" + type: "Concat" + bottom: "inception_3a/1x1" + bottom: "inception_3a/3x3" + bottom: "inception_3a/5x5" + bottom: "inception_3a/pool_proj" + 
top: "inception_3a/output" +} +layer { + name: "inception_3b/1x1" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_1x1" + type: "ReLU" + bottom: "inception_3b/1x1" + top: "inception_3b/1x1" +} +layer { + name: "inception_3b/3x3_reduce" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_3b/3x3_reduce" + top: "inception_3b/3x3_reduce" +} +layer { + name: "inception_3b/3x3" + type: "Convolution" + bottom: "inception_3b/3x3_reduce" + top: "inception_3b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_3x3" + type: "ReLU" + bottom: "inception_3b/3x3" + top: "inception_3b/3x3" +} +layer { + name: "inception_3b/5x5_reduce" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_3b/5x5_reduce" + top: "inception_3b/5x5_reduce" +} 
+layer { + name: "inception_3b/5x5" + type: "Convolution" + bottom: "inception_3b/5x5_reduce" + top: "inception_3b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_5x5" + type: "ReLU" + bottom: "inception_3b/5x5" + top: "inception_3b/5x5" +} +layer { + name: "inception_3b/pool" + type: "Pooling" + bottom: "inception_3a/output" + top: "inception_3b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_3b/pool_proj" + type: "Convolution" + bottom: "inception_3b/pool" + top: "inception_3b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_pool_proj" + type: "ReLU" + bottom: "inception_3b/pool_proj" + top: "inception_3b/pool_proj" +} +layer { + name: "inception_3b/output" + type: "Concat" + bottom: "inception_3b/1x1" + bottom: "inception_3b/3x3" + bottom: "inception_3b/5x5" + bottom: "inception_3b/pool_proj" + top: "inception_3b/output" +} +layer { + name: "pool3/3x3_s2" + type: "Pooling" + bottom: "inception_3b/output" + top: "pool3/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_4a/1x1" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_1x1" + type: "ReLU" + bottom: "inception_4a/1x1" + top: 
"inception_4a/1x1" +} +layer { + name: "inception_4a/3x3_reduce" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4a/3x3_reduce" + top: "inception_4a/3x3_reduce" +} +layer { + name: "inception_4a/3x3" + type: "Convolution" + bottom: "inception_4a/3x3_reduce" + top: "inception_4a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 208 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_3x3" + type: "ReLU" + bottom: "inception_4a/3x3" + top: "inception_4a/3x3" +} +layer { + name: "inception_4a/5x5_reduce" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4a/5x5_reduce" + top: "inception_4a/5x5_reduce" +} +layer { + name: "inception_4a/5x5" + type: "Convolution" + bottom: "inception_4a/5x5_reduce" + top: "inception_4a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 48 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_5x5" + type: "ReLU" + bottom: "inception_4a/5x5" + top: "inception_4a/5x5" +} +layer { + 
name: "inception_4a/pool" + type: "Pooling" + bottom: "pool3/3x3_s2" + top: "inception_4a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4a/pool_proj" + type: "Convolution" + bottom: "inception_4a/pool" + top: "inception_4a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_pool_proj" + type: "ReLU" + bottom: "inception_4a/pool_proj" + top: "inception_4a/pool_proj" +} +layer { + name: "inception_4a/output" + type: "Concat" + bottom: "inception_4a/1x1" + bottom: "inception_4a/3x3" + bottom: "inception_4a/5x5" + bottom: "inception_4a/pool_proj" + top: "inception_4a/output" +} +layer { + name: "loss1/ave_pool" + type: "Pooling" + bottom: "inception_4a/output" + top: "loss1/ave_pool" + pooling_param { + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + name: "loss1/conv" + type: "Convolution" + bottom: "loss1/ave_pool" + top: "loss1/conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "loss1/relu_conv" + type: "ReLU" + bottom: "loss1/conv" + top: "loss1/conv" +} +layer { + name: "loss1/fc" + type: "InnerProduct" + bottom: "loss1/conv" + top: "loss1/fc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "loss1/relu_fc" + type: "ReLU" + bottom: "loss1/fc" + top: "loss1/fc" +} +layer { + name: "loss1/drop_fc" + type: "Dropout" + bottom: "loss1/fc" + top: "loss1/fc" + 
dropout_param { + dropout_ratio: 0.7 + } +} +layer { + name: "loss1/classifier" + type: "InnerProduct" + bottom: "loss1/fc" + top: "loss1/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss1/loss" + type: "SoftmaxWithLoss" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/loss1" + loss_weight: 0.3 +} +layer { + name: "loss1/top-1" + type: "Accuracy" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/top-1" + include { + phase: TEST + } +} +layer { + name: "loss1/top-5" + type: "Accuracy" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} +layer { + name: "inception_4b/1x1" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_1x1" + type: "ReLU" + bottom: "inception_4b/1x1" + top: "inception_4b/1x1" +} +layer { + name: "inception_4b/3x3_reduce" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 112 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4b/3x3_reduce" + top: "inception_4b/3x3_reduce" +} +layer { + name: "inception_4b/3x3" + type: "Convolution" + bottom: "inception_4b/3x3_reduce" + top: "inception_4b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + 
param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 224 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_3x3" + type: "ReLU" + bottom: "inception_4b/3x3" + top: "inception_4b/3x3" +} +layer { + name: "inception_4b/5x5_reduce" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4b/5x5_reduce" + top: "inception_4b/5x5_reduce" +} +layer { + name: "inception_4b/5x5" + type: "Convolution" + bottom: "inception_4b/5x5_reduce" + top: "inception_4b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_5x5" + type: "ReLU" + bottom: "inception_4b/5x5" + top: "inception_4b/5x5" +} +layer { + name: "inception_4b/pool" + type: "Pooling" + bottom: "inception_4a/output" + top: "inception_4b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4b/pool_proj" + type: "Convolution" + bottom: "inception_4b/pool" + top: "inception_4b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_pool_proj" + type: "ReLU" + bottom: "inception_4b/pool_proj" + top: "inception_4b/pool_proj" +} 
+layer { + name: "inception_4b/output" + type: "Concat" + bottom: "inception_4b/1x1" + bottom: "inception_4b/3x3" + bottom: "inception_4b/5x5" + bottom: "inception_4b/pool_proj" + top: "inception_4b/output" +} +layer { + name: "inception_4c/1x1" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_1x1" + type: "ReLU" + bottom: "inception_4c/1x1" + top: "inception_4c/1x1" +} +layer { + name: "inception_4c/3x3_reduce" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4c/3x3_reduce" + top: "inception_4c/3x3_reduce" +} +layer { + name: "inception_4c/3x3" + type: "Convolution" + bottom: "inception_4c/3x3_reduce" + top: "inception_4c/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_3x3" + type: "ReLU" + bottom: "inception_4c/3x3" + top: "inception_4c/3x3" +} +layer { + name: "inception_4c/5x5_reduce" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + 
type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4c/5x5_reduce" + top: "inception_4c/5x5_reduce" +} +layer { + name: "inception_4c/5x5" + type: "Convolution" + bottom: "inception_4c/5x5_reduce" + top: "inception_4c/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_5x5" + type: "ReLU" + bottom: "inception_4c/5x5" + top: "inception_4c/5x5" +} +layer { + name: "inception_4c/pool" + type: "Pooling" + bottom: "inception_4b/output" + top: "inception_4c/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4c/pool_proj" + type: "Convolution" + bottom: "inception_4c/pool" + top: "inception_4c/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_pool_proj" + type: "ReLU" + bottom: "inception_4c/pool_proj" + top: "inception_4c/pool_proj" +} +layer { + name: "inception_4c/output" + type: "Concat" + bottom: "inception_4c/1x1" + bottom: "inception_4c/3x3" + bottom: "inception_4c/5x5" + bottom: "inception_4c/pool_proj" + top: "inception_4c/output" +} +layer { + name: "inception_4d/1x1" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 112 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_1x1" + type: "ReLU" + bottom: "inception_4d/1x1" + 
top: "inception_4d/1x1" +} +layer { + name: "inception_4d/3x3_reduce" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 144 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4d/3x3_reduce" + top: "inception_4d/3x3_reduce" +} +layer { + name: "inception_4d/3x3" + type: "Convolution" + bottom: "inception_4d/3x3_reduce" + top: "inception_4d/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 288 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_3x3" + type: "ReLU" + bottom: "inception_4d/3x3" + top: "inception_4d/3x3" +} +layer { + name: "inception_4d/5x5_reduce" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4d/5x5_reduce" + top: "inception_4d/5x5_reduce" +} +layer { + name: "inception_4d/5x5" + type: "Convolution" + bottom: "inception_4d/5x5_reduce" + top: "inception_4d/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_5x5" + type: "ReLU" + bottom: "inception_4d/5x5" + top: 
"inception_4d/5x5" +} +layer { + name: "inception_4d/pool" + type: "Pooling" + bottom: "inception_4c/output" + top: "inception_4d/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4d/pool_proj" + type: "Convolution" + bottom: "inception_4d/pool" + top: "inception_4d/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_pool_proj" + type: "ReLU" + bottom: "inception_4d/pool_proj" + top: "inception_4d/pool_proj" +} +layer { + name: "inception_4d/output" + type: "Concat" + bottom: "inception_4d/1x1" + bottom: "inception_4d/3x3" + bottom: "inception_4d/5x5" + bottom: "inception_4d/pool_proj" + top: "inception_4d/output" +} +layer { + name: "loss2/ave_pool" + type: "Pooling" + bottom: "inception_4d/output" + top: "loss2/ave_pool" + pooling_param { + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + name: "loss2/conv" + type: "Convolution" + bottom: "loss2/ave_pool" + top: "loss2/conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "loss2/relu_conv" + type: "ReLU" + bottom: "loss2/conv" + top: "loss2/conv" +} +layer { + name: "loss2/fc" + type: "InnerProduct" + bottom: "loss2/conv" + top: "loss2/fc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "loss2/relu_fc" + type: "ReLU" + bottom: "loss2/fc" + top: "loss2/fc" +} +layer { + name: "loss2/drop_fc" + type: "Dropout" + bottom: 
"loss2/fc" + top: "loss2/fc" + dropout_param { + dropout_ratio: 0.7 + } +} +layer { + name: "loss2/classifier" + type: "InnerProduct" + bottom: "loss2/fc" + top: "loss2/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss2/loss" + type: "SoftmaxWithLoss" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/loss1" + loss_weight: 0.3 +} +layer { + name: "loss2/top-1" + type: "Accuracy" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/top-1" + include { + phase: TEST + } +} +layer { + name: "loss2/top-5" + type: "Accuracy" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} +layer { + name: "inception_4e/1x1" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_1x1" + type: "ReLU" + bottom: "inception_4e/1x1" + top: "inception_4e/1x1" +} +layer { + name: "inception_4e/3x3_reduce" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4e/3x3_reduce" + top: "inception_4e/3x3_reduce" +} +layer { + name: "inception_4e/3x3" + type: "Convolution" + bottom: "inception_4e/3x3_reduce" + top: "inception_4e/3x3" + param { + 
lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 320 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_3x3" + type: "ReLU" + bottom: "inception_4e/3x3" + top: "inception_4e/3x3" +} +layer { + name: "inception_4e/5x5_reduce" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4e/5x5_reduce" + top: "inception_4e/5x5_reduce" +} +layer { + name: "inception_4e/5x5" + type: "Convolution" + bottom: "inception_4e/5x5_reduce" + top: "inception_4e/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_5x5" + type: "ReLU" + bottom: "inception_4e/5x5" + top: "inception_4e/5x5" +} +layer { + name: "inception_4e/pool" + type: "Pooling" + bottom: "inception_4d/output" + top: "inception_4e/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4e/pool_proj" + type: "Convolution" + bottom: "inception_4e/pool" + top: "inception_4e/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_pool_proj" + type: "ReLU" + bottom: "inception_4e/pool_proj" + 
top: "inception_4e/pool_proj" +} +layer { + name: "inception_4e/output" + type: "Concat" + bottom: "inception_4e/1x1" + bottom: "inception_4e/3x3" + bottom: "inception_4e/5x5" + bottom: "inception_4e/pool_proj" + top: "inception_4e/output" +} +layer { + name: "pool4/3x3_s2" + type: "Pooling" + bottom: "inception_4e/output" + top: "pool4/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_5a/1x1" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_1x1" + type: "ReLU" + bottom: "inception_5a/1x1" + top: "inception_5a/1x1" +} +layer { + name: "inception_5a/3x3_reduce" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_5a/3x3_reduce" + top: "inception_5a/3x3_reduce" +} +layer { + name: "inception_5a/3x3" + type: "Convolution" + bottom: "inception_5a/3x3_reduce" + top: "inception_5a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 320 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_3x3" + type: "ReLU" + bottom: "inception_5a/3x3" + top: "inception_5a/3x3" +} +layer { + name: "inception_5a/5x5_reduce" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/5x5_reduce" + param { + 
lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_5a/5x5_reduce" + top: "inception_5a/5x5_reduce" +} +layer { + name: "inception_5a/5x5" + type: "Convolution" + bottom: "inception_5a/5x5_reduce" + top: "inception_5a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_5x5" + type: "ReLU" + bottom: "inception_5a/5x5" + top: "inception_5a/5x5" +} +layer { + name: "inception_5a/pool" + type: "Pooling" + bottom: "pool4/3x3_s2" + top: "inception_5a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_5a/pool_proj" + type: "Convolution" + bottom: "inception_5a/pool" + top: "inception_5a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_pool_proj" + type: "ReLU" + bottom: "inception_5a/pool_proj" + top: "inception_5a/pool_proj" +} +layer { + name: "inception_5a/output" + type: "Concat" + bottom: "inception_5a/1x1" + bottom: "inception_5a/3x3" + bottom: "inception_5a/5x5" + bottom: "inception_5a/pool_proj" + top: "inception_5a/output" +} +layer { + name: "inception_5b/1x1" + type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + kernel_size: 1 + 
weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_1x1" + type: "ReLU" + bottom: "inception_5b/1x1" + top: "inception_5b/1x1" +} +layer { + name: "inception_5b/3x3_reduce" + type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_5b/3x3_reduce" + top: "inception_5b/3x3_reduce" +} +layer { + name: "inception_5b/3x3" + type: "Convolution" + bottom: "inception_5b/3x3_reduce" + top: "inception_5b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_3x3" + type: "ReLU" + bottom: "inception_5b/3x3" + top: "inception_5b/3x3" +} +layer { + name: "inception_5b/5x5_reduce" + type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 48 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_5b/5x5_reduce" + top: "inception_5b/5x5_reduce" +} +layer { + name: "inception_5b/5x5" + type: "Convolution" + bottom: "inception_5b/5x5_reduce" + top: "inception_5b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + 
type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_5x5" + type: "ReLU" + bottom: "inception_5b/5x5" + top: "inception_5b/5x5" +} +layer { + name: "inception_5b/pool" + type: "Pooling" + bottom: "inception_5a/output" + top: "inception_5b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_5b/pool_proj" + type: "Convolution" + bottom: "inception_5b/pool" + top: "inception_5b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_pool_proj" + type: "ReLU" + bottom: "inception_5b/pool_proj" + top: "inception_5b/pool_proj" +} +layer { + name: "inception_5b/output" + type: "Concat" + bottom: "inception_5b/1x1" + bottom: "inception_5b/3x3" + bottom: "inception_5b/5x5" + bottom: "inception_5b/pool_proj" + top: "inception_5b/output" +} +layer { + name: "pool5/7x7_s1" + type: "Pooling" + bottom: "inception_5b/output" + top: "pool5/7x7_s1" + pooling_param { + pool: AVE + kernel_size: 7 + stride: 1 + } +} +layer { + name: "pool5/drop_7x7_s1" + type: "Dropout" + bottom: "pool5/7x7_s1" + top: "pool5/7x7_s1" + dropout_param { + dropout_ratio: 0.4 + } +} +layer { + name: "loss3/classifier" + type: "InnerProduct" + bottom: "pool5/7x7_s1" + top: "loss3/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss3/loss3" + type: "SoftmaxWithLoss" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/loss3" + loss_weight: 1 +} +layer { + name: "loss3/top-1" + type: "Accuracy" + bottom: "loss3/classifier" + bottom: "label" + top: 
"loss3/top-1" + include { + phase: TEST + } +} +layer { + name: "loss3/top-5" + type: "Accuracy" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} diff --git a/models/bvlc_googlenet/train_val_param_server.prototxt b/models/bvlc_googlenet/train_val_param_server.prototxt new file mode 100644 index 00000000000..b5f1a5f3c4b --- /dev/null +++ b/models/bvlc_googlenet/train_val_param_server.prototxt @@ -0,0 +1,2432 @@ +name: "GoogleNet" +layer { + name: "data" + type: "ImageData" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + image_data_param { + source: "data/ilsvrc12/train_shuffled.txt" + batch_size: 512 + } +} +layer { + name: "data" + type: "ImageData" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mirror: false + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + image_data_param { + source: "data/ilsvrc12/val.txt" + batch_size: 50 + new_width: 224 + new_height: 224 + } +} +layer { + name: "conv1/7x7_s2" + type: "Convolution" + bottom: "data" + top: "conv1/7x7_s2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 3 + kernel_size: 7 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "conv1/relu_7x7" + type: "ReLU" + bottom: "conv1/7x7_s2" + top: "conv1/7x7_s2" +} +layer { + name: "pool1/3x3_s2" + type: "Pooling" + bottom: "conv1/7x7_s2" + top: "pool1/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "pool1/norm1" + type: "LRN" + bottom: "pool1/3x3_s2" + top: "pool1/norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv2/3x3_reduce" + type: "Convolution" + bottom: "pool1/norm1" + top: 
"conv2/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "conv2/relu_3x3_reduce" + type: "ReLU" + bottom: "conv2/3x3_reduce" + top: "conv2/3x3_reduce" +} +layer { + name: "conv2/3x3" + type: "Convolution" + bottom: "conv2/3x3_reduce" + top: "conv2/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "conv2/relu_3x3" + type: "ReLU" + bottom: "conv2/3x3" + top: "conv2/3x3" +} +layer { + name: "conv2/norm2" + type: "LRN" + bottom: "conv2/3x3" + top: "conv2/norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "pool2/3x3_s2" + type: "Pooling" + bottom: "conv2/norm2" + top: "pool2/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_3a/1x1" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_1x1" + type: "ReLU" + bottom: "inception_3a/1x1" + top: "inception_3a/1x1" +} +layer { + name: "inception_3a/3x3_reduce" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: 
"inception_3a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_3a/3x3_reduce" + top: "inception_3a/3x3_reduce" +} +layer { + name: "inception_3a/3x3" + type: "Convolution" + bottom: "inception_3a/3x3_reduce" + top: "inception_3a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_3x3" + type: "ReLU" + bottom: "inception_3a/3x3" + top: "inception_3a/3x3" +} +layer { + name: "inception_3a/5x5_reduce" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_3a/5x5_reduce" + top: "inception_3a/5x5_reduce" +} +layer { + name: "inception_3a/5x5" + type: "Convolution" + bottom: "inception_3a/5x5_reduce" + top: "inception_3a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_5x5" + type: "ReLU" + bottom: "inception_3a/5x5" + top: "inception_3a/5x5" +} +layer { + name: "inception_3a/pool" + type: "Pooling" + bottom: "pool2/3x3_s2" + top: "inception_3a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_3a/pool_proj" + type: "Convolution" + bottom: "inception_3a/pool" + top: "inception_3a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { 
+ num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_pool_proj" + type: "ReLU" + bottom: "inception_3a/pool_proj" + top: "inception_3a/pool_proj" +} +layer { + name: "inception_3a/output" + type: "Concat" + bottom: "inception_3a/1x1" + bottom: "inception_3a/3x3" + bottom: "inception_3a/5x5" + bottom: "inception_3a/pool_proj" + top: "inception_3a/output" +} +layer { + name: "inception_3b/1x1" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_1x1" + type: "ReLU" + bottom: "inception_3b/1x1" + top: "inception_3b/1x1" +} +layer { + name: "inception_3b/3x3_reduce" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_3b/3x3_reduce" + top: "inception_3b/3x3_reduce" +} +layer { + name: "inception_3b/3x3" + type: "Convolution" + bottom: "inception_3b/3x3_reduce" + top: "inception_3b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_3x3" + type: "ReLU" + bottom: "inception_3b/3x3" + top: "inception_3b/3x3" +} +layer { + name: "inception_3b/5x5_reduce" + type: "Convolution" 
+ bottom: "inception_3a/output" + top: "inception_3b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_3b/5x5_reduce" + top: "inception_3b/5x5_reduce" +} +layer { + name: "inception_3b/5x5" + type: "Convolution" + bottom: "inception_3b/5x5_reduce" + top: "inception_3b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_5x5" + type: "ReLU" + bottom: "inception_3b/5x5" + top: "inception_3b/5x5" +} +layer { + name: "inception_3b/pool" + type: "Pooling" + bottom: "inception_3a/output" + top: "inception_3b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_3b/pool_proj" + type: "Convolution" + bottom: "inception_3b/pool" + top: "inception_3b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_pool_proj" + type: "ReLU" + bottom: "inception_3b/pool_proj" + top: "inception_3b/pool_proj" +} +layer { + name: "inception_3b/output" + type: "Concat" + bottom: "inception_3b/1x1" + bottom: "inception_3b/3x3" + bottom: "inception_3b/5x5" + bottom: "inception_3b/pool_proj" + top: "inception_3b/output" +} +layer { + name: "pool3/3x3_s2" + type: "Pooling" + bottom: "inception_3b/output" + top: "pool3/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { 
+ name: "inception_4a/1x1" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_1x1" + type: "ReLU" + bottom: "inception_4a/1x1" + top: "inception_4a/1x1" +} +layer { + name: "inception_4a/3x3_reduce" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4a/3x3_reduce" + top: "inception_4a/3x3_reduce" +} +layer { + name: "inception_4a/3x3" + type: "Convolution" + bottom: "inception_4a/3x3_reduce" + top: "inception_4a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 208 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_3x3" + type: "ReLU" + bottom: "inception_4a/3x3" + top: "inception_4a/3x3" +} +layer { + name: "inception_4a/5x5_reduce" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4a/5x5_reduce" + top: "inception_4a/5x5_reduce" +} +layer { + name: "inception_4a/5x5" + type: "Convolution" + 
bottom: "inception_4a/5x5_reduce" + top: "inception_4a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 48 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_5x5" + type: "ReLU" + bottom: "inception_4a/5x5" + top: "inception_4a/5x5" +} +layer { + name: "inception_4a/pool" + type: "Pooling" + bottom: "pool3/3x3_s2" + top: "inception_4a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4a/pool_proj" + type: "Convolution" + bottom: "inception_4a/pool" + top: "inception_4a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_pool_proj" + type: "ReLU" + bottom: "inception_4a/pool_proj" + top: "inception_4a/pool_proj" +} +layer { + name: "inception_4a/output" + type: "Concat" + bottom: "inception_4a/1x1" + bottom: "inception_4a/3x3" + bottom: "inception_4a/5x5" + bottom: "inception_4a/pool_proj" + top: "inception_4a/output" +} +layer { + name: "loss1/ave_pool" + type: "Pooling" + bottom: "inception_4a/output" + top: "loss1/ave_pool" + pooling_param { + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + name: "loss1/conv" + type: "Convolution" + bottom: "loss1/ave_pool" + top: "loss1/conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "loss1/relu_conv" + type: "ReLU" + bottom: "loss1/conv" + top: "loss1/conv" +} +layer { + name: "loss1/fc" + type: "InnerProduct" + bottom: "loss1/conv" + 
top: "loss1/fc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "loss1/relu_fc" + type: "ReLU" + bottom: "loss1/fc" + top: "loss1/fc" +} +layer { + name: "loss1/drop_fc" + type: "Dropout" + bottom: "loss1/fc" + top: "loss1/fc" + dropout_param { + dropout_ratio: 0.7 + } +} +layer { + name: "loss1/classifier" + type: "InnerProduct" + bottom: "loss1/fc" + top: "loss1/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss1/loss" + type: "SoftmaxWithLoss" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/loss1" + loss_weight: 0.3 +} +layer { + name: "loss1/top-1" + type: "Accuracy" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/top-1" + include { + phase: TEST + } +} +layer { + name: "loss1/top-5" + type: "Accuracy" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} +layer { + name: "inception_4b/1x1" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_1x1" + type: "ReLU" + bottom: "inception_4b/1x1" + top: "inception_4b/1x1" +} +layer { + name: "inception_4b/3x3_reduce" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + 
num_output: 112 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4b/3x3_reduce" + top: "inception_4b/3x3_reduce" +} +layer { + name: "inception_4b/3x3" + type: "Convolution" + bottom: "inception_4b/3x3_reduce" + top: "inception_4b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 224 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_3x3" + type: "ReLU" + bottom: "inception_4b/3x3" + top: "inception_4b/3x3" +} +layer { + name: "inception_4b/5x5_reduce" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4b/5x5_reduce" + top: "inception_4b/5x5_reduce" +} +layer { + name: "inception_4b/5x5" + type: "Convolution" + bottom: "inception_4b/5x5_reduce" + top: "inception_4b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_5x5" + type: "ReLU" + bottom: "inception_4b/5x5" + top: "inception_4b/5x5" +} +layer { + name: "inception_4b/pool" + type: "Pooling" + bottom: "inception_4a/output" + top: "inception_4b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4b/pool_proj" + type: "Convolution" + 
bottom: "inception_4b/pool" + top: "inception_4b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_pool_proj" + type: "ReLU" + bottom: "inception_4b/pool_proj" + top: "inception_4b/pool_proj" +} +layer { + name: "inception_4b/output" + type: "Concat" + bottom: "inception_4b/1x1" + bottom: "inception_4b/3x3" + bottom: "inception_4b/5x5" + bottom: "inception_4b/pool_proj" + top: "inception_4b/output" +} +layer { + name: "inception_4c/1x1" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_1x1" + type: "ReLU" + bottom: "inception_4c/1x1" + top: "inception_4c/1x1" +} +layer { + name: "inception_4c/3x3_reduce" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4c/3x3_reduce" + top: "inception_4c/3x3_reduce" +} +layer { + name: "inception_4c/3x3" + type: "Convolution" + bottom: "inception_4c/3x3_reduce" + top: "inception_4c/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + 
name: "inception_4c/relu_3x3" + type: "ReLU" + bottom: "inception_4c/3x3" + top: "inception_4c/3x3" +} +layer { + name: "inception_4c/5x5_reduce" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4c/5x5_reduce" + top: "inception_4c/5x5_reduce" +} +layer { + name: "inception_4c/5x5" + type: "Convolution" + bottom: "inception_4c/5x5_reduce" + top: "inception_4c/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_5x5" + type: "ReLU" + bottom: "inception_4c/5x5" + top: "inception_4c/5x5" +} +layer { + name: "inception_4c/pool" + type: "Pooling" + bottom: "inception_4b/output" + top: "inception_4c/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4c/pool_proj" + type: "Convolution" + bottom: "inception_4c/pool" + top: "inception_4c/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_pool_proj" + type: "ReLU" + bottom: "inception_4c/pool_proj" + top: "inception_4c/pool_proj" +} +layer { + name: "inception_4c/output" + type: "Concat" + bottom: "inception_4c/1x1" + bottom: "inception_4c/3x3" + bottom: "inception_4c/5x5" + bottom: "inception_4c/pool_proj" + top: "inception_4c/output" +} +layer { + 
name: "inception_4d/1x1" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 112 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_1x1" + type: "ReLU" + bottom: "inception_4d/1x1" + top: "inception_4d/1x1" +} +layer { + name: "inception_4d/3x3_reduce" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 144 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4d/3x3_reduce" + top: "inception_4d/3x3_reduce" +} +layer { + name: "inception_4d/3x3" + type: "Convolution" + bottom: "inception_4d/3x3_reduce" + top: "inception_4d/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 288 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_3x3" + type: "ReLU" + bottom: "inception_4d/3x3" + top: "inception_4d/3x3" +} +layer { + name: "inception_4d/5x5_reduce" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4d/5x5_reduce" + top: "inception_4d/5x5_reduce" +} +layer { + name: "inception_4d/5x5" + type: 
"Convolution" + bottom: "inception_4d/5x5_reduce" + top: "inception_4d/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_5x5" + type: "ReLU" + bottom: "inception_4d/5x5" + top: "inception_4d/5x5" +} +layer { + name: "inception_4d/pool" + type: "Pooling" + bottom: "inception_4c/output" + top: "inception_4d/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4d/pool_proj" + type: "Convolution" + bottom: "inception_4d/pool" + top: "inception_4d/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_pool_proj" + type: "ReLU" + bottom: "inception_4d/pool_proj" + top: "inception_4d/pool_proj" +} +layer { + name: "inception_4d/output" + type: "Concat" + bottom: "inception_4d/1x1" + bottom: "inception_4d/3x3" + bottom: "inception_4d/5x5" + bottom: "inception_4d/pool_proj" + top: "inception_4d/output" +} +layer { + name: "loss2/ave_pool" + type: "Pooling" + bottom: "inception_4d/output" + top: "loss2/ave_pool" + pooling_param { + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + name: "loss2/conv" + type: "Convolution" + bottom: "loss2/ave_pool" + top: "loss2/conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "loss2/relu_conv" + type: "ReLU" + bottom: "loss2/conv" + top: "loss2/conv" +} +layer { + name: "loss2/fc" + type: "InnerProduct" + 
bottom: "loss2/conv" + top: "loss2/fc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "loss2/relu_fc" + type: "ReLU" + bottom: "loss2/fc" + top: "loss2/fc" +} +layer { + name: "loss2/drop_fc" + type: "Dropout" + bottom: "loss2/fc" + top: "loss2/fc" + dropout_param { + dropout_ratio: 0.7 + } +} +layer { + name: "loss2/classifier" + type: "InnerProduct" + bottom: "loss2/fc" + top: "loss2/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss2/loss" + type: "SoftmaxWithLoss" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/loss1" + loss_weight: 0.3 +} +layer { + name: "loss2/top-1" + type: "Accuracy" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/top-1" + include { + phase: TEST + } +} +layer { + name: "loss2/top-5" + type: "Accuracy" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} +layer { + name: "inception_4e/1x1" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_1x1" + type: "ReLU" + bottom: "inception_4e/1x1" + top: "inception_4e/1x1" +} +layer { + name: "inception_4e/3x3_reduce" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4e/3x3_reduce" + top: "inception_4e/3x3_reduce" +} +layer { + name: "inception_4e/3x3" + type: "Convolution" + bottom: "inception_4e/3x3_reduce" + top: "inception_4e/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 320 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_3x3" + type: "ReLU" + bottom: "inception_4e/3x3" + top: "inception_4e/3x3" +} +layer { + name: "inception_4e/5x5_reduce" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4e/5x5_reduce" + top: "inception_4e/5x5_reduce" +} +layer { + name: "inception_4e/5x5" + type: "Convolution" + bottom: "inception_4e/5x5_reduce" + top: "inception_4e/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_5x5" + type: "ReLU" + bottom: "inception_4e/5x5" + top: "inception_4e/5x5" +} +layer { + name: "inception_4e/pool" + type: "Pooling" + bottom: "inception_4d/output" + top: "inception_4e/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4e/pool_proj" + type: 
"Convolution" + bottom: "inception_4e/pool" + top: "inception_4e/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_pool_proj" + type: "ReLU" + bottom: "inception_4e/pool_proj" + top: "inception_4e/pool_proj" +} +layer { + name: "inception_4e/output" + type: "Concat" + bottom: "inception_4e/1x1" + bottom: "inception_4e/3x3" + bottom: "inception_4e/5x5" + bottom: "inception_4e/pool_proj" + top: "inception_4e/output" +} +layer { + name: "pool4/3x3_s2" + type: "Pooling" + bottom: "inception_4e/output" + top: "pool4/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_5a/1x1" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_1x1" + type: "ReLU" + bottom: "inception_5a/1x1" + top: "inception_5a/1x1" +} +layer { + name: "inception_5a/3x3_reduce" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_5a/3x3_reduce" + top: "inception_5a/3x3_reduce" +} +layer { + name: "inception_5a/3x3" + type: "Convolution" + bottom: "inception_5a/3x3_reduce" + top: "inception_5a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 320 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_3x3" + type: "ReLU" + bottom: "inception_5a/3x3" + top: "inception_5a/3x3" +} +layer { + name: "inception_5a/5x5_reduce" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_5a/5x5_reduce" + top: "inception_5a/5x5_reduce" +} +layer { + name: "inception_5a/5x5" + type: "Convolution" + bottom: "inception_5a/5x5_reduce" + top: "inception_5a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_5x5" + type: "ReLU" + bottom: "inception_5a/5x5" + top: "inception_5a/5x5" +} +layer { + name: "inception_5a/pool" + type: "Pooling" + bottom: "pool4/3x3_s2" + top: "inception_5a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_5a/pool_proj" + type: "Convolution" + bottom: "inception_5a/pool" + top: "inception_5a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_pool_proj" + type: "ReLU" + bottom: "inception_5a/pool_proj" + top: "inception_5a/pool_proj" +} +layer { + name: "inception_5a/output" + type: "Concat" + 
bottom: "inception_5a/1x1" + bottom: "inception_5a/3x3" + bottom: "inception_5a/5x5" + bottom: "inception_5a/pool_proj" + top: "inception_5a/output" +} +layer { + name: "inception_5b/1x1" + type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_1x1" + type: "ReLU" + bottom: "inception_5b/1x1" + top: "inception_5b/1x1" +} +layer { + name: "inception_5b/3x3_reduce" + type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_5b/3x3_reduce" + top: "inception_5b/3x3_reduce" +} +layer { + name: "inception_5b/3x3" + type: "Convolution" + bottom: "inception_5b/3x3_reduce" + top: "inception_5b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_3x3" + type: "ReLU" + bottom: "inception_5b/3x3" + top: "inception_5b/3x3" +} +layer { + name: "inception_5b/5x5_reduce" + type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 48 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: 
"inception_5b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_5b/5x5_reduce" + top: "inception_5b/5x5_reduce" +} +layer { + name: "inception_5b/5x5" + type: "Convolution" + bottom: "inception_5b/5x5_reduce" + top: "inception_5b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_5x5" + type: "ReLU" + bottom: "inception_5b/5x5" + top: "inception_5b/5x5" +} +layer { + name: "inception_5b/pool" + type: "Pooling" + bottom: "inception_5a/output" + top: "inception_5b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_5b/pool_proj" + type: "Convolution" + bottom: "inception_5b/pool" + top: "inception_5b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_pool_proj" + type: "ReLU" + bottom: "inception_5b/pool_proj" + top: "inception_5b/pool_proj" +} +layer { + name: "inception_5b/output" + type: "Concat" + bottom: "inception_5b/1x1" + bottom: "inception_5b/3x3" + bottom: "inception_5b/5x5" + bottom: "inception_5b/pool_proj" + top: "inception_5b/output" +} +layer { + name: "pool5/7x7_s1" + type: "Pooling" + bottom: "inception_5b/output" + top: "pool5/7x7_s1" + pooling_param { + pool: AVE + kernel_size: 7 + stride: 1 + } +} +layer { + name: "pool5/drop_7x7_s1" + type: "Dropout" + bottom: "pool5/7x7_s1" + top: "pool5/7x7_s1" + dropout_param { + dropout_ratio: 0.4 + } +} +layer { + name: "loss3/classifier" + type: "InnerProduct" + bottom: "pool5/7x7_s1" + top: "loss3/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + 
decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss3/loss3" + type: "SoftmaxWithLoss" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/loss3" + loss_weight: 1 +} +layer { + name: "loss3/top-1" + type: "Accuracy" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/top-1" + include { + phase: TEST + } +} +layer { + name: "loss3/top-5" + type: "Accuracy" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} diff --git a/models/bvlc_reference_caffenet/solver.prototxt b/models/bvlc_reference_caffenet/solver.prototxt index af1315ba2ac..66f4481e1fa 100644 --- a/models/bvlc_reference_caffenet/solver.prototxt +++ b/models/bvlc_reference_caffenet/solver.prototxt @@ -11,4 +11,4 @@ momentum: 0.9 weight_decay: 0.0005 snapshot: 10000 snapshot_prefix: "models/bvlc_reference_caffenet/caffenet_train" -solver_mode: GPU +solver_mode: CPU diff --git a/models/default_googlenet_v2/solver.prototxt b/models/default_googlenet_v2/solver.prototxt new file mode 100644 index 00000000000..f7ad4084713 --- /dev/null +++ b/models/default_googlenet_v2/solver.prototxt @@ -0,0 +1,16 @@ +net: "models/default_googlenet_v2/train_val.prototxt" +test_iter: 1000 +test_interval: 10000 +test_initialization: false +display: 40 +average_loss: 40 +base_lr: 0.045 +lr_policy: "step" +stepsize: 6400 +gamma: 0.96 +max_iter: 1200000 +momentum: 0.9 +weight_decay: 0.0002 +snapshot: 50000 +snapshot_prefix: "models/default_googlenet_v2/default_googlenet_v2" +solver_mode: CPU diff --git a/models/default_googlenet_v2/train_val.prototxt b/models/default_googlenet_v2/train_val.prototxt new file mode 100644 index 00000000000..b4cc799cd42 --- /dev/null +++ b/models/default_googlenet_v2/train_val.prototxt @@ -0,0 +1,4044 @@ +# Inception Network (GoogLeNet Batch Normalization Network) +name: 
"InceptionNetwork" +### Training Set +layer { + top: "data" + top: "label" + name: "data" + type: "Data" + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 32 + backend: LMDB +# shuffle: true + } + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 224 +# mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + mean_value: 104 + mean_value: 117 + mean_value: 123 + } +} +### Validation Set +layer { + top: "data" + top: "label" + name: "data" + type: "Data" + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 32 + backend: LMDB + } + include { + phase: TEST + } + transform_param { + mirror: false + crop_size: 224 +# mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + mean_value: 104 + mean_value: 117 + mean_value: 123 + } +} + +layer { + bottom: "data" + top: "conv1/7x7_s2" + name: "conv1/7x7_s2" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 3 + kernel_size: 7 + stride: 2 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "conv1/7x7_s2" + name: "conv1/7x7_s2/bn" + top: "conv1/7x7_s2/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "conv1/7x7_s2/bn" + top: "conv1/7x7_s2/bn/sc" + name: "conv1/7x7_s2/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "conv1/7x7_s2/bn/sc" + top: "conv1/7x7_s2/bn/sc" + name: "conv1/7x7_s2/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv1/7x7_s2/bn/sc" + top: "pool1/3x3_s2" + name: "pool1/3x3_s2" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + bottom: "pool1/3x3_s2" + top: "conv2/3x3_reduce" + name: "conv2/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + 
bottom: "conv2/3x3_reduce" + name: "conv2/3x3_reduce/bn" + top: "conv2/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "conv2/3x3_reduce/bn" + top: "conv2/3x3_reduce/bn/sc" + name: "conv2/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "conv2/3x3_reduce/bn/sc" + top: "conv2/3x3_reduce/bn/sc" + name: "conv2/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv2/3x3_reduce/bn/sc" + top: "conv2/3x3" + name: "conv2/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "conv2/3x3" + name: "conv2/3x3/bn" + top: "conv2/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "conv2/3x3/bn" + top: "conv2/3x3/bn/sc" + name: "conv2/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "conv2/3x3/bn/sc" + top: "conv2/3x3/bn/sc" + name: "conv2/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv2/3x3/bn/sc" + top: "pool2/3x3_s2" + name: "pool2/3x3_s2" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + bottom: "pool2/3x3_s2" + top: "inception_3a/1x1" + name: "inception_3a/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/1x1" + name: "inception_3a/1x1/bn" + top: "inception_3a/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/1x1/bn" + top: "inception_3a/1x1/bn/sc" + name: "inception_3a/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/1x1/bn/sc" + top: "inception_3a/1x1/bn/sc" + 
name: "inception_3a/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "pool2/3x3_s2" + top: "inception_3a/3x3_reduce" + name: "inception_3a/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/3x3_reduce" + name: "inception_3a/3x3_reduce/bn" + top: "inception_3a/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/3x3_reduce/bn" + top: "inception_3a/3x3_reduce/bn/sc" + name: "inception_3a/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/3x3_reduce/bn/sc" + top: "inception_3a/3x3_reduce/bn/sc" + name: "inception_3a/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/3x3_reduce/bn/sc" + top: "inception_3a/3x3" + name: "inception_3a/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/3x3" + name: "inception_3a/3x3/bn" + top: "inception_3a/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/3x3/bn" + top: "inception_3a/3x3/bn/sc" + name: "inception_3a/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/3x3/bn/sc" + top: "inception_3a/3x3/bn/sc" + name: "inception_3a/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "pool2/3x3_s2" + top: "inception_3a/double3x3_reduce" + name: "inception_3a/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: 
false + } +} +layer { + bottom: "inception_3a/double3x3_reduce" + name: "inception_3a/double3x3_reduce/bn" + top: "inception_3a/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/double3x3_reduce/bn" + top: "inception_3a/double3x3_reduce/bn/sc" + name: "inception_3a/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/double3x3_reduce/bn/sc" + top: "inception_3a/double3x3_reduce/bn/sc" + name: "inception_3a/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/double3x3_reduce/bn/sc" + top: "inception_3a/double3x3a" + name: "inception_3a/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/double3x3a" + name: "inception_3a/double3x3a/bn" + top: "inception_3a/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/double3x3a/bn" + top: "inception_3a/double3x3a/bn/sc" + name: "inception_3a/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/double3x3a/bn/sc" + top: "inception_3a/double3x3a/bn/sc" + name: "inception_3a/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/double3x3a/bn/sc" + top: "inception_3a/double3x3b" + name: "inception_3a/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/double3x3b" + name: "inception_3a/double3x3b/bn" + top: "inception_3a/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/double3x3b/bn" + 
top: "inception_3a/double3x3b/bn/sc" + name: "inception_3a/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/double3x3b/bn/sc" + top: "inception_3a/double3x3b/bn/sc" + name: "inception_3a/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "pool2/3x3_s2" + top: "inception_3a/pool" + name: "inception_3a/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_3a/pool" + top: "inception_3a/pool_proj" + name: "inception_3a/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 32 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/pool_proj" + name: "inception_3a/pool_proj/bn" + top: "inception_3a/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/pool_proj/bn" + top: "inception_3a/pool_proj/bn/sc" + name: "inception_3a/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/pool_proj/bn/sc" + top: "inception_3a/pool_proj/bn/sc" + name: "inception_3a/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/1x1/bn/sc" + bottom: "inception_3a/3x3/bn/sc" + bottom: "inception_3a/double3x3b/bn/sc" + bottom: "inception_3a/pool_proj/bn/sc" + top: "inception_3a/output" + name: "inception_3a/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_3a/output" + top: "inception_3b/1x1" + name: "inception_3b/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/1x1" + name: "inception_3b/1x1/bn" + top: "inception_3b/1x1/bn" + type: 
"BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/1x1/bn" + top: "inception_3b/1x1/bn/sc" + name: "inception_3b/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/1x1/bn/sc" + top: "inception_3b/1x1/bn/sc" + name: "inception_3b/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/output" + top: "inception_3b/3x3_reduce" + name: "inception_3b/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/3x3_reduce" + name: "inception_3b/3x3_reduce/bn" + top: "inception_3b/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/3x3_reduce/bn" + top: "inception_3b/3x3_reduce/bn/sc" + name: "inception_3b/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/3x3_reduce/bn/sc" + top: "inception_3b/3x3_reduce/bn/sc" + name: "inception_3b/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/3x3_reduce/bn/sc" + top: "inception_3b/3x3" + name: "inception_3b/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/3x3" + name: "inception_3b/3x3/bn" + top: "inception_3b/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/3x3/bn" + top: "inception_3b/3x3/bn/sc" + name: "inception_3b/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/3x3/bn/sc" + top: "inception_3b/3x3/bn/sc" + name: "inception_3b/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: 
"inception_3a/output" + top: "inception_3b/double3x3_reduce" + name: "inception_3b/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/double3x3_reduce" + name: "inception_3b/double3x3_reduce/bn" + top: "inception_3b/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/double3x3_reduce/bn" + top: "inception_3b/double3x3_reduce/bn/sc" + name: "inception_3b/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/double3x3_reduce/bn/sc" + top: "inception_3b/double3x3_reduce/bn/sc" + name: "inception_3b/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/double3x3_reduce/bn/sc" + top: "inception_3b/double3x3a" + name: "inception_3b/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/double3x3a" + name: "inception_3b/double3x3a/bn" + top: "inception_3b/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/double3x3a/bn" + top: "inception_3b/double3x3a/bn/sc" + name: "inception_3b/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/double3x3a/bn/sc" + top: "inception_3b/double3x3a/bn/sc" + name: "inception_3b/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/double3x3a/bn/sc" + top: "inception_3b/double3x3b" + name: "inception_3b/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 
+ stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/double3x3b" + name: "inception_3b/double3x3b/bn" + top: "inception_3b/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/double3x3b/bn" + top: "inception_3b/double3x3b/bn/sc" + name: "inception_3b/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/double3x3b/bn/sc" + top: "inception_3b/double3x3b/bn/sc" + name: "inception_3b/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/output" + top: "inception_3b/pool" + name: "inception_3b/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_3b/pool" + top: "inception_3b/pool_proj" + name: "inception_3b/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/pool_proj" + name: "inception_3b/pool_proj/bn" + top: "inception_3b/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/pool_proj/bn" + top: "inception_3b/pool_proj/bn/sc" + name: "inception_3b/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/pool_proj/bn/sc" + top: "inception_3b/pool_proj/bn/sc" + name: "inception_3b/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/1x1/bn/sc" + bottom: "inception_3b/3x3/bn/sc" + bottom: "inception_3b/double3x3b/bn/sc" + bottom: "inception_3b/pool_proj/bn/sc" + top: "inception_3b/output" + name: "inception_3b/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_3b/output" + top: "inception_3c/3x3_reduce" + name: "inception_3c/3x3_reduce" + 
type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3c/3x3_reduce" + name: "inception_3c/3x3_reduce/bn" + top: "inception_3c/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3c/3x3_reduce/bn" + top: "inception_3c/3x3_reduce/bn/sc" + name: "inception_3c/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3c/3x3_reduce/bn/sc" + top: "inception_3c/3x3_reduce/bn/sc" + name: "inception_3c/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/3x3_reduce/bn/sc" + top: "inception_3c/3x3" + name: "inception_3c/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3c/3x3" + name: "inception_3c/3x3/bn" + top: "inception_3c/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3c/3x3/bn" + top: "inception_3c/3x3/bn/sc" + name: "inception_3c/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3c/3x3/bn/sc" + top: "inception_3c/3x3/bn/sc" + name: "inception_3c/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/output" + top: "inception_3c/double3x3_reduce" + name: "inception_3c/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3c/double3x3_reduce" + name: "inception_3c/double3x3_reduce/bn" + top: "inception_3c/double3x3_reduce/bn" + type: "BatchNorm" + 
batch_norm_param { + + } +} +layer { + bottom: "inception_3c/double3x3_reduce/bn" + top: "inception_3c/double3x3_reduce/bn/sc" + name: "inception_3c/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3c/double3x3_reduce/bn/sc" + top: "inception_3c/double3x3_reduce/bn/sc" + name: "inception_3c/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/double3x3_reduce/bn/sc" + top: "inception_3c/double3x3a" + name: "inception_3c/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3c/double3x3a" + name: "inception_3c/double3x3a/bn" + top: "inception_3c/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3c/double3x3a/bn" + top: "inception_3c/double3x3a/bn/sc" + name: "inception_3c/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3c/double3x3a/bn/sc" + top: "inception_3c/double3x3a/bn/sc" + name: "inception_3c/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/double3x3a/bn/sc" + top: "inception_3c/double3x3b" + name: "inception_3c/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3c/double3x3b" + name: "inception_3c/double3x3b/bn" + top: "inception_3c/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3c/double3x3b/bn" + top: "inception_3c/double3x3b/bn/sc" + name: "inception_3c/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: 
"inception_3c/double3x3b/bn/sc" + top: "inception_3c/double3x3b/bn/sc" + name: "inception_3c/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/output" + top: "inception_3c/pool" + name: "inception_3c/pool" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + bottom: "inception_3c/3x3/bn/sc" + bottom: "inception_3c/double3x3b/bn/sc" + bottom: "inception_3c/pool" + top: "inception_3c/output" + name: "inception_3c/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_3c/output" + top: "pool3/5x5_s3" + name: "pool3/5x5_s3" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + bottom: "pool3/5x5_s3" + top: "loss1/conv" + name: "loss1/conv" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "loss1/conv" + name: "loss1/conv/bn" + top: "loss1/conv/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "loss1/conv/bn" + top: "loss1/conv/bn/sc" + name: "loss1/conv/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "loss1/conv/bn/sc" + top: "loss1/conv/bn/sc" + name: "loss1/conv/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "loss1/conv/bn/sc" + top: "loss1/fc" + name: "loss1/fc" + type: "InnerProduct" + param { + lr_mult: 1 + decay_mult: 1 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "loss1/fc" + name: "loss1/fc/bn" + top: "loss1/fc/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "loss1/fc/bn" + top: "loss1/fc/bn/sc" + name: "loss1/fc/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "loss1/fc/bn/sc" + top: "loss1/fc/bn/sc" + name: 
"loss1/fc/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "loss1/fc/bn/sc" + top: "loss1/classifier" + name: "loss1/classifier" + type: "InnerProduct" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/loss" + name: "loss1/loss" + type: "SoftmaxWithLoss" + loss_weight: 0.3 +} +layer { + bottom: "loss1/classifier" + top: "loss1/prob" + name: "loss1/prob" + type: "Softmax" + include { + phase: TEST + } +} +layer { + bottom: "loss1/prob" + bottom: "label" + top: "loss1/top-1" + name: "loss1/top-1" + type: "Accuracy" + include { + phase: TEST + } +} +layer { + bottom: "loss1/prob" + bottom: "label" + top: "loss1/top-5" + name: "loss1/top-5" + type: "Accuracy" + accuracy_param { + top_k: 5 + } + include { + phase: TEST + } +} +layer { + bottom: "inception_3c/output" + top: "inception_4a/1x1" + name: "inception_4a/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 224 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/1x1" + name: "inception_4a/1x1/bn" + top: "inception_4a/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/1x1/bn" + top: "inception_4a/1x1/bn/sc" + name: "inception_4a/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/1x1/bn/sc" + top: "inception_4a/1x1/bn/sc" + name: "inception_4a/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/output" + top: "inception_4a/3x3_reduce" + name: "inception_4a/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + 
kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/3x3_reduce" + name: "inception_4a/3x3_reduce/bn" + top: "inception_4a/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/3x3_reduce/bn" + top: "inception_4a/3x3_reduce/bn/sc" + name: "inception_4a/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/3x3_reduce/bn/sc" + top: "inception_4a/3x3_reduce/bn/sc" + name: "inception_4a/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/3x3_reduce/bn/sc" + top: "inception_4a/3x3" + name: "inception_4a/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/3x3" + name: "inception_4a/3x3/bn" + top: "inception_4a/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/3x3/bn" + top: "inception_4a/3x3/bn/sc" + name: "inception_4a/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/3x3/bn/sc" + top: "inception_4a/3x3/bn/sc" + name: "inception_4a/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/output" + top: "inception_4a/double3x3_reduce" + name: "inception_4a/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/double3x3_reduce" + name: "inception_4a/double3x3_reduce/bn" + top: "inception_4a/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/double3x3_reduce/bn" + top: 
"inception_4a/double3x3_reduce/bn/sc" + name: "inception_4a/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/double3x3_reduce/bn/sc" + top: "inception_4a/double3x3_reduce/bn/sc" + name: "inception_4a/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/double3x3_reduce/bn/sc" + top: "inception_4a/double3x3a" + name: "inception_4a/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/double3x3a" + name: "inception_4a/double3x3a/bn" + top: "inception_4a/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/double3x3a/bn" + top: "inception_4a/double3x3a/bn/sc" + name: "inception_4a/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/double3x3a/bn/sc" + top: "inception_4a/double3x3a/bn/sc" + name: "inception_4a/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/double3x3a/bn/sc" + top: "inception_4a/double3x3b" + name: "inception_4a/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/double3x3b" + name: "inception_4a/double3x3b/bn" + top: "inception_4a/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/double3x3b/bn" + top: "inception_4a/double3x3b/bn/sc" + name: "inception_4a/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/double3x3b/bn/sc" + top: "inception_4a/double3x3b/bn/sc" + name: 
"inception_4a/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/output" + top: "inception_4a/pool" + name: "inception_4a/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_4a/pool" + top: "inception_4a/pool_proj" + name: "inception_4a/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/pool_proj" + name: "inception_4a/pool_proj/bn" + top: "inception_4a/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/pool_proj/bn" + top: "inception_4a/pool_proj/bn/sc" + name: "inception_4a/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/pool_proj/bn/sc" + top: "inception_4a/pool_proj/bn/sc" + name: "inception_4a/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/1x1/bn/sc" + bottom: "inception_4a/3x3/bn/sc" + bottom: "inception_4a/double3x3b/bn/sc" + bottom: "inception_4a/pool_proj/bn/sc" + top: "inception_4a/output" + name: "inception_4a/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_4a/output" + top: "inception_4b/1x1" + name: "inception_4b/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/1x1" + name: "inception_4b/1x1/bn" + top: "inception_4b/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/1x1/bn" + top: "inception_4b/1x1/bn/sc" + name: "inception_4b/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: 
"inception_4b/1x1/bn/sc" + top: "inception_4b/1x1/bn/sc" + name: "inception_4b/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/output" + top: "inception_4b/3x3_reduce" + name: "inception_4b/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/3x3_reduce" + name: "inception_4b/3x3_reduce/bn" + top: "inception_4b/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/3x3_reduce/bn" + top: "inception_4b/3x3_reduce/bn/sc" + name: "inception_4b/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/3x3_reduce/bn/sc" + top: "inception_4b/3x3_reduce/bn/sc" + name: "inception_4b/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/3x3_reduce/bn/sc" + top: "inception_4b/3x3" + name: "inception_4b/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/3x3" + name: "inception_4b/3x3/bn" + top: "inception_4b/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/3x3/bn" + top: "inception_4b/3x3/bn/sc" + name: "inception_4b/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/3x3/bn/sc" + top: "inception_4b/3x3/bn/sc" + name: "inception_4b/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/output" + top: "inception_4b/double3x3_reduce" + name: "inception_4b/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + 
kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/double3x3_reduce" + name: "inception_4b/double3x3_reduce/bn" + top: "inception_4b/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/double3x3_reduce/bn" + top: "inception_4b/double3x3_reduce/bn/sc" + name: "inception_4b/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/double3x3_reduce/bn/sc" + top: "inception_4b/double3x3_reduce/bn/sc" + name: "inception_4b/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/double3x3_reduce/bn/sc" + top: "inception_4b/double3x3a" + name: "inception_4b/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/double3x3a" + name: "inception_4b/double3x3a/bn" + top: "inception_4b/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/double3x3a/bn" + top: "inception_4b/double3x3a/bn/sc" + name: "inception_4b/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/double3x3a/bn/sc" + top: "inception_4b/double3x3a/bn/sc" + name: "inception_4b/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/double3x3a/bn/sc" + top: "inception_4b/double3x3b" + name: "inception_4b/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/double3x3b" + name: "inception_4b/double3x3b/bn" + top: "inception_4b/double3x3b/bn" + type: "BatchNorm" + 
batch_norm_param { + + } +} +layer { + bottom: "inception_4b/double3x3b/bn" + top: "inception_4b/double3x3b/bn/sc" + name: "inception_4b/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/double3x3b/bn/sc" + top: "inception_4b/double3x3b/bn/sc" + name: "inception_4b/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/output" + top: "inception_4b/pool" + name: "inception_4b/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_4b/pool" + top: "inception_4b/pool_proj" + name: "inception_4b/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/pool_proj" + name: "inception_4b/pool_proj/bn" + top: "inception_4b/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/pool_proj/bn" + top: "inception_4b/pool_proj/bn/sc" + name: "inception_4b/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/pool_proj/bn/sc" + top: "inception_4b/pool_proj/bn/sc" + name: "inception_4b/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/1x1/bn/sc" + bottom: "inception_4b/3x3/bn/sc" + bottom: "inception_4b/double3x3b/bn/sc" + bottom: "inception_4b/pool_proj/bn/sc" + top: "inception_4b/output" + name: "inception_4b/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_4b/output" + top: "inception_4c/1x1" + name: "inception_4c/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: 
"inception_4c/1x1" + name: "inception_4c/1x1/bn" + top: "inception_4c/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/1x1/bn" + top: "inception_4c/1x1/bn/sc" + name: "inception_4c/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/1x1/bn/sc" + top: "inception_4c/1x1/bn/sc" + name: "inception_4c/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/output" + top: "inception_4c/3x3_reduce" + name: "inception_4c/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/3x3_reduce" + name: "inception_4c/3x3_reduce/bn" + top: "inception_4c/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/3x3_reduce/bn" + top: "inception_4c/3x3_reduce/bn/sc" + name: "inception_4c/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/3x3_reduce/bn/sc" + top: "inception_4c/3x3_reduce/bn/sc" + name: "inception_4c/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/3x3_reduce/bn/sc" + top: "inception_4c/3x3" + name: "inception_4c/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/3x3" + name: "inception_4c/3x3/bn" + top: "inception_4c/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/3x3/bn" + top: "inception_4c/3x3/bn/sc" + name: "inception_4c/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/3x3/bn/sc" + top: "inception_4c/3x3/bn/sc" + name: 
"inception_4c/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/output" + top: "inception_4c/double3x3_reduce" + name: "inception_4c/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/double3x3_reduce" + name: "inception_4c/double3x3_reduce/bn" + top: "inception_4c/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/double3x3_reduce/bn" + top: "inception_4c/double3x3_reduce/bn/sc" + name: "inception_4c/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/double3x3_reduce/bn/sc" + top: "inception_4c/double3x3_reduce/bn/sc" + name: "inception_4c/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/double3x3_reduce/bn/sc" + top: "inception_4c/double3x3a" + name: "inception_4c/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/double3x3a" + name: "inception_4c/double3x3a/bn" + top: "inception_4c/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/double3x3a/bn" + top: "inception_4c/double3x3a/bn/sc" + name: "inception_4c/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/double3x3a/bn/sc" + top: "inception_4c/double3x3a/bn/sc" + name: "inception_4c/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/double3x3a/bn/sc" + top: "inception_4c/double3x3b" + name: "inception_4c/double3x3b" + type: "Convolution" + param { + lr_mult: 
1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/double3x3b" + name: "inception_4c/double3x3b/bn" + top: "inception_4c/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/double3x3b/bn" + top: "inception_4c/double3x3b/bn/sc" + name: "inception_4c/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/double3x3b/bn/sc" + top: "inception_4c/double3x3b/bn/sc" + name: "inception_4c/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/output" + top: "inception_4c/pool" + name: "inception_4c/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_4c/pool" + top: "inception_4c/pool_proj" + name: "inception_4c/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/pool_proj" + name: "inception_4c/pool_proj/bn" + top: "inception_4c/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/pool_proj/bn" + top: "inception_4c/pool_proj/bn/sc" + name: "inception_4c/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/pool_proj/bn/sc" + top: "inception_4c/pool_proj/bn/sc" + name: "inception_4c/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/1x1/bn/sc" + bottom: "inception_4c/3x3/bn/sc" + bottom: "inception_4c/double3x3b/bn/sc" + bottom: "inception_4c/pool_proj/bn/sc" + top: "inception_4c/output" + name: "inception_4c/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: 
"inception_4c/output" + top: "inception_4d/1x1" + name: "inception_4d/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/1x1" + name: "inception_4d/1x1/bn" + top: "inception_4d/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/1x1/bn" + top: "inception_4d/1x1/bn/sc" + name: "inception_4d/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/1x1/bn/sc" + top: "inception_4d/1x1/bn/sc" + name: "inception_4d/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/output" + top: "inception_4d/3x3_reduce" + name: "inception_4d/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/3x3_reduce" + name: "inception_4d/3x3_reduce/bn" + top: "inception_4d/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/3x3_reduce/bn" + top: "inception_4d/3x3_reduce/bn/sc" + name: "inception_4d/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/3x3_reduce/bn/sc" + top: "inception_4d/3x3_reduce/bn/sc" + name: "inception_4d/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/3x3_reduce/bn/sc" + top: "inception_4d/3x3" + name: "inception_4d/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/3x3" + name: "inception_4d/3x3/bn" + top: 
"inception_4d/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/3x3/bn" + top: "inception_4d/3x3/bn/sc" + name: "inception_4d/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/3x3/bn/sc" + top: "inception_4d/3x3/bn/sc" + name: "inception_4d/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/output" + top: "inception_4d/double3x3_reduce" + name: "inception_4d/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/double3x3_reduce" + name: "inception_4d/double3x3_reduce/bn" + top: "inception_4d/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/double3x3_reduce/bn" + top: "inception_4d/double3x3_reduce/bn/sc" + name: "inception_4d/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/double3x3_reduce/bn/sc" + top: "inception_4d/double3x3_reduce/bn/sc" + name: "inception_4d/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/double3x3_reduce/bn/sc" + top: "inception_4d/double3x3a" + name: "inception_4d/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/double3x3a" + name: "inception_4d/double3x3a/bn" + top: "inception_4d/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/double3x3a/bn" + top: "inception_4d/double3x3a/bn/sc" + name: "inception_4d/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + 
bottom: "inception_4d/double3x3a/bn/sc" + top: "inception_4d/double3x3a/bn/sc" + name: "inception_4d/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/double3x3a/bn/sc" + top: "inception_4d/double3x3b" + name: "inception_4d/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/double3x3b" + name: "inception_4d/double3x3b/bn" + top: "inception_4d/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/double3x3b/bn" + top: "inception_4d/double3x3b/bn/sc" + name: "inception_4d/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/double3x3b/bn/sc" + top: "inception_4d/double3x3b/bn/sc" + name: "inception_4d/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/output" + top: "inception_4d/pool" + name: "inception_4d/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_4d/pool" + top: "inception_4d/pool_proj" + name: "inception_4d/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/pool_proj" + name: "inception_4d/pool_proj/bn" + top: "inception_4d/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/pool_proj/bn" + top: "inception_4d/pool_proj/bn/sc" + name: "inception_4d/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/pool_proj/bn/sc" + top: "inception_4d/pool_proj/bn/sc" + name: "inception_4d/pool_proj/bn/sc/relu" 
+ type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/1x1/bn/sc" + bottom: "inception_4d/3x3/bn/sc" + bottom: "inception_4d/double3x3b/bn/sc" + bottom: "inception_4d/pool_proj/bn/sc" + top: "inception_4d/output" + name: "inception_4d/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_4d/output" + top: "inception_4e/3x3_reduce" + name: "inception_4e/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4e/3x3_reduce" + name: "inception_4e/3x3_reduce/bn" + top: "inception_4e/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4e/3x3_reduce/bn" + top: "inception_4e/3x3_reduce/bn/sc" + name: "inception_4e/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4e/3x3_reduce/bn/sc" + top: "inception_4e/3x3_reduce/bn/sc" + name: "inception_4e/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/3x3_reduce/bn/sc" + top: "inception_4e/3x3" + name: "inception_4e/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4e/3x3" + name: "inception_4e/3x3/bn" + top: "inception_4e/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4e/3x3/bn" + top: "inception_4e/3x3/bn/sc" + name: "inception_4e/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4e/3x3/bn/sc" + top: "inception_4e/3x3/bn/sc" + name: "inception_4e/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/output" + top: 
"inception_4e/double3x3_reduce" + name: "inception_4e/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4e/double3x3_reduce" + name: "inception_4e/double3x3_reduce/bn" + top: "inception_4e/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4e/double3x3_reduce/bn" + top: "inception_4e/double3x3_reduce/bn/sc" + name: "inception_4e/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4e/double3x3_reduce/bn/sc" + top: "inception_4e/double3x3_reduce/bn/sc" + name: "inception_4e/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/double3x3_reduce/bn/sc" + top: "inception_4e/double3x3a" + name: "inception_4e/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4e/double3x3a" + name: "inception_4e/double3x3a/bn" + top: "inception_4e/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4e/double3x3a/bn" + top: "inception_4e/double3x3a/bn/sc" + name: "inception_4e/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4e/double3x3a/bn/sc" + top: "inception_4e/double3x3a/bn/sc" + name: "inception_4e/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/double3x3a/bn/sc" + top: "inception_4e/double3x3b" + name: "inception_4e/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 2 + 
weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4e/double3x3b" + name: "inception_4e/double3x3b/bn" + top: "inception_4e/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4e/double3x3b/bn" + top: "inception_4e/double3x3b/bn/sc" + name: "inception_4e/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4e/double3x3b/bn/sc" + top: "inception_4e/double3x3b/bn/sc" + name: "inception_4e/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/output" + top: "inception_4e/pool" + name: "inception_4e/pool" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + bottom: "inception_4e/3x3/bn/sc" + bottom: "inception_4e/double3x3b/bn/sc" + bottom: "inception_4e/pool" + top: "inception_4e/output" + name: "inception_4e/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_4e/output" + top: "pool4/5x5_s3" + name: "pool4/5x5_s3" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + bottom: "pool4/5x5_s3" + top: "loss2/conv" + name: "loss2/conv" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "loss2/conv" + name: "loss2/conv/bn" + top: "loss2/conv/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "loss2/conv/bn" + top: "loss2/conv/bn/sc" + name: "loss2/conv/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "loss2/conv/bn/sc" + top: "loss2/conv/bn/sc" + name: "loss2/conv/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "loss2/conv/bn/sc" + top: "loss2/fc" + name: "loss2/fc" + type: "InnerProduct" + param { + lr_mult: 1 + decay_mult: 1 + } + 
inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "loss2/fc" + name: "loss2/fc/bn" + top: "loss2/fc/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "loss2/fc/bn" + top: "loss2/fc/bn/sc" + name: "loss2/fc/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "loss2/fc/bn/sc" + top: "loss2/fc/bn/sc" + name: "loss2/fc/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "loss2/fc/bn/sc" + top: "loss2/classifier" + name: "loss2/classifier" + type: "InnerProduct" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/loss" + name: "loss2/loss" + type: "SoftmaxWithLoss" + loss_weight: 0.3 +} +layer { + bottom: "loss2/classifier" + top: "loss2/prob" + name: "loss2/prob" + type: "Softmax" + include { + phase: TEST + } +} +layer { + bottom: "loss2/prob" + bottom: "label" + top: "loss2/top-1" + name: "loss2/top-1" + type: "Accuracy" + include { + phase: TEST + } +} +layer { + bottom: "loss2/prob" + bottom: "label" + top: "loss2/top-5" + name: "loss2/top-5" + type: "Accuracy" + accuracy_param { + top_k: 5 + } + include { + phase: TEST + } +} +layer { + bottom: "inception_4e/output" + top: "inception_5a/1x1" + name: "inception_5a/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 352 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/1x1" + name: "inception_5a/1x1/bn" + top: "inception_5a/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/1x1/bn" + top: "inception_5a/1x1/bn/sc" + name: "inception_5a/1x1/bn/sc" + type: 
"Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/1x1/bn/sc" + top: "inception_5a/1x1/bn/sc" + name: "inception_5a/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/output" + top: "inception_5a/3x3_reduce" + name: "inception_5a/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/3x3_reduce" + name: "inception_5a/3x3_reduce/bn" + top: "inception_5a/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/3x3_reduce/bn" + top: "inception_5a/3x3_reduce/bn/sc" + name: "inception_5a/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/3x3_reduce/bn/sc" + top: "inception_5a/3x3_reduce/bn/sc" + name: "inception_5a/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/3x3_reduce/bn/sc" + top: "inception_5a/3x3" + name: "inception_5a/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 320 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/3x3" + name: "inception_5a/3x3/bn" + top: "inception_5a/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/3x3/bn" + top: "inception_5a/3x3/bn/sc" + name: "inception_5a/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/3x3/bn/sc" + top: "inception_5a/3x3/bn/sc" + name: "inception_5a/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/output" + top: "inception_5a/double3x3_reduce" + name: "inception_5a/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + 
decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/double3x3_reduce" + name: "inception_5a/double3x3_reduce/bn" + top: "inception_5a/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/double3x3_reduce/bn" + top: "inception_5a/double3x3_reduce/bn/sc" + name: "inception_5a/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/double3x3_reduce/bn/sc" + top: "inception_5a/double3x3_reduce/bn/sc" + name: "inception_5a/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/double3x3_reduce/bn/sc" + top: "inception_5a/double3x3a" + name: "inception_5a/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 224 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/double3x3a" + name: "inception_5a/double3x3a/bn" + top: "inception_5a/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/double3x3a/bn" + top: "inception_5a/double3x3a/bn/sc" + name: "inception_5a/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/double3x3a/bn/sc" + top: "inception_5a/double3x3a/bn/sc" + name: "inception_5a/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/double3x3a/bn/sc" + top: "inception_5a/double3x3b" + name: "inception_5a/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 224 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/double3x3b" + name: 
"inception_5a/double3x3b/bn" + top: "inception_5a/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/double3x3b/bn" + top: "inception_5a/double3x3b/bn/sc" + name: "inception_5a/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/double3x3b/bn/sc" + top: "inception_5a/double3x3b/bn/sc" + name: "inception_5a/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/output" + top: "inception_5a/pool" + name: "inception_5a/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_5a/pool" + top: "inception_5a/pool_proj" + name: "inception_5a/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/pool_proj" + name: "inception_5a/pool_proj/bn" + top: "inception_5a/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/pool_proj/bn" + top: "inception_5a/pool_proj/bn/sc" + name: "inception_5a/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/pool_proj/bn/sc" + top: "inception_5a/pool_proj/bn/sc" + name: "inception_5a/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/1x1/bn/sc" + bottom: "inception_5a/3x3/bn/sc" + bottom: "inception_5a/double3x3b/bn/sc" + bottom: "inception_5a/pool_proj/bn/sc" + top: "inception_5a/output" + name: "inception_5a/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_5a/output" + top: "inception_5b/1x1" + name: "inception_5b/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 352 + pad: 0 + kernel_size: 1 + stride: 1 + 
weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/1x1" + name: "inception_5b/1x1/bn" + top: "inception_5b/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/1x1/bn" + top: "inception_5b/1x1/bn/sc" + name: "inception_5b/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/1x1/bn/sc" + top: "inception_5b/1x1/bn/sc" + name: "inception_5b/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/output" + top: "inception_5b/3x3_reduce" + name: "inception_5b/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/3x3_reduce" + name: "inception_5b/3x3_reduce/bn" + top: "inception_5b/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/3x3_reduce/bn" + top: "inception_5b/3x3_reduce/bn/sc" + name: "inception_5b/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/3x3_reduce/bn/sc" + top: "inception_5b/3x3_reduce/bn/sc" + name: "inception_5b/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5b/3x3_reduce/bn/sc" + top: "inception_5b/3x3" + name: "inception_5b/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 320 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/3x3" + name: "inception_5b/3x3/bn" + top: "inception_5b/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/3x3/bn" + top: "inception_5b/3x3/bn/sc" + name: "inception_5b/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} 
+layer { + bottom: "inception_5b/3x3/bn/sc" + top: "inception_5b/3x3/bn/sc" + name: "inception_5b/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/output" + top: "inception_5b/double3x3_reduce" + name: "inception_5b/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/double3x3_reduce" + name: "inception_5b/double3x3_reduce/bn" + top: "inception_5b/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/double3x3_reduce/bn" + top: "inception_5b/double3x3_reduce/bn/sc" + name: "inception_5b/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/double3x3_reduce/bn/sc" + top: "inception_5b/double3x3_reduce/bn/sc" + name: "inception_5b/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5b/double3x3_reduce/bn/sc" + top: "inception_5b/double3x3a" + name: "inception_5b/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 224 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/double3x3a" + name: "inception_5b/double3x3a/bn" + top: "inception_5b/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/double3x3a/bn" + top: "inception_5b/double3x3a/bn/sc" + name: "inception_5b/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/double3x3a/bn/sc" + top: "inception_5b/double3x3a/bn/sc" + name: "inception_5b/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5b/double3x3a/bn/sc" + top: 
"inception_5b/double3x3b" + name: "inception_5b/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 224 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/double3x3b" + name: "inception_5b/double3x3b/bn" + top: "inception_5b/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/double3x3b/bn" + top: "inception_5b/double3x3b/bn/sc" + name: "inception_5b/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/double3x3b/bn/sc" + top: "inception_5b/double3x3b/bn/sc" + name: "inception_5b/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/output" + top: "inception_5b/pool" + name: "inception_5b/pool" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_5b/pool" + top: "inception_5b/pool_proj" + name: "inception_5b/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/pool_proj" + name: "inception_5b/pool_proj/bn" + top: "inception_5b/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/pool_proj/bn" + top: "inception_5b/pool_proj/bn/sc" + name: "inception_5b/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/pool_proj/bn/sc" + top: "inception_5b/pool_proj/bn/sc" + name: "inception_5b/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5b/1x1/bn/sc" + bottom: "inception_5b/3x3/bn/sc" + bottom: "inception_5b/double3x3b/bn/sc" + bottom: "inception_5b/pool_proj/bn/sc" + top: 
"inception_5b/output" + name: "inception_5b/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_5b/output" + top: "pool5/7x7_s1" + name: "pool5/7x7_s1" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 7 + stride: 1 + } +} +layer { + bottom: "pool5/7x7_s1" + top: "loss3/classifier" + name: "loss3/classifier" + type: "InnerProduct" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/loss" + name: "loss3/loss" + type: "SoftmaxWithLoss" + loss_weight: 1 +} +layer { + bottom: "loss3/classifier" + top: "loss3/prob" + name: "loss3/prob" + type: "Softmax" + include { + phase: TEST + } +} +layer { + bottom: "loss3/prob" + bottom: "label" + top: "loss3/top-1" + name: "loss3/top-1" + type: "Accuracy" + include { + phase: TEST + } +} +layer { + bottom: "loss3/prob" + bottom: "label" + top: "loss3/top-5" + name: "loss3/top-5" + type: "Accuracy" + accuracy_param { + top_k: 5 + } + include { + phase: TEST + } +} diff --git a/models/default_resnet_50/LICENSE b/models/default_resnet_50/LICENSE new file mode 100644 index 00000000000..3a514d5cc33 --- /dev/null +++ b/models/default_resnet_50/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Shaoqing Ren + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or 
substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/models/default_resnet_50/solver.prototxt b/models/default_resnet_50/solver.prototxt new file mode 100644 index 00000000000..e736f5069a1 --- /dev/null +++ b/models/default_resnet_50/solver.prototxt @@ -0,0 +1,16 @@ +net: "models/default_resnet_50/train_val.prototxt" +test_iter: 100 +test_interval: 10000 +test_initialization: false +display: 40 +average_loss: 40 +base_lr: 0.1 +lr_policy: "step" +stepsize: 320000 +gamma: 0.96 +max_iter: 600000 +momentum: 0.9 +weight_decay: 0.0001 +snapshot: 50000 +snapshot_prefix: "models/default_resnet_50/default_resnet_50" +solver_mode: CPU diff --git a/models/default_resnet_50/train_val.prototxt b/models/default_resnet_50/train_val.prototxt new file mode 100644 index 00000000000..9b251c7f4f0 --- /dev/null +++ b/models/default_resnet_50/train_val.prototxt @@ -0,0 +1,3048 @@ +name: "ResNet-50" +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 224 + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + } + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 50 + backend: LMDB + prefetch: 2 + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mirror: false + crop_size: 224 + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + } + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } +} 
+ +layer { + bottom: "data" + top: "conv1" + name: "conv1" + type: "Convolution" + convolution_param { + + num_output: 64 + kernel_size: 7 + pad: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "conv1" + top: "conv1" + name: "bn_conv1" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "conv1" + top: "conv1" + name: "scale_conv1" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "conv1" + top: "conv1" + name: "conv1_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "conv1" + top: "pool1" + name: "pool1" + type: "Pooling" + pooling_param { + + kernel_size: 3 + stride: 2 + pool: MAX + } +} + +layer { + bottom: "pool1" + top: "res2a_branch1" + name: "res2a_branch1" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res2a_branch1" + top: "res2a_branch1" + name: "bn2a_branch1" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res2a_branch1" + top: "res2a_branch1" + name: "scale2a_branch1" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "pool1" + top: "res2a_branch2a" + name: "res2a_branch2a" + type: "Convolution" + convolution_param { + + num_output: 64 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res2a_branch2a" + top: "res2a_branch2a" + name: "bn2a_branch2a" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res2a_branch2a" + top: "res2a_branch2a" + name: "scale2a_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2a_branch2a" + top: "res2a_branch2a" + name: "res2a_branch2a_relu" + type: 
"ReLU" + relu_param { + + } +} + +layer { + bottom: "res2a_branch2a" + top: "res2a_branch2b" + name: "res2a_branch2b" + type: "Convolution" + convolution_param { + + num_output: 64 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res2a_branch2b" + top: "res2a_branch2b" + name: "bn2a_branch2b" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res2a_branch2b" + top: "res2a_branch2b" + name: "scale2a_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2a_branch2b" + top: "res2a_branch2b" + name: "res2a_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2a_branch2b" + top: "res2a_branch2c" + name: "res2a_branch2c" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res2a_branch2c" + top: "res2a_branch2c" + name: "bn2a_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res2a_branch2c" + top: "res2a_branch2c" + name: "scale2a_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2a_branch1" + bottom: "res2a_branch2c" + top: "res2a" + name: "res2a" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res2a" + top: "res2a" + name: "res2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2a" + top: "res2b_branch2a" + name: "res2b_branch2a" + type: "Convolution" + convolution_param { + + num_output: 64 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res2b_branch2a" + top: "res2b_branch2a" + name: "bn2b_branch2a" + type: "BatchNorm" + 
batch_norm_param { + + + } +} + +layer { + bottom: "res2b_branch2a" + top: "res2b_branch2a" + name: "scale2b_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2b_branch2a" + top: "res2b_branch2a" + name: "res2b_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2b_branch2a" + top: "res2b_branch2b" + name: "res2b_branch2b" + type: "Convolution" + convolution_param { + + num_output: 64 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res2b_branch2b" + top: "res2b_branch2b" + name: "bn2b_branch2b" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res2b_branch2b" + top: "res2b_branch2b" + name: "scale2b_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2b_branch2b" + top: "res2b_branch2b" + name: "res2b_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2b_branch2b" + top: "res2b_branch2c" + name: "res2b_branch2c" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res2b_branch2c" + top: "res2b_branch2c" + name: "bn2b_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res2b_branch2c" + top: "res2b_branch2c" + name: "scale2b_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2a" + bottom: "res2b_branch2c" + top: "res2b" + name: "res2b" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res2b" + top: "res2b" + name: "res2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2b" + top: "res2c_branch2a" + name: "res2c_branch2a" + type: "Convolution" + convolution_param { + + num_output: 64 + 
kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res2c_branch2a" + top: "res2c_branch2a" + name: "bn2c_branch2a" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res2c_branch2a" + top: "res2c_branch2a" + name: "scale2c_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2c_branch2a" + top: "res2c_branch2a" + name: "res2c_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2c_branch2a" + top: "res2c_branch2b" + name: "res2c_branch2b" + type: "Convolution" + convolution_param { + + num_output: 64 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res2c_branch2b" + top: "res2c_branch2b" + name: "bn2c_branch2b" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res2c_branch2b" + top: "res2c_branch2b" + name: "scale2c_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2c_branch2b" + top: "res2c_branch2b" + name: "res2c_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2c_branch2b" + top: "res2c_branch2c" + name: "res2c_branch2c" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res2c_branch2c" + top: "res2c_branch2c" + name: "bn2c_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res2c_branch2c" + top: "res2c_branch2c" + name: "scale2c_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2b" + bottom: "res2c_branch2c" + top: "res2c" + name: "res2c" + type: "Eltwise" + eltwise_param 
{ + + } +} + +layer { + bottom: "res2c" + top: "res2c" + name: "res2c_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2c" + top: "res3a_branch1" + name: "res3a_branch1" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 2 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res3a_branch1" + top: "res3a_branch1" + name: "bn3a_branch1" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res3a_branch1" + top: "res3a_branch1" + name: "scale3a_branch1" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2c" + top: "res3a_branch2a" + name: "res3a_branch2a" + type: "Convolution" + convolution_param { + + num_output: 128 + kernel_size: 1 + pad: 0 + stride: 2 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res3a_branch2a" + top: "res3a_branch2a" + name: "bn3a_branch2a" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res3a_branch2a" + top: "res3a_branch2a" + name: "scale3a_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3a_branch2a" + top: "res3a_branch2a" + name: "res3a_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3a_branch2a" + top: "res3a_branch2b" + name: "res3a_branch2b" + type: "Convolution" + convolution_param { + + num_output: 128 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res3a_branch2b" + top: "res3a_branch2b" + name: "bn3a_branch2b" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res3a_branch2b" + top: "res3a_branch2b" + name: "scale3a_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer 
{ + bottom: "res3a_branch2b" + top: "res3a_branch2b" + name: "res3a_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3a_branch2b" + top: "res3a_branch2c" + name: "res3a_branch2c" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res3a_branch2c" + top: "res3a_branch2c" + name: "bn3a_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res3a_branch2c" + top: "res3a_branch2c" + name: "scale3a_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3a_branch1" + bottom: "res3a_branch2c" + top: "res3a" + name: "res3a" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res3a" + top: "res3a" + name: "res3a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3a" + top: "res3b_branch2a" + name: "res3b_branch2a" + type: "Convolution" + convolution_param { + + num_output: 128 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res3b_branch2a" + top: "res3b_branch2a" + name: "bn3b_branch2a" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res3b_branch2a" + top: "res3b_branch2a" + name: "scale3b_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3b_branch2a" + top: "res3b_branch2a" + name: "res3b_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3b_branch2a" + top: "res3b_branch2b" + name: "res3b_branch2b" + type: "Convolution" + convolution_param { + + num_output: 128 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: 
"res3b_branch2b" + top: "res3b_branch2b" + name: "bn3b_branch2b" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res3b_branch2b" + top: "res3b_branch2b" + name: "scale3b_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3b_branch2b" + top: "res3b_branch2b" + name: "res3b_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3b_branch2b" + top: "res3b_branch2c" + name: "res3b_branch2c" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res3b_branch2c" + top: "res3b_branch2c" + name: "bn3b_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res3b_branch2c" + top: "res3b_branch2c" + name: "scale3b_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3a" + bottom: "res3b_branch2c" + top: "res3b" + name: "res3b" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res3b" + top: "res3b" + name: "res3b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3b" + top: "res3c_branch2a" + name: "res3c_branch2a" + type: "Convolution" + convolution_param { + + num_output: 128 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res3c_branch2a" + top: "res3c_branch2a" + name: "bn3c_branch2a" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res3c_branch2a" + top: "res3c_branch2a" + name: "scale3c_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3c_branch2a" + top: "res3c_branch2a" + name: "res3c_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3c_branch2a" + top: "res3c_branch2b" + 
name: "res3c_branch2b" + type: "Convolution" + convolution_param { + + num_output: 128 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res3c_branch2b" + top: "res3c_branch2b" + name: "bn3c_branch2b" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res3c_branch2b" + top: "res3c_branch2b" + name: "scale3c_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3c_branch2b" + top: "res3c_branch2b" + name: "res3c_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3c_branch2b" + top: "res3c_branch2c" + name: "res3c_branch2c" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res3c_branch2c" + top: "res3c_branch2c" + name: "bn3c_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res3c_branch2c" + top: "res3c_branch2c" + name: "scale3c_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3b" + bottom: "res3c_branch2c" + top: "res3c" + name: "res3c" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res3c" + top: "res3c" + name: "res3c_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3c" + top: "res3d_branch2a" + name: "res3d_branch2a" + type: "Convolution" + convolution_param { + + num_output: 128 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res3d_branch2a" + top: "res3d_branch2a" + name: "bn3d_branch2a" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res3d_branch2a" + top: "res3d_branch2a" + name: 
"scale3d_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3d_branch2a" + top: "res3d_branch2a" + name: "res3d_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3d_branch2a" + top: "res3d_branch2b" + name: "res3d_branch2b" + type: "Convolution" + convolution_param { + + num_output: 128 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res3d_branch2b" + top: "res3d_branch2b" + name: "bn3d_branch2b" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res3d_branch2b" + top: "res3d_branch2b" + name: "scale3d_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3d_branch2b" + top: "res3d_branch2b" + name: "res3d_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3d_branch2b" + top: "res3d_branch2c" + name: "res3d_branch2c" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res3d_branch2c" + top: "res3d_branch2c" + name: "bn3d_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res3d_branch2c" + top: "res3d_branch2c" + name: "scale3d_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3c" + bottom: "res3d_branch2c" + top: "res3d" + name: "res3d" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res3d" + top: "res3d" + name: "res3d_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3d" + top: "res4a_branch1" + name: "res4a_branch1" + type: "Convolution" + convolution_param { + + num_output: 1024 + kernel_size: 1 + pad: 0 + stride: 2 + bias_term: false + weight_filler { + type: "xavier" + } + 
bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4a_branch1" + top: "res4a_branch1" + name: "bn4a_branch1" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4a_branch1" + top: "res4a_branch1" + name: "scale4a_branch1" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3d" + top: "res4a_branch2a" + name: "res4a_branch2a" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 2 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4a_branch2a" + top: "res4a_branch2a" + name: "bn4a_branch2a" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4a_branch2a" + top: "res4a_branch2a" + name: "scale4a_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4a_branch2a" + top: "res4a_branch2a" + name: "res4a_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4a_branch2a" + top: "res4a_branch2b" + name: "res4a_branch2b" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4a_branch2b" + top: "res4a_branch2b" + name: "bn4a_branch2b" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4a_branch2b" + top: "res4a_branch2b" + name: "scale4a_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4a_branch2b" + top: "res4a_branch2b" + name: "res4a_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4a_branch2b" + top: "res4a_branch2c" + name: "res4a_branch2c" + type: "Convolution" + convolution_param { + + num_output: 1024 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + 
type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4a_branch2c" + top: "res4a_branch2c" + name: "bn4a_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4a_branch2c" + top: "res4a_branch2c" + name: "scale4a_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4a_branch1" + bottom: "res4a_branch2c" + top: "res4a" + name: "res4a" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res4a" + top: "res4a" + name: "res4a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4a" + top: "res4b_branch2a" + name: "res4b_branch2a" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4b_branch2a" + top: "res4b_branch2a" + name: "bn4b_branch2a" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4b_branch2a" + top: "res4b_branch2a" + name: "scale4b_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4b_branch2a" + top: "res4b_branch2a" + name: "res4b_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4b_branch2a" + top: "res4b_branch2b" + name: "res4b_branch2b" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4b_branch2b" + top: "res4b_branch2b" + name: "bn4b_branch2b" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4b_branch2b" + top: "res4b_branch2b" + name: "scale4b_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4b_branch2b" + top: "res4b_branch2b" + name: "res4b_branch2b_relu" 
+ type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4b_branch2b" + top: "res4b_branch2c" + name: "res4b_branch2c" + type: "Convolution" + convolution_param { + + num_output: 1024 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4b_branch2c" + top: "res4b_branch2c" + name: "bn4b_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4b_branch2c" + top: "res4b_branch2c" + name: "scale4b_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4a" + bottom: "res4b_branch2c" + top: "res4b" + name: "res4b" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res4b" + top: "res4b" + name: "res4b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4b" + top: "res4c_branch2a" + name: "res4c_branch2a" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4c_branch2a" + top: "res4c_branch2a" + name: "bn4c_branch2a" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4c_branch2a" + top: "res4c_branch2a" + name: "scale4c_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4c_branch2a" + top: "res4c_branch2a" + name: "res4c_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4c_branch2a" + top: "res4c_branch2b" + name: "res4c_branch2b" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4c_branch2b" + top: "res4c_branch2b" + name: "bn4c_branch2b" + type: "BatchNorm" + 
batch_norm_param { + + + } +} + +layer { + bottom: "res4c_branch2b" + top: "res4c_branch2b" + name: "scale4c_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4c_branch2b" + top: "res4c_branch2b" + name: "res4c_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4c_branch2b" + top: "res4c_branch2c" + name: "res4c_branch2c" + type: "Convolution" + convolution_param { + + num_output: 1024 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4c_branch2c" + top: "res4c_branch2c" + name: "bn4c_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4c_branch2c" + top: "res4c_branch2c" + name: "scale4c_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4b" + bottom: "res4c_branch2c" + top: "res4c" + name: "res4c" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res4c" + top: "res4c" + name: "res4c_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4c" + top: "res4d_branch2a" + name: "res4d_branch2a" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4d_branch2a" + top: "res4d_branch2a" + name: "bn4d_branch2a" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4d_branch2a" + top: "res4d_branch2a" + name: "scale4d_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4d_branch2a" + top: "res4d_branch2a" + name: "res4d_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4d_branch2a" + top: "res4d_branch2b" + name: "res4d_branch2b" + type: "Convolution" + convolution_param { + + num_output: 256 + 
kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4d_branch2b" + top: "res4d_branch2b" + name: "bn4d_branch2b" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4d_branch2b" + top: "res4d_branch2b" + name: "scale4d_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4d_branch2b" + top: "res4d_branch2b" + name: "res4d_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4d_branch2b" + top: "res4d_branch2c" + name: "res4d_branch2c" + type: "Convolution" + convolution_param { + + num_output: 1024 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4d_branch2c" + top: "res4d_branch2c" + name: "bn4d_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4d_branch2c" + top: "res4d_branch2c" + name: "scale4d_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4c" + bottom: "res4d_branch2c" + top: "res4d" + name: "res4d" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res4d" + top: "res4d" + name: "res4d_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4d" + top: "res4e_branch2a" + name: "res4e_branch2a" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4e_branch2a" + top: "res4e_branch2a" + name: "bn4e_branch2a" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4e_branch2a" + top: "res4e_branch2a" + name: "scale4e_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: 
"res4e_branch2a" + top: "res4e_branch2a" + name: "res4e_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4e_branch2a" + top: "res4e_branch2b" + name: "res4e_branch2b" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4e_branch2b" + top: "res4e_branch2b" + name: "bn4e_branch2b" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4e_branch2b" + top: "res4e_branch2b" + name: "scale4e_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4e_branch2b" + top: "res4e_branch2b" + name: "res4e_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4e_branch2b" + top: "res4e_branch2c" + name: "res4e_branch2c" + type: "Convolution" + convolution_param { + + num_output: 1024 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4e_branch2c" + top: "res4e_branch2c" + name: "bn4e_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4e_branch2c" + top: "res4e_branch2c" + name: "scale4e_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4d" + bottom: "res4e_branch2c" + top: "res4e" + name: "res4e" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res4e" + top: "res4e" + name: "res4e_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4e" + top: "res4f_branch2a" + name: "res4f_branch2a" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4f_branch2a" + top: 
"res4f_branch2a" + name: "bn4f_branch2a" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4f_branch2a" + top: "res4f_branch2a" + name: "scale4f_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4f_branch2a" + top: "res4f_branch2a" + name: "res4f_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4f_branch2a" + top: "res4f_branch2b" + name: "res4f_branch2b" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4f_branch2b" + top: "res4f_branch2b" + name: "bn4f_branch2b" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4f_branch2b" + top: "res4f_branch2b" + name: "scale4f_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4f_branch2b" + top: "res4f_branch2b" + name: "res4f_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4f_branch2b" + top: "res4f_branch2c" + name: "res4f_branch2c" + type: "Convolution" + convolution_param { + + num_output: 1024 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res4f_branch2c" + top: "res4f_branch2c" + name: "bn4f_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res4f_branch2c" + top: "res4f_branch2c" + name: "scale4f_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4e" + bottom: "res4f_branch2c" + top: "res4f" + name: "res4f" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res4f" + top: "res4f" + name: "res4f_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4f" + top: "res5a_branch1" + name: "res5a_branch1" + 
type: "Convolution" + convolution_param { + + num_output: 2048 + kernel_size: 1 + pad: 0 + stride: 2 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res5a_branch1" + top: "res5a_branch1" + name: "bn5a_branch1" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res5a_branch1" + top: "res5a_branch1" + name: "scale5a_branch1" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4f" + top: "res5a_branch2a" + name: "res5a_branch2a" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 2 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res5a_branch2a" + top: "res5a_branch2a" + name: "bn5a_branch2a" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res5a_branch2a" + top: "res5a_branch2a" + name: "scale5a_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5a_branch2a" + top: "res5a_branch2a" + name: "res5a_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5a_branch2a" + top: "res5a_branch2b" + name: "res5a_branch2b" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res5a_branch2b" + top: "res5a_branch2b" + name: "bn5a_branch2b" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res5a_branch2b" + top: "res5a_branch2b" + name: "scale5a_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5a_branch2b" + top: "res5a_branch2b" + name: "res5a_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5a_branch2b" + top: "res5a_branch2c" + name: 
"res5a_branch2c" + type: "Convolution" + convolution_param { + + num_output: 2048 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res5a_branch2c" + top: "res5a_branch2c" + name: "bn5a_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res5a_branch2c" + top: "res5a_branch2c" + name: "scale5a_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5a_branch1" + bottom: "res5a_branch2c" + top: "res5a" + name: "res5a" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res5a" + top: "res5a" + name: "res5a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5a" + top: "res5b_branch2a" + name: "res5b_branch2a" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res5b_branch2a" + top: "res5b_branch2a" + name: "bn5b_branch2a" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res5b_branch2a" + top: "res5b_branch2a" + name: "scale5b_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5b_branch2a" + top: "res5b_branch2a" + name: "res5b_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5b_branch2a" + top: "res5b_branch2b" + name: "res5b_branch2b" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res5b_branch2b" + top: "res5b_branch2b" + name: "bn5b_branch2b" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res5b_branch2b" + top: "res5b_branch2b" + name: 
"scale5b_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5b_branch2b" + top: "res5b_branch2b" + name: "res5b_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5b_branch2b" + top: "res5b_branch2c" + name: "res5b_branch2c" + type: "Convolution" + convolution_param { + + num_output: 2048 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res5b_branch2c" + top: "res5b_branch2c" + name: "bn5b_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res5b_branch2c" + top: "res5b_branch2c" + name: "scale5b_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5a" + bottom: "res5b_branch2c" + top: "res5b" + name: "res5b" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res5b" + top: "res5b" + name: "res5b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5b" + top: "res5c_branch2a" + name: "res5c_branch2a" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res5c_branch2a" + top: "res5c_branch2a" + name: "bn5c_branch2a" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res5c_branch2a" + top: "res5c_branch2a" + name: "scale5c_branch2a" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5c_branch2a" + top: "res5c_branch2a" + name: "res5c_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5c_branch2a" + top: "res5c_branch2b" + name: "res5c_branch2b" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + 
bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res5c_branch2b" + top: "res5c_branch2b" + name: "bn5c_branch2b" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res5c_branch2b" + top: "res5c_branch2b" + name: "scale5c_branch2b" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5c_branch2b" + top: "res5c_branch2b" + name: "res5c_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5c_branch2b" + top: "res5c_branch2c" + name: "res5c_branch2c" + type: "Convolution" + convolution_param { + + num_output: 2048 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} + +layer { + bottom: "res5c_branch2c" + top: "res5c_branch2c" + name: "bn5c_branch2c" + type: "BatchNorm" + batch_norm_param { + + + } +} + +layer { + bottom: "res5c_branch2c" + top: "res5c_branch2c" + name: "scale5c_branch2c" + type: "Scale" + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5b" + bottom: "res5c_branch2c" + top: "res5c" + name: "res5c" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res5c" + top: "res5c" + name: "res5c_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5c" + top: "pool5" + name: "pool5" + type: "Pooling" + pooling_param { + + kernel_size: 7 + stride: 1 + pool: AVE + } +} + +layer { + bottom: "pool5" + top: "fc1000" + name: "fc1000" + type: "InnerProduct" + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "fc1000" + bottom: "label" + top: "prob" + name: "prob" + type: "SoftmaxWithLoss" + include { + phase: TRAIN + } +} +layer { + name: "probt" + type: "Softmax" + bottom: "fc1000" + top: "probt" + include { + phase: TEST + } +} + diff --git a/models/default_vgg_16/solver.prototxt 
b/models/default_vgg_16/solver.prototxt new file mode 100644 index 00000000000..bdaf4259fe3 --- /dev/null +++ b/models/default_vgg_16/solver.prototxt @@ -0,0 +1,16 @@ +net: "models/default_vgg_16/train_val.prototxt" + +test_iter: 100 +test_interval: 10000 +momentum: 0.9 +weight_decay: 0.0005 +base_lr: 0.01 +lr_policy: "step" +gamma: 0.1 +stepsize: 100000 +max_iter: 350000 +display: 20 +snapshot: 35000 +solver_mode: CPU +snapshot_prefix: "models/default_vgg_16/default_vgg_16" + diff --git a/models/default_vgg_16/train_val.prototxt b/models/default_vgg_16/train_val.prototxt new file mode 100644 index 00000000000..9ec8487e6c1 --- /dev/null +++ b/models/default_vgg_16/train_val.prototxt @@ -0,0 +1,612 @@ +name: "VGG_ILSVRC_16_layer" +layer { + name: "data" + type: "Data" + include { + phase: TRAIN + } + transform_param { + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 124 + mirror: true + } + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 64 + backend: LMDB + } + top: "data" + top: "label" +} +layer { + name: "data" + type: "Data" + include { + phase: TEST + } + transform_param { + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 124 + mirror: false + } + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } + top: "data" + top: "label" +} +layer { + name: "conv1_1" + type: "Convolution" + bottom: "data" + top: "conv1_1" + convolution_param { + + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + + +} +layer { + bottom: "conv1_1" + top: "conv1_1" + name: "relu1_1" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv1_1" + top: "conv1_2" + name: "conv1_2" + type: "Convolution" + convolution_param { + + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + + +} +layer { + bottom: "conv1_2" 
+ top: "conv1_2" + name: "relu1_2" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv1_2" + top: "pool1" + name: "pool1" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + bottom: "pool1" + top: "conv2_1" + name: "conv2_1" + type: "Convolution" + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + + +} +layer { + bottom: "conv2_1" + top: "conv2_1" + name: "relu2_1" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv2_1" + top: "conv2_2" + name: "conv2_2" + type: "Convolution" + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + + +} +layer { + bottom: "conv2_2" + top: "conv2_2" + name: "relu2_2" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv2_2" + top: "pool2" + name: "pool2" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + bottom: "pool2" + top: "conv3_1" + name: "conv3_1" + type: "Convolution" + convolution_param { + + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + + +} +layer { + bottom: "conv3_1" + top: "conv3_1" + name: "relu3_1" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv3_1" + top: "conv3_2" + name: "conv3_2" + type: "Convolution" + convolution_param { + + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + + +} +layer { + bottom: "conv3_2" + top: "conv3_2" + name: "relu3_2" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv3_2" + top: "conv3_3" + name: "conv3_3" + type: "Convolution" + convolution_param { + + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { 
+ type: "constant" + value: 0 + } + } + + +} +layer { + bottom: "conv3_3" + top: "conv3_3" + name: "relu3_3" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv3_3" + top: "pool3" + name: "pool3" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + bottom: "pool3" + top: "conv4_1" + name: "conv4_1" + type: "Convolution" + convolution_param { + + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + + +} +layer { + bottom: "conv4_1" + top: "conv4_1" + name: "relu4_1" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv4_1" + top: "conv4_2" + name: "conv4_2" + type: "Convolution" + convolution_param { + + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + + +} +layer { + bottom: "conv4_2" + top: "conv4_2" + name: "relu4_2" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv4_2" + top: "conv4_3" + name: "conv4_3" + type: "Convolution" + convolution_param { + + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + + +} +layer { + bottom: "conv4_3" + top: "conv4_3" + name: "relu4_3" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv4_3" + top: "pool4" + name: "pool4" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + bottom: "pool4" + top: "conv5_1" + name: "conv5_1" + type: "Convolution" + convolution_param { + + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + + +} +layer { + bottom: "conv5_1" + top: "conv5_1" + name: "relu5_1" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv5_1" + top: "conv5_2" + name: "conv5_2" + type: "Convolution" + convolution_param { + + num_output: 512 + pad: 
1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + + +} +layer { + bottom: "conv5_2" + top: "conv5_2" + name: "relu5_2" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv5_2" + top: "conv5_3" + name: "conv5_3" + type: "Convolution" + convolution_param { + + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.0 + } + } + + +} +layer { + bottom: "conv5_3" + top: "conv5_3" + name: "relu5_3" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv5_3" + top: "pool5" + name: "pool5" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + bottom: "pool5" + top: "fc6" + name: "fc6" + type: "InnerProduct" + inner_product_param { + num_output: 4096 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } + + +} +layer { + bottom: "fc6" + top: "fc6" + name: "relu6" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "fc6" + top: "fc6" + name: "drop6" + type: "Dropout" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + bottom: "fc6" + top: "fc7" + name: "fc7" + type: "InnerProduct" + inner_product_param { + num_output: 4096 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } + + +} +layer { + bottom: "fc7" + top: "fc7" + name: "relu7" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "fc7" + top: "fc7" + name: "drop7" + type: "Dropout" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc8" + bottom: "fc7" + top: "fc8" + type: "InnerProduct" + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } + + +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "fc8" + bottom: "label" + top: "loss/loss" +} +layer { + name: "accuracy/top1" + type: "Accuracy" + bottom: "fc8" + bottom: 
"label" + top: "accuracy@1" + include: { phase: TEST } + accuracy_param { + top_k: 1 + } +} +layer { + name: "accuracy/top5" + type: "Accuracy" + bottom: "fc8" + bottom: "label" + top: "accuracy@5" + include: { phase: TEST } + accuracy_param { + top_k: 5 + } +} diff --git a/models/default_vgg_19/solver.prototxt b/models/default_vgg_19/solver.prototxt new file mode 100644 index 00000000000..0e51f0c83af --- /dev/null +++ b/models/default_vgg_19/solver.prototxt @@ -0,0 +1,16 @@ +net: "models/default_vgg_19/train_val.prototxt" + +test_iter: 100 +test_interval: 10000 +momentum: 0.9 +weight_decay: 0.0005 +base_lr: 0.01 +lr_policy: "step" +gamma: 0.1 +stepsize: 100000 +max_iter: 350000 +display: 20 +snapshot: 35000 +solver_mode: CPU +snapshot_prefix: "models/default_vgg_19/default_vgg_19" + diff --git a/models/default_vgg_19/train_val.prototxt b/models/default_vgg_19/train_val.prototxt new file mode 100644 index 00000000000..7dce42affb8 --- /dev/null +++ b/models/default_vgg_19/train_val.prototxt @@ -0,0 +1,664 @@ +name: "VGG_ILSVRC_19_layer" +layer { + name: "data" + type: "Data" + include { + phase: TRAIN + } + transform_param { + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 124 + mirror: true + } + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 64 + backend: LMDB + } + top: "data" + top: "label" +} +layer { + name: "data" + type: "Data" + include { + phase: TEST + } + transform_param { + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 124 + mirror: false + } + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } + top: "data" + top: "label" +} +layer { + name: "conv1_1" + type: "Convolution" + bottom: "data" + top: "conv1_1" + convolution_param { + + num_output: 64 + pad: 1 + kernel_size: 3 +weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "conv1_1" + top: "conv1_1" + name: "relu1_1" + type: 
"ReLU" + relu_param { + + } +} +layer { + bottom: "conv1_1" + top: "conv1_2" + name: "conv1_2" + type: "Convolution" + convolution_param { + + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "conv1_2" + top: "conv1_2" + name: "relu1_2" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv1_2" + top: "pool1" + name: "pool1" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + bottom: "pool1" + top: "conv2_1" + name: "conv2_1" + type: "Convolution" + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "conv2_1" + top: "conv2_1" + name: "relu2_1" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv2_1" + top: "conv2_2" + name: "conv2_2" + type: "Convolution" + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "conv2_2" + top: "conv2_2" + name: "relu2_2" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv2_2" + top: "pool2" + name: "pool2" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + bottom: "pool2" + top: "conv3_1" + name: "conv3_1" + type: "Convolution" + convolution_param { + + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "conv3_1" + top: "conv3_1" + name: "relu3_1" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv3_1" + top: "conv3_2" + name: "conv3_2" + type: "Convolution" + convolution_param { + + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: 
"conv3_2" + top: "conv3_2" + name: "relu3_2" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv3_2" + top: "conv3_3" + name: "conv3_3" + type: "Convolution" + convolution_param { + + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "conv3_3" + top: "conv3_3" + name: "relu3_3" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv3_3" + top: "conv3_4" + name: "conv3_4" + type: "Convolution" + convolution_param { + + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "conv3_4" + top: "conv3_4" + name: "relu3_4" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv3_4" + top: "pool3" + name: "pool3" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + bottom: "pool3" + top: "conv4_1" + name: "conv4_1" + type: "Convolution" + convolution_param { + + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "conv4_1" + top: "conv4_1" + name: "relu4_1" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv4_1" + top: "conv4_2" + name: "conv4_2" + type: "Convolution" + convolution_param { + + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "conv4_2" + top: "conv4_2" + name: "relu4_2" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv4_2" + top: "conv4_3" + name: "conv4_3" + type: "Convolution" + convolution_param { + + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "conv4_3" + top: "conv4_3" + name: "relu4_3" + type: "ReLU" + relu_param { + + } +} +layer 
{ + bottom: "conv4_3" + top: "conv4_4" + name: "conv4_4" + type: "Convolution" + convolution_param { + + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "conv4_4" + top: "conv4_4" + name: "relu4_4" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv4_4" + top: "pool4" + name: "pool4" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + bottom: "pool4" + top: "conv5_1" + name: "conv5_1" + type: "Convolution" + convolution_param { + + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "conv5_1" + top: "conv5_1" + name: "relu5_1" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv5_1" + top: "conv5_2" + name: "conv5_2" + type: "Convolution" + convolution_param { + + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "conv5_2" + top: "conv5_2" + name: "relu5_2" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv5_2" + top: "conv5_3" + name: "conv5_3" + type: "Convolution" + convolution_param { + + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.0 + } + } +} +layer { + bottom: "conv5_3" + top: "conv5_3" + name: "relu5_3" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv5_3" + top: "conv5_4" + name: "conv5_4" + type: "Convolution" + convolution_param { + + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "conv5_4" + top: "conv5_4" + name: "relu5_4" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv5_4" + top: "pool5" + name: "pool5" + type: "Pooling" + pooling_param { 
+ + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + bottom: "pool5" + top: "fc6" + name: "fc6" + type: "InnerProduct" + inner_product_param { + num_output: 4096 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + bottom: "fc6" + top: "fc6" + name: "relu6" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "fc6" + top: "fc6" + name: "drop6" + type: "Dropout" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + bottom: "fc6" + top: "fc7" + name: "fc7" + type: "InnerProduct" + inner_product_param { + num_output: 4096 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + bottom: "fc7" + top: "fc7" + name: "relu7" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "fc7" + top: "fc7" + name: "drop7" + type: "Dropout" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc8" + bottom: "fc7" + top: "fc8" + type: "InnerProduct" + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "fc8" + bottom: "label" + top: "loss/loss" +} +layer { + name: "accuracy/top1" + type: "Accuracy" + bottom: "fc8" + bottom: "label" + top: "accuracy@1" + include: { phase: TEST } + accuracy_param { + top_k: 1 + } +} +layer { + name: "accuracy/top5" + type: "Accuracy" + bottom: "fc8" + bottom: "label" + top: "accuracy@5" + include: { phase: TEST } + accuracy_param { + top_k: 5 + } +} diff --git a/models/finetune_flickr_style/solver.prototxt b/models/finetune_flickr_style/solver.prototxt index 5e189bc93c0..b4208d88f47 100644 --- a/models/finetune_flickr_style/solver.prototxt +++ b/models/finetune_flickr_style/solver.prototxt @@ -14,4 +14,4 @@ weight_decay: 0.0005 snapshot: 10000 snapshot_prefix: "models/finetune_flickr_style/finetune_flickr_style" # uncomment the following to default to CPU mode solving -# 
solver_mode: CPU +solver_mode: CPU diff --git a/models/intel_optimized_models/alexnet/solver.prototxt b/models/intel_optimized_models/alexnet/solver.prototxt new file mode 100644 index 00000000000..49c788f5435 --- /dev/null +++ b/models/intel_optimized_models/alexnet/solver.prototxt @@ -0,0 +1,25 @@ +#This is Intel(R) optimized (in terms of time to train) version of solver for model described in the [AlexNet](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) publication. +#Original solver.prototxt can be found in /models/bvlc_alexnet/ directory of this repository. +#Differences: +#- lr_policy is set to poly instead of step +#- base_lr is decreased to 0.007 +#- max_iter is decreased to 250000 +#- power is set to 0.6 +# +#Top-5 and Top-1 results achieved with this version of solver: +#Top-5: 80.4% +#Top-1: 57.4% +#Training was performed using server equipped with Intel(R) Xeon Phi(TM) CPU 7250 processor. +net: "models/intel_optimized_models/alexnet/train_val.prototxt" +test_iter: 1000 +test_interval: 10000 +base_lr: 0.007 +lr_policy: "poly" +power: 0.6 +display: 20 +max_iter: 250000 +momentum: 0.9 +weight_decay: 0.0005 +snapshot: 50000 +snapshot_prefix: "models/intel_optimized_models/alexnet/alexnet_train" +solver_mode: CPU diff --git a/models/intel_optimized_models/alexnet/solver_gabor_init.prototxt b/models/intel_optimized_models/alexnet/solver_gabor_init.prototxt new file mode 100644 index 00000000000..dfb4b811bb3 --- /dev/null +++ b/models/intel_optimized_models/alexnet/solver_gabor_init.prototxt @@ -0,0 +1,26 @@ +#This is Intel(R) optimized (in terms of time to train) version of solver for model described in the [AlexNet](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) publication. +#Original solver.prototxt can be found in /models/bvlc_alexnet/ directory of this repository. 
+#Differences: +#- lr_policy is set to poly instead of step +#- base_lr is decreased to 0.007 +#- max_iter is decreased to 200000 +#- power is set to 0.65 +#- net parameter is set to train_val_gabor_init.prototxt +# +#Top-5 and Top-1 results achieved with this version of solver: +#Top-5: 80.02% +#Top-1: 56.8% +#Training was performed using server equipped with Intel(R) Xeon Phi(TM) CPU 7250 processor. +net: "models/intel_optimized_models/alexnet/train_val_gabor_init.prototxt" +test_iter: 1000 +test_interval: 10000 +base_lr: 0.007 +lr_policy: "poly" +power: 0.65 +display: 20 +max_iter: 200000 +momentum: 0.9 +weight_decay: 0.0005 +snapshot: 50000 +snapshot_prefix: "models/intel_optimized_models/alexnet/alexnet_train" +solver_mode: CPU diff --git a/models/intel_optimized_models/alexnet/train_val.prototxt b/models/intel_optimized_models/alexnet/train_val.prototxt new file mode 100644 index 00000000000..588b4ea7cb5 --- /dev/null +++ b/models/intel_optimized_models/alexnet/train_val.prototxt @@ -0,0 +1,384 @@ +name: "AlexNet" +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 227 + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + } + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 256 + backend: LMDB + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mirror: false + crop_size: 227 + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + } + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 11 + stride: 4 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + 
} + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "conv1" + top: "conv1" +} +layer { + name: "norm1" + type: "LRN" + bottom: "conv1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "norm1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 2 + kernel_size: 5 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "conv2" + top: "conv2" +} +layer { + name: "norm2" + type: "LRN" + bottom: "conv2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "norm2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "conv3" + type: "Convolution" + bottom: "pool2" + top: "conv3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "conv3" + top: "conv3" +} +layer { + name: "conv4" + type: "Convolution" + bottom: "conv3" + top: "conv4" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu4" + type: "ReLU" + bottom: "conv4" + top: "conv4" +} +layer { + name: "conv5" + type: "Convolution" + bottom: "conv4" + top: 
"conv5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu5" + type: "ReLU" + bottom: "conv5" + top: "conv5" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "fc6" + type: "InnerProduct" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.005 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "drop6" + type: "Dropout" + bottom: "fc6" + top: "fc6" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc7" + type: "InnerProduct" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.005 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + name: "drop7" + type: "Dropout" + bottom: "fc7" + top: "fc7" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc8" + type: "InnerProduct" + bottom: "fc7" + top: "fc8" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "fc8" + bottom: "label" + top: "accuracy" + include { + phase: TEST + } +} +layer { + name: "loss" + type: 
"SoftmaxWithLoss" + bottom: "fc8" + bottom: "label" + top: "loss" +} diff --git a/models/intel_optimized_models/alexnet/train_val_gabor_init.prototxt b/models/intel_optimized_models/alexnet/train_val_gabor_init.prototxt new file mode 100644 index 00000000000..641d016e315 --- /dev/null +++ b/models/intel_optimized_models/alexnet/train_val_gabor_init.prototxt @@ -0,0 +1,389 @@ +#This is Intel(R) optimized (in terms of time to train) version of model described in the [AlexNet](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) publication. +#Original train_val.prototxt can be found in /models/bvlc_alexnet/ directory of this repository. +#Differences: +#- weights of first convolution are initialized by Intel(R) implementation of Gabor filters +# +name: "AlexNet" +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 227 + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + } + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 256 + backend: LMDB + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mirror: false + crop_size: 227 + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + } + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 11 + stride: 4 + weight_filler { + type: "gabor" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "conv1" + top: "conv1" +} +layer { + name: "norm1" + type: "LRN" + bottom: "conv1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} 
+layer { + name: "pool1" + type: "Pooling" + bottom: "norm1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 2 + kernel_size: 5 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "conv2" + top: "conv2" +} +layer { + name: "norm2" + type: "LRN" + bottom: "conv2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "norm2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "conv3" + type: "Convolution" + bottom: "pool2" + top: "conv3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "conv3" + top: "conv3" +} +layer { + name: "conv4" + type: "Convolution" + bottom: "conv3" + top: "conv4" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu4" + type: "ReLU" + bottom: "conv4" + top: "conv4" +} +layer { + name: "conv5" + type: "Convolution" + bottom: "conv4" + top: "conv5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + 
bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu5" + type: "ReLU" + bottom: "conv5" + top: "conv5" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "fc6" + type: "InnerProduct" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.005 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "drop6" + type: "Dropout" + bottom: "fc6" + top: "fc6" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc7" + type: "InnerProduct" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.005 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + name: "drop7" + type: "Dropout" + bottom: "fc7" + top: "fc7" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc8" + type: "InnerProduct" + bottom: "fc7" + top: "fc8" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "accuracy" + type: "Accuracy" + bottom: "fc8" + bottom: "label" + top: "accuracy" + include { + phase: TEST + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "fc8" + bottom: "label" + top: "loss" +} diff --git a/models/intel_optimized_models/googlenet/solver.prototxt b/models/intel_optimized_models/googlenet/solver.prototxt new file mode 100644 index 
00000000000..9363952d7b2 --- /dev/null +++ b/models/intel_optimized_models/googlenet/solver.prototxt @@ -0,0 +1,15 @@ +net: "models/intel_optimized_models/googlenet/train_val.prototxt" +test_iter: 1000 +test_interval: 10000 +test_initialization: false +display: 40 +average_loss: 40 +base_lr: 0.012 +lr_policy: "poly" +power: 0.5 +max_iter: 800000 +momentum: 0.9 +weight_decay: 0.0002 +snapshot: 40000 +snapshot_prefix: "models/intel_optimized_models/googlenet/googlenet" +solver_mode: CPU diff --git a/models/intel_optimized_models/googlenet/train_val.prototxt b/models/intel_optimized_models/googlenet/train_val.prototxt new file mode 100644 index 00000000000..6f9b6fded35 --- /dev/null +++ b/models/intel_optimized_models/googlenet/train_val.prototxt @@ -0,0 +1,2433 @@ +name: "GoogleNet" +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 96 + backend: LMDB + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mirror: false + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } +} +layer { + name: "conv1/7x7_s2" + type: "Convolution" + bottom: "data" + top: "conv1/7x7_s2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 3 + kernel_size: 7 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "conv1/relu_7x7" + type: "ReLU" + bottom: "conv1/7x7_s2" + top: "conv1/7x7_s2" +} +layer { + name: "pool1/3x3_s2" + type: "Pooling" + bottom: "conv1/7x7_s2" + top: "pool1/3x3_s2" + pooling_param { + pool: MAX + 
kernel_size: 3 + stride: 2 + } +} +layer { + name: "pool1/norm1" + type: "LRN" + bottom: "pool1/3x3_s2" + top: "pool1/norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv2/3x3_reduce" + type: "Convolution" + bottom: "pool1/norm1" + top: "conv2/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "conv2/relu_3x3_reduce" + type: "ReLU" + bottom: "conv2/3x3_reduce" + top: "conv2/3x3_reduce" +} +layer { + name: "conv2/3x3" + type: "Convolution" + bottom: "conv2/3x3_reduce" + top: "conv2/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "conv2/relu_3x3" + type: "ReLU" + bottom: "conv2/3x3" + top: "conv2/3x3" +} +layer { + name: "conv2/norm2" + type: "LRN" + bottom: "conv2/3x3" + top: "conv2/norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "pool2/3x3_s2" + type: "Pooling" + bottom: "conv2/norm2" + top: "pool2/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_3a/1x1" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_1x1" + type: "ReLU" + bottom: "inception_3a/1x1" + top: "inception_3a/1x1" +} +layer { + name: "inception_3a/3x3_reduce" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/3x3_reduce" + 
param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_3a/3x3_reduce" + top: "inception_3a/3x3_reduce" +} +layer { + name: "inception_3a/3x3" + type: "Convolution" + bottom: "inception_3a/3x3_reduce" + top: "inception_3a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_3x3" + type: "ReLU" + bottom: "inception_3a/3x3" + top: "inception_3a/3x3" +} +layer { + name: "inception_3a/5x5_reduce" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_3a/5x5_reduce" + top: "inception_3a/5x5_reduce" +} +layer { + name: "inception_3a/5x5" + type: "Convolution" + bottom: "inception_3a/5x5_reduce" + top: "inception_3a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_5x5" + type: "ReLU" + bottom: "inception_3a/5x5" + top: "inception_3a/5x5" +} +layer { + name: "inception_3a/pool" + type: "Pooling" + bottom: "pool2/3x3_s2" + top: "inception_3a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + 
stride: 1 + pad: 1 + } +} +layer { + name: "inception_3a/pool_proj" + type: "Convolution" + bottom: "inception_3a/pool" + top: "inception_3a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3a/relu_pool_proj" + type: "ReLU" + bottom: "inception_3a/pool_proj" + top: "inception_3a/pool_proj" +} +layer { + name: "inception_3a/output" + type: "Concat" + bottom: "inception_3a/1x1" + bottom: "inception_3a/3x3" + bottom: "inception_3a/5x5" + bottom: "inception_3a/pool_proj" + top: "inception_3a/output" +} +layer { + name: "inception_3b/1x1" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_1x1" + type: "ReLU" + bottom: "inception_3b/1x1" + top: "inception_3b/1x1" +} +layer { + name: "inception_3b/3x3_reduce" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_3b/3x3_reduce" + top: "inception_3b/3x3_reduce" +} +layer { + name: "inception_3b/3x3" + type: "Convolution" + bottom: "inception_3b/3x3_reduce" + top: "inception_3b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + pad: 1 + kernel_size: 3 + weight_filler 
{ + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_3x3" + type: "ReLU" + bottom: "inception_3b/3x3" + top: "inception_3b/3x3" +} +layer { + name: "inception_3b/5x5_reduce" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_3b/5x5_reduce" + top: "inception_3b/5x5_reduce" +} +layer { + name: "inception_3b/5x5" + type: "Convolution" + bottom: "inception_3b/5x5_reduce" + top: "inception_3b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_5x5" + type: "ReLU" + bottom: "inception_3b/5x5" + top: "inception_3b/5x5" +} +layer { + name: "inception_3b/pool" + type: "Pooling" + bottom: "inception_3a/output" + top: "inception_3b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_3b/pool_proj" + type: "Convolution" + bottom: "inception_3b/pool" + top: "inception_3b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_3b/relu_pool_proj" + type: "ReLU" + bottom: "inception_3b/pool_proj" + top: "inception_3b/pool_proj" +} +layer { + name: "inception_3b/output" + type: "Concat" + bottom: "inception_3b/1x1" + bottom: "inception_3b/3x3" + bottom: 
"inception_3b/5x5" + bottom: "inception_3b/pool_proj" + top: "inception_3b/output" +} +layer { + name: "pool3/3x3_s2" + type: "Pooling" + bottom: "inception_3b/output" + top: "pool3/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_4a/1x1" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_1x1" + type: "ReLU" + bottom: "inception_4a/1x1" + top: "inception_4a/1x1" +} +layer { + name: "inception_4a/3x3_reduce" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4a/3x3_reduce" + top: "inception_4a/3x3_reduce" +} +layer { + name: "inception_4a/3x3" + type: "Convolution" + bottom: "inception_4a/3x3_reduce" + top: "inception_4a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 208 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_3x3" + type: "ReLU" + bottom: "inception_4a/3x3" + top: "inception_4a/3x3" +} +layer { + name: "inception_4a/5x5_reduce" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + kernel_size: 1 + weight_filler { + type: 
"xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4a/5x5_reduce" + top: "inception_4a/5x5_reduce" +} +layer { + name: "inception_4a/5x5" + type: "Convolution" + bottom: "inception_4a/5x5_reduce" + top: "inception_4a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 48 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_5x5" + type: "ReLU" + bottom: "inception_4a/5x5" + top: "inception_4a/5x5" +} +layer { + name: "inception_4a/pool" + type: "Pooling" + bottom: "pool3/3x3_s2" + top: "inception_4a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4a/pool_proj" + type: "Convolution" + bottom: "inception_4a/pool" + top: "inception_4a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4a/relu_pool_proj" + type: "ReLU" + bottom: "inception_4a/pool_proj" + top: "inception_4a/pool_proj" +} +layer { + name: "inception_4a/output" + type: "Concat" + bottom: "inception_4a/1x1" + bottom: "inception_4a/3x3" + bottom: "inception_4a/5x5" + bottom: "inception_4a/pool_proj" + top: "inception_4a/output" +} +layer { + name: "loss1/ave_pool" + type: "Pooling" + bottom: "inception_4a/output" + top: "loss1/ave_pool" + pooling_param { + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + name: "loss1/conv" + type: "Convolution" + bottom: "loss1/ave_pool" + top: "loss1/conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + 
weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "loss1/relu_conv" + type: "ReLU" + bottom: "loss1/conv" + top: "loss1/conv" +} +layer { + name: "loss1/fc" + type: "InnerProduct" + bottom: "loss1/conv" + top: "loss1/fc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "loss1/relu_fc" + type: "ReLU" + bottom: "loss1/fc" + top: "loss1/fc" +} +layer { + name: "loss1/drop_fc" + type: "Dropout" + bottom: "loss1/fc" + top: "loss1/fc" + dropout_param { + dropout_ratio: 0.7 + } +} +layer { + name: "loss1/classifier" + type: "InnerProduct" + bottom: "loss1/fc" + top: "loss1/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss1/loss" + type: "SoftmaxWithLoss" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/loss1" + loss_weight: 0.3 +} +layer { + name: "loss1/top-1" + type: "Accuracy" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/top-1" + include { + phase: TEST + } +} +layer { + name: "loss1/top-5" + type: "Accuracy" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} +layer { + name: "inception_4b/1x1" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_1x1" + type: "ReLU" + bottom: "inception_4b/1x1" + top: 
"inception_4b/1x1" +} +layer { + name: "inception_4b/3x3_reduce" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 112 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4b/3x3_reduce" + top: "inception_4b/3x3_reduce" +} +layer { + name: "inception_4b/3x3" + type: "Convolution" + bottom: "inception_4b/3x3_reduce" + top: "inception_4b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 224 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_3x3" + type: "ReLU" + bottom: "inception_4b/3x3" + top: "inception_4b/3x3" +} +layer { + name: "inception_4b/5x5_reduce" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4b/5x5_reduce" + top: "inception_4b/5x5_reduce" +} +layer { + name: "inception_4b/5x5" + type: "Convolution" + bottom: "inception_4b/5x5_reduce" + top: "inception_4b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_5x5" + type: "ReLU" + bottom: "inception_4b/5x5" + top: "inception_4b/5x5" +} 
+layer { + name: "inception_4b/pool" + type: "Pooling" + bottom: "inception_4a/output" + top: "inception_4b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4b/pool_proj" + type: "Convolution" + bottom: "inception_4b/pool" + top: "inception_4b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4b/relu_pool_proj" + type: "ReLU" + bottom: "inception_4b/pool_proj" + top: "inception_4b/pool_proj" +} +layer { + name: "inception_4b/output" + type: "Concat" + bottom: "inception_4b/1x1" + bottom: "inception_4b/3x3" + bottom: "inception_4b/5x5" + bottom: "inception_4b/pool_proj" + top: "inception_4b/output" +} +layer { + name: "inception_4c/1x1" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_1x1" + type: "ReLU" + bottom: "inception_4c/1x1" + top: "inception_4c/1x1" +} +layer { + name: "inception_4c/3x3_reduce" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4c/3x3_reduce" + top: "inception_4c/3x3_reduce" +} +layer { + name: "inception_4c/3x3" + type: "Convolution" + bottom: "inception_4c/3x3_reduce" + top: "inception_4c/3x3" + 
param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_3x3" + type: "ReLU" + bottom: "inception_4c/3x3" + top: "inception_4c/3x3" +} +layer { + name: "inception_4c/5x5_reduce" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4c/5x5_reduce" + top: "inception_4c/5x5_reduce" +} +layer { + name: "inception_4c/5x5" + type: "Convolution" + bottom: "inception_4c/5x5_reduce" + top: "inception_4c/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_5x5" + type: "ReLU" + bottom: "inception_4c/5x5" + top: "inception_4c/5x5" +} +layer { + name: "inception_4c/pool" + type: "Pooling" + bottom: "inception_4b/output" + top: "inception_4c/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4c/pool_proj" + type: "Convolution" + bottom: "inception_4c/pool" + top: "inception_4c/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4c/relu_pool_proj" + type: "ReLU" + bottom: 
"inception_4c/pool_proj" + top: "inception_4c/pool_proj" +} +layer { + name: "inception_4c/output" + type: "Concat" + bottom: "inception_4c/1x1" + bottom: "inception_4c/3x3" + bottom: "inception_4c/5x5" + bottom: "inception_4c/pool_proj" + top: "inception_4c/output" +} +layer { + name: "inception_4d/1x1" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 112 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_1x1" + type: "ReLU" + bottom: "inception_4d/1x1" + top: "inception_4d/1x1" +} +layer { + name: "inception_4d/3x3_reduce" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 144 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4d/3x3_reduce" + top: "inception_4d/3x3_reduce" +} +layer { + name: "inception_4d/3x3" + type: "Convolution" + bottom: "inception_4d/3x3_reduce" + top: "inception_4d/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 288 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_3x3" + type: "ReLU" + bottom: "inception_4d/3x3" + top: "inception_4d/3x3" +} +layer { + name: "inception_4d/5x5_reduce" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + 
kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4d/5x5_reduce" + top: "inception_4d/5x5_reduce" +} +layer { + name: "inception_4d/5x5" + type: "Convolution" + bottom: "inception_4d/5x5_reduce" + top: "inception_4d/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_5x5" + type: "ReLU" + bottom: "inception_4d/5x5" + top: "inception_4d/5x5" +} +layer { + name: "inception_4d/pool" + type: "Pooling" + bottom: "inception_4c/output" + top: "inception_4d/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4d/pool_proj" + type: "Convolution" + bottom: "inception_4d/pool" + top: "inception_4d/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4d/relu_pool_proj" + type: "ReLU" + bottom: "inception_4d/pool_proj" + top: "inception_4d/pool_proj" +} +layer { + name: "inception_4d/output" + type: "Concat" + bottom: "inception_4d/1x1" + bottom: "inception_4d/3x3" + bottom: "inception_4d/5x5" + bottom: "inception_4d/pool_proj" + top: "inception_4d/output" +} +layer { + name: "loss2/ave_pool" + type: "Pooling" + bottom: "inception_4d/output" + top: "loss2/ave_pool" + pooling_param { + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + name: "loss2/conv" + type: "Convolution" + bottom: "loss2/ave_pool" + top: "loss2/conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "loss2/relu_conv" + type: "ReLU" + bottom: "loss2/conv" + top: "loss2/conv" +} +layer { + name: "loss2/fc" + type: "InnerProduct" + bottom: "loss2/conv" + top: "loss2/fc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "loss2/relu_fc" + type: "ReLU" + bottom: "loss2/fc" + top: "loss2/fc" +} +layer { + name: "loss2/drop_fc" + type: "Dropout" + bottom: "loss2/fc" + top: "loss2/fc" + dropout_param { + dropout_ratio: 0.7 + } +} +layer { + name: "loss2/classifier" + type: "InnerProduct" + bottom: "loss2/fc" + top: "loss2/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss2/loss" + type: "SoftmaxWithLoss" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/loss1" + loss_weight: 0.3 +} +layer { + name: "loss2/top-1" + type: "Accuracy" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/top-1" + include { + phase: TEST + } +} +layer { + name: "loss2/top-5" + type: "Accuracy" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} +layer { + name: "inception_4e/1x1" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: 
"inception_4e/relu_1x1" + type: "ReLU" + bottom: "inception_4e/1x1" + top: "inception_4e/1x1" +} +layer { + name: "inception_4e/3x3_reduce" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4e/3x3_reduce" + top: "inception_4e/3x3_reduce" +} +layer { + name: "inception_4e/3x3" + type: "Convolution" + bottom: "inception_4e/3x3_reduce" + top: "inception_4e/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 320 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_3x3" + type: "ReLU" + bottom: "inception_4e/3x3" + top: "inception_4e/3x3" +} +layer { + name: "inception_4e/5x5_reduce" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4e/5x5_reduce" + top: "inception_4e/5x5_reduce" +} +layer { + name: "inception_4e/5x5" + type: "Convolution" + bottom: "inception_4e/5x5_reduce" + top: "inception_4e/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: 
"inception_4e/relu_5x5" + type: "ReLU" + bottom: "inception_4e/5x5" + top: "inception_4e/5x5" +} +layer { + name: "inception_4e/pool" + type: "Pooling" + bottom: "inception_4d/output" + top: "inception_4e/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4e/pool_proj" + type: "Convolution" + bottom: "inception_4e/pool" + top: "inception_4e/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_4e/relu_pool_proj" + type: "ReLU" + bottom: "inception_4e/pool_proj" + top: "inception_4e/pool_proj" +} +layer { + name: "inception_4e/output" + type: "Concat" + bottom: "inception_4e/1x1" + bottom: "inception_4e/3x3" + bottom: "inception_4e/5x5" + bottom: "inception_4e/pool_proj" + top: "inception_4e/output" +} +layer { + name: "pool4/3x3_s2" + type: "Pooling" + bottom: "inception_4e/output" + top: "pool4/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_5a/1x1" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_1x1" + type: "ReLU" + bottom: "inception_5a/1x1" + top: "inception_5a/1x1" +} +layer { + name: "inception_5a/3x3_reduce" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer 
{ + name: "inception_5a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_5a/3x3_reduce" + top: "inception_5a/3x3_reduce" +} +layer { + name: "inception_5a/3x3" + type: "Convolution" + bottom: "inception_5a/3x3_reduce" + top: "inception_5a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 320 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_3x3" + type: "ReLU" + bottom: "inception_5a/3x3" + top: "inception_5a/3x3" +} +layer { + name: "inception_5a/5x5_reduce" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_5a/5x5_reduce" + top: "inception_5a/5x5_reduce" +} +layer { + name: "inception_5a/5x5" + type: "Convolution" + bottom: "inception_5a/5x5_reduce" + top: "inception_5a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_5x5" + type: "ReLU" + bottom: "inception_5a/5x5" + top: "inception_5a/5x5" +} +layer { + name: "inception_5a/pool" + type: "Pooling" + bottom: "pool4/3x3_s2" + top: "inception_5a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_5a/pool_proj" + type: "Convolution" + bottom: "inception_5a/pool" + top: "inception_5a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5a/relu_pool_proj" + type: "ReLU" + bottom: "inception_5a/pool_proj" + top: "inception_5a/pool_proj" +} +layer { + name: "inception_5a/output" + type: "Concat" + bottom: "inception_5a/1x1" + bottom: "inception_5a/3x3" + bottom: "inception_5a/5x5" + bottom: "inception_5a/pool_proj" + top: "inception_5a/output" +} +layer { + name: "inception_5b/1x1" + type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_1x1" + type: "ReLU" + bottom: "inception_5b/1x1" + top: "inception_5b/1x1" +} +layer { + name: "inception_5b/3x3_reduce" + type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_5b/3x3_reduce" + top: "inception_5b/3x3_reduce" +} +layer { + name: "inception_5b/3x3" + type: "Convolution" + bottom: "inception_5b/3x3_reduce" + top: "inception_5b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_3x3" + type: "ReLU" + bottom: "inception_5b/3x3" + top: "inception_5b/3x3" +} +layer { + name: "inception_5b/5x5_reduce" 
+ type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 48 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_5b/5x5_reduce" + top: "inception_5b/5x5_reduce" +} +layer { + name: "inception_5b/5x5" + type: "Convolution" + bottom: "inception_5b/5x5_reduce" + top: "inception_5b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_5x5" + type: "ReLU" + bottom: "inception_5b/5x5" + top: "inception_5b/5x5" +} +layer { + name: "inception_5b/pool" + type: "Pooling" + bottom: "inception_5a/output" + top: "inception_5b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_5b/pool_proj" + type: "Convolution" + bottom: "inception_5b/pool" + top: "inception_5b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.2 + } + } +} +layer { + name: "inception_5b/relu_pool_proj" + type: "ReLU" + bottom: "inception_5b/pool_proj" + top: "inception_5b/pool_proj" +} +layer { + name: "inception_5b/output" + type: "Concat" + bottom: "inception_5b/1x1" + bottom: "inception_5b/3x3" + bottom: "inception_5b/5x5" + bottom: "inception_5b/pool_proj" + top: "inception_5b/output" +} +layer { + name: "pool5/7x7_s1" + type: "Pooling" + bottom: "inception_5b/output" + top: "pool5/7x7_s1" + pooling_param { + pool: AVE + kernel_size: 7 + 
stride: 1 + } +} +layer { + name: "pool5/drop_7x7_s1" + type: "Dropout" + bottom: "pool5/7x7_s1" + top: "pool5/7x7_s1" + dropout_param { + dropout_ratio: 0.4 + } +} +layer { + name: "loss3/classifier" + type: "InnerProduct" + bottom: "pool5/7x7_s1" + top: "loss3/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss3/loss3" + type: "SoftmaxWithLoss" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/loss3" + loss_weight: 1 +} +layer { + name: "loss3/top-1" + type: "Accuracy" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/top-1" + include { + phase: TEST + } +} +layer { + name: "loss3/top-5" + type: "Accuracy" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} diff --git a/models/intel_optimized_models/googlenet_v2/solver.prototxt b/models/intel_optimized_models/googlenet_v2/solver.prototxt new file mode 100644 index 00000000000..8c2f161c16f --- /dev/null +++ b/models/intel_optimized_models/googlenet_v2/solver.prototxt @@ -0,0 +1,16 @@ +net: "models/intel_optimized_models/googlenet_v2/train_val.prototxt" +test_iter: 1000 +test_interval: 10000 +test_initialization: false +display: 40 +average_loss: 40 +base_lr: 0.045 +lr_policy: "step" +stepsize: 2133 +gamma: 0.96 +max_iter: 400000 +momentum: 0.9 +weight_decay: 0.0002 +snapshot: 50000 +snapshot_prefix: "models/intel_optimized_models/googlenet_v2/googlenet_v2" +solver_mode: CPU diff --git a/models/intel_optimized_models/googlenet_v2/train_val.prototxt b/models/intel_optimized_models/googlenet_v2/train_val.prototxt new file mode 100644 index 00000000000..e54132adb75 --- /dev/null +++ b/models/intel_optimized_models/googlenet_v2/train_val.prototxt @@ -0,0 +1,4044 @@ +# Inception Network (GoogLeNet Batch 
Normalization Network) +name: "InceptionNetwork" +### Training Set +layer { + top: "data" + top: "label" + name: "data" + type: "Data" + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 96 + backend: LMDB +# shuffle: true + } + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 224 +# mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + mean_value: 104 + mean_value: 117 + mean_value: 123 + } +} +### Validation Set +layer { + top: "data" + top: "label" + name: "data" + type: "Data" + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 32 + backend: LMDB + } + include { + phase: TEST + } + transform_param { + mirror: false + crop_size: 224 +# mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + mean_value: 104 + mean_value: 117 + mean_value: 123 + } +} + +layer { + bottom: "data" + top: "conv1/7x7_s2" + name: "conv1/7x7_s2" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 3 + kernel_size: 7 + stride: 2 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "conv1/7x7_s2" + name: "conv1/7x7_s2/bn" + top: "conv1/7x7_s2/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "conv1/7x7_s2/bn" + top: "conv1/7x7_s2/bn/sc" + name: "conv1/7x7_s2/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "conv1/7x7_s2/bn/sc" + top: "conv1/7x7_s2/bn/sc" + name: "conv1/7x7_s2/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv1/7x7_s2/bn/sc" + top: "pool1/3x3_s2" + name: "pool1/3x3_s2" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + bottom: "pool1/3x3_s2" + top: "conv2/3x3_reduce" + name: "conv2/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + 
bias_term: false + } +} +layer { + bottom: "conv2/3x3_reduce" + name: "conv2/3x3_reduce/bn" + top: "conv2/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "conv2/3x3_reduce/bn" + top: "conv2/3x3_reduce/bn/sc" + name: "conv2/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "conv2/3x3_reduce/bn/sc" + top: "conv2/3x3_reduce/bn/sc" + name: "conv2/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv2/3x3_reduce/bn/sc" + top: "conv2/3x3" + name: "conv2/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "conv2/3x3" + name: "conv2/3x3/bn" + top: "conv2/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "conv2/3x3/bn" + top: "conv2/3x3/bn/sc" + name: "conv2/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "conv2/3x3/bn/sc" + top: "conv2/3x3/bn/sc" + name: "conv2/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv2/3x3/bn/sc" + top: "pool2/3x3_s2" + name: "pool2/3x3_s2" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + bottom: "pool2/3x3_s2" + top: "inception_3a/1x1" + name: "inception_3a/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/1x1" + name: "inception_3a/1x1/bn" + top: "inception_3a/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/1x1/bn" + top: "inception_3a/1x1/bn/sc" + name: "inception_3a/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/1x1/bn/sc" + 
top: "inception_3a/1x1/bn/sc" + name: "inception_3a/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "pool2/3x3_s2" + top: "inception_3a/3x3_reduce" + name: "inception_3a/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/3x3_reduce" + name: "inception_3a/3x3_reduce/bn" + top: "inception_3a/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/3x3_reduce/bn" + top: "inception_3a/3x3_reduce/bn/sc" + name: "inception_3a/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/3x3_reduce/bn/sc" + top: "inception_3a/3x3_reduce/bn/sc" + name: "inception_3a/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/3x3_reduce/bn/sc" + top: "inception_3a/3x3" + name: "inception_3a/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/3x3" + name: "inception_3a/3x3/bn" + top: "inception_3a/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/3x3/bn" + top: "inception_3a/3x3/bn/sc" + name: "inception_3a/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/3x3/bn/sc" + top: "inception_3a/3x3/bn/sc" + name: "inception_3a/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "pool2/3x3_s2" + top: "inception_3a/double3x3_reduce" + name: "inception_3a/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + 
type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/double3x3_reduce" + name: "inception_3a/double3x3_reduce/bn" + top: "inception_3a/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/double3x3_reduce/bn" + top: "inception_3a/double3x3_reduce/bn/sc" + name: "inception_3a/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/double3x3_reduce/bn/sc" + top: "inception_3a/double3x3_reduce/bn/sc" + name: "inception_3a/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/double3x3_reduce/bn/sc" + top: "inception_3a/double3x3a" + name: "inception_3a/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/double3x3a" + name: "inception_3a/double3x3a/bn" + top: "inception_3a/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/double3x3a/bn" + top: "inception_3a/double3x3a/bn/sc" + name: "inception_3a/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/double3x3a/bn/sc" + top: "inception_3a/double3x3a/bn/sc" + name: "inception_3a/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/double3x3a/bn/sc" + top: "inception_3a/double3x3b" + name: "inception_3a/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/double3x3b" + name: "inception_3a/double3x3b/bn" + top: "inception_3a/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: 
"inception_3a/double3x3b/bn" + top: "inception_3a/double3x3b/bn/sc" + name: "inception_3a/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/double3x3b/bn/sc" + top: "inception_3a/double3x3b/bn/sc" + name: "inception_3a/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "pool2/3x3_s2" + top: "inception_3a/pool" + name: "inception_3a/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_3a/pool" + top: "inception_3a/pool_proj" + name: "inception_3a/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 32 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/pool_proj" + name: "inception_3a/pool_proj/bn" + top: "inception_3a/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/pool_proj/bn" + top: "inception_3a/pool_proj/bn/sc" + name: "inception_3a/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/pool_proj/bn/sc" + top: "inception_3a/pool_proj/bn/sc" + name: "inception_3a/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/1x1/bn/sc" + bottom: "inception_3a/3x3/bn/sc" + bottom: "inception_3a/double3x3b/bn/sc" + bottom: "inception_3a/pool_proj/bn/sc" + top: "inception_3a/output" + name: "inception_3a/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_3a/output" + top: "inception_3b/1x1" + name: "inception_3b/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/1x1" + name: "inception_3b/1x1/bn" + top: 
"inception_3b/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/1x1/bn" + top: "inception_3b/1x1/bn/sc" + name: "inception_3b/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/1x1/bn/sc" + top: "inception_3b/1x1/bn/sc" + name: "inception_3b/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/output" + top: "inception_3b/3x3_reduce" + name: "inception_3b/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/3x3_reduce" + name: "inception_3b/3x3_reduce/bn" + top: "inception_3b/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/3x3_reduce/bn" + top: "inception_3b/3x3_reduce/bn/sc" + name: "inception_3b/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/3x3_reduce/bn/sc" + top: "inception_3b/3x3_reduce/bn/sc" + name: "inception_3b/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/3x3_reduce/bn/sc" + top: "inception_3b/3x3" + name: "inception_3b/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/3x3" + name: "inception_3b/3x3/bn" + top: "inception_3b/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/3x3/bn" + top: "inception_3b/3x3/bn/sc" + name: "inception_3b/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/3x3/bn/sc" + top: "inception_3b/3x3/bn/sc" + name: "inception_3b/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + 
+ } +} +layer { + bottom: "inception_3a/output" + top: "inception_3b/double3x3_reduce" + name: "inception_3b/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/double3x3_reduce" + name: "inception_3b/double3x3_reduce/bn" + top: "inception_3b/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/double3x3_reduce/bn" + top: "inception_3b/double3x3_reduce/bn/sc" + name: "inception_3b/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/double3x3_reduce/bn/sc" + top: "inception_3b/double3x3_reduce/bn/sc" + name: "inception_3b/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/double3x3_reduce/bn/sc" + top: "inception_3b/double3x3a" + name: "inception_3b/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/double3x3a" + name: "inception_3b/double3x3a/bn" + top: "inception_3b/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/double3x3a/bn" + top: "inception_3b/double3x3a/bn/sc" + name: "inception_3b/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/double3x3a/bn/sc" + top: "inception_3b/double3x3a/bn/sc" + name: "inception_3b/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/double3x3a/bn/sc" + top: "inception_3b/double3x3b" + name: "inception_3b/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 
+ pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/double3x3b" + name: "inception_3b/double3x3b/bn" + top: "inception_3b/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/double3x3b/bn" + top: "inception_3b/double3x3b/bn/sc" + name: "inception_3b/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/double3x3b/bn/sc" + top: "inception_3b/double3x3b/bn/sc" + name: "inception_3b/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/output" + top: "inception_3b/pool" + name: "inception_3b/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_3b/pool" + top: "inception_3b/pool_proj" + name: "inception_3b/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/pool_proj" + name: "inception_3b/pool_proj/bn" + top: "inception_3b/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/pool_proj/bn" + top: "inception_3b/pool_proj/bn/sc" + name: "inception_3b/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/pool_proj/bn/sc" + top: "inception_3b/pool_proj/bn/sc" + name: "inception_3b/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/1x1/bn/sc" + bottom: "inception_3b/3x3/bn/sc" + bottom: "inception_3b/double3x3b/bn/sc" + bottom: "inception_3b/pool_proj/bn/sc" + top: "inception_3b/output" + name: "inception_3b/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_3b/output" + top: "inception_3c/3x3_reduce" + name: 
"inception_3c/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3c/3x3_reduce" + name: "inception_3c/3x3_reduce/bn" + top: "inception_3c/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3c/3x3_reduce/bn" + top: "inception_3c/3x3_reduce/bn/sc" + name: "inception_3c/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3c/3x3_reduce/bn/sc" + top: "inception_3c/3x3_reduce/bn/sc" + name: "inception_3c/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/3x3_reduce/bn/sc" + top: "inception_3c/3x3" + name: "inception_3c/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3c/3x3" + name: "inception_3c/3x3/bn" + top: "inception_3c/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3c/3x3/bn" + top: "inception_3c/3x3/bn/sc" + name: "inception_3c/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3c/3x3/bn/sc" + top: "inception_3c/3x3/bn/sc" + name: "inception_3c/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/output" + top: "inception_3c/double3x3_reduce" + name: "inception_3c/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3c/double3x3_reduce" + name: "inception_3c/double3x3_reduce/bn" + top: 
"inception_3c/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3c/double3x3_reduce/bn" + top: "inception_3c/double3x3_reduce/bn/sc" + name: "inception_3c/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3c/double3x3_reduce/bn/sc" + top: "inception_3c/double3x3_reduce/bn/sc" + name: "inception_3c/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/double3x3_reduce/bn/sc" + top: "inception_3c/double3x3a" + name: "inception_3c/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3c/double3x3a" + name: "inception_3c/double3x3a/bn" + top: "inception_3c/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3c/double3x3a/bn" + top: "inception_3c/double3x3a/bn/sc" + name: "inception_3c/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3c/double3x3a/bn/sc" + top: "inception_3c/double3x3a/bn/sc" + name: "inception_3c/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/double3x3a/bn/sc" + top: "inception_3c/double3x3b" + name: "inception_3c/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3c/double3x3b" + name: "inception_3c/double3x3b/bn" + top: "inception_3c/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3c/double3x3b/bn" + top: "inception_3c/double3x3b/bn/sc" + name: "inception_3c/double3x3b/bn/sc" + type: "Scale" + scale_param { + 
bias_term: true + } +} +layer { + bottom: "inception_3c/double3x3b/bn/sc" + top: "inception_3c/double3x3b/bn/sc" + name: "inception_3c/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/output" + top: "inception_3c/pool" + name: "inception_3c/pool" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + bottom: "inception_3c/3x3/bn/sc" + bottom: "inception_3c/double3x3b/bn/sc" + bottom: "inception_3c/pool" + top: "inception_3c/output" + name: "inception_3c/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_3c/output" + top: "pool3/5x5_s3" + name: "pool3/5x5_s3" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + bottom: "pool3/5x5_s3" + top: "loss1/conv" + name: "loss1/conv" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "loss1/conv" + name: "loss1/conv/bn" + top: "loss1/conv/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "loss1/conv/bn" + top: "loss1/conv/bn/sc" + name: "loss1/conv/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "loss1/conv/bn/sc" + top: "loss1/conv/bn/sc" + name: "loss1/conv/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "loss1/conv/bn/sc" + top: "loss1/fc" + name: "loss1/fc" + type: "InnerProduct" + param { + lr_mult: 1 + decay_mult: 1 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "loss1/fc" + name: "loss1/fc/bn" + top: "loss1/fc/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "loss1/fc/bn" + top: "loss1/fc/bn/sc" + name: "loss1/fc/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: 
"loss1/fc/bn/sc" + top: "loss1/fc/bn/sc" + name: "loss1/fc/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "loss1/fc/bn/sc" + top: "loss1/classifier" + name: "loss1/classifier" + type: "InnerProduct" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/loss" + name: "loss1/loss" + type: "SoftmaxWithLoss" + loss_weight: 0.3 +} +layer { + bottom: "loss1/classifier" + top: "loss1/prob" + name: "loss1/prob" + type: "Softmax" + include { + phase: TEST + } +} +layer { + bottom: "loss1/prob" + bottom: "label" + top: "loss1/top-1" + name: "loss1/top-1" + type: "Accuracy" + include { + phase: TEST + } +} +layer { + bottom: "loss1/prob" + bottom: "label" + top: "loss1/top-5" + name: "loss1/top-5" + type: "Accuracy" + accuracy_param { + top_k: 5 + } + include { + phase: TEST + } +} +layer { + bottom: "inception_3c/output" + top: "inception_4a/1x1" + name: "inception_4a/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 224 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/1x1" + name: "inception_4a/1x1/bn" + top: "inception_4a/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/1x1/bn" + top: "inception_4a/1x1/bn/sc" + name: "inception_4a/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/1x1/bn/sc" + top: "inception_4a/1x1/bn/sc" + name: "inception_4a/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/output" + top: "inception_4a/3x3_reduce" + name: "inception_4a/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + 
convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/3x3_reduce" + name: "inception_4a/3x3_reduce/bn" + top: "inception_4a/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/3x3_reduce/bn" + top: "inception_4a/3x3_reduce/bn/sc" + name: "inception_4a/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/3x3_reduce/bn/sc" + top: "inception_4a/3x3_reduce/bn/sc" + name: "inception_4a/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/3x3_reduce/bn/sc" + top: "inception_4a/3x3" + name: "inception_4a/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/3x3" + name: "inception_4a/3x3/bn" + top: "inception_4a/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/3x3/bn" + top: "inception_4a/3x3/bn/sc" + name: "inception_4a/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/3x3/bn/sc" + top: "inception_4a/3x3/bn/sc" + name: "inception_4a/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/output" + top: "inception_4a/double3x3_reduce" + name: "inception_4a/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/double3x3_reduce" + name: "inception_4a/double3x3_reduce/bn" + top: "inception_4a/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: 
"inception_4a/double3x3_reduce/bn" + top: "inception_4a/double3x3_reduce/bn/sc" + name: "inception_4a/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/double3x3_reduce/bn/sc" + top: "inception_4a/double3x3_reduce/bn/sc" + name: "inception_4a/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/double3x3_reduce/bn/sc" + top: "inception_4a/double3x3a" + name: "inception_4a/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/double3x3a" + name: "inception_4a/double3x3a/bn" + top: "inception_4a/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/double3x3a/bn" + top: "inception_4a/double3x3a/bn/sc" + name: "inception_4a/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/double3x3a/bn/sc" + top: "inception_4a/double3x3a/bn/sc" + name: "inception_4a/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/double3x3a/bn/sc" + top: "inception_4a/double3x3b" + name: "inception_4a/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/double3x3b" + name: "inception_4a/double3x3b/bn" + top: "inception_4a/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/double3x3b/bn" + top: "inception_4a/double3x3b/bn/sc" + name: "inception_4a/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/double3x3b/bn/sc" + top: 
"inception_4a/double3x3b/bn/sc" + name: "inception_4a/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/output" + top: "inception_4a/pool" + name: "inception_4a/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_4a/pool" + top: "inception_4a/pool_proj" + name: "inception_4a/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/pool_proj" + name: "inception_4a/pool_proj/bn" + top: "inception_4a/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/pool_proj/bn" + top: "inception_4a/pool_proj/bn/sc" + name: "inception_4a/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/pool_proj/bn/sc" + top: "inception_4a/pool_proj/bn/sc" + name: "inception_4a/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/1x1/bn/sc" + bottom: "inception_4a/3x3/bn/sc" + bottom: "inception_4a/double3x3b/bn/sc" + bottom: "inception_4a/pool_proj/bn/sc" + top: "inception_4a/output" + name: "inception_4a/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_4a/output" + top: "inception_4b/1x1" + name: "inception_4b/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/1x1" + name: "inception_4b/1x1/bn" + top: "inception_4b/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/1x1/bn" + top: "inception_4b/1x1/bn/sc" + name: "inception_4b/1x1/bn/sc" + type: "Scale" + scale_param { + 
bias_term: true + } +} +layer { + bottom: "inception_4b/1x1/bn/sc" + top: "inception_4b/1x1/bn/sc" + name: "inception_4b/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/output" + top: "inception_4b/3x3_reduce" + name: "inception_4b/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/3x3_reduce" + name: "inception_4b/3x3_reduce/bn" + top: "inception_4b/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/3x3_reduce/bn" + top: "inception_4b/3x3_reduce/bn/sc" + name: "inception_4b/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/3x3_reduce/bn/sc" + top: "inception_4b/3x3_reduce/bn/sc" + name: "inception_4b/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/3x3_reduce/bn/sc" + top: "inception_4b/3x3" + name: "inception_4b/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/3x3" + name: "inception_4b/3x3/bn" + top: "inception_4b/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/3x3/bn" + top: "inception_4b/3x3/bn/sc" + name: "inception_4b/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/3x3/bn/sc" + top: "inception_4b/3x3/bn/sc" + name: "inception_4b/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/output" + top: "inception_4b/double3x3_reduce" + name: "inception_4b/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + 
convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/double3x3_reduce" + name: "inception_4b/double3x3_reduce/bn" + top: "inception_4b/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/double3x3_reduce/bn" + top: "inception_4b/double3x3_reduce/bn/sc" + name: "inception_4b/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/double3x3_reduce/bn/sc" + top: "inception_4b/double3x3_reduce/bn/sc" + name: "inception_4b/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/double3x3_reduce/bn/sc" + top: "inception_4b/double3x3a" + name: "inception_4b/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/double3x3a" + name: "inception_4b/double3x3a/bn" + top: "inception_4b/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/double3x3a/bn" + top: "inception_4b/double3x3a/bn/sc" + name: "inception_4b/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/double3x3a/bn/sc" + top: "inception_4b/double3x3a/bn/sc" + name: "inception_4b/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/double3x3a/bn/sc" + top: "inception_4b/double3x3b" + name: "inception_4b/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/double3x3b" + name: "inception_4b/double3x3b/bn" + top: 
"inception_4b/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/double3x3b/bn" + top: "inception_4b/double3x3b/bn/sc" + name: "inception_4b/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/double3x3b/bn/sc" + top: "inception_4b/double3x3b/bn/sc" + name: "inception_4b/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/output" + top: "inception_4b/pool" + name: "inception_4b/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_4b/pool" + top: "inception_4b/pool_proj" + name: "inception_4b/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/pool_proj" + name: "inception_4b/pool_proj/bn" + top: "inception_4b/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/pool_proj/bn" + top: "inception_4b/pool_proj/bn/sc" + name: "inception_4b/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/pool_proj/bn/sc" + top: "inception_4b/pool_proj/bn/sc" + name: "inception_4b/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/1x1/bn/sc" + bottom: "inception_4b/3x3/bn/sc" + bottom: "inception_4b/double3x3b/bn/sc" + bottom: "inception_4b/pool_proj/bn/sc" + top: "inception_4b/output" + name: "inception_4b/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_4b/output" + top: "inception_4c/1x1" + name: "inception_4c/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + 
} + bias_term: false + } +} +layer { + bottom: "inception_4c/1x1" + name: "inception_4c/1x1/bn" + top: "inception_4c/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/1x1/bn" + top: "inception_4c/1x1/bn/sc" + name: "inception_4c/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/1x1/bn/sc" + top: "inception_4c/1x1/bn/sc" + name: "inception_4c/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/output" + top: "inception_4c/3x3_reduce" + name: "inception_4c/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/3x3_reduce" + name: "inception_4c/3x3_reduce/bn" + top: "inception_4c/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/3x3_reduce/bn" + top: "inception_4c/3x3_reduce/bn/sc" + name: "inception_4c/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/3x3_reduce/bn/sc" + top: "inception_4c/3x3_reduce/bn/sc" + name: "inception_4c/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/3x3_reduce/bn/sc" + top: "inception_4c/3x3" + name: "inception_4c/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/3x3" + name: "inception_4c/3x3/bn" + top: "inception_4c/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/3x3/bn" + top: "inception_4c/3x3/bn/sc" + name: "inception_4c/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: 
"inception_4c/3x3/bn/sc" + top: "inception_4c/3x3/bn/sc" + name: "inception_4c/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/output" + top: "inception_4c/double3x3_reduce" + name: "inception_4c/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/double3x3_reduce" + name: "inception_4c/double3x3_reduce/bn" + top: "inception_4c/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/double3x3_reduce/bn" + top: "inception_4c/double3x3_reduce/bn/sc" + name: "inception_4c/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/double3x3_reduce/bn/sc" + top: "inception_4c/double3x3_reduce/bn/sc" + name: "inception_4c/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/double3x3_reduce/bn/sc" + top: "inception_4c/double3x3a" + name: "inception_4c/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/double3x3a" + name: "inception_4c/double3x3a/bn" + top: "inception_4c/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/double3x3a/bn" + top: "inception_4c/double3x3a/bn/sc" + name: "inception_4c/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/double3x3a/bn/sc" + top: "inception_4c/double3x3a/bn/sc" + name: "inception_4c/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/double3x3a/bn/sc" + top: "inception_4c/double3x3b" + name: 
"inception_4c/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/double3x3b" + name: "inception_4c/double3x3b/bn" + top: "inception_4c/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/double3x3b/bn" + top: "inception_4c/double3x3b/bn/sc" + name: "inception_4c/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/double3x3b/bn/sc" + top: "inception_4c/double3x3b/bn/sc" + name: "inception_4c/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/output" + top: "inception_4c/pool" + name: "inception_4c/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_4c/pool" + top: "inception_4c/pool_proj" + name: "inception_4c/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/pool_proj" + name: "inception_4c/pool_proj/bn" + top: "inception_4c/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/pool_proj/bn" + top: "inception_4c/pool_proj/bn/sc" + name: "inception_4c/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/pool_proj/bn/sc" + top: "inception_4c/pool_proj/bn/sc" + name: "inception_4c/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/1x1/bn/sc" + bottom: "inception_4c/3x3/bn/sc" + bottom: "inception_4c/double3x3b/bn/sc" + bottom: "inception_4c/pool_proj/bn/sc" + top: "inception_4c/output" + name: 
"inception_4c/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_4c/output" + top: "inception_4d/1x1" + name: "inception_4d/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/1x1" + name: "inception_4d/1x1/bn" + top: "inception_4d/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/1x1/bn" + top: "inception_4d/1x1/bn/sc" + name: "inception_4d/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/1x1/bn/sc" + top: "inception_4d/1x1/bn/sc" + name: "inception_4d/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/output" + top: "inception_4d/3x3_reduce" + name: "inception_4d/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/3x3_reduce" + name: "inception_4d/3x3_reduce/bn" + top: "inception_4d/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/3x3_reduce/bn" + top: "inception_4d/3x3_reduce/bn/sc" + name: "inception_4d/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/3x3_reduce/bn/sc" + top: "inception_4d/3x3_reduce/bn/sc" + name: "inception_4d/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/3x3_reduce/bn/sc" + top: "inception_4d/3x3" + name: "inception_4d/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + 
bottom: "inception_4d/3x3" + name: "inception_4d/3x3/bn" + top: "inception_4d/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/3x3/bn" + top: "inception_4d/3x3/bn/sc" + name: "inception_4d/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/3x3/bn/sc" + top: "inception_4d/3x3/bn/sc" + name: "inception_4d/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/output" + top: "inception_4d/double3x3_reduce" + name: "inception_4d/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/double3x3_reduce" + name: "inception_4d/double3x3_reduce/bn" + top: "inception_4d/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/double3x3_reduce/bn" + top: "inception_4d/double3x3_reduce/bn/sc" + name: "inception_4d/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/double3x3_reduce/bn/sc" + top: "inception_4d/double3x3_reduce/bn/sc" + name: "inception_4d/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/double3x3_reduce/bn/sc" + top: "inception_4d/double3x3a" + name: "inception_4d/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/double3x3a" + name: "inception_4d/double3x3a/bn" + top: "inception_4d/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/double3x3a/bn" + top: "inception_4d/double3x3a/bn/sc" + name: "inception_4d/double3x3a/bn/sc" + type: 
"Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/double3x3a/bn/sc" + top: "inception_4d/double3x3a/bn/sc" + name: "inception_4d/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/double3x3a/bn/sc" + top: "inception_4d/double3x3b" + name: "inception_4d/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/double3x3b" + name: "inception_4d/double3x3b/bn" + top: "inception_4d/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/double3x3b/bn" + top: "inception_4d/double3x3b/bn/sc" + name: "inception_4d/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/double3x3b/bn/sc" + top: "inception_4d/double3x3b/bn/sc" + name: "inception_4d/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/output" + top: "inception_4d/pool" + name: "inception_4d/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_4d/pool" + top: "inception_4d/pool_proj" + name: "inception_4d/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/pool_proj" + name: "inception_4d/pool_proj/bn" + top: "inception_4d/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/pool_proj/bn" + top: "inception_4d/pool_proj/bn/sc" + name: "inception_4d/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/pool_proj/bn/sc" + top: 
"inception_4d/pool_proj/bn/sc" + name: "inception_4d/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/1x1/bn/sc" + bottom: "inception_4d/3x3/bn/sc" + bottom: "inception_4d/double3x3b/bn/sc" + bottom: "inception_4d/pool_proj/bn/sc" + top: "inception_4d/output" + name: "inception_4d/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_4d/output" + top: "inception_4e/3x3_reduce" + name: "inception_4e/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4e/3x3_reduce" + name: "inception_4e/3x3_reduce/bn" + top: "inception_4e/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4e/3x3_reduce/bn" + top: "inception_4e/3x3_reduce/bn/sc" + name: "inception_4e/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4e/3x3_reduce/bn/sc" + top: "inception_4e/3x3_reduce/bn/sc" + name: "inception_4e/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/3x3_reduce/bn/sc" + top: "inception_4e/3x3" + name: "inception_4e/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4e/3x3" + name: "inception_4e/3x3/bn" + top: "inception_4e/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4e/3x3/bn" + top: "inception_4e/3x3/bn/sc" + name: "inception_4e/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4e/3x3/bn/sc" + top: "inception_4e/3x3/bn/sc" + name: "inception_4e/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} 
+layer { + bottom: "inception_4d/output" + top: "inception_4e/double3x3_reduce" + name: "inception_4e/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4e/double3x3_reduce" + name: "inception_4e/double3x3_reduce/bn" + top: "inception_4e/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4e/double3x3_reduce/bn" + top: "inception_4e/double3x3_reduce/bn/sc" + name: "inception_4e/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4e/double3x3_reduce/bn/sc" + top: "inception_4e/double3x3_reduce/bn/sc" + name: "inception_4e/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/double3x3_reduce/bn/sc" + top: "inception_4e/double3x3a" + name: "inception_4e/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4e/double3x3a" + name: "inception_4e/double3x3a/bn" + top: "inception_4e/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4e/double3x3a/bn" + top: "inception_4e/double3x3a/bn/sc" + name: "inception_4e/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4e/double3x3a/bn/sc" + top: "inception_4e/double3x3a/bn/sc" + name: "inception_4e/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/double3x3a/bn/sc" + top: "inception_4e/double3x3b" + name: "inception_4e/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 256 + 
pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4e/double3x3b" + name: "inception_4e/double3x3b/bn" + top: "inception_4e/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4e/double3x3b/bn" + top: "inception_4e/double3x3b/bn/sc" + name: "inception_4e/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4e/double3x3b/bn/sc" + top: "inception_4e/double3x3b/bn/sc" + name: "inception_4e/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/output" + top: "inception_4e/pool" + name: "inception_4e/pool" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + bottom: "inception_4e/3x3/bn/sc" + bottom: "inception_4e/double3x3b/bn/sc" + bottom: "inception_4e/pool" + top: "inception_4e/output" + name: "inception_4e/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_4e/output" + top: "pool4/5x5_s3" + name: "pool4/5x5_s3" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + bottom: "pool4/5x5_s3" + top: "loss2/conv" + name: "loss2/conv" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "loss2/conv" + name: "loss2/conv/bn" + top: "loss2/conv/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "loss2/conv/bn" + top: "loss2/conv/bn/sc" + name: "loss2/conv/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "loss2/conv/bn/sc" + top: "loss2/conv/bn/sc" + name: "loss2/conv/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "loss2/conv/bn/sc" + top: "loss2/fc" + name: "loss2/fc" + type: "InnerProduct" + param 
{ + lr_mult: 1 + decay_mult: 1 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "loss2/fc" + name: "loss2/fc/bn" + top: "loss2/fc/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "loss2/fc/bn" + top: "loss2/fc/bn/sc" + name: "loss2/fc/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "loss2/fc/bn/sc" + top: "loss2/fc/bn/sc" + name: "loss2/fc/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "loss2/fc/bn/sc" + top: "loss2/classifier" + name: "loss2/classifier" + type: "InnerProduct" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/loss" + name: "loss2/loss" + type: "SoftmaxWithLoss" + loss_weight: 0.3 +} +layer { + bottom: "loss2/classifier" + top: "loss2/prob" + name: "loss2/prob" + type: "Softmax" + include { + phase: TEST + } +} +layer { + bottom: "loss2/prob" + bottom: "label" + top: "loss2/top-1" + name: "loss2/top-1" + type: "Accuracy" + include { + phase: TEST + } +} +layer { + bottom: "loss2/prob" + bottom: "label" + top: "loss2/top-5" + name: "loss2/top-5" + type: "Accuracy" + accuracy_param { + top_k: 5 + } + include { + phase: TEST + } +} +layer { + bottom: "inception_4e/output" + top: "inception_5a/1x1" + name: "inception_5a/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 352 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/1x1" + name: "inception_5a/1x1/bn" + top: "inception_5a/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/1x1/bn" + top: "inception_5a/1x1/bn/sc" + 
name: "inception_5a/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/1x1/bn/sc" + top: "inception_5a/1x1/bn/sc" + name: "inception_5a/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/output" + top: "inception_5a/3x3_reduce" + name: "inception_5a/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/3x3_reduce" + name: "inception_5a/3x3_reduce/bn" + top: "inception_5a/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/3x3_reduce/bn" + top: "inception_5a/3x3_reduce/bn/sc" + name: "inception_5a/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/3x3_reduce/bn/sc" + top: "inception_5a/3x3_reduce/bn/sc" + name: "inception_5a/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/3x3_reduce/bn/sc" + top: "inception_5a/3x3" + name: "inception_5a/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 320 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/3x3" + name: "inception_5a/3x3/bn" + top: "inception_5a/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/3x3/bn" + top: "inception_5a/3x3/bn/sc" + name: "inception_5a/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/3x3/bn/sc" + top: "inception_5a/3x3/bn/sc" + name: "inception_5a/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/output" + top: "inception_5a/double3x3_reduce" + name: "inception_5a/double3x3_reduce" + type: 
"Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/double3x3_reduce" + name: "inception_5a/double3x3_reduce/bn" + top: "inception_5a/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/double3x3_reduce/bn" + top: "inception_5a/double3x3_reduce/bn/sc" + name: "inception_5a/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/double3x3_reduce/bn/sc" + top: "inception_5a/double3x3_reduce/bn/sc" + name: "inception_5a/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/double3x3_reduce/bn/sc" + top: "inception_5a/double3x3a" + name: "inception_5a/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 224 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/double3x3a" + name: "inception_5a/double3x3a/bn" + top: "inception_5a/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/double3x3a/bn" + top: "inception_5a/double3x3a/bn/sc" + name: "inception_5a/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/double3x3a/bn/sc" + top: "inception_5a/double3x3a/bn/sc" + name: "inception_5a/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/double3x3a/bn/sc" + top: "inception_5a/double3x3b" + name: "inception_5a/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 224 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: 
"inception_5a/double3x3b" + name: "inception_5a/double3x3b/bn" + top: "inception_5a/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/double3x3b/bn" + top: "inception_5a/double3x3b/bn/sc" + name: "inception_5a/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/double3x3b/bn/sc" + top: "inception_5a/double3x3b/bn/sc" + name: "inception_5a/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/output" + top: "inception_5a/pool" + name: "inception_5a/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_5a/pool" + top: "inception_5a/pool_proj" + name: "inception_5a/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/pool_proj" + name: "inception_5a/pool_proj/bn" + top: "inception_5a/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/pool_proj/bn" + top: "inception_5a/pool_proj/bn/sc" + name: "inception_5a/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/pool_proj/bn/sc" + top: "inception_5a/pool_proj/bn/sc" + name: "inception_5a/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/1x1/bn/sc" + bottom: "inception_5a/3x3/bn/sc" + bottom: "inception_5a/double3x3b/bn/sc" + bottom: "inception_5a/pool_proj/bn/sc" + top: "inception_5a/output" + name: "inception_5a/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_5a/output" + top: "inception_5b/1x1" + name: "inception_5b/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 352 + 
pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/1x1" + name: "inception_5b/1x1/bn" + top: "inception_5b/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/1x1/bn" + top: "inception_5b/1x1/bn/sc" + name: "inception_5b/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/1x1/bn/sc" + top: "inception_5b/1x1/bn/sc" + name: "inception_5b/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/output" + top: "inception_5b/3x3_reduce" + name: "inception_5b/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/3x3_reduce" + name: "inception_5b/3x3_reduce/bn" + top: "inception_5b/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/3x3_reduce/bn" + top: "inception_5b/3x3_reduce/bn/sc" + name: "inception_5b/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/3x3_reduce/bn/sc" + top: "inception_5b/3x3_reduce/bn/sc" + name: "inception_5b/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5b/3x3_reduce/bn/sc" + top: "inception_5b/3x3" + name: "inception_5b/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 320 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/3x3" + name: "inception_5b/3x3/bn" + top: "inception_5b/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/3x3/bn" + top: "inception_5b/3x3/bn/sc" + name: "inception_5b/3x3/bn/sc" + type: "Scale" + 
scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/3x3/bn/sc" + top: "inception_5b/3x3/bn/sc" + name: "inception_5b/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/output" + top: "inception_5b/double3x3_reduce" + name: "inception_5b/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/double3x3_reduce" + name: "inception_5b/double3x3_reduce/bn" + top: "inception_5b/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/double3x3_reduce/bn" + top: "inception_5b/double3x3_reduce/bn/sc" + name: "inception_5b/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/double3x3_reduce/bn/sc" + top: "inception_5b/double3x3_reduce/bn/sc" + name: "inception_5b/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5b/double3x3_reduce/bn/sc" + top: "inception_5b/double3x3a" + name: "inception_5b/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 224 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/double3x3a" + name: "inception_5b/double3x3a/bn" + top: "inception_5b/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/double3x3a/bn" + top: "inception_5b/double3x3a/bn/sc" + name: "inception_5b/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/double3x3a/bn/sc" + top: "inception_5b/double3x3a/bn/sc" + name: "inception_5b/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: 
"inception_5b/double3x3a/bn/sc" + top: "inception_5b/double3x3b" + name: "inception_5b/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 224 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/double3x3b" + name: "inception_5b/double3x3b/bn" + top: "inception_5b/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/double3x3b/bn" + top: "inception_5b/double3x3b/bn/sc" + name: "inception_5b/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/double3x3b/bn/sc" + top: "inception_5b/double3x3b/bn/sc" + name: "inception_5b/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/output" + top: "inception_5b/pool" + name: "inception_5b/pool" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_5b/pool" + top: "inception_5b/pool_proj" + name: "inception_5b/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/pool_proj" + name: "inception_5b/pool_proj/bn" + top: "inception_5b/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/pool_proj/bn" + top: "inception_5b/pool_proj/bn/sc" + name: "inception_5b/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/pool_proj/bn/sc" + top: "inception_5b/pool_proj/bn/sc" + name: "inception_5b/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5b/1x1/bn/sc" + bottom: "inception_5b/3x3/bn/sc" + bottom: "inception_5b/double3x3b/bn/sc" + bottom: 
"inception_5b/pool_proj/bn/sc" + top: "inception_5b/output" + name: "inception_5b/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_5b/output" + top: "pool5/7x7_s1" + name: "pool5/7x7_s1" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 7 + stride: 1 + } +} +layer { + bottom: "pool5/7x7_s1" + top: "loss3/classifier" + name: "loss3/classifier" + type: "InnerProduct" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/loss" + name: "loss3/loss" + type: "SoftmaxWithLoss" + loss_weight: 1 +} +layer { + bottom: "loss3/classifier" + top: "loss3/prob" + name: "loss3/prob" + type: "Softmax" + include { + phase: TEST + } +} +layer { + bottom: "loss3/prob" + bottom: "label" + top: "loss3/top-1" + name: "loss3/top-1" + type: "Accuracy" + include { + phase: TEST + } +} +layer { + bottom: "loss3/prob" + bottom: "label" + top: "loss3/top-5" + name: "loss3/top-5" + type: "Accuracy" + accuracy_param { + top_k: 5 + } + include { + phase: TEST + } +} diff --git a/models/intel_optimized_models/multinode/alexnet_4nodes/solver.prototxt b/models/intel_optimized_models/multinode/alexnet_4nodes/solver.prototxt new file mode 100644 index 00000000000..053429a920a --- /dev/null +++ b/models/intel_optimized_models/multinode/alexnet_4nodes/solver.prototxt @@ -0,0 +1,25 @@ +#This is Intel(R) optimized (in terms of time to train) version of solver for model described in the [AlexNet](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) publication. +#Original solver.prototxt can be found in /models/bvlc_alexnet/ directory of this repository. 
+#Differences: +#- lr_policy is set to poly instead of step +#- base_lr is decreased to 0.06 +#- max_iter is decreased to 110000 +#- power is set to 0.6 +# +#Top-5 and Top-1 results achieved with this version of solver: +#Top-5: 80.36% +#Top-1: 57.51% +#Training was performed using server equipped with Intel(R) Xeon Phi(TM) CPU 7250 processor. +net: "models/intel_optimized_models/multinode/alexnet_4nodes/train_val.prototxt" +#test_iter: 1000 +#test_interval: 10000 +base_lr: 0.06 +lr_policy: "poly" +power: 0.6 +display: 200 +max_iter: 110000 +momentum: 0.9 +weight_decay: 0.0005 +snapshot: 25000 +snapshot_prefix: "models/intel_optimized_models/multinode/alexnet_4nodes/alexnet_train" +solver_mode: CPU diff --git a/models/intel_optimized_models/multinode/alexnet_4nodes/train_val.prototxt b/models/intel_optimized_models/multinode/alexnet_4nodes/train_val.prototxt new file mode 100644 index 00000000000..ff58839f4d2 --- /dev/null +++ b/models/intel_optimized_models/multinode/alexnet_4nodes/train_val.prototxt @@ -0,0 +1,398 @@ +name: "AlexNet" +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 227 + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + } + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 256 + backend: LMDB + shuffle: true + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mirror: false + crop_size: 227 + mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + } + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } +} +layer { + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 11 + stride: 4 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { 
+ type: "constant" + value: 0 + } + } +} +layer { + name: "relu1" + type: "ReLU" + bottom: "conv1" + top: "conv1" +} +layer { + name: "norm1" + type: "LRN" + bottom: "conv1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "pool1" + type: "Pooling" + bottom: "norm1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 2 + kernel_size: 5 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu2" + type: "ReLU" + bottom: "conv2" + top: "conv2" +} +layer { + name: "norm2" + type: "LRN" + bottom: "conv2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "pool2" + type: "Pooling" + bottom: "norm2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "conv3" + type: "Convolution" + bottom: "pool2" + top: "conv3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3" + type: "ReLU" + bottom: "conv3" + top: "conv3" +} +layer { + name: "conv4" + type: "Convolution" + bottom: "conv3" + top: "conv4" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu4" + type: "ReLU" + bottom: "conv4" + top: "conv4" +} +layer { + name: "conv5" + type: 
"Convolution" + bottom: "conv4" + top: "conv5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + group: 2 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu5" + type: "ReLU" + bottom: "conv5" + top: "conv5" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "fc6" + type: "InnerProduct" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.005 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "drop6" + type: "Dropout" + bottom: "fc6" + top: "fc6" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc7" + type: "InnerProduct" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 4096 + weight_filler { + type: "gaussian" + std: 0.005 + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + name: "drop7" + type: "Dropout" + bottom: "fc7" + top: "fc7" + dropout_param { + dropout_ratio: 0.5 + } +} +layer { + name: "fc8" + type: "InnerProduct" + bottom: "fc7" + top: "fc8" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss" + type: "SoftmaxWithLoss" + bottom: "fc8" + bottom: "label" + top: "loss" +} +layer { + name: 
"loss3/top-1" + type: "Accuracy" + bottom: "fc8" + bottom: "label" + top: "loss3/top-1" + include { + phase: TEST + } +} +layer { + name: "loss3/top-5" + type: "Accuracy" + bottom: "fc8" + bottom: "label" + top: "loss3/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} \ No newline at end of file diff --git a/models/intel_optimized_models/multinode/googlenet_16nodes/solver.prototxt b/models/intel_optimized_models/multinode/googlenet_16nodes/solver.prototxt new file mode 100644 index 00000000000..4c9b59fc4a4 --- /dev/null +++ b/models/intel_optimized_models/multinode/googlenet_16nodes/solver.prototxt @@ -0,0 +1,27 @@ +#This is Intel(R) optimized (in terms of time to train) version of solver for model described in the [GoogLeNet](http://arxiv.org/abs/1409.4842) publication. +#Original solver.prototxt can be found in /models/bvlc_googlenet/ directory of this repository. +#Differences: +#- base_lr is set to 0.065 +#- max_iter is set to 100000 +# +#- bias_filler value changed to 0.1 +# +#Top-5 and Top-1 results achieved with this version of solver: +#Top-5: 88.74% +#Top-1: 68.35% +#Training was performed using server equipped with Intel(R) Xeon Phi(TM) CPU 7250 processor. 
+net: "models/intel_optimized_models/multinode/googlenet_16nodes/train_val.prototxt" +#test_iter: 1000 +#test_interval: 10000 +#test_initialization: false +display: 40 +average_loss: 40 +base_lr: 0.065 +lr_policy: "poly" +power: 0.5 +max_iter: 100000 +momentum: 0.9 +weight_decay: 0.0002 +snapshot: 50000 +snapshot_prefix: "models/intel_optimized_models/multinode/googlenet_16nodes/googlenet" +solver_mode: CPU diff --git a/models/intel_optimized_models/multinode/googlenet_16nodes/train_val.prototxt b/models/intel_optimized_models/multinode/googlenet_16nodes/train_val.prototxt new file mode 100644 index 00000000000..f5276ab9732 --- /dev/null +++ b/models/intel_optimized_models/multinode/googlenet_16nodes/train_val.prototxt @@ -0,0 +1,2434 @@ +name: "GoogleNet" +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 64 + backend: LMDB + shuffle: true + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mirror: false + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } +} +layer { + name: "conv1/7x7_s2" + type: "Convolution" + bottom: "data" + top: "conv1/7x7_s2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 3 + kernel_size: 7 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "conv1/relu_7x7" + type: "ReLU" + bottom: "conv1/7x7_s2" + top: "conv1/7x7_s2" +} +layer { + name: "pool1/3x3_s2" + type: "Pooling" + bottom: "conv1/7x7_s2" + top: "pool1/3x3_s2" + pooling_param { + pool: MAX + 
kernel_size: 3 + stride: 2 + } +} +layer { + name: "pool1/norm1" + type: "LRN" + bottom: "pool1/3x3_s2" + top: "pool1/norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv2/3x3_reduce" + type: "Convolution" + bottom: "pool1/norm1" + top: "conv2/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "conv2/relu_3x3_reduce" + type: "ReLU" + bottom: "conv2/3x3_reduce" + top: "conv2/3x3_reduce" +} +layer { + name: "conv2/3x3" + type: "Convolution" + bottom: "conv2/3x3_reduce" + top: "conv2/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "conv2/relu_3x3" + type: "ReLU" + bottom: "conv2/3x3" + top: "conv2/3x3" +} +layer { + name: "conv2/norm2" + type: "LRN" + bottom: "conv2/3x3" + top: "conv2/norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "pool2/3x3_s2" + type: "Pooling" + bottom: "conv2/norm2" + top: "pool2/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_3a/1x1" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_1x1" + type: "ReLU" + bottom: "inception_3a/1x1" + top: "inception_3a/1x1" +} +layer { + name: "inception_3a/3x3_reduce" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/3x3_reduce" + 
param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_3a/3x3_reduce" + top: "inception_3a/3x3_reduce" +} +layer { + name: "inception_3a/3x3" + type: "Convolution" + bottom: "inception_3a/3x3_reduce" + top: "inception_3a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_3x3" + type: "ReLU" + bottom: "inception_3a/3x3" + top: "inception_3a/3x3" +} +layer { + name: "inception_3a/5x5_reduce" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_3a/5x5_reduce" + top: "inception_3a/5x5_reduce" +} +layer { + name: "inception_3a/5x5" + type: "Convolution" + bottom: "inception_3a/5x5_reduce" + top: "inception_3a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_5x5" + type: "ReLU" + bottom: "inception_3a/5x5" + top: "inception_3a/5x5" +} +layer { + name: "inception_3a/pool" + type: "Pooling" + bottom: "pool2/3x3_s2" + top: "inception_3a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + 
stride: 1 + pad: 1 + } +} +layer { + name: "inception_3a/pool_proj" + type: "Convolution" + bottom: "inception_3a/pool" + top: "inception_3a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_pool_proj" + type: "ReLU" + bottom: "inception_3a/pool_proj" + top: "inception_3a/pool_proj" +} +layer { + name: "inception_3a/output" + type: "Concat" + bottom: "inception_3a/1x1" + bottom: "inception_3a/3x3" + bottom: "inception_3a/5x5" + bottom: "inception_3a/pool_proj" + top: "inception_3a/output" +} +layer { + name: "inception_3b/1x1" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_1x1" + type: "ReLU" + bottom: "inception_3b/1x1" + top: "inception_3b/1x1" +} +layer { + name: "inception_3b/3x3_reduce" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_3b/3x3_reduce" + top: "inception_3b/3x3_reduce" +} +layer { + name: "inception_3b/3x3" + type: "Convolution" + bottom: "inception_3b/3x3_reduce" + top: "inception_3b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + pad: 1 + kernel_size: 3 + weight_filler 
{ + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_3x3" + type: "ReLU" + bottom: "inception_3b/3x3" + top: "inception_3b/3x3" +} +layer { + name: "inception_3b/5x5_reduce" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_3b/5x5_reduce" + top: "inception_3b/5x5_reduce" +} +layer { + name: "inception_3b/5x5" + type: "Convolution" + bottom: "inception_3b/5x5_reduce" + top: "inception_3b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_5x5" + type: "ReLU" + bottom: "inception_3b/5x5" + top: "inception_3b/5x5" +} +layer { + name: "inception_3b/pool" + type: "Pooling" + bottom: "inception_3a/output" + top: "inception_3b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_3b/pool_proj" + type: "Convolution" + bottom: "inception_3b/pool" + top: "inception_3b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_pool_proj" + type: "ReLU" + bottom: "inception_3b/pool_proj" + top: "inception_3b/pool_proj" +} +layer { + name: "inception_3b/output" + type: "Concat" + bottom: "inception_3b/1x1" + bottom: "inception_3b/3x3" + bottom: 
"inception_3b/5x5" + bottom: "inception_3b/pool_proj" + top: "inception_3b/output" +} +layer { + name: "pool3/3x3_s2" + type: "Pooling" + bottom: "inception_3b/output" + top: "pool3/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_4a/1x1" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_1x1" + type: "ReLU" + bottom: "inception_4a/1x1" + top: "inception_4a/1x1" +} +layer { + name: "inception_4a/3x3_reduce" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4a/3x3_reduce" + top: "inception_4a/3x3_reduce" +} +layer { + name: "inception_4a/3x3" + type: "Convolution" + bottom: "inception_4a/3x3_reduce" + top: "inception_4a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 208 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_3x3" + type: "ReLU" + bottom: "inception_4a/3x3" + top: "inception_4a/3x3" +} +layer { + name: "inception_4a/5x5_reduce" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + kernel_size: 1 + weight_filler { + type: 
"xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4a/5x5_reduce" + top: "inception_4a/5x5_reduce" +} +layer { + name: "inception_4a/5x5" + type: "Convolution" + bottom: "inception_4a/5x5_reduce" + top: "inception_4a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 48 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_5x5" + type: "ReLU" + bottom: "inception_4a/5x5" + top: "inception_4a/5x5" +} +layer { + name: "inception_4a/pool" + type: "Pooling" + bottom: "pool3/3x3_s2" + top: "inception_4a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4a/pool_proj" + type: "Convolution" + bottom: "inception_4a/pool" + top: "inception_4a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_pool_proj" + type: "ReLU" + bottom: "inception_4a/pool_proj" + top: "inception_4a/pool_proj" +} +layer { + name: "inception_4a/output" + type: "Concat" + bottom: "inception_4a/1x1" + bottom: "inception_4a/3x3" + bottom: "inception_4a/5x5" + bottom: "inception_4a/pool_proj" + top: "inception_4a/output" +} +layer { + name: "loss1/ave_pool" + type: "Pooling" + bottom: "inception_4a/output" + top: "loss1/ave_pool" + pooling_param { + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + name: "loss1/conv" + type: "Convolution" + bottom: "loss1/ave_pool" + top: "loss1/conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + 
weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "loss1/relu_conv" + type: "ReLU" + bottom: "loss1/conv" + top: "loss1/conv" +} +layer { + name: "loss1/fc" + type: "InnerProduct" + bottom: "loss1/conv" + top: "loss1/fc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "loss1/relu_fc" + type: "ReLU" + bottom: "loss1/fc" + top: "loss1/fc" +} +layer { + name: "loss1/drop_fc" + type: "Dropout" + bottom: "loss1/fc" + top: "loss1/fc" + dropout_param { + dropout_ratio: 0.7 + } +} +layer { + name: "loss1/classifier" + type: "InnerProduct" + bottom: "loss1/fc" + top: "loss1/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss1/loss" + type: "SoftmaxWithLoss" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/loss1" + loss_weight: 0.3 +} +layer { + name: "loss1/top-1" + type: "Accuracy" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/top-1" + include { + phase: TEST + } +} +layer { + name: "loss1/top-5" + type: "Accuracy" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} +layer { + name: "inception_4b/1x1" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_1x1" + type: "ReLU" + bottom: "inception_4b/1x1" + top: 
"inception_4b/1x1" +} +layer { + name: "inception_4b/3x3_reduce" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 112 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4b/3x3_reduce" + top: "inception_4b/3x3_reduce" +} +layer { + name: "inception_4b/3x3" + type: "Convolution" + bottom: "inception_4b/3x3_reduce" + top: "inception_4b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 224 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_3x3" + type: "ReLU" + bottom: "inception_4b/3x3" + top: "inception_4b/3x3" +} +layer { + name: "inception_4b/5x5_reduce" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4b/5x5_reduce" + top: "inception_4b/5x5_reduce" +} +layer { + name: "inception_4b/5x5" + type: "Convolution" + bottom: "inception_4b/5x5_reduce" + top: "inception_4b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_5x5" + type: "ReLU" + bottom: "inception_4b/5x5" + top: "inception_4b/5x5" +} 
+layer { + name: "inception_4b/pool" + type: "Pooling" + bottom: "inception_4a/output" + top: "inception_4b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4b/pool_proj" + type: "Convolution" + bottom: "inception_4b/pool" + top: "inception_4b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_pool_proj" + type: "ReLU" + bottom: "inception_4b/pool_proj" + top: "inception_4b/pool_proj" +} +layer { + name: "inception_4b/output" + type: "Concat" + bottom: "inception_4b/1x1" + bottom: "inception_4b/3x3" + bottom: "inception_4b/5x5" + bottom: "inception_4b/pool_proj" + top: "inception_4b/output" +} +layer { + name: "inception_4c/1x1" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_1x1" + type: "ReLU" + bottom: "inception_4c/1x1" + top: "inception_4c/1x1" +} +layer { + name: "inception_4c/3x3_reduce" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4c/3x3_reduce" + top: "inception_4c/3x3_reduce" +} +layer { + name: "inception_4c/3x3" + type: "Convolution" + bottom: "inception_4c/3x3_reduce" + top: "inception_4c/3x3" + 
param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_3x3" + type: "ReLU" + bottom: "inception_4c/3x3" + top: "inception_4c/3x3" +} +layer { + name: "inception_4c/5x5_reduce" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4c/5x5_reduce" + top: "inception_4c/5x5_reduce" +} +layer { + name: "inception_4c/5x5" + type: "Convolution" + bottom: "inception_4c/5x5_reduce" + top: "inception_4c/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_5x5" + type: "ReLU" + bottom: "inception_4c/5x5" + top: "inception_4c/5x5" +} +layer { + name: "inception_4c/pool" + type: "Pooling" + bottom: "inception_4b/output" + top: "inception_4c/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4c/pool_proj" + type: "Convolution" + bottom: "inception_4c/pool" + top: "inception_4c/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_pool_proj" + type: "ReLU" + bottom: 
"inception_4c/pool_proj" + top: "inception_4c/pool_proj" +} +layer { + name: "inception_4c/output" + type: "Concat" + bottom: "inception_4c/1x1" + bottom: "inception_4c/3x3" + bottom: "inception_4c/5x5" + bottom: "inception_4c/pool_proj" + top: "inception_4c/output" +} +layer { + name: "inception_4d/1x1" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 112 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_1x1" + type: "ReLU" + bottom: "inception_4d/1x1" + top: "inception_4d/1x1" +} +layer { + name: "inception_4d/3x3_reduce" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 144 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4d/3x3_reduce" + top: "inception_4d/3x3_reduce" +} +layer { + name: "inception_4d/3x3" + type: "Convolution" + bottom: "inception_4d/3x3_reduce" + top: "inception_4d/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 288 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_3x3" + type: "ReLU" + bottom: "inception_4d/3x3" + top: "inception_4d/3x3" +} +layer { + name: "inception_4d/5x5_reduce" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + 
kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4d/5x5_reduce" + top: "inception_4d/5x5_reduce" +} +layer { + name: "inception_4d/5x5" + type: "Convolution" + bottom: "inception_4d/5x5_reduce" + top: "inception_4d/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_5x5" + type: "ReLU" + bottom: "inception_4d/5x5" + top: "inception_4d/5x5" +} +layer { + name: "inception_4d/pool" + type: "Pooling" + bottom: "inception_4c/output" + top: "inception_4d/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4d/pool_proj" + type: "Convolution" + bottom: "inception_4d/pool" + top: "inception_4d/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_pool_proj" + type: "ReLU" + bottom: "inception_4d/pool_proj" + top: "inception_4d/pool_proj" +} +layer { + name: "inception_4d/output" + type: "Concat" + bottom: "inception_4d/1x1" + bottom: "inception_4d/3x3" + bottom: "inception_4d/5x5" + bottom: "inception_4d/pool_proj" + top: "inception_4d/output" +} +layer { + name: "loss2/ave_pool" + type: "Pooling" + bottom: "inception_4d/output" + top: "loss2/ave_pool" + pooling_param { + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + name: "loss2/conv" + type: "Convolution" + bottom: "loss2/ave_pool" + top: "loss2/conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "loss2/relu_conv" + type: "ReLU" + bottom: "loss2/conv" + top: "loss2/conv" +} +layer { + name: "loss2/fc" + type: "InnerProduct" + bottom: "loss2/conv" + top: "loss2/fc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "loss2/relu_fc" + type: "ReLU" + bottom: "loss2/fc" + top: "loss2/fc" +} +layer { + name: "loss2/drop_fc" + type: "Dropout" + bottom: "loss2/fc" + top: "loss2/fc" + dropout_param { + dropout_ratio: 0.7 + } +} +layer { + name: "loss2/classifier" + type: "InnerProduct" + bottom: "loss2/fc" + top: "loss2/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss2/loss" + type: "SoftmaxWithLoss" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/loss1" + loss_weight: 0.3 +} +layer { + name: "loss2/top-1" + type: "Accuracy" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/top-1" + include { + phase: TEST + } +} +layer { + name: "loss2/top-5" + type: "Accuracy" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} +layer { + name: "inception_4e/1x1" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: 
"inception_4e/relu_1x1" + type: "ReLU" + bottom: "inception_4e/1x1" + top: "inception_4e/1x1" +} +layer { + name: "inception_4e/3x3_reduce" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4e/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4e/3x3_reduce" + top: "inception_4e/3x3_reduce" +} +layer { + name: "inception_4e/3x3" + type: "Convolution" + bottom: "inception_4e/3x3_reduce" + top: "inception_4e/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 320 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4e/relu_3x3" + type: "ReLU" + bottom: "inception_4e/3x3" + top: "inception_4e/3x3" +} +layer { + name: "inception_4e/5x5_reduce" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4e/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4e/5x5_reduce" + top: "inception_4e/5x5_reduce" +} +layer { + name: "inception_4e/5x5" + type: "Convolution" + bottom: "inception_4e/5x5_reduce" + top: "inception_4e/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: 
"inception_4e/relu_5x5" + type: "ReLU" + bottom: "inception_4e/5x5" + top: "inception_4e/5x5" +} +layer { + name: "inception_4e/pool" + type: "Pooling" + bottom: "inception_4d/output" + top: "inception_4e/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4e/pool_proj" + type: "Convolution" + bottom: "inception_4e/pool" + top: "inception_4e/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4e/relu_pool_proj" + type: "ReLU" + bottom: "inception_4e/pool_proj" + top: "inception_4e/pool_proj" +} +layer { + name: "inception_4e/output" + type: "Concat" + bottom: "inception_4e/1x1" + bottom: "inception_4e/3x3" + bottom: "inception_4e/5x5" + bottom: "inception_4e/pool_proj" + top: "inception_4e/output" +} +layer { + name: "pool4/3x3_s2" + type: "Pooling" + bottom: "inception_4e/output" + top: "pool4/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_5a/1x1" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5a/relu_1x1" + type: "ReLU" + bottom: "inception_5a/1x1" + top: "inception_5a/1x1" +} +layer { + name: "inception_5a/3x3_reduce" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer 
{ + name: "inception_5a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_5a/3x3_reduce" + top: "inception_5a/3x3_reduce" +} +layer { + name: "inception_5a/3x3" + type: "Convolution" + bottom: "inception_5a/3x3_reduce" + top: "inception_5a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 320 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5a/relu_3x3" + type: "ReLU" + bottom: "inception_5a/3x3" + top: "inception_5a/3x3" +} +layer { + name: "inception_5a/5x5_reduce" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_5a/5x5_reduce" + top: "inception_5a/5x5_reduce" +} +layer { + name: "inception_5a/5x5" + type: "Convolution" + bottom: "inception_5a/5x5_reduce" + top: "inception_5a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5a/relu_5x5" + type: "ReLU" + bottom: "inception_5a/5x5" + top: "inception_5a/5x5" +} +layer { + name: "inception_5a/pool" + type: "Pooling" + bottom: "pool4/3x3_s2" + top: "inception_5a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_5a/pool_proj" + type: "Convolution" + bottom: "inception_5a/pool" + top: "inception_5a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5a/relu_pool_proj" + type: "ReLU" + bottom: "inception_5a/pool_proj" + top: "inception_5a/pool_proj" +} +layer { + name: "inception_5a/output" + type: "Concat" + bottom: "inception_5a/1x1" + bottom: "inception_5a/3x3" + bottom: "inception_5a/5x5" + bottom: "inception_5a/pool_proj" + top: "inception_5a/output" +} +layer { + name: "inception_5b/1x1" + type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_1x1" + type: "ReLU" + bottom: "inception_5b/1x1" + top: "inception_5b/1x1" +} +layer { + name: "inception_5b/3x3_reduce" + type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_5b/3x3_reduce" + top: "inception_5b/3x3_reduce" +} +layer { + name: "inception_5b/3x3" + type: "Convolution" + bottom: "inception_5b/3x3_reduce" + top: "inception_5b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_3x3" + type: "ReLU" + bottom: "inception_5b/3x3" + top: "inception_5b/3x3" +} +layer { + name: "inception_5b/5x5_reduce" 
+ type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 48 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_5b/5x5_reduce" + top: "inception_5b/5x5_reduce" +} +layer { + name: "inception_5b/5x5" + type: "Convolution" + bottom: "inception_5b/5x5_reduce" + top: "inception_5b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_5x5" + type: "ReLU" + bottom: "inception_5b/5x5" + top: "inception_5b/5x5" +} +layer { + name: "inception_5b/pool" + type: "Pooling" + bottom: "inception_5a/output" + top: "inception_5b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_5b/pool_proj" + type: "Convolution" + bottom: "inception_5b/pool" + top: "inception_5b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_pool_proj" + type: "ReLU" + bottom: "inception_5b/pool_proj" + top: "inception_5b/pool_proj" +} +layer { + name: "inception_5b/output" + type: "Concat" + bottom: "inception_5b/1x1" + bottom: "inception_5b/3x3" + bottom: "inception_5b/5x5" + bottom: "inception_5b/pool_proj" + top: "inception_5b/output" +} +layer { + name: "pool5/7x7_s1" + type: "Pooling" + bottom: "inception_5b/output" + top: "pool5/7x7_s1" + pooling_param { + pool: AVE + kernel_size: 7 + 
stride: 1 + } +} +layer { + name: "pool5/drop_7x7_s1" + type: "Dropout" + bottom: "pool5/7x7_s1" + top: "pool5/7x7_s1" + dropout_param { + dropout_ratio: 0.4 + } +} +layer { + name: "loss3/classifier" + type: "InnerProduct" + bottom: "pool5/7x7_s1" + top: "loss3/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss3/loss3" + type: "SoftmaxWithLoss" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/loss3" + loss_weight: 1 +} +layer { + name: "loss3/top-1" + type: "Accuracy" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/top-1" + include { + phase: TEST + } +} +layer { + name: "loss3/top-5" + type: "Accuracy" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} diff --git a/models/intel_optimized_models/multinode/googlenet_4nodes/solver.prototxt b/models/intel_optimized_models/multinode/googlenet_4nodes/solver.prototxt new file mode 100644 index 00000000000..589971c103e --- /dev/null +++ b/models/intel_optimized_models/multinode/googlenet_4nodes/solver.prototxt @@ -0,0 +1,27 @@ +#This is Intel(R) optimized (in terms of time to train) version of solver for model described in the [GoogLeNet](http://arxiv.org/abs/1409.4842) publication. +#Original solver.prototxt can be found in /models/bvlc_googlenet/ directory of this repository. +#Differences: +#- base_lr is set to 0.04 +#- max_iter is set to 350000 +# +#- bias_filler value changed to 0.1 +# +#Top-5 and Top-1 results achieved with this version of solver: +#Top-5: 89.06% +#Top-1: 68.74% +#Training was performed using server equipped with Intel(R) Xeon Phi(TM) CPU 7250 processor. 
+net: "models/intel_optimized_models/multinode/googlenet_4nodes/train_val.prototxt" +#test_iter: 1000 +#test_interval: 10000 +#test_initialization: false +display: 40 +average_loss: 40 +base_lr: 0.04 +lr_policy: "poly" +power: 0.5 +max_iter: 350000 +momentum: 0.9 +weight_decay: 0.0002 +snapshot: 50000 +snapshot_prefix: "models/intel_optimized_models/multinode/googlenet_4nodes/googlenet" +solver_mode: CPU diff --git a/models/intel_optimized_models/multinode/googlenet_4nodes/train_val.prototxt b/models/intel_optimized_models/multinode/googlenet_4nodes/train_val.prototxt new file mode 100644 index 00000000000..f5276ab9732 --- /dev/null +++ b/models/intel_optimized_models/multinode/googlenet_4nodes/train_val.prototxt @@ -0,0 +1,2434 @@ +name: "GoogleNet" +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 64 + backend: LMDB + shuffle: true + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mirror: false + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } +} +layer { + name: "conv1/7x7_s2" + type: "Convolution" + bottom: "data" + top: "conv1/7x7_s2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 3 + kernel_size: 7 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "conv1/relu_7x7" + type: "ReLU" + bottom: "conv1/7x7_s2" + top: "conv1/7x7_s2" +} +layer { + name: "pool1/3x3_s2" + type: "Pooling" + bottom: "conv1/7x7_s2" + top: "pool1/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 
+ stride: 2 + } +} +layer { + name: "pool1/norm1" + type: "LRN" + bottom: "pool1/3x3_s2" + top: "pool1/norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv2/3x3_reduce" + type: "Convolution" + bottom: "pool1/norm1" + top: "conv2/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "conv2/relu_3x3_reduce" + type: "ReLU" + bottom: "conv2/3x3_reduce" + top: "conv2/3x3_reduce" +} +layer { + name: "conv2/3x3" + type: "Convolution" + bottom: "conv2/3x3_reduce" + top: "conv2/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "conv2/relu_3x3" + type: "ReLU" + bottom: "conv2/3x3" + top: "conv2/3x3" +} +layer { + name: "conv2/norm2" + type: "LRN" + bottom: "conv2/3x3" + top: "conv2/norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "pool2/3x3_s2" + type: "Pooling" + bottom: "conv2/norm2" + top: "pool2/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_3a/1x1" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_1x1" + type: "ReLU" + bottom: "inception_3a/1x1" + top: "inception_3a/1x1" +} +layer { + name: "inception_3a/3x3_reduce" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/3x3_reduce" + param { + 
lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_3a/3x3_reduce" + top: "inception_3a/3x3_reduce" +} +layer { + name: "inception_3a/3x3" + type: "Convolution" + bottom: "inception_3a/3x3_reduce" + top: "inception_3a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_3x3" + type: "ReLU" + bottom: "inception_3a/3x3" + top: "inception_3a/3x3" +} +layer { + name: "inception_3a/5x5_reduce" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_3a/5x5_reduce" + top: "inception_3a/5x5_reduce" +} +layer { + name: "inception_3a/5x5" + type: "Convolution" + bottom: "inception_3a/5x5_reduce" + top: "inception_3a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_5x5" + type: "ReLU" + bottom: "inception_3a/5x5" + top: "inception_3a/5x5" +} +layer { + name: "inception_3a/pool" + type: "Pooling" + bottom: "pool2/3x3_s2" + top: "inception_3a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + 
pad: 1 + } +} +layer { + name: "inception_3a/pool_proj" + type: "Convolution" + bottom: "inception_3a/pool" + top: "inception_3a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_pool_proj" + type: "ReLU" + bottom: "inception_3a/pool_proj" + top: "inception_3a/pool_proj" +} +layer { + name: "inception_3a/output" + type: "Concat" + bottom: "inception_3a/1x1" + bottom: "inception_3a/3x3" + bottom: "inception_3a/5x5" + bottom: "inception_3a/pool_proj" + top: "inception_3a/output" +} +layer { + name: "inception_3b/1x1" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_1x1" + type: "ReLU" + bottom: "inception_3b/1x1" + top: "inception_3b/1x1" +} +layer { + name: "inception_3b/3x3_reduce" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_3b/3x3_reduce" + top: "inception_3b/3x3_reduce" +} +layer { + name: "inception_3b/3x3" + type: "Convolution" + bottom: "inception_3b/3x3_reduce" + top: "inception_3b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + pad: 1 + kernel_size: 3 + weight_filler { + type: 
"xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_3x3" + type: "ReLU" + bottom: "inception_3b/3x3" + top: "inception_3b/3x3" +} +layer { + name: "inception_3b/5x5_reduce" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_3b/5x5_reduce" + top: "inception_3b/5x5_reduce" +} +layer { + name: "inception_3b/5x5" + type: "Convolution" + bottom: "inception_3b/5x5_reduce" + top: "inception_3b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_5x5" + type: "ReLU" + bottom: "inception_3b/5x5" + top: "inception_3b/5x5" +} +layer { + name: "inception_3b/pool" + type: "Pooling" + bottom: "inception_3a/output" + top: "inception_3b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_3b/pool_proj" + type: "Convolution" + bottom: "inception_3b/pool" + top: "inception_3b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_pool_proj" + type: "ReLU" + bottom: "inception_3b/pool_proj" + top: "inception_3b/pool_proj" +} +layer { + name: "inception_3b/output" + type: "Concat" + bottom: "inception_3b/1x1" + bottom: "inception_3b/3x3" + bottom: 
"inception_3b/5x5" + bottom: "inception_3b/pool_proj" + top: "inception_3b/output" +} +layer { + name: "pool3/3x3_s2" + type: "Pooling" + bottom: "inception_3b/output" + top: "pool3/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_4a/1x1" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_1x1" + type: "ReLU" + bottom: "inception_4a/1x1" + top: "inception_4a/1x1" +} +layer { + name: "inception_4a/3x3_reduce" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4a/3x3_reduce" + top: "inception_4a/3x3_reduce" +} +layer { + name: "inception_4a/3x3" + type: "Convolution" + bottom: "inception_4a/3x3_reduce" + top: "inception_4a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 208 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_3x3" + type: "ReLU" + bottom: "inception_4a/3x3" + top: "inception_4a/3x3" +} +layer { + name: "inception_4a/5x5_reduce" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + kernel_size: 1 + weight_filler { + type: 
"xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4a/5x5_reduce" + top: "inception_4a/5x5_reduce" +} +layer { + name: "inception_4a/5x5" + type: "Convolution" + bottom: "inception_4a/5x5_reduce" + top: "inception_4a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 48 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_5x5" + type: "ReLU" + bottom: "inception_4a/5x5" + top: "inception_4a/5x5" +} +layer { + name: "inception_4a/pool" + type: "Pooling" + bottom: "pool3/3x3_s2" + top: "inception_4a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4a/pool_proj" + type: "Convolution" + bottom: "inception_4a/pool" + top: "inception_4a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_pool_proj" + type: "ReLU" + bottom: "inception_4a/pool_proj" + top: "inception_4a/pool_proj" +} +layer { + name: "inception_4a/output" + type: "Concat" + bottom: "inception_4a/1x1" + bottom: "inception_4a/3x3" + bottom: "inception_4a/5x5" + bottom: "inception_4a/pool_proj" + top: "inception_4a/output" +} +layer { + name: "loss1/ave_pool" + type: "Pooling" + bottom: "inception_4a/output" + top: "loss1/ave_pool" + pooling_param { + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + name: "loss1/conv" + type: "Convolution" + bottom: "loss1/ave_pool" + top: "loss1/conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + 
weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "loss1/relu_conv" + type: "ReLU" + bottom: "loss1/conv" + top: "loss1/conv" +} +layer { + name: "loss1/fc" + type: "InnerProduct" + bottom: "loss1/conv" + top: "loss1/fc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "loss1/relu_fc" + type: "ReLU" + bottom: "loss1/fc" + top: "loss1/fc" +} +layer { + name: "loss1/drop_fc" + type: "Dropout" + bottom: "loss1/fc" + top: "loss1/fc" + dropout_param { + dropout_ratio: 0.7 + } +} +layer { + name: "loss1/classifier" + type: "InnerProduct" + bottom: "loss1/fc" + top: "loss1/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss1/loss" + type: "SoftmaxWithLoss" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/loss1" + loss_weight: 0.3 +} +layer { + name: "loss1/top-1" + type: "Accuracy" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/top-1" + include { + phase: TEST + } +} +layer { + name: "loss1/top-5" + type: "Accuracy" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} +layer { + name: "inception_4b/1x1" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_1x1" + type: "ReLU" + bottom: "inception_4b/1x1" + top: 
"inception_4b/1x1" +} +layer { + name: "inception_4b/3x3_reduce" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 112 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4b/3x3_reduce" + top: "inception_4b/3x3_reduce" +} +layer { + name: "inception_4b/3x3" + type: "Convolution" + bottom: "inception_4b/3x3_reduce" + top: "inception_4b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 224 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_3x3" + type: "ReLU" + bottom: "inception_4b/3x3" + top: "inception_4b/3x3" +} +layer { + name: "inception_4b/5x5_reduce" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4b/5x5_reduce" + top: "inception_4b/5x5_reduce" +} +layer { + name: "inception_4b/5x5" + type: "Convolution" + bottom: "inception_4b/5x5_reduce" + top: "inception_4b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_5x5" + type: "ReLU" + bottom: "inception_4b/5x5" + top: "inception_4b/5x5" +} 
+layer { + name: "inception_4b/pool" + type: "Pooling" + bottom: "inception_4a/output" + top: "inception_4b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4b/pool_proj" + type: "Convolution" + bottom: "inception_4b/pool" + top: "inception_4b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_pool_proj" + type: "ReLU" + bottom: "inception_4b/pool_proj" + top: "inception_4b/pool_proj" +} +layer { + name: "inception_4b/output" + type: "Concat" + bottom: "inception_4b/1x1" + bottom: "inception_4b/3x3" + bottom: "inception_4b/5x5" + bottom: "inception_4b/pool_proj" + top: "inception_4b/output" +} +layer { + name: "inception_4c/1x1" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_1x1" + type: "ReLU" + bottom: "inception_4c/1x1" + top: "inception_4c/1x1" +} +layer { + name: "inception_4c/3x3_reduce" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4c/3x3_reduce" + top: "inception_4c/3x3_reduce" +} +layer { + name: "inception_4c/3x3" + type: "Convolution" + bottom: "inception_4c/3x3_reduce" + top: "inception_4c/3x3" + 
param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_3x3" + type: "ReLU" + bottom: "inception_4c/3x3" + top: "inception_4c/3x3" +} +layer { + name: "inception_4c/5x5_reduce" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4c/5x5_reduce" + top: "inception_4c/5x5_reduce" +} +layer { + name: "inception_4c/5x5" + type: "Convolution" + bottom: "inception_4c/5x5_reduce" + top: "inception_4c/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_5x5" + type: "ReLU" + bottom: "inception_4c/5x5" + top: "inception_4c/5x5" +} +layer { + name: "inception_4c/pool" + type: "Pooling" + bottom: "inception_4b/output" + top: "inception_4c/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4c/pool_proj" + type: "Convolution" + bottom: "inception_4c/pool" + top: "inception_4c/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_pool_proj" + type: "ReLU" + bottom: 
"inception_4c/pool_proj" + top: "inception_4c/pool_proj" +} +layer { + name: "inception_4c/output" + type: "Concat" + bottom: "inception_4c/1x1" + bottom: "inception_4c/3x3" + bottom: "inception_4c/5x5" + bottom: "inception_4c/pool_proj" + top: "inception_4c/output" +} +layer { + name: "inception_4d/1x1" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 112 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_1x1" + type: "ReLU" + bottom: "inception_4d/1x1" + top: "inception_4d/1x1" +} +layer { + name: "inception_4d/3x3_reduce" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 144 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4d/3x3_reduce" + top: "inception_4d/3x3_reduce" +} +layer { + name: "inception_4d/3x3" + type: "Convolution" + bottom: "inception_4d/3x3_reduce" + top: "inception_4d/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 288 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_3x3" + type: "ReLU" + bottom: "inception_4d/3x3" + top: "inception_4d/3x3" +} +layer { + name: "inception_4d/5x5_reduce" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + 
kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4d/5x5_reduce" + top: "inception_4d/5x5_reduce" +} +layer { + name: "inception_4d/5x5" + type: "Convolution" + bottom: "inception_4d/5x5_reduce" + top: "inception_4d/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_5x5" + type: "ReLU" + bottom: "inception_4d/5x5" + top: "inception_4d/5x5" +} +layer { + name: "inception_4d/pool" + type: "Pooling" + bottom: "inception_4c/output" + top: "inception_4d/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4d/pool_proj" + type: "Convolution" + bottom: "inception_4d/pool" + top: "inception_4d/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_pool_proj" + type: "ReLU" + bottom: "inception_4d/pool_proj" + top: "inception_4d/pool_proj" +} +layer { + name: "inception_4d/output" + type: "Concat" + bottom: "inception_4d/1x1" + bottom: "inception_4d/3x3" + bottom: "inception_4d/5x5" + bottom: "inception_4d/pool_proj" + top: "inception_4d/output" +} +layer { + name: "loss2/ave_pool" + type: "Pooling" + bottom: "inception_4d/output" + top: "loss2/ave_pool" + pooling_param { + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + name: "loss2/conv" + type: "Convolution" + bottom: "loss2/ave_pool" + top: "loss2/conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "loss2/relu_conv" + type: "ReLU" + bottom: "loss2/conv" + top: "loss2/conv" +} +layer { + name: "loss2/fc" + type: "InnerProduct" + bottom: "loss2/conv" + top: "loss2/fc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "loss2/relu_fc" + type: "ReLU" + bottom: "loss2/fc" + top: "loss2/fc" +} +layer { + name: "loss2/drop_fc" + type: "Dropout" + bottom: "loss2/fc" + top: "loss2/fc" + dropout_param { + dropout_ratio: 0.7 + } +} +layer { + name: "loss2/classifier" + type: "InnerProduct" + bottom: "loss2/fc" + top: "loss2/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss2/loss" + type: "SoftmaxWithLoss" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/loss1" + loss_weight: 0.3 +} +layer { + name: "loss2/top-1" + type: "Accuracy" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/top-1" + include { + phase: TEST + } +} +layer { + name: "loss2/top-5" + type: "Accuracy" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} +layer { + name: "inception_4e/1x1" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: 
"inception_4e/relu_1x1" + type: "ReLU" + bottom: "inception_4e/1x1" + top: "inception_4e/1x1" +} +layer { + name: "inception_4e/3x3_reduce" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4e/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4e/3x3_reduce" + top: "inception_4e/3x3_reduce" +} +layer { + name: "inception_4e/3x3" + type: "Convolution" + bottom: "inception_4e/3x3_reduce" + top: "inception_4e/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 320 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4e/relu_3x3" + type: "ReLU" + bottom: "inception_4e/3x3" + top: "inception_4e/3x3" +} +layer { + name: "inception_4e/5x5_reduce" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4e/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4e/5x5_reduce" + top: "inception_4e/5x5_reduce" +} +layer { + name: "inception_4e/5x5" + type: "Convolution" + bottom: "inception_4e/5x5_reduce" + top: "inception_4e/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: 
"inception_4e/relu_5x5" + type: "ReLU" + bottom: "inception_4e/5x5" + top: "inception_4e/5x5" +} +layer { + name: "inception_4e/pool" + type: "Pooling" + bottom: "inception_4d/output" + top: "inception_4e/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4e/pool_proj" + type: "Convolution" + bottom: "inception_4e/pool" + top: "inception_4e/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4e/relu_pool_proj" + type: "ReLU" + bottom: "inception_4e/pool_proj" + top: "inception_4e/pool_proj" +} +layer { + name: "inception_4e/output" + type: "Concat" + bottom: "inception_4e/1x1" + bottom: "inception_4e/3x3" + bottom: "inception_4e/5x5" + bottom: "inception_4e/pool_proj" + top: "inception_4e/output" +} +layer { + name: "pool4/3x3_s2" + type: "Pooling" + bottom: "inception_4e/output" + top: "pool4/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_5a/1x1" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5a/relu_1x1" + type: "ReLU" + bottom: "inception_5a/1x1" + top: "inception_5a/1x1" +} +layer { + name: "inception_5a/3x3_reduce" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer 
{ + name: "inception_5a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_5a/3x3_reduce" + top: "inception_5a/3x3_reduce" +} +layer { + name: "inception_5a/3x3" + type: "Convolution" + bottom: "inception_5a/3x3_reduce" + top: "inception_5a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 320 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5a/relu_3x3" + type: "ReLU" + bottom: "inception_5a/3x3" + top: "inception_5a/3x3" +} +layer { + name: "inception_5a/5x5_reduce" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_5a/5x5_reduce" + top: "inception_5a/5x5_reduce" +} +layer { + name: "inception_5a/5x5" + type: "Convolution" + bottom: "inception_5a/5x5_reduce" + top: "inception_5a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5a/relu_5x5" + type: "ReLU" + bottom: "inception_5a/5x5" + top: "inception_5a/5x5" +} +layer { + name: "inception_5a/pool" + type: "Pooling" + bottom: "pool4/3x3_s2" + top: "inception_5a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_5a/pool_proj" + type: "Convolution" + bottom: "inception_5a/pool" + top: "inception_5a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5a/relu_pool_proj" + type: "ReLU" + bottom: "inception_5a/pool_proj" + top: "inception_5a/pool_proj" +} +layer { + name: "inception_5a/output" + type: "Concat" + bottom: "inception_5a/1x1" + bottom: "inception_5a/3x3" + bottom: "inception_5a/5x5" + bottom: "inception_5a/pool_proj" + top: "inception_5a/output" +} +layer { + name: "inception_5b/1x1" + type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_1x1" + type: "ReLU" + bottom: "inception_5b/1x1" + top: "inception_5b/1x1" +} +layer { + name: "inception_5b/3x3_reduce" + type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_5b/3x3_reduce" + top: "inception_5b/3x3_reduce" +} +layer { + name: "inception_5b/3x3" + type: "Convolution" + bottom: "inception_5b/3x3_reduce" + top: "inception_5b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_3x3" + type: "ReLU" + bottom: "inception_5b/3x3" + top: "inception_5b/3x3" +} +layer { + name: "inception_5b/5x5_reduce" 
+ type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 48 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_5b/5x5_reduce" + top: "inception_5b/5x5_reduce" +} +layer { + name: "inception_5b/5x5" + type: "Convolution" + bottom: "inception_5b/5x5_reduce" + top: "inception_5b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_5x5" + type: "ReLU" + bottom: "inception_5b/5x5" + top: "inception_5b/5x5" +} +layer { + name: "inception_5b/pool" + type: "Pooling" + bottom: "inception_5a/output" + top: "inception_5b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_5b/pool_proj" + type: "Convolution" + bottom: "inception_5b/pool" + top: "inception_5b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_pool_proj" + type: "ReLU" + bottom: "inception_5b/pool_proj" + top: "inception_5b/pool_proj" +} +layer { + name: "inception_5b/output" + type: "Concat" + bottom: "inception_5b/1x1" + bottom: "inception_5b/3x3" + bottom: "inception_5b/5x5" + bottom: "inception_5b/pool_proj" + top: "inception_5b/output" +} +layer { + name: "pool5/7x7_s1" + type: "Pooling" + bottom: "inception_5b/output" + top: "pool5/7x7_s1" + pooling_param { + pool: AVE + kernel_size: 7 + 
stride: 1 + } +} +layer { + name: "pool5/drop_7x7_s1" + type: "Dropout" + bottom: "pool5/7x7_s1" + top: "pool5/7x7_s1" + dropout_param { + dropout_ratio: 0.4 + } +} +layer { + name: "loss3/classifier" + type: "InnerProduct" + bottom: "pool5/7x7_s1" + top: "loss3/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss3/loss3" + type: "SoftmaxWithLoss" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/loss3" + loss_weight: 1 +} +layer { + name: "loss3/top-1" + type: "Accuracy" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/top-1" + include { + phase: TEST + } +} +layer { + name: "loss3/top-5" + type: "Accuracy" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} diff --git a/models/intel_optimized_models/multinode/googlenet_8nodes/solver.prototxt b/models/intel_optimized_models/multinode/googlenet_8nodes/solver.prototxt new file mode 100644 index 00000000000..83307bcb530 --- /dev/null +++ b/models/intel_optimized_models/multinode/googlenet_8nodes/solver.prototxt @@ -0,0 +1,27 @@ +#This is Intel(R) optimized (in terms of time to train) version of solver for model described in the [GoogLeNet](http://arxiv.org/abs/1409.4842) publication. +#Original solver.prototxt can be found in /models/bvlc_googlenet/ directory of this repository. +#Differences: +#- base_lr is set to 0.06 +#- max_iter is set to 182000 +# +#- bias_filler value changed to 0.1 +# +#Top-5 and Top-1 results achieved with this version of solver: +#Top-5: 88.85% +#Top-1: 68.58% +#Training was performed using server equipped with Intel(R) Xeon Phi(TM) CPU 7250 processor. 
+net: "models/intel_optimized_models/multinode/googlenet_8nodes/train_val.prototxt" +#test_iter: 1000 +#test_interval: 10000 +#test_initialization: false +display: 40 +average_loss: 40 +base_lr: 0.06 +lr_policy: "poly" +power: 0.5 +max_iter: 182000 +momentum: 0.9 +weight_decay: 0.0002 +snapshot: 50000 +snapshot_prefix: "models/intel_optimized_models/multinode/googlenet_8nodes/googlenet" +solver_mode: CPU diff --git a/models/intel_optimized_models/multinode/googlenet_8nodes/train_val.prototxt b/models/intel_optimized_models/multinode/googlenet_8nodes/train_val.prototxt new file mode 100644 index 00000000000..f5276ab9732 --- /dev/null +++ b/models/intel_optimized_models/multinode/googlenet_8nodes/train_val.prototxt @@ -0,0 +1,2434 @@ +name: "GoogleNet" +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 64 + backend: LMDB + shuffle: true + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mirror: false + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } +} +layer { + name: "conv1/7x7_s2" + type: "Convolution" + bottom: "data" + top: "conv1/7x7_s2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 3 + kernel_size: 7 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "conv1/relu_7x7" + type: "ReLU" + bottom: "conv1/7x7_s2" + top: "conv1/7x7_s2" +} +layer { + name: "pool1/3x3_s2" + type: "Pooling" + bottom: "conv1/7x7_s2" + top: "pool1/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 
+ stride: 2 + } +} +layer { + name: "pool1/norm1" + type: "LRN" + bottom: "pool1/3x3_s2" + top: "pool1/norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "conv2/3x3_reduce" + type: "Convolution" + bottom: "pool1/norm1" + top: "conv2/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "conv2/relu_3x3_reduce" + type: "ReLU" + bottom: "conv2/3x3_reduce" + top: "conv2/3x3_reduce" +} +layer { + name: "conv2/3x3" + type: "Convolution" + bottom: "conv2/3x3_reduce" + top: "conv2/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "conv2/relu_3x3" + type: "ReLU" + bottom: "conv2/3x3" + top: "conv2/3x3" +} +layer { + name: "conv2/norm2" + type: "LRN" + bottom: "conv2/3x3" + top: "conv2/norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + } +} +layer { + name: "pool2/3x3_s2" + type: "Pooling" + bottom: "conv2/norm2" + top: "pool2/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_3a/1x1" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_1x1" + type: "ReLU" + bottom: "inception_3a/1x1" + top: "inception_3a/1x1" +} +layer { + name: "inception_3a/3x3_reduce" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/3x3_reduce" + param { + 
lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_3a/3x3_reduce" + top: "inception_3a/3x3_reduce" +} +layer { + name: "inception_3a/3x3" + type: "Convolution" + bottom: "inception_3a/3x3_reduce" + top: "inception_3a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_3x3" + type: "ReLU" + bottom: "inception_3a/3x3" + top: "inception_3a/3x3" +} +layer { + name: "inception_3a/5x5_reduce" + type: "Convolution" + bottom: "pool2/3x3_s2" + top: "inception_3a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_3a/5x5_reduce" + top: "inception_3a/5x5_reduce" +} +layer { + name: "inception_3a/5x5" + type: "Convolution" + bottom: "inception_3a/5x5_reduce" + top: "inception_3a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_5x5" + type: "ReLU" + bottom: "inception_3a/5x5" + top: "inception_3a/5x5" +} +layer { + name: "inception_3a/pool" + type: "Pooling" + bottom: "pool2/3x3_s2" + top: "inception_3a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + 
pad: 1 + } +} +layer { + name: "inception_3a/pool_proj" + type: "Convolution" + bottom: "inception_3a/pool" + top: "inception_3a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3a/relu_pool_proj" + type: "ReLU" + bottom: "inception_3a/pool_proj" + top: "inception_3a/pool_proj" +} +layer { + name: "inception_3a/output" + type: "Concat" + bottom: "inception_3a/1x1" + bottom: "inception_3a/3x3" + bottom: "inception_3a/5x5" + bottom: "inception_3a/pool_proj" + top: "inception_3a/output" +} +layer { + name: "inception_3b/1x1" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_1x1" + type: "ReLU" + bottom: "inception_3b/1x1" + top: "inception_3b/1x1" +} +layer { + name: "inception_3b/3x3_reduce" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_3b/3x3_reduce" + top: "inception_3b/3x3_reduce" +} +layer { + name: "inception_3b/3x3" + type: "Convolution" + bottom: "inception_3b/3x3_reduce" + top: "inception_3b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + pad: 1 + kernel_size: 3 + weight_filler { + type: 
"xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_3x3" + type: "ReLU" + bottom: "inception_3b/3x3" + top: "inception_3b/3x3" +} +layer { + name: "inception_3b/5x5_reduce" + type: "Convolution" + bottom: "inception_3a/output" + top: "inception_3b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_3b/5x5_reduce" + top: "inception_3b/5x5_reduce" +} +layer { + name: "inception_3b/5x5" + type: "Convolution" + bottom: "inception_3b/5x5_reduce" + top: "inception_3b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_5x5" + type: "ReLU" + bottom: "inception_3b/5x5" + top: "inception_3b/5x5" +} +layer { + name: "inception_3b/pool" + type: "Pooling" + bottom: "inception_3a/output" + top: "inception_3b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_3b/pool_proj" + type: "Convolution" + bottom: "inception_3b/pool" + top: "inception_3b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_3b/relu_pool_proj" + type: "ReLU" + bottom: "inception_3b/pool_proj" + top: "inception_3b/pool_proj" +} +layer { + name: "inception_3b/output" + type: "Concat" + bottom: "inception_3b/1x1" + bottom: "inception_3b/3x3" + bottom: 
"inception_3b/5x5" + bottom: "inception_3b/pool_proj" + top: "inception_3b/output" +} +layer { + name: "pool3/3x3_s2" + type: "Pooling" + bottom: "inception_3b/output" + top: "pool3/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_4a/1x1" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_1x1" + type: "ReLU" + bottom: "inception_4a/1x1" + top: "inception_4a/1x1" +} +layer { + name: "inception_4a/3x3_reduce" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4a/3x3_reduce" + top: "inception_4a/3x3_reduce" +} +layer { + name: "inception_4a/3x3" + type: "Convolution" + bottom: "inception_4a/3x3_reduce" + top: "inception_4a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 208 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_3x3" + type: "ReLU" + bottom: "inception_4a/3x3" + top: "inception_4a/3x3" +} +layer { + name: "inception_4a/5x5_reduce" + type: "Convolution" + bottom: "pool3/3x3_s2" + top: "inception_4a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + kernel_size: 1 + weight_filler { + type: 
"xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4a/5x5_reduce" + top: "inception_4a/5x5_reduce" +} +layer { + name: "inception_4a/5x5" + type: "Convolution" + bottom: "inception_4a/5x5_reduce" + top: "inception_4a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 48 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_5x5" + type: "ReLU" + bottom: "inception_4a/5x5" + top: "inception_4a/5x5" +} +layer { + name: "inception_4a/pool" + type: "Pooling" + bottom: "pool3/3x3_s2" + top: "inception_4a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4a/pool_proj" + type: "Convolution" + bottom: "inception_4a/pool" + top: "inception_4a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4a/relu_pool_proj" + type: "ReLU" + bottom: "inception_4a/pool_proj" + top: "inception_4a/pool_proj" +} +layer { + name: "inception_4a/output" + type: "Concat" + bottom: "inception_4a/1x1" + bottom: "inception_4a/3x3" + bottom: "inception_4a/5x5" + bottom: "inception_4a/pool_proj" + top: "inception_4a/output" +} +layer { + name: "loss1/ave_pool" + type: "Pooling" + bottom: "inception_4a/output" + top: "loss1/ave_pool" + pooling_param { + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + name: "loss1/conv" + type: "Convolution" + bottom: "loss1/ave_pool" + top: "loss1/conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + 
weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "loss1/relu_conv" + type: "ReLU" + bottom: "loss1/conv" + top: "loss1/conv" +} +layer { + name: "loss1/fc" + type: "InnerProduct" + bottom: "loss1/conv" + top: "loss1/fc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "loss1/relu_fc" + type: "ReLU" + bottom: "loss1/fc" + top: "loss1/fc" +} +layer { + name: "loss1/drop_fc" + type: "Dropout" + bottom: "loss1/fc" + top: "loss1/fc" + dropout_param { + dropout_ratio: 0.7 + } +} +layer { + name: "loss1/classifier" + type: "InnerProduct" + bottom: "loss1/fc" + top: "loss1/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss1/loss" + type: "SoftmaxWithLoss" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/loss1" + loss_weight: 0.3 +} +layer { + name: "loss1/top-1" + type: "Accuracy" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/top-1" + include { + phase: TEST + } +} +layer { + name: "loss1/top-5" + type: "Accuracy" + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} +layer { + name: "inception_4b/1x1" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_1x1" + type: "ReLU" + bottom: "inception_4b/1x1" + top: 
"inception_4b/1x1" +} +layer { + name: "inception_4b/3x3_reduce" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 112 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4b/3x3_reduce" + top: "inception_4b/3x3_reduce" +} +layer { + name: "inception_4b/3x3" + type: "Convolution" + bottom: "inception_4b/3x3_reduce" + top: "inception_4b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 224 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_3x3" + type: "ReLU" + bottom: "inception_4b/3x3" + top: "inception_4b/3x3" +} +layer { + name: "inception_4b/5x5_reduce" + type: "Convolution" + bottom: "inception_4a/output" + top: "inception_4b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4b/5x5_reduce" + top: "inception_4b/5x5_reduce" +} +layer { + name: "inception_4b/5x5" + type: "Convolution" + bottom: "inception_4b/5x5_reduce" + top: "inception_4b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_5x5" + type: "ReLU" + bottom: "inception_4b/5x5" + top: "inception_4b/5x5" +} 
+layer { + name: "inception_4b/pool" + type: "Pooling" + bottom: "inception_4a/output" + top: "inception_4b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4b/pool_proj" + type: "Convolution" + bottom: "inception_4b/pool" + top: "inception_4b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4b/relu_pool_proj" + type: "ReLU" + bottom: "inception_4b/pool_proj" + top: "inception_4b/pool_proj" +} +layer { + name: "inception_4b/output" + type: "Concat" + bottom: "inception_4b/1x1" + bottom: "inception_4b/3x3" + bottom: "inception_4b/5x5" + bottom: "inception_4b/pool_proj" + top: "inception_4b/output" +} +layer { + name: "inception_4c/1x1" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_1x1" + type: "ReLU" + bottom: "inception_4c/1x1" + top: "inception_4c/1x1" +} +layer { + name: "inception_4c/3x3_reduce" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4c/3x3_reduce" + top: "inception_4c/3x3_reduce" +} +layer { + name: "inception_4c/3x3" + type: "Convolution" + bottom: "inception_4c/3x3_reduce" + top: "inception_4c/3x3" + 
param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_3x3" + type: "ReLU" + bottom: "inception_4c/3x3" + top: "inception_4c/3x3" +} +layer { + name: "inception_4c/5x5_reduce" + type: "Convolution" + bottom: "inception_4b/output" + top: "inception_4c/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4c/5x5_reduce" + top: "inception_4c/5x5_reduce" +} +layer { + name: "inception_4c/5x5" + type: "Convolution" + bottom: "inception_4c/5x5_reduce" + top: "inception_4c/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_5x5" + type: "ReLU" + bottom: "inception_4c/5x5" + top: "inception_4c/5x5" +} +layer { + name: "inception_4c/pool" + type: "Pooling" + bottom: "inception_4b/output" + top: "inception_4c/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4c/pool_proj" + type: "Convolution" + bottom: "inception_4c/pool" + top: "inception_4c/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4c/relu_pool_proj" + type: "ReLU" + bottom: 
"inception_4c/pool_proj" + top: "inception_4c/pool_proj" +} +layer { + name: "inception_4c/output" + type: "Concat" + bottom: "inception_4c/1x1" + bottom: "inception_4c/3x3" + bottom: "inception_4c/5x5" + bottom: "inception_4c/pool_proj" + top: "inception_4c/output" +} +layer { + name: "inception_4d/1x1" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 112 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_1x1" + type: "ReLU" + bottom: "inception_4d/1x1" + top: "inception_4d/1x1" +} +layer { + name: "inception_4d/3x3_reduce" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 144 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4d/3x3_reduce" + top: "inception_4d/3x3_reduce" +} +layer { + name: "inception_4d/3x3" + type: "Convolution" + bottom: "inception_4d/3x3_reduce" + top: "inception_4d/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 288 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_3x3" + type: "ReLU" + bottom: "inception_4d/3x3" + top: "inception_4d/3x3" +} +layer { + name: "inception_4d/5x5_reduce" + type: "Convolution" + bottom: "inception_4c/output" + top: "inception_4d/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + 
kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4d/5x5_reduce" + top: "inception_4d/5x5_reduce" +} +layer { + name: "inception_4d/5x5" + type: "Convolution" + bottom: "inception_4d/5x5_reduce" + top: "inception_4d/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_5x5" + type: "ReLU" + bottom: "inception_4d/5x5" + top: "inception_4d/5x5" +} +layer { + name: "inception_4d/pool" + type: "Pooling" + bottom: "inception_4c/output" + top: "inception_4d/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4d/pool_proj" + type: "Convolution" + bottom: "inception_4d/pool" + top: "inception_4d/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4d/relu_pool_proj" + type: "ReLU" + bottom: "inception_4d/pool_proj" + top: "inception_4d/pool_proj" +} +layer { + name: "inception_4d/output" + type: "Concat" + bottom: "inception_4d/1x1" + bottom: "inception_4d/3x3" + bottom: "inception_4d/5x5" + bottom: "inception_4d/pool_proj" + top: "inception_4d/output" +} +layer { + name: "loss2/ave_pool" + type: "Pooling" + bottom: "inception_4d/output" + top: "loss2/ave_pool" + pooling_param { + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + name: "loss2/conv" + type: "Convolution" + bottom: "loss2/ave_pool" + top: "loss2/conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "loss2/relu_conv" + type: "ReLU" + bottom: "loss2/conv" + top: "loss2/conv" +} +layer { + name: "loss2/fc" + type: "InnerProduct" + bottom: "loss2/conv" + top: "loss2/fc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "loss2/relu_fc" + type: "ReLU" + bottom: "loss2/fc" + top: "loss2/fc" +} +layer { + name: "loss2/drop_fc" + type: "Dropout" + bottom: "loss2/fc" + top: "loss2/fc" + dropout_param { + dropout_ratio: 0.7 + } +} +layer { + name: "loss2/classifier" + type: "InnerProduct" + bottom: "loss2/fc" + top: "loss2/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss2/loss" + type: "SoftmaxWithLoss" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/loss1" + loss_weight: 0.3 +} +layer { + name: "loss2/top-1" + type: "Accuracy" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/top-1" + include { + phase: TEST + } +} +layer { + name: "loss2/top-5" + type: "Accuracy" + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} +layer { + name: "inception_4e/1x1" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: 
"inception_4e/relu_1x1" + type: "ReLU" + bottom: "inception_4e/1x1" + top: "inception_4e/1x1" +} +layer { + name: "inception_4e/3x3_reduce" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4e/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_4e/3x3_reduce" + top: "inception_4e/3x3_reduce" +} +layer { + name: "inception_4e/3x3" + type: "Convolution" + bottom: "inception_4e/3x3_reduce" + top: "inception_4e/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 320 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4e/relu_3x3" + type: "ReLU" + bottom: "inception_4e/3x3" + top: "inception_4e/3x3" +} +layer { + name: "inception_4e/5x5_reduce" + type: "Convolution" + bottom: "inception_4d/output" + top: "inception_4e/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4e/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_4e/5x5_reduce" + top: "inception_4e/5x5_reduce" +} +layer { + name: "inception_4e/5x5" + type: "Convolution" + bottom: "inception_4e/5x5_reduce" + top: "inception_4e/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: 
"inception_4e/relu_5x5" + type: "ReLU" + bottom: "inception_4e/5x5" + top: "inception_4e/5x5" +} +layer { + name: "inception_4e/pool" + type: "Pooling" + bottom: "inception_4d/output" + top: "inception_4e/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_4e/pool_proj" + type: "Convolution" + bottom: "inception_4e/pool" + top: "inception_4e/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_4e/relu_pool_proj" + type: "ReLU" + bottom: "inception_4e/pool_proj" + top: "inception_4e/pool_proj" +} +layer { + name: "inception_4e/output" + type: "Concat" + bottom: "inception_4e/1x1" + bottom: "inception_4e/3x3" + bottom: "inception_4e/5x5" + bottom: "inception_4e/pool_proj" + top: "inception_4e/output" +} +layer { + name: "pool4/3x3_s2" + type: "Pooling" + bottom: "inception_4e/output" + top: "pool4/3x3_s2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + name: "inception_5a/1x1" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5a/relu_1x1" + type: "ReLU" + bottom: "inception_5a/1x1" + top: "inception_5a/1x1" +} +layer { + name: "inception_5a/3x3_reduce" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 160 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer 
{ + name: "inception_5a/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_5a/3x3_reduce" + top: "inception_5a/3x3_reduce" +} +layer { + name: "inception_5a/3x3" + type: "Convolution" + bottom: "inception_5a/3x3_reduce" + top: "inception_5a/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 320 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5a/relu_3x3" + type: "ReLU" + bottom: "inception_5a/3x3" + top: "inception_5a/3x3" +} +layer { + name: "inception_5a/5x5_reduce" + type: "Convolution" + bottom: "pool4/3x3_s2" + top: "inception_5a/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 32 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5a/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_5a/5x5_reduce" + top: "inception_5a/5x5_reduce" +} +layer { + name: "inception_5a/5x5" + type: "Convolution" + bottom: "inception_5a/5x5_reduce" + top: "inception_5a/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5a/relu_5x5" + type: "ReLU" + bottom: "inception_5a/5x5" + top: "inception_5a/5x5" +} +layer { + name: "inception_5a/pool" + type: "Pooling" + bottom: "pool4/3x3_s2" + top: "inception_5a/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_5a/pool_proj" + type: "Convolution" + bottom: "inception_5a/pool" + top: "inception_5a/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5a/relu_pool_proj" + type: "ReLU" + bottom: "inception_5a/pool_proj" + top: "inception_5a/pool_proj" +} +layer { + name: "inception_5a/output" + type: "Concat" + bottom: "inception_5a/1x1" + bottom: "inception_5a/3x3" + bottom: "inception_5a/5x5" + bottom: "inception_5a/pool_proj" + top: "inception_5a/output" +} +layer { + name: "inception_5b/1x1" + type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/1x1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_1x1" + type: "ReLU" + bottom: "inception_5b/1x1" + top: "inception_5b/1x1" +} +layer { + name: "inception_5b/3x3_reduce" + type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/3x3_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 192 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_3x3_reduce" + type: "ReLU" + bottom: "inception_5b/3x3_reduce" + top: "inception_5b/3x3_reduce" +} +layer { + name: "inception_5b/3x3" + type: "Convolution" + bottom: "inception_5b/3x3_reduce" + top: "inception_5b/3x3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_3x3" + type: "ReLU" + bottom: "inception_5b/3x3" + top: "inception_5b/3x3" +} +layer { + name: "inception_5b/5x5_reduce" 
+ type: "Convolution" + bottom: "inception_5a/output" + top: "inception_5b/5x5_reduce" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 48 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_5x5_reduce" + type: "ReLU" + bottom: "inception_5b/5x5_reduce" + top: "inception_5b/5x5_reduce" +} +layer { + name: "inception_5b/5x5" + type: "Convolution" + bottom: "inception_5b/5x5_reduce" + top: "inception_5b/5x5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 2 + kernel_size: 5 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_5x5" + type: "ReLU" + bottom: "inception_5b/5x5" + top: "inception_5b/5x5" +} +layer { + name: "inception_5b/pool" + type: "Pooling" + bottom: "inception_5a/output" + top: "inception_5b/pool" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "inception_5b/pool_proj" + type: "Convolution" + bottom: "inception_5b/pool" + top: "inception_5b/pool_proj" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0.1 + } + } +} +layer { + name: "inception_5b/relu_pool_proj" + type: "ReLU" + bottom: "inception_5b/pool_proj" + top: "inception_5b/pool_proj" +} +layer { + name: "inception_5b/output" + type: "Concat" + bottom: "inception_5b/1x1" + bottom: "inception_5b/3x3" + bottom: "inception_5b/5x5" + bottom: "inception_5b/pool_proj" + top: "inception_5b/output" +} +layer { + name: "pool5/7x7_s1" + type: "Pooling" + bottom: "inception_5b/output" + top: "pool5/7x7_s1" + pooling_param { + pool: AVE + kernel_size: 7 + 
stride: 1 + } +} +layer { + name: "pool5/drop_7x7_s1" + type: "Dropout" + bottom: "pool5/7x7_s1" + top: "pool5/7x7_s1" + dropout_param { + dropout_ratio: 0.4 + } +} +layer { + name: "loss3/classifier" + type: "InnerProduct" + bottom: "pool5/7x7_s1" + top: "loss3/classifier" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "loss3/loss3" + type: "SoftmaxWithLoss" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/loss3" + loss_weight: 1 +} +layer { + name: "loss3/top-1" + type: "Accuracy" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/top-1" + include { + phase: TEST + } +} +layer { + name: "loss3/top-5" + type: "Accuracy" + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} diff --git a/models/intel_optimized_models/multinode/googlenet_v2_4nodes/solver.prototxt b/models/intel_optimized_models/multinode/googlenet_v2_4nodes/solver.prototxt new file mode 100644 index 00000000000..dda5240f39e --- /dev/null +++ b/models/intel_optimized_models/multinode/googlenet_v2_4nodes/solver.prototxt @@ -0,0 +1,24 @@ +#This is the Intel(R) optimized (in terms of time to train) version of the solver for model GoogLeNet v2. +#The original solver.prototxt can be found in the /models/default_googlenet_v2/ directory of this repository. +#Differences: +#- lr_policy is set to poly instead of step +#- base_lr is set to 0.05 +#- max_iter is decreased to 100000 +# +#Top-5 and Top-1 results achieved with this version of the solver: +#Top-5: 89.40% +#Top-1: 69.02% +#Training was performed using a server equipped with an Intel(R) Xeon Phi(TM) CPU 7250 processor. 
+ +net: "models/intel_optimized_models/multinode/googlenet_v2_4nodes/train_val.prototxt" +base_lr: 0.05 +display: 40 +max_iter: 100000 +lr_policy: "poly" +power: 0.5 +momentum: 0.9 +weight_decay: 0.0002 +snapshot: 10000 +snapshot_prefix: "models/intel_optimized_models/multinode/googlenet_v2_4nodes/default_googlenet_v2" +solver_mode: CPU +average_loss: 40 \ No newline at end of file diff --git a/models/intel_optimized_models/multinode/googlenet_v2_4nodes/train_val.prototxt b/models/intel_optimized_models/multinode/googlenet_v2_4nodes/train_val.prototxt new file mode 100644 index 00000000000..23cde7452fb --- /dev/null +++ b/models/intel_optimized_models/multinode/googlenet_v2_4nodes/train_val.prototxt @@ -0,0 +1,4044 @@ +# Inception Network (GoogLeNet Batch Normalization Network) +name: "InceptionNetwork" +### Training Set +layer { + top: "data" + top: "label" + name: "data" + type: "Data" + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 96 + backend: LMDB + shuffle: true + } + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 224 +# mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + mean_value: 104 + mean_value: 117 + mean_value: 123 + } +} +### Validation Set +layer { + top: "data" + top: "label" + name: "data" + type: "Data" + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } + include { + phase: TEST + } + transform_param { + mirror: false + crop_size: 224 +# mean_file: "data/ilsvrc12/imagenet_mean.binaryproto" + mean_value: 104 + mean_value: 117 + mean_value: 123 + } +} + +layer { + bottom: "data" + top: "conv1/7x7_s2" + name: "conv1/7x7_s2" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 3 + kernel_size: 7 + stride: 2 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "conv1/7x7_s2" + name: "conv1/7x7_s2/bn" + top: "conv1/7x7_s2/bn" + type: "BatchNorm" + 
batch_norm_param { + + } +} +layer { + bottom: "conv1/7x7_s2/bn" + top: "conv1/7x7_s2/bn/sc" + name: "conv1/7x7_s2/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "conv1/7x7_s2/bn/sc" + top: "conv1/7x7_s2/bn/sc" + name: "conv1/7x7_s2/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv1/7x7_s2/bn/sc" + top: "pool1/3x3_s2" + name: "pool1/3x3_s2" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + bottom: "pool1/3x3_s2" + top: "conv2/3x3_reduce" + name: "conv2/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "conv2/3x3_reduce" + name: "conv2/3x3_reduce/bn" + top: "conv2/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "conv2/3x3_reduce/bn" + top: "conv2/3x3_reduce/bn/sc" + name: "conv2/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "conv2/3x3_reduce/bn/sc" + top: "conv2/3x3_reduce/bn/sc" + name: "conv2/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv2/3x3_reduce/bn/sc" + top: "conv2/3x3" + name: "conv2/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "conv2/3x3" + name: "conv2/3x3/bn" + top: "conv2/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "conv2/3x3/bn" + top: "conv2/3x3/bn/sc" + name: "conv2/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "conv2/3x3/bn/sc" + top: "conv2/3x3/bn/sc" + name: "conv2/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "conv2/3x3/bn/sc" + top: 
"pool2/3x3_s2" + name: "pool2/3x3_s2" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + bottom: "pool2/3x3_s2" + top: "inception_3a/1x1" + name: "inception_3a/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/1x1" + name: "inception_3a/1x1/bn" + top: "inception_3a/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/1x1/bn" + top: "inception_3a/1x1/bn/sc" + name: "inception_3a/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/1x1/bn/sc" + top: "inception_3a/1x1/bn/sc" + name: "inception_3a/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "pool2/3x3_s2" + top: "inception_3a/3x3_reduce" + name: "inception_3a/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/3x3_reduce" + name: "inception_3a/3x3_reduce/bn" + top: "inception_3a/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/3x3_reduce/bn" + top: "inception_3a/3x3_reduce/bn/sc" + name: "inception_3a/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/3x3_reduce/bn/sc" + top: "inception_3a/3x3_reduce/bn/sc" + name: "inception_3a/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/3x3_reduce/bn/sc" + top: "inception_3a/3x3" + name: "inception_3a/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: 
"xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/3x3" + name: "inception_3a/3x3/bn" + top: "inception_3a/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/3x3/bn" + top: "inception_3a/3x3/bn/sc" + name: "inception_3a/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/3x3/bn/sc" + top: "inception_3a/3x3/bn/sc" + name: "inception_3a/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "pool2/3x3_s2" + top: "inception_3a/double3x3_reduce" + name: "inception_3a/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/double3x3_reduce" + name: "inception_3a/double3x3_reduce/bn" + top: "inception_3a/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/double3x3_reduce/bn" + top: "inception_3a/double3x3_reduce/bn/sc" + name: "inception_3a/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/double3x3_reduce/bn/sc" + top: "inception_3a/double3x3_reduce/bn/sc" + name: "inception_3a/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/double3x3_reduce/bn/sc" + top: "inception_3a/double3x3a" + name: "inception_3a/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/double3x3a" + name: "inception_3a/double3x3a/bn" + top: "inception_3a/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/double3x3a/bn" + top: "inception_3a/double3x3a/bn/sc" + name: 
"inception_3a/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/double3x3a/bn/sc" + top: "inception_3a/double3x3a/bn/sc" + name: "inception_3a/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/double3x3a/bn/sc" + top: "inception_3a/double3x3b" + name: "inception_3a/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/double3x3b" + name: "inception_3a/double3x3b/bn" + top: "inception_3a/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/double3x3b/bn" + top: "inception_3a/double3x3b/bn/sc" + name: "inception_3a/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3a/double3x3b/bn/sc" + top: "inception_3a/double3x3b/bn/sc" + name: "inception_3a/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "pool2/3x3_s2" + top: "inception_3a/pool" + name: "inception_3a/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_3a/pool" + top: "inception_3a/pool_proj" + name: "inception_3a/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 32 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3a/pool_proj" + name: "inception_3a/pool_proj/bn" + top: "inception_3a/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3a/pool_proj/bn" + top: "inception_3a/pool_proj/bn/sc" + name: "inception_3a/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: 
"inception_3a/pool_proj/bn/sc" + top: "inception_3a/pool_proj/bn/sc" + name: "inception_3a/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/1x1/bn/sc" + bottom: "inception_3a/3x3/bn/sc" + bottom: "inception_3a/double3x3b/bn/sc" + bottom: "inception_3a/pool_proj/bn/sc" + top: "inception_3a/output" + name: "inception_3a/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_3a/output" + top: "inception_3b/1x1" + name: "inception_3b/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/1x1" + name: "inception_3b/1x1/bn" + top: "inception_3b/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/1x1/bn" + top: "inception_3b/1x1/bn/sc" + name: "inception_3b/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/1x1/bn/sc" + top: "inception_3b/1x1/bn/sc" + name: "inception_3b/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/output" + top: "inception_3b/3x3_reduce" + name: "inception_3b/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/3x3_reduce" + name: "inception_3b/3x3_reduce/bn" + top: "inception_3b/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/3x3_reduce/bn" + top: "inception_3b/3x3_reduce/bn/sc" + name: "inception_3b/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/3x3_reduce/bn/sc" + top: "inception_3b/3x3_reduce/bn/sc" + name: "inception_3b/3x3_reduce/bn/sc/relu" + type: 
"ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/3x3_reduce/bn/sc" + top: "inception_3b/3x3" + name: "inception_3b/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/3x3" + name: "inception_3b/3x3/bn" + top: "inception_3b/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/3x3/bn" + top: "inception_3b/3x3/bn/sc" + name: "inception_3b/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/3x3/bn/sc" + top: "inception_3b/3x3/bn/sc" + name: "inception_3b/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/output" + top: "inception_3b/double3x3_reduce" + name: "inception_3b/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/double3x3_reduce" + name: "inception_3b/double3x3_reduce/bn" + top: "inception_3b/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/double3x3_reduce/bn" + top: "inception_3b/double3x3_reduce/bn/sc" + name: "inception_3b/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/double3x3_reduce/bn/sc" + top: "inception_3b/double3x3_reduce/bn/sc" + name: "inception_3b/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/double3x3_reduce/bn/sc" + top: "inception_3b/double3x3a" + name: "inception_3b/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler 
{ + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/double3x3a" + name: "inception_3b/double3x3a/bn" + top: "inception_3b/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/double3x3a/bn" + top: "inception_3b/double3x3a/bn/sc" + name: "inception_3b/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/double3x3a/bn/sc" + top: "inception_3b/double3x3a/bn/sc" + name: "inception_3b/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/double3x3a/bn/sc" + top: "inception_3b/double3x3b" + name: "inception_3b/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/double3x3b" + name: "inception_3b/double3x3b/bn" + top: "inception_3b/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/double3x3b/bn" + top: "inception_3b/double3x3b/bn/sc" + name: "inception_3b/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/double3x3b/bn/sc" + top: "inception_3b/double3x3b/bn/sc" + name: "inception_3b/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3a/output" + top: "inception_3b/pool" + name: "inception_3b/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_3b/pool" + top: "inception_3b/pool_proj" + name: "inception_3b/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3b/pool_proj" + name: 
"inception_3b/pool_proj/bn" + top: "inception_3b/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3b/pool_proj/bn" + top: "inception_3b/pool_proj/bn/sc" + name: "inception_3b/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3b/pool_proj/bn/sc" + top: "inception_3b/pool_proj/bn/sc" + name: "inception_3b/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/1x1/bn/sc" + bottom: "inception_3b/3x3/bn/sc" + bottom: "inception_3b/double3x3b/bn/sc" + bottom: "inception_3b/pool_proj/bn/sc" + top: "inception_3b/output" + name: "inception_3b/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_3b/output" + top: "inception_3c/3x3_reduce" + name: "inception_3c/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3c/3x3_reduce" + name: "inception_3c/3x3_reduce/bn" + top: "inception_3c/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3c/3x3_reduce/bn" + top: "inception_3c/3x3_reduce/bn/sc" + name: "inception_3c/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3c/3x3_reduce/bn/sc" + top: "inception_3c/3x3_reduce/bn/sc" + name: "inception_3c/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/3x3_reduce/bn/sc" + top: "inception_3c/3x3" + name: "inception_3c/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3c/3x3" + name: "inception_3c/3x3/bn" + top: "inception_3c/3x3/bn" + type: 
"BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3c/3x3/bn" + top: "inception_3c/3x3/bn/sc" + name: "inception_3c/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3c/3x3/bn/sc" + top: "inception_3c/3x3/bn/sc" + name: "inception_3c/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/output" + top: "inception_3c/double3x3_reduce" + name: "inception_3c/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3c/double3x3_reduce" + name: "inception_3c/double3x3_reduce/bn" + top: "inception_3c/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3c/double3x3_reduce/bn" + top: "inception_3c/double3x3_reduce/bn/sc" + name: "inception_3c/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3c/double3x3_reduce/bn/sc" + top: "inception_3c/double3x3_reduce/bn/sc" + name: "inception_3c/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/double3x3_reduce/bn/sc" + top: "inception_3c/double3x3a" + name: "inception_3c/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3c/double3x3a" + name: "inception_3c/double3x3a/bn" + top: "inception_3c/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3c/double3x3a/bn" + top: "inception_3c/double3x3a/bn/sc" + name: "inception_3c/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: 
"inception_3c/double3x3a/bn/sc" + top: "inception_3c/double3x3a/bn/sc" + name: "inception_3c/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/double3x3a/bn/sc" + top: "inception_3c/double3x3b" + name: "inception_3c/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_3c/double3x3b" + name: "inception_3c/double3x3b/bn" + top: "inception_3c/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_3c/double3x3b/bn" + top: "inception_3c/double3x3b/bn/sc" + name: "inception_3c/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_3c/double3x3b/bn/sc" + top: "inception_3c/double3x3b/bn/sc" + name: "inception_3c/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3b/output" + top: "inception_3c/pool" + name: "inception_3c/pool" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + bottom: "inception_3c/3x3/bn/sc" + bottom: "inception_3c/double3x3b/bn/sc" + bottom: "inception_3c/pool" + top: "inception_3c/output" + name: "inception_3c/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_3c/output" + top: "pool3/5x5_s3" + name: "pool3/5x5_s3" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + bottom: "pool3/5x5_s3" + top: "loss1/conv" + name: "loss1/conv" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "loss1/conv" + name: "loss1/conv/bn" + top: "loss1/conv/bn" + type: "BatchNorm" + batch_norm_param { + + } +} 
+layer { + bottom: "loss1/conv/bn" + top: "loss1/conv/bn/sc" + name: "loss1/conv/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "loss1/conv/bn/sc" + top: "loss1/conv/bn/sc" + name: "loss1/conv/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "loss1/conv/bn/sc" + top: "loss1/fc" + name: "loss1/fc" + type: "InnerProduct" + param { + lr_mult: 1 + decay_mult: 1 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "loss1/fc" + name: "loss1/fc/bn" + top: "loss1/fc/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "loss1/fc/bn" + top: "loss1/fc/bn/sc" + name: "loss1/fc/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "loss1/fc/bn/sc" + top: "loss1/fc/bn/sc" + name: "loss1/fc/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "loss1/fc/bn/sc" + top: "loss1/classifier" + name: "loss1/classifier" + type: "InnerProduct" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "loss1/classifier" + bottom: "label" + top: "loss1/loss" + name: "loss1/loss" + type: "SoftmaxWithLoss" + loss_weight: 0.3 +} +layer { + bottom: "loss1/classifier" + top: "loss1/prob" + name: "loss1/prob" + type: "Softmax" + include { + phase: TEST + } +} +layer { + bottom: "loss1/prob" + bottom: "label" + top: "loss1/top-1" + name: "loss1/top-1" + type: "Accuracy" + include { + phase: TEST + } +} +layer { + bottom: "loss1/prob" + bottom: "label" + top: "loss1/top-5" + name: "loss1/top-5" + type: "Accuracy" + accuracy_param { + top_k: 5 + } + include { + phase: TEST + } +} +layer { + bottom: "inception_3c/output" + top: "inception_4a/1x1" + name: "inception_4a/1x1" + type: "Convolution" + param { + lr_mult: 1 + 
decay_mult: 1 + } + convolution_param { + + num_output: 224 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/1x1" + name: "inception_4a/1x1/bn" + top: "inception_4a/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/1x1/bn" + top: "inception_4a/1x1/bn/sc" + name: "inception_4a/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/1x1/bn/sc" + top: "inception_4a/1x1/bn/sc" + name: "inception_4a/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/output" + top: "inception_4a/3x3_reduce" + name: "inception_4a/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 64 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/3x3_reduce" + name: "inception_4a/3x3_reduce/bn" + top: "inception_4a/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/3x3_reduce/bn" + top: "inception_4a/3x3_reduce/bn/sc" + name: "inception_4a/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/3x3_reduce/bn/sc" + top: "inception_4a/3x3_reduce/bn/sc" + name: "inception_4a/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/3x3_reduce/bn/sc" + top: "inception_4a/3x3" + name: "inception_4a/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/3x3" + name: "inception_4a/3x3/bn" + top: "inception_4a/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/3x3/bn" + top: 
"inception_4a/3x3/bn/sc" + name: "inception_4a/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/3x3/bn/sc" + top: "inception_4a/3x3/bn/sc" + name: "inception_4a/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/output" + top: "inception_4a/double3x3_reduce" + name: "inception_4a/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/double3x3_reduce" + name: "inception_4a/double3x3_reduce/bn" + top: "inception_4a/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/double3x3_reduce/bn" + top: "inception_4a/double3x3_reduce/bn/sc" + name: "inception_4a/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/double3x3_reduce/bn/sc" + top: "inception_4a/double3x3_reduce/bn/sc" + name: "inception_4a/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/double3x3_reduce/bn/sc" + top: "inception_4a/double3x3a" + name: "inception_4a/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/double3x3a" + name: "inception_4a/double3x3a/bn" + top: "inception_4a/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/double3x3a/bn" + top: "inception_4a/double3x3a/bn/sc" + name: "inception_4a/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/double3x3a/bn/sc" + top: "inception_4a/double3x3a/bn/sc" + name: "inception_4a/double3x3a/bn/sc/relu" + 
type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/double3x3a/bn/sc" + top: "inception_4a/double3x3b" + name: "inception_4a/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/double3x3b" + name: "inception_4a/double3x3b/bn" + top: "inception_4a/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/double3x3b/bn" + top: "inception_4a/double3x3b/bn/sc" + name: "inception_4a/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/double3x3b/bn/sc" + top: "inception_4a/double3x3b/bn/sc" + name: "inception_4a/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_3c/output" + top: "inception_4a/pool" + name: "inception_4a/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_4a/pool" + top: "inception_4a/pool_proj" + name: "inception_4a/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4a/pool_proj" + name: "inception_4a/pool_proj/bn" + top: "inception_4a/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4a/pool_proj/bn" + top: "inception_4a/pool_proj/bn/sc" + name: "inception_4a/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4a/pool_proj/bn/sc" + top: "inception_4a/pool_proj/bn/sc" + name: "inception_4a/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/1x1/bn/sc" + bottom: "inception_4a/3x3/bn/sc" + 
bottom: "inception_4a/double3x3b/bn/sc" + bottom: "inception_4a/pool_proj/bn/sc" + top: "inception_4a/output" + name: "inception_4a/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_4a/output" + top: "inception_4b/1x1" + name: "inception_4b/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/1x1" + name: "inception_4b/1x1/bn" + top: "inception_4b/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/1x1/bn" + top: "inception_4b/1x1/bn/sc" + name: "inception_4b/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/1x1/bn/sc" + top: "inception_4b/1x1/bn/sc" + name: "inception_4b/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/output" + top: "inception_4b/3x3_reduce" + name: "inception_4b/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/3x3_reduce" + name: "inception_4b/3x3_reduce/bn" + top: "inception_4b/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/3x3_reduce/bn" + top: "inception_4b/3x3_reduce/bn/sc" + name: "inception_4b/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/3x3_reduce/bn/sc" + top: "inception_4b/3x3_reduce/bn/sc" + name: "inception_4b/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/3x3_reduce/bn/sc" + top: "inception_4b/3x3" + name: "inception_4b/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 
128 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/3x3" + name: "inception_4b/3x3/bn" + top: "inception_4b/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/3x3/bn" + top: "inception_4b/3x3/bn/sc" + name: "inception_4b/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/3x3/bn/sc" + top: "inception_4b/3x3/bn/sc" + name: "inception_4b/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/output" + top: "inception_4b/double3x3_reduce" + name: "inception_4b/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/double3x3_reduce" + name: "inception_4b/double3x3_reduce/bn" + top: "inception_4b/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/double3x3_reduce/bn" + top: "inception_4b/double3x3_reduce/bn/sc" + name: "inception_4b/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/double3x3_reduce/bn/sc" + top: "inception_4b/double3x3_reduce/bn/sc" + name: "inception_4b/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/double3x3_reduce/bn/sc" + top: "inception_4b/double3x3a" + name: "inception_4b/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/double3x3a" + name: "inception_4b/double3x3a/bn" + top: "inception_4b/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: 
"inception_4b/double3x3a/bn" + top: "inception_4b/double3x3a/bn/sc" + name: "inception_4b/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/double3x3a/bn/sc" + top: "inception_4b/double3x3a/bn/sc" + name: "inception_4b/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/double3x3a/bn/sc" + top: "inception_4b/double3x3b" + name: "inception_4b/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/double3x3b" + name: "inception_4b/double3x3b/bn" + top: "inception_4b/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/double3x3b/bn" + top: "inception_4b/double3x3b/bn/sc" + name: "inception_4b/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/double3x3b/bn/sc" + top: "inception_4b/double3x3b/bn/sc" + name: "inception_4b/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4a/output" + top: "inception_4b/pool" + name: "inception_4b/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_4b/pool" + top: "inception_4b/pool_proj" + name: "inception_4b/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4b/pool_proj" + name: "inception_4b/pool_proj/bn" + top: "inception_4b/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4b/pool_proj/bn" + top: "inception_4b/pool_proj/bn/sc" + name: "inception_4b/pool_proj/bn/sc" + type: 
"Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4b/pool_proj/bn/sc" + top: "inception_4b/pool_proj/bn/sc" + name: "inception_4b/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/1x1/bn/sc" + bottom: "inception_4b/3x3/bn/sc" + bottom: "inception_4b/double3x3b/bn/sc" + bottom: "inception_4b/pool_proj/bn/sc" + top: "inception_4b/output" + name: "inception_4b/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_4b/output" + top: "inception_4c/1x1" + name: "inception_4c/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/1x1" + name: "inception_4c/1x1/bn" + top: "inception_4c/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/1x1/bn" + top: "inception_4c/1x1/bn/sc" + name: "inception_4c/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/1x1/bn/sc" + top: "inception_4c/1x1/bn/sc" + name: "inception_4c/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/output" + top: "inception_4c/3x3_reduce" + name: "inception_4c/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/3x3_reduce" + name: "inception_4c/3x3_reduce/bn" + top: "inception_4c/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/3x3_reduce/bn" + top: "inception_4c/3x3_reduce/bn/sc" + name: "inception_4c/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/3x3_reduce/bn/sc" + top: 
"inception_4c/3x3_reduce/bn/sc" + name: "inception_4c/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/3x3_reduce/bn/sc" + top: "inception_4c/3x3" + name: "inception_4c/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/3x3" + name: "inception_4c/3x3/bn" + top: "inception_4c/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/3x3/bn" + top: "inception_4c/3x3/bn/sc" + name: "inception_4c/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/3x3/bn/sc" + top: "inception_4c/3x3/bn/sc" + name: "inception_4c/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/output" + top: "inception_4c/double3x3_reduce" + name: "inception_4c/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/double3x3_reduce" + name: "inception_4c/double3x3_reduce/bn" + top: "inception_4c/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/double3x3_reduce/bn" + top: "inception_4c/double3x3_reduce/bn/sc" + name: "inception_4c/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/double3x3_reduce/bn/sc" + top: "inception_4c/double3x3_reduce/bn/sc" + name: "inception_4c/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/double3x3_reduce/bn/sc" + top: "inception_4c/double3x3a" + name: "inception_4c/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + 
convolution_param { + + num_output: 160 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/double3x3a" + name: "inception_4c/double3x3a/bn" + top: "inception_4c/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/double3x3a/bn" + top: "inception_4c/double3x3a/bn/sc" + name: "inception_4c/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/double3x3a/bn/sc" + top: "inception_4c/double3x3a/bn/sc" + name: "inception_4c/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/double3x3a/bn/sc" + top: "inception_4c/double3x3b" + name: "inception_4c/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4c/double3x3b" + name: "inception_4c/double3x3b/bn" + top: "inception_4c/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/double3x3b/bn" + top: "inception_4c/double3x3b/bn/sc" + name: "inception_4c/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/double3x3b/bn/sc" + top: "inception_4c/double3x3b/bn/sc" + name: "inception_4c/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4b/output" + top: "inception_4c/pool" + name: "inception_4c/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_4c/pool" + top: "inception_4c/pool_proj" + name: "inception_4c/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + 
} + bias_term: false + } +} +layer { + bottom: "inception_4c/pool_proj" + name: "inception_4c/pool_proj/bn" + top: "inception_4c/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4c/pool_proj/bn" + top: "inception_4c/pool_proj/bn/sc" + name: "inception_4c/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4c/pool_proj/bn/sc" + top: "inception_4c/pool_proj/bn/sc" + name: "inception_4c/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/1x1/bn/sc" + bottom: "inception_4c/3x3/bn/sc" + bottom: "inception_4c/double3x3b/bn/sc" + bottom: "inception_4c/pool_proj/bn/sc" + top: "inception_4c/output" + name: "inception_4c/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_4c/output" + top: "inception_4d/1x1" + name: "inception_4d/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/1x1" + name: "inception_4d/1x1/bn" + top: "inception_4d/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/1x1/bn" + top: "inception_4d/1x1/bn/sc" + name: "inception_4d/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/1x1/bn/sc" + top: "inception_4d/1x1/bn/sc" + name: "inception_4d/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/output" + top: "inception_4d/3x3_reduce" + name: "inception_4d/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/3x3_reduce" + name: "inception_4d/3x3_reduce/bn" + top: 
"inception_4d/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/3x3_reduce/bn" + top: "inception_4d/3x3_reduce/bn/sc" + name: "inception_4d/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/3x3_reduce/bn/sc" + top: "inception_4d/3x3_reduce/bn/sc" + name: "inception_4d/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/3x3_reduce/bn/sc" + top: "inception_4d/3x3" + name: "inception_4d/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/3x3" + name: "inception_4d/3x3/bn" + top: "inception_4d/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/3x3/bn" + top: "inception_4d/3x3/bn/sc" + name: "inception_4d/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/3x3/bn/sc" + top: "inception_4d/3x3/bn/sc" + name: "inception_4d/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/output" + top: "inception_4d/double3x3_reduce" + name: "inception_4d/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/double3x3_reduce" + name: "inception_4d/double3x3_reduce/bn" + top: "inception_4d/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/double3x3_reduce/bn" + top: "inception_4d/double3x3_reduce/bn/sc" + name: "inception_4d/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/double3x3_reduce/bn/sc" + top: 
"inception_4d/double3x3_reduce/bn/sc" + name: "inception_4d/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/double3x3_reduce/bn/sc" + top: "inception_4d/double3x3a" + name: "inception_4d/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/double3x3a" + name: "inception_4d/double3x3a/bn" + top: "inception_4d/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/double3x3a/bn" + top: "inception_4d/double3x3a/bn/sc" + name: "inception_4d/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/double3x3a/bn/sc" + top: "inception_4d/double3x3a/bn/sc" + name: "inception_4d/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/double3x3a/bn/sc" + top: "inception_4d/double3x3b" + name: "inception_4d/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/double3x3b" + name: "inception_4d/double3x3b/bn" + top: "inception_4d/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/double3x3b/bn" + top: "inception_4d/double3x3b/bn/sc" + name: "inception_4d/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/double3x3b/bn/sc" + top: "inception_4d/double3x3b/bn/sc" + name: "inception_4d/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4c/output" + top: "inception_4d/pool" + name: "inception_4d/pool" + type: "Pooling" + pooling_param { + + pool: AVE + 
kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_4d/pool" + top: "inception_4d/pool_proj" + name: "inception_4d/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 96 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4d/pool_proj" + name: "inception_4d/pool_proj/bn" + top: "inception_4d/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4d/pool_proj/bn" + top: "inception_4d/pool_proj/bn/sc" + name: "inception_4d/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4d/pool_proj/bn/sc" + top: "inception_4d/pool_proj/bn/sc" + name: "inception_4d/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/1x1/bn/sc" + bottom: "inception_4d/3x3/bn/sc" + bottom: "inception_4d/double3x3b/bn/sc" + bottom: "inception_4d/pool_proj/bn/sc" + top: "inception_4d/output" + name: "inception_4d/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_4d/output" + top: "inception_4e/3x3_reduce" + name: "inception_4e/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4e/3x3_reduce" + name: "inception_4e/3x3_reduce/bn" + top: "inception_4e/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4e/3x3_reduce/bn" + top: "inception_4e/3x3_reduce/bn/sc" + name: "inception_4e/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4e/3x3_reduce/bn/sc" + top: "inception_4e/3x3_reduce/bn/sc" + name: "inception_4e/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: 
"inception_4e/3x3_reduce/bn/sc" + top: "inception_4e/3x3" + name: "inception_4e/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4e/3x3" + name: "inception_4e/3x3/bn" + top: "inception_4e/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4e/3x3/bn" + top: "inception_4e/3x3/bn/sc" + name: "inception_4e/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4e/3x3/bn/sc" + top: "inception_4e/3x3/bn/sc" + name: "inception_4e/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/output" + top: "inception_4e/double3x3_reduce" + name: "inception_4e/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4e/double3x3_reduce" + name: "inception_4e/double3x3_reduce/bn" + top: "inception_4e/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4e/double3x3_reduce/bn" + top: "inception_4e/double3x3_reduce/bn/sc" + name: "inception_4e/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4e/double3x3_reduce/bn/sc" + top: "inception_4e/double3x3_reduce/bn/sc" + name: "inception_4e/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/double3x3_reduce/bn/sc" + top: "inception_4e/double3x3a" + name: "inception_4e/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } 
+} +layer { + bottom: "inception_4e/double3x3a" + name: "inception_4e/double3x3a/bn" + top: "inception_4e/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4e/double3x3a/bn" + top: "inception_4e/double3x3a/bn/sc" + name: "inception_4e/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4e/double3x3a/bn/sc" + top: "inception_4e/double3x3a/bn/sc" + name: "inception_4e/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/double3x3a/bn/sc" + top: "inception_4e/double3x3b" + name: "inception_4e/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_4e/double3x3b" + name: "inception_4e/double3x3b/bn" + top: "inception_4e/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_4e/double3x3b/bn" + top: "inception_4e/double3x3b/bn/sc" + name: "inception_4e/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_4e/double3x3b/bn/sc" + top: "inception_4e/double3x3b/bn/sc" + name: "inception_4e/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4d/output" + top: "inception_4e/pool" + name: "inception_4e/pool" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + bottom: "inception_4e/3x3/bn/sc" + bottom: "inception_4e/double3x3b/bn/sc" + bottom: "inception_4e/pool" + top: "inception_4e/output" + name: "inception_4e/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_4e/output" + top: "pool4/5x5_s3" + name: "pool4/5x5_s3" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 5 + stride: 3 + } +} +layer { + bottom: "pool4/5x5_s3" + top: 
"loss2/conv" + name: "loss2/conv" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "loss2/conv" + name: "loss2/conv/bn" + top: "loss2/conv/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "loss2/conv/bn" + top: "loss2/conv/bn/sc" + name: "loss2/conv/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "loss2/conv/bn/sc" + top: "loss2/conv/bn/sc" + name: "loss2/conv/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "loss2/conv/bn/sc" + top: "loss2/fc" + name: "loss2/fc" + type: "InnerProduct" + param { + lr_mult: 1 + decay_mult: 1 + } + inner_product_param { + num_output: 1024 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "loss2/fc" + name: "loss2/fc/bn" + top: "loss2/fc/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "loss2/fc/bn" + top: "loss2/fc/bn/sc" + name: "loss2/fc/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "loss2/fc/bn/sc" + top: "loss2/fc/bn/sc" + name: "loss2/fc/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "loss2/fc/bn/sc" + top: "loss2/classifier" + name: "loss2/classifier" + type: "InnerProduct" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "loss2/classifier" + bottom: "label" + top: "loss2/loss" + name: "loss2/loss" + type: "SoftmaxWithLoss" + loss_weight: 0.3 +} +layer { + bottom: "loss2/classifier" + top: "loss2/prob" + name: "loss2/prob" + type: "Softmax" + include { + phase: TEST + } +} +layer { + bottom: "loss2/prob" + bottom: "label" + top: "loss2/top-1" + name: "loss2/top-1" + 
type: "Accuracy" + include { + phase: TEST + } +} +layer { + bottom: "loss2/prob" + bottom: "label" + top: "loss2/top-5" + name: "loss2/top-5" + type: "Accuracy" + accuracy_param { + top_k: 5 + } + include { + phase: TEST + } +} +layer { + bottom: "inception_4e/output" + top: "inception_5a/1x1" + name: "inception_5a/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 352 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/1x1" + name: "inception_5a/1x1/bn" + top: "inception_5a/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/1x1/bn" + top: "inception_5a/1x1/bn/sc" + name: "inception_5a/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/1x1/bn/sc" + top: "inception_5a/1x1/bn/sc" + name: "inception_5a/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/output" + top: "inception_5a/3x3_reduce" + name: "inception_5a/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/3x3_reduce" + name: "inception_5a/3x3_reduce/bn" + top: "inception_5a/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/3x3_reduce/bn" + top: "inception_5a/3x3_reduce/bn/sc" + name: "inception_5a/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/3x3_reduce/bn/sc" + top: "inception_5a/3x3_reduce/bn/sc" + name: "inception_5a/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/3x3_reduce/bn/sc" + top: "inception_5a/3x3" + name: "inception_5a/3x3" + type: "Convolution" + param { + lr_mult: 1 + 
decay_mult: 1 + } + convolution_param { + + num_output: 320 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/3x3" + name: "inception_5a/3x3/bn" + top: "inception_5a/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/3x3/bn" + top: "inception_5a/3x3/bn/sc" + name: "inception_5a/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/3x3/bn/sc" + top: "inception_5a/3x3/bn/sc" + name: "inception_5a/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/output" + top: "inception_5a/double3x3_reduce" + name: "inception_5a/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 160 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/double3x3_reduce" + name: "inception_5a/double3x3_reduce/bn" + top: "inception_5a/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/double3x3_reduce/bn" + top: "inception_5a/double3x3_reduce/bn/sc" + name: "inception_5a/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/double3x3_reduce/bn/sc" + top: "inception_5a/double3x3_reduce/bn/sc" + name: "inception_5a/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/double3x3_reduce/bn/sc" + top: "inception_5a/double3x3a" + name: "inception_5a/double3x3a" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 224 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/double3x3a" + name: "inception_5a/double3x3a/bn" + top: "inception_5a/double3x3a/bn" + type: 
"BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/double3x3a/bn" + top: "inception_5a/double3x3a/bn/sc" + name: "inception_5a/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/double3x3a/bn/sc" + top: "inception_5a/double3x3a/bn/sc" + name: "inception_5a/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/double3x3a/bn/sc" + top: "inception_5a/double3x3b" + name: "inception_5a/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 224 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/double3x3b" + name: "inception_5a/double3x3b/bn" + top: "inception_5a/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/double3x3b/bn" + top: "inception_5a/double3x3b/bn/sc" + name: "inception_5a/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/double3x3b/bn/sc" + top: "inception_5a/double3x3b/bn/sc" + name: "inception_5a/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_4e/output" + top: "inception_5a/pool" + name: "inception_5a/pool" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_5a/pool" + top: "inception_5a/pool_proj" + name: "inception_5a/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5a/pool_proj" + name: "inception_5a/pool_proj/bn" + top: "inception_5a/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5a/pool_proj/bn" + top: 
"inception_5a/pool_proj/bn/sc" + name: "inception_5a/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5a/pool_proj/bn/sc" + top: "inception_5a/pool_proj/bn/sc" + name: "inception_5a/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/1x1/bn/sc" + bottom: "inception_5a/3x3/bn/sc" + bottom: "inception_5a/double3x3b/bn/sc" + bottom: "inception_5a/pool_proj/bn/sc" + top: "inception_5a/output" + name: "inception_5a/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_5a/output" + top: "inception_5b/1x1" + name: "inception_5b/1x1" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 352 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/1x1" + name: "inception_5b/1x1/bn" + top: "inception_5b/1x1/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/1x1/bn" + top: "inception_5b/1x1/bn/sc" + name: "inception_5b/1x1/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/1x1/bn/sc" + top: "inception_5b/1x1/bn/sc" + name: "inception_5b/1x1/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/output" + top: "inception_5b/3x3_reduce" + name: "inception_5b/3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/3x3_reduce" + name: "inception_5b/3x3_reduce/bn" + top: "inception_5b/3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/3x3_reduce/bn" + top: "inception_5b/3x3_reduce/bn/sc" + name: "inception_5b/3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} 
+layer { + bottom: "inception_5b/3x3_reduce/bn/sc" + top: "inception_5b/3x3_reduce/bn/sc" + name: "inception_5b/3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5b/3x3_reduce/bn/sc" + top: "inception_5b/3x3" + name: "inception_5b/3x3" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 320 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/3x3" + name: "inception_5b/3x3/bn" + top: "inception_5b/3x3/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/3x3/bn" + top: "inception_5b/3x3/bn/sc" + name: "inception_5b/3x3/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/3x3/bn/sc" + top: "inception_5b/3x3/bn/sc" + name: "inception_5b/3x3/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/output" + top: "inception_5b/double3x3_reduce" + name: "inception_5b/double3x3_reduce" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 192 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/double3x3_reduce" + name: "inception_5b/double3x3_reduce/bn" + top: "inception_5b/double3x3_reduce/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/double3x3_reduce/bn" + top: "inception_5b/double3x3_reduce/bn/sc" + name: "inception_5b/double3x3_reduce/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/double3x3_reduce/bn/sc" + top: "inception_5b/double3x3_reduce/bn/sc" + name: "inception_5b/double3x3_reduce/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5b/double3x3_reduce/bn/sc" + top: "inception_5b/double3x3a" + name: "inception_5b/double3x3a" + type: 
"Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 224 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/double3x3a" + name: "inception_5b/double3x3a/bn" + top: "inception_5b/double3x3a/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/double3x3a/bn" + top: "inception_5b/double3x3a/bn/sc" + name: "inception_5b/double3x3a/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/double3x3a/bn/sc" + top: "inception_5b/double3x3a/bn/sc" + name: "inception_5b/double3x3a/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5b/double3x3a/bn/sc" + top: "inception_5b/double3x3b" + name: "inception_5b/double3x3b" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 224 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/double3x3b" + name: "inception_5b/double3x3b/bn" + top: "inception_5b/double3x3b/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/double3x3b/bn" + top: "inception_5b/double3x3b/bn/sc" + name: "inception_5b/double3x3b/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/double3x3b/bn/sc" + top: "inception_5b/double3x3b/bn/sc" + name: "inception_5b/double3x3b/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5a/output" + top: "inception_5b/pool" + name: "inception_5b/pool" + type: "Pooling" + pooling_param { + + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + bottom: "inception_5b/pool" + top: "inception_5b/pool_proj" + name: "inception_5b/pool_proj" + type: "Convolution" + param { + lr_mult: 1 + decay_mult: 1 + } + convolution_param { + + num_output: 128 + pad: 0 + 
kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_term: false + } +} +layer { + bottom: "inception_5b/pool_proj" + name: "inception_5b/pool_proj/bn" + top: "inception_5b/pool_proj/bn" + type: "BatchNorm" + batch_norm_param { + + } +} +layer { + bottom: "inception_5b/pool_proj/bn" + top: "inception_5b/pool_proj/bn/sc" + name: "inception_5b/pool_proj/bn/sc" + type: "Scale" + scale_param { + bias_term: true + } +} +layer { + bottom: "inception_5b/pool_proj/bn/sc" + top: "inception_5b/pool_proj/bn/sc" + name: "inception_5b/pool_proj/bn/sc/relu" + type: "ReLU" + relu_param { + + } +} +layer { + bottom: "inception_5b/1x1/bn/sc" + bottom: "inception_5b/3x3/bn/sc" + bottom: "inception_5b/double3x3b/bn/sc" + bottom: "inception_5b/pool_proj/bn/sc" + top: "inception_5b/output" + name: "inception_5b/output" + type: "Concat" + concat_param { + + } +} +layer { + bottom: "inception_5b/output" + top: "pool5/7x7_s1" + name: "pool5/7x7_s1" + type: "Pooling" + pooling_param { + + pool: AVE + kernel_size: 7 + stride: 1 + } +} +layer { + bottom: "pool5/7x7_s1" + top: "loss3/classifier" + name: "loss3/classifier" + type: "InnerProduct" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + inner_product_param { + num_output: 1000 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + bottom: "loss3/classifier" + bottom: "label" + top: "loss3/loss" + name: "loss3/loss" + type: "SoftmaxWithLoss" + loss_weight: 1 +} +layer { + bottom: "loss3/classifier" + top: "loss3/prob" + name: "loss3/prob" + type: "Softmax" + include { + phase: TEST + } +} +layer { + bottom: "loss3/prob" + bottom: "label" + top: "loss3/top-1" + name: "loss3/top-1" + type: "Accuracy" + include { + phase: TEST + } +} +layer { + bottom: "loss3/prob" + bottom: "label" + top: "loss3/top-5" + name: "loss3/top-5" + type: "Accuracy" + accuracy_param { + top_k: 5 + } + include { + phase: TEST + } +} diff --git 
a/models/intel_optimized_models/multinode/resnet_50_16_nodes/solver.prototxt b/models/intel_optimized_models/multinode/resnet_50_16_nodes/solver.prototxt new file mode 100644 index 00000000000..a66f60dfa62 --- /dev/null +++ b/models/intel_optimized_models/multinode/resnet_50_16_nodes/solver.prototxt @@ -0,0 +1,15 @@ +#This solver is described by Computer Vision Group Jena (CVGJ) in [ImageNet pre-trained models with batch normalization] (https://arxiv.org/pdf/1612.01452.pdf) +net: "models/intel_optimized_models/multinode/resnet_50_16_nodes/train_val.prototxt" +#test_iter: 5000 +#test_interval: 15000 +#test_initialization: false +base_lr: 0.1 +display: 20 +max_iter: 320000 +lr_policy: "poly" +power: 1 +momentum: 0.9 +weight_decay: 0.0001 +snapshot: 30000 +snapshot_prefix: "caffe-resnet50" +solver_mode: CPU diff --git a/models/intel_optimized_models/multinode/resnet_50_16_nodes/train_val.prototxt b/models/intel_optimized_models/multinode/resnet_50_16_nodes/train_val.prototxt new file mode 100644 index 00000000000..71b07d00a9c --- /dev/null +++ b/models/intel_optimized_models/multinode/resnet_50_16_nodes/train_val.prototxt @@ -0,0 +1,2306 @@ +#This is Intel(R) optimized (in terms of time to train) version of topology described in the [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) publication. +# +#Top-5 and Top-1 results achieved with this topology: +#Top-5: 92% +#Top-1: 73.9% +#Training was performed using server equipped with Intel(R) Xeon Phi(TM) CPU 7250 processor. 
+ +layer { +name: "data" +type: "Data" +top: "data" +top: "label" +include { + phase: TRAIN +} +transform_param { + scale: 0.0078125 + mirror: true + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 +} + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 16 + backend: LMDB + shuffle: true + } + +} +layer { +name: "data" +type: "Data" +top: "data" +top: "label" +include { + phase: TEST +} +transform_param { + scale: 0.0078125 + mirror: false + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 +} + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 10 + backend: LMDB + } + +} + +layer { +name: "conv1" +type: "Convolution" +bottom: "data" +top: "conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +param { + lr_mult: 2.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 64 + pad: 3 + kernel_size: 7 + stride: 2 + weight_filler { + type: "msra" + variance_norm: FAN_OUT + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "conv1_bn" +type: "BatchNorm" +bottom: "conv1" +top: "conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "conv1_relu" +type: "ReLU" +bottom: "conv1_pcs_arm_sim" +top: "conv1_pcs_arm_sim" + +} +layer { +name: "conv1_pool" +type: "Pooling" +bottom: "conv1_pcs_arm_sim" +top: "conv1_pool" +pooling_param { + kernel_size: 3 + stride: 2 +} + +} +layer { +name: "layer_64_1_conv1" +type: "Convolution" +bottom: "conv1_pool" +top: "layer_64_1_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 64 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_1_bn2" +type: "BatchNorm" +bottom: "layer_64_1_conv1" +top: "layer_64_1_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_1_relu2" +type: "ReLU" +bottom: "layer_64_1_conv1_pcs_arm_sim" +top: 
"layer_64_1_conv1_pcs_arm_sim" + +} +layer { +name: "layer_64_1_conv2" +type: "Convolution" +bottom: "layer_64_1_conv1_pcs_arm_sim" +top: "layer_64_1_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 64 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_1_bn3" +type: "BatchNorm" +bottom: "layer_64_1_conv2" +top: "layer_64_1_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_1_relu3" +type: "ReLU" +bottom: "layer_64_1_conv2_pcs_arm_sim" +top: "layer_64_1_conv2_pcs_arm_sim" + +} +layer { +name: "layer_64_1_conv3" +type: "Convolution" +bottom: "layer_64_1_conv2_pcs_arm_sim" +top: "layer_64_1_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_1_conv_expand" +type: "Convolution" +bottom: "layer_64_1_conv1_pcs_arm_sim" +top: "layer_64_1_conv_expand" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_1_sum" +type: "Eltwise" +bottom: "layer_64_1_conv3" +bottom: "layer_64_1_conv_expand" +top: "layer_64_1_sum" + +} +layer { +name: "layer_64_2_bn1" +type: "BatchNorm" +bottom: "layer_64_1_sum" +top: "layer_64_2_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_2_relu1" +type: "ReLU" +bottom: "layer_64_2_bn1_pcs_arm_sim" +top: "layer_64_2_bn1_pcs_arm_sim" + +} +layer { +name: "layer_64_2_conv1" +type: "Convolution" +bottom: "layer_64_2_bn1_pcs_arm_sim" +top: "layer_64_2_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + 
num_output: 64 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_2_bn2" +type: "BatchNorm" +bottom: "layer_64_2_conv1" +top: "layer_64_2_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_2_relu2" +type: "ReLU" +bottom: "layer_64_2_conv1_pcs_arm_sim" +top: "layer_64_2_conv1_pcs_arm_sim" + +} +layer { +name: "layer_64_2_conv2" +type: "Convolution" +bottom: "layer_64_2_conv1_pcs_arm_sim" +top: "layer_64_2_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 64 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_2_bn3" +type: "BatchNorm" +bottom: "layer_64_2_conv2" +top: "layer_64_2_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_2_relu3" +type: "ReLU" +bottom: "layer_64_2_conv2_pcs_arm_sim" +top: "layer_64_2_conv2_pcs_arm_sim" + +} +layer { +name: "layer_64_2_conv3" +type: "Convolution" +bottom: "layer_64_2_conv2_pcs_arm_sim" +top: "layer_64_2_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_2_sum" +type: "Eltwise" +bottom: "layer_64_2_conv3" +bottom: "layer_64_1_sum" +top: "layer_64_2_sum" + +} +layer { +name: "layer_64_3_bn1" +type: "BatchNorm" +bottom: "layer_64_2_sum" +top: "layer_64_3_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_3_relu1" +type: "ReLU" +bottom: "layer_64_3_bn1_pcs_arm_sim" +top: "layer_64_3_bn1_pcs_arm_sim" + +} +layer { +name: "layer_64_3_conv1" +type: "Convolution" +bottom: "layer_64_3_bn1_pcs_arm_sim" +top: "layer_64_3_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 
+} +convolution_param { + num_output: 64 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_3_bn2" +type: "BatchNorm" +bottom: "layer_64_3_conv1" +top: "layer_64_3_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_3_relu2" +type: "ReLU" +bottom: "layer_64_3_conv1_pcs_arm_sim" +top: "layer_64_3_conv1_pcs_arm_sim" + +} +layer { +name: "layer_64_3_conv2" +type: "Convolution" +bottom: "layer_64_3_conv1_pcs_arm_sim" +top: "layer_64_3_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 64 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_3_bn3" +type: "BatchNorm" +bottom: "layer_64_3_conv2" +top: "layer_64_3_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_3_relu3" +type: "ReLU" +bottom: "layer_64_3_conv2_pcs_arm_sim" +top: "layer_64_3_conv2_pcs_arm_sim" + +} +layer { +name: "layer_64_3_conv3" +type: "Convolution" +bottom: "layer_64_3_conv2_pcs_arm_sim" +top: "layer_64_3_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_3_sum" +type: "Eltwise" +bottom: "layer_64_3_conv3" +bottom: "layer_64_2_sum" +top: "layer_64_3_sum" + +} +layer { +name: "layer_128_1_bn1" +type: "BatchNorm" +bottom: "layer_64_3_sum" +top: "layer_128_1_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_1_relu1" +type: "ReLU" +bottom: "layer_128_1_bn1_pcs_arm_sim" +top: "layer_128_1_bn1_pcs_arm_sim" + +} +layer { +name: "layer_128_1_conv1" +type: "Convolution" +bottom: "layer_128_1_bn1_pcs_arm_sim" +top: "layer_128_1_conv1" +param { 
+ lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_1_bn2" +type: "BatchNorm" +bottom: "layer_128_1_conv1" +top: "layer_128_1_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_1_relu2" +type: "ReLU" +bottom: "layer_128_1_conv1_pcs_arm_sim" +top: "layer_128_1_conv1_pcs_arm_sim" + +} +layer { +name: "layer_128_1_conv2" +type: "Convolution" +bottom: "layer_128_1_conv1_pcs_arm_sim" +top: "layer_128_1_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_1_bn3" +type: "BatchNorm" +bottom: "layer_128_1_conv2" +top: "layer_128_1_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_1_relu3" +type: "ReLU" +bottom: "layer_128_1_conv2_pcs_arm_sim" +top: "layer_128_1_conv2_pcs_arm_sim" + +} +layer { +name: "layer_128_1_conv3" +type: "Convolution" +bottom: "layer_128_1_conv2_pcs_arm_sim" +top: "layer_128_1_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_1_conv_expand" +type: "Convolution" +bottom: "layer_128_1_bn1_pcs_arm_sim" +top: "layer_128_1_conv_expand" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 2 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_1_sum" +type: "Eltwise" +bottom: "layer_128_1_conv3" +bottom: 
"layer_128_1_conv_expand" +top: "layer_128_1_sum" + +} +layer { +name: "layer_128_2_bn1" +type: "BatchNorm" +bottom: "layer_128_1_sum" +top: "layer_128_2_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_2_relu1" +type: "ReLU" +bottom: "layer_128_2_bn1_pcs_arm_sim" +top: "layer_128_2_bn1_pcs_arm_sim" + +} +layer { +name: "layer_128_2_conv1" +type: "Convolution" +bottom: "layer_128_2_bn1_pcs_arm_sim" +top: "layer_128_2_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_2_bn2" +type: "BatchNorm" +bottom: "layer_128_2_conv1" +top: "layer_128_2_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_2_relu2" +type: "ReLU" +bottom: "layer_128_2_conv1_pcs_arm_sim" +top: "layer_128_2_conv1_pcs_arm_sim" + +} +layer { +name: "layer_128_2_conv2" +type: "Convolution" +bottom: "layer_128_2_conv1_pcs_arm_sim" +top: "layer_128_2_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_2_bn3" +type: "BatchNorm" +bottom: "layer_128_2_conv2" +top: "layer_128_2_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_2_relu3" +type: "ReLU" +bottom: "layer_128_2_conv2_pcs_arm_sim" +top: "layer_128_2_conv2_pcs_arm_sim" + +} +layer { +name: "layer_128_2_conv3" +type: "Convolution" +bottom: "layer_128_2_conv2_pcs_arm_sim" +top: "layer_128_2_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: 
"layer_128_2_sum" +type: "Eltwise" +bottom: "layer_128_2_conv3" +bottom: "layer_128_1_sum" +top: "layer_128_2_sum" + +} +layer { +name: "layer_128_3_bn1" +type: "BatchNorm" +bottom: "layer_128_2_sum" +top: "layer_128_3_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_3_relu1" +type: "ReLU" +bottom: "layer_128_3_bn1_pcs_arm_sim" +top: "layer_128_3_bn1_pcs_arm_sim" + +} +layer { +name: "layer_128_3_conv1" +type: "Convolution" +bottom: "layer_128_3_bn1_pcs_arm_sim" +top: "layer_128_3_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_3_bn2" +type: "BatchNorm" +bottom: "layer_128_3_conv1" +top: "layer_128_3_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_3_relu2" +type: "ReLU" +bottom: "layer_128_3_conv1_pcs_arm_sim" +top: "layer_128_3_conv1_pcs_arm_sim" + +} +layer { +name: "layer_128_3_conv2" +type: "Convolution" +bottom: "layer_128_3_conv1_pcs_arm_sim" +top: "layer_128_3_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_3_bn3" +type: "BatchNorm" +bottom: "layer_128_3_conv2" +top: "layer_128_3_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_3_relu3" +type: "ReLU" +bottom: "layer_128_3_conv2_pcs_arm_sim" +top: "layer_128_3_conv2_pcs_arm_sim" + +} +layer { +name: "layer_128_3_conv3" +type: "Convolution" +bottom: "layer_128_3_conv2_pcs_arm_sim" +top: "layer_128_3_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + 
type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_3_sum" +type: "Eltwise" +bottom: "layer_128_3_conv3" +bottom: "layer_128_2_sum" +top: "layer_128_3_sum" + +} +layer { +name: "layer_128_4_bn1" +type: "BatchNorm" +bottom: "layer_128_3_sum" +top: "layer_128_4_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_4_relu1" +type: "ReLU" +bottom: "layer_128_4_bn1_pcs_arm_sim" +top: "layer_128_4_bn1_pcs_arm_sim" + +} +layer { +name: "layer_128_4_conv1" +type: "Convolution" +bottom: "layer_128_4_bn1_pcs_arm_sim" +top: "layer_128_4_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_4_bn2" +type: "BatchNorm" +bottom: "layer_128_4_conv1" +top: "layer_128_4_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_4_relu2" +type: "ReLU" +bottom: "layer_128_4_conv1_pcs_arm_sim" +top: "layer_128_4_conv1_pcs_arm_sim" + +} +layer { +name: "layer_128_4_conv2" +type: "Convolution" +bottom: "layer_128_4_conv1_pcs_arm_sim" +top: "layer_128_4_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_4_bn3" +type: "BatchNorm" +bottom: "layer_128_4_conv2" +top: "layer_128_4_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_4_relu3" +type: "ReLU" +bottom: "layer_128_4_conv2_pcs_arm_sim" +top: "layer_128_4_conv2_pcs_arm_sim" + +} +layer { +name: "layer_128_4_conv3" +type: "Convolution" +bottom: "layer_128_4_conv2_pcs_arm_sim" +top: "layer_128_4_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + 
stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_4_sum" +type: "Eltwise" +bottom: "layer_128_4_conv3" +bottom: "layer_128_3_sum" +top: "layer_128_4_sum" + +} +layer { +name: "layer_256_1_bn1" +type: "BatchNorm" +bottom: "layer_128_4_sum" +top: "layer_256_1_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_1_relu1" +type: "ReLU" +bottom: "layer_256_1_bn1_pcs_arm_sim" +top: "layer_256_1_bn1_pcs_arm_sim" + +} +layer { +name: "layer_256_1_conv1" +type: "Convolution" +bottom: "layer_256_1_bn1_pcs_arm_sim" +top: "layer_256_1_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_1_bn2" +type: "BatchNorm" +bottom: "layer_256_1_conv1" +top: "layer_256_1_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_1_relu2" +type: "ReLU" +bottom: "layer_256_1_conv1_pcs_arm_sim" +top: "layer_256_1_conv1_pcs_arm_sim" + +} +layer { +name: "layer_256_1_conv2" +type: "Convolution" +bottom: "layer_256_1_conv1_pcs_arm_sim" +top: "layer_256_1_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_1_bn3" +type: "BatchNorm" +bottom: "layer_256_1_conv2" +top: "layer_256_1_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_1_relu3" +type: "ReLU" +bottom: "layer_256_1_conv2_pcs_arm_sim" +top: "layer_256_1_conv2_pcs_arm_sim" + +} +layer { +name: "layer_256_1_conv3" +type: "Convolution" +bottom: "layer_256_1_conv2_pcs_arm_sim" +top: "layer_256_1_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + 
num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_1_conv_expand" +type: "Convolution" +bottom: "layer_256_1_bn1_pcs_arm_sim" +top: "layer_256_1_conv_expand" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 2 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_1_sum" +type: "Eltwise" +bottom: "layer_256_1_conv3" +bottom: "layer_256_1_conv_expand" +top: "layer_256_1_sum" + +} +layer { +name: "layer_256_2_bn1" +type: "BatchNorm" +bottom: "layer_256_1_sum" +top: "layer_256_2_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_2_relu1" +type: "ReLU" +bottom: "layer_256_2_bn1_pcs_arm_sim" +top: "layer_256_2_bn1_pcs_arm_sim" + +} +layer { +name: "layer_256_2_conv1" +type: "Convolution" +bottom: "layer_256_2_bn1_pcs_arm_sim" +top: "layer_256_2_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_2_bn2" +type: "BatchNorm" +bottom: "layer_256_2_conv1" +top: "layer_256_2_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_2_relu2" +type: "ReLU" +bottom: "layer_256_2_conv1_pcs_arm_sim" +top: "layer_256_2_conv1_pcs_arm_sim" + +} +layer { +name: "layer_256_2_conv2" +type: "Convolution" +bottom: "layer_256_2_conv1_pcs_arm_sim" +top: "layer_256_2_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_2_bn3" 
+type: "BatchNorm" +bottom: "layer_256_2_conv2" +top: "layer_256_2_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_2_relu3" +type: "ReLU" +bottom: "layer_256_2_conv2_pcs_arm_sim" +top: "layer_256_2_conv2_pcs_arm_sim" + +} +layer { +name: "layer_256_2_conv3" +type: "Convolution" +bottom: "layer_256_2_conv2_pcs_arm_sim" +top: "layer_256_2_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_2_sum" +type: "Eltwise" +bottom: "layer_256_2_conv3" +bottom: "layer_256_1_sum" +top: "layer_256_2_sum" + +} +layer { +name: "layer_256_3_bn1" +type: "BatchNorm" +bottom: "layer_256_2_sum" +top: "layer_256_3_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_3_relu1" +type: "ReLU" +bottom: "layer_256_3_bn1_pcs_arm_sim" +top: "layer_256_3_bn1_pcs_arm_sim" + +} +layer { +name: "layer_256_3_conv1" +type: "Convolution" +bottom: "layer_256_3_bn1_pcs_arm_sim" +top: "layer_256_3_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_3_bn2" +type: "BatchNorm" +bottom: "layer_256_3_conv1" +top: "layer_256_3_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_3_relu2" +type: "ReLU" +bottom: "layer_256_3_conv1_pcs_arm_sim" +top: "layer_256_3_conv1_pcs_arm_sim" + +} +layer { +name: "layer_256_3_conv2" +type: "Convolution" +bottom: "layer_256_3_conv1_pcs_arm_sim" +top: "layer_256_3_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" 
+ value: 0.0 + } +} + +} +layer { +name: "layer_256_3_bn3" +type: "BatchNorm" +bottom: "layer_256_3_conv2" +top: "layer_256_3_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_3_relu3" +type: "ReLU" +bottom: "layer_256_3_conv2_pcs_arm_sim" +top: "layer_256_3_conv2_pcs_arm_sim" + +} +layer { +name: "layer_256_3_conv3" +type: "Convolution" +bottom: "layer_256_3_conv2_pcs_arm_sim" +top: "layer_256_3_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_3_sum" +type: "Eltwise" +bottom: "layer_256_3_conv3" +bottom: "layer_256_2_sum" +top: "layer_256_3_sum" + +} +layer { +name: "layer_256_4_bn1" +type: "BatchNorm" +bottom: "layer_256_3_sum" +top: "layer_256_4_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_4_relu1" +type: "ReLU" +bottom: "layer_256_4_bn1_pcs_arm_sim" +top: "layer_256_4_bn1_pcs_arm_sim" + +} +layer { +name: "layer_256_4_conv1" +type: "Convolution" +bottom: "layer_256_4_bn1_pcs_arm_sim" +top: "layer_256_4_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_4_bn2" +type: "BatchNorm" +bottom: "layer_256_4_conv1" +top: "layer_256_4_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_4_relu2" +type: "ReLU" +bottom: "layer_256_4_conv1_pcs_arm_sim" +top: "layer_256_4_conv1_pcs_arm_sim" + +} +layer { +name: "layer_256_4_conv2" +type: "Convolution" +bottom: "layer_256_4_conv1_pcs_arm_sim" +top: "layer_256_4_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + 
weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_4_bn3" +type: "BatchNorm" +bottom: "layer_256_4_conv2" +top: "layer_256_4_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_4_relu3" +type: "ReLU" +bottom: "layer_256_4_conv2_pcs_arm_sim" +top: "layer_256_4_conv2_pcs_arm_sim" + +} +layer { +name: "layer_256_4_conv3" +type: "Convolution" +bottom: "layer_256_4_conv2_pcs_arm_sim" +top: "layer_256_4_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_4_sum" +type: "Eltwise" +bottom: "layer_256_4_conv3" +bottom: "layer_256_3_sum" +top: "layer_256_4_sum" + +} +layer { +name: "layer_256_5_bn1" +type: "BatchNorm" +bottom: "layer_256_4_sum" +top: "layer_256_5_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_5_relu1" +type: "ReLU" +bottom: "layer_256_5_bn1_pcs_arm_sim" +top: "layer_256_5_bn1_pcs_arm_sim" + +} +layer { +name: "layer_256_5_conv1" +type: "Convolution" +bottom: "layer_256_5_bn1_pcs_arm_sim" +top: "layer_256_5_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_5_bn2" +type: "BatchNorm" +bottom: "layer_256_5_conv1" +top: "layer_256_5_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_5_relu2" +type: "ReLU" +bottom: "layer_256_5_conv1_pcs_arm_sim" +top: "layer_256_5_conv1_pcs_arm_sim" + +} +layer { +name: "layer_256_5_conv2" +type: "Convolution" +bottom: "layer_256_5_conv1_pcs_arm_sim" +top: "layer_256_5_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 
+ bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_5_bn3" +type: "BatchNorm" +bottom: "layer_256_5_conv2" +top: "layer_256_5_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_5_relu3" +type: "ReLU" +bottom: "layer_256_5_conv2_pcs_arm_sim" +top: "layer_256_5_conv2_pcs_arm_sim" + +} +layer { +name: "layer_256_5_conv3" +type: "Convolution" +bottom: "layer_256_5_conv2_pcs_arm_sim" +top: "layer_256_5_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_5_sum" +type: "Eltwise" +bottom: "layer_256_5_conv3" +bottom: "layer_256_4_sum" +top: "layer_256_5_sum" + +} +layer { +name: "layer_256_6_bn1" +type: "BatchNorm" +bottom: "layer_256_5_sum" +top: "layer_256_6_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_6_relu1" +type: "ReLU" +bottom: "layer_256_6_bn1_pcs_arm_sim" +top: "layer_256_6_bn1_pcs_arm_sim" + +} +layer { +name: "layer_256_6_conv1" +type: "Convolution" +bottom: "layer_256_6_bn1_pcs_arm_sim" +top: "layer_256_6_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_6_bn2" +type: "BatchNorm" +bottom: "layer_256_6_conv1" +top: "layer_256_6_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_6_relu2" +type: "ReLU" +bottom: "layer_256_6_conv1_pcs_arm_sim" +top: "layer_256_6_conv1_pcs_arm_sim" + +} +layer { +name: "layer_256_6_conv2" +type: "Convolution" +bottom: "layer_256_6_conv1_pcs_arm_sim" +top: "layer_256_6_conv2" +param { + lr_mult: 1.0 
+ decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_6_bn3" +type: "BatchNorm" +bottom: "layer_256_6_conv2" +top: "layer_256_6_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_6_relu3" +type: "ReLU" +bottom: "layer_256_6_conv2_pcs_arm_sim" +top: "layer_256_6_conv2_pcs_arm_sim" + +} +layer { +name: "layer_256_6_conv3" +type: "Convolution" +bottom: "layer_256_6_conv2_pcs_arm_sim" +top: "layer_256_6_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_6_sum" +type: "Eltwise" +bottom: "layer_256_6_conv3" +bottom: "layer_256_5_sum" +top: "layer_256_6_sum" + +} +layer { +name: "layer_512_1_bn1" +type: "BatchNorm" +bottom: "layer_256_6_sum" +top: "layer_512_1_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_1_relu1" +type: "ReLU" +bottom: "layer_512_1_bn1_pcs_arm_sim" +top: "layer_512_1_bn1_pcs_arm_sim" + +} +layer { +name: "layer_512_1_conv1" +type: "Convolution" +bottom: "layer_512_1_bn1_pcs_arm_sim" +top: "layer_512_1_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_1_bn2" +type: "BatchNorm" +bottom: "layer_512_1_conv1" +top: "layer_512_1_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_1_relu2" +type: "ReLU" +bottom: "layer_512_1_conv1_pcs_arm_sim" +top: "layer_512_1_conv1_pcs_arm_sim" + +} +layer { +name: "layer_512_1_conv2" +type: "Convolution" +bottom: 
"layer_512_1_conv1_pcs_arm_sim" +top: "layer_512_1_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_1_bn3" +type: "BatchNorm" +bottom: "layer_512_1_conv2" +top: "layer_512_1_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_1_relu3" +type: "ReLU" +bottom: "layer_512_1_conv2_pcs_arm_sim" +top: "layer_512_1_conv2_pcs_arm_sim" + +} +layer { +name: "layer_512_1_conv3" +type: "Convolution" +bottom: "layer_512_1_conv2_pcs_arm_sim" +top: "layer_512_1_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 2048 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_1_conv_expand" +type: "Convolution" +bottom: "layer_512_1_bn1_pcs_arm_sim" +top: "layer_512_1_conv_expand" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 2048 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 2 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_1_sum" +type: "Eltwise" +bottom: "layer_512_1_conv3" +bottom: "layer_512_1_conv_expand" +top: "layer_512_1_sum" + +} +layer { +name: "layer_512_2_bn1" +type: "BatchNorm" +bottom: "layer_512_1_sum" +top: "layer_512_2_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_2_relu1" +type: "ReLU" +bottom: "layer_512_2_bn1_pcs_arm_sim" +top: "layer_512_2_bn1_pcs_arm_sim" + +} +layer { +name: "layer_512_2_conv1" +type: "Convolution" +bottom: "layer_512_2_bn1_pcs_arm_sim" +top: "layer_512_2_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + 
weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_2_bn2" +type: "BatchNorm" +bottom: "layer_512_2_conv1" +top: "layer_512_2_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_2_relu2" +type: "ReLU" +bottom: "layer_512_2_conv1_pcs_arm_sim" +top: "layer_512_2_conv1_pcs_arm_sim" + +} +layer { +name: "layer_512_2_conv2" +type: "Convolution" +bottom: "layer_512_2_conv1_pcs_arm_sim" +top: "layer_512_2_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_2_bn3" +type: "BatchNorm" +bottom: "layer_512_2_conv2" +top: "layer_512_2_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_2_relu3" +type: "ReLU" +bottom: "layer_512_2_conv2_pcs_arm_sim" +top: "layer_512_2_conv2_pcs_arm_sim" + +} +layer { +name: "layer_512_2_conv3" +type: "Convolution" +bottom: "layer_512_2_conv2_pcs_arm_sim" +top: "layer_512_2_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 2048 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_2_sum" +type: "Eltwise" +bottom: "layer_512_2_conv3" +bottom: "layer_512_1_sum" +top: "layer_512_2_sum" + +} +layer { +name: "layer_512_3_bn1" +type: "BatchNorm" +bottom: "layer_512_2_sum" +top: "layer_512_3_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_3_relu1" +type: "ReLU" +bottom: "layer_512_3_bn1_pcs_arm_sim" +top: "layer_512_3_bn1_pcs_arm_sim" + +} +layer { +name: "layer_512_3_conv1" +type: "Convolution" +bottom: "layer_512_3_bn1_pcs_arm_sim" +top: "layer_512_3_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 
+ bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_3_bn2" +type: "BatchNorm" +bottom: "layer_512_3_conv1" +top: "layer_512_3_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_3_relu2" +type: "ReLU" +bottom: "layer_512_3_conv1_pcs_arm_sim" +top: "layer_512_3_conv1_pcs_arm_sim" + +} +layer { +name: "layer_512_3_conv2" +type: "Convolution" +bottom: "layer_512_3_conv1_pcs_arm_sim" +top: "layer_512_3_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_3_bn3" +type: "BatchNorm" +bottom: "layer_512_3_conv2" +top: "layer_512_3_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_3_relu3" +type: "ReLU" +bottom: "layer_512_3_conv2_pcs_arm_sim" +top: "layer_512_3_conv2_pcs_arm_sim" + +} +layer { +name: "layer_512_3_conv3" +type: "Convolution" +bottom: "layer_512_3_conv2_pcs_arm_sim" +top: "layer_512_3_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 2048 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_3_sum" +type: "Eltwise" +bottom: "layer_512_3_conv3" +bottom: "layer_512_2_sum" +top: "layer_512_3_sum" + +} +layer { +name: "last_bn" +type: "BatchNorm" +bottom: "layer_512_3_sum" +top: "layer_512_3_sum_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "last_relu" +type: "ReLU" +bottom: "layer_512_3_sum_pcs_arm_sim" +top: "layer_512_3_sum_pcs_arm_sim" + +} +layer { +name: "global_pool" +type: "Pooling" +bottom: "layer_512_3_sum_pcs_arm_sim" +top: "global_pool" +pooling_param { + pool: AVE + global_pooling: true +} + 
+} +layer { +name: "score" +type: "InnerProduct" +bottom: "global_pool" +top: "score" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +param { + lr_mult: 2.0 + decay_mult: 1.0 +} +inner_product_param { + num_output: 1000 +} + +} +layer { +name: "loss" +type: "SoftmaxWithLoss" +bottom: "score" +bottom: "label" +top: "loss" + +} +layer { +name: "accuracy" +type: "Accuracy" +bottom: "score" +bottom: "label" +top: "accuracy" +include { + phase: TEST +} + +} diff --git a/models/intel_optimized_models/multinode/resnet_50_256_nodes_8k_batch/solver.prototxt b/models/intel_optimized_models/multinode/resnet_50_256_nodes_8k_batch/solver.prototxt new file mode 100644 index 00000000000..8f03f6a3aac --- /dev/null +++ b/models/intel_optimized_models/multinode/resnet_50_256_nodes_8k_batch/solver.prototxt @@ -0,0 +1,19 @@ +net: "models/intel_optimized_models/multinode/resnet_50_256_nodes_8k_batch/train_val.prototxt" +test_iter: 1000 +test_interval: 156 +test_initialization: false +display: 40 +base_lr: 3.2 +lr_policy: "multistep" +stepvalue:4680 +stepvalue:9360 +stepvalue:12480 +gamma: 0.1 +max_iter: 14075 +warmup_iter: 780 # 1281167 / 8192 * 5 epochs +warmup_start_lr: 0.1 +momentum: 0.9 +weight_decay: 0.0001 +snapshot: 156 +snapshot_prefix: "models/intel_optimized_models/multinode/resnet_50_256_nodes_8k_batch/resnet_50_256_nodes_8k" +solver_mode: CPU diff --git a/models/intel_optimized_models/multinode/resnet_50_256_nodes_8k_batch/train_val.prototxt b/models/intel_optimized_models/multinode/resnet_50_256_nodes_8k_batch/train_val.prototxt new file mode 100644 index 00000000000..d98323ed690 --- /dev/null +++ b/models/intel_optimized_models/multinode/resnet_50_256_nodes_8k_batch/train_val.prototxt @@ -0,0 +1,3321 @@ +name: "ResNet-50" +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 224 + scale: 0.0078125 + mean_value: 104 + mean_value: 117 + mean_value: 123 + random_aspect_ratio_param { + 
min_area_ratio: 0.08 + max_area_ratio: 1 + aspect_ratio_change: 0.75 + resize_param { + interp_mode: CUBIC + } + } + } + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 32 + backend: LMDB + prefetch: 2 + shuffle: true + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mirror: false + crop_size: 224 + scale: 0.0078125 + mean_value: 104 + mean_value: 117 + mean_value: 123 + random_resize_param { + min_size: 256 + max_size: 256 + resize_param { + interp_mode: CUBIC + } + } + } + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } +} + +layer { + bottom: "data" + top: "conv1" + name: "conv1" + type: "Convolution" + convolution_param { + num_output: 64 + kernel_size: 7 + pad: 3 + stride: 2 + weight_filler { + type: "msra" + variance_norm: FAN_OUT + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "conv1" + top: "conv1" + name: "bn_conv1" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "conv1" + top: "conv1" + name: "scale_conv1" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "conv1" + top: "conv1" + name: "conv1_relu" + type: "ReLU" + relu_param { + } +} + +layer { + bottom: "conv1" + top: "pool1" + name: "pool1" + type: "Pooling" + pooling_param { + kernel_size: 3 + stride: 2 + pool: MAX + } +} + +layer { + bottom: "pool1" + top: "res2a_branch1" + name: "res2a_branch1" + type: "Convolution" + convolution_param { + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res2a_branch1" + top: "res2a_branch1" + name: "bn2a_branch1" + type: 
"BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res2a_branch1" + top: "res2a_branch1" + name: "scale2a_branch1" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "pool1" + top: "res2a_branch2a" + name: "res2a_branch2a" + type: "Convolution" + convolution_param { + + num_output: 64 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res2a_branch2a" + top: "res2a_branch2a" + name: "bn2a_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res2a_branch2a" + top: "res2a_branch2a" + name: "scale2a_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2a_branch2a" + top: "res2a_branch2a" + name: "res2a_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2a_branch2a" + top: "res2a_branch2b" + name: "res2a_branch2b" + type: "Convolution" + convolution_param { + num_output: 64 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res2a_branch2b" + top: "res2a_branch2b" + name: "bn2a_branch2b" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res2a_branch2b" + top: "res2a_branch2b" + name: "scale2a_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + 
bottom: "res2a_branch2b" + top: "res2a_branch2b" + name: "res2a_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2a_branch2b" + top: "res2a_branch2c" + name: "res2a_branch2c" + type: "Convolution" + convolution_param { + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res2a_branch2c" + top: "res2a_branch2c" + name: "bn2a_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res2a_branch2c" + top: "res2a_branch2c" + name: "scale2a_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2a_branch1" + bottom: "res2a_branch2c" + top: "res2a" + name: "res2a" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res2a" + top: "res2a" + name: "res2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2a" + top: "res2b_branch2a" + name: "res2b_branch2a" + type: "Convolution" + convolution_param { + num_output: 64 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res2b_branch2a" + top: "res2b_branch2a" + name: "bn2b_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res2b_branch2a" + top: "res2b_branch2a" + name: "scale2b_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2b_branch2a" + top: "res2b_branch2a" + name: "res2b_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer 
{ + bottom: "res2b_branch2a" + top: "res2b_branch2b" + name: "res2b_branch2b" + type: "Convolution" + convolution_param { + num_output: 64 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res2b_branch2b" + top: "res2b_branch2b" + name: "bn2b_branch2b" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res2b_branch2b" + top: "res2b_branch2b" + name: "scale2b_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2b_branch2b" + top: "res2b_branch2b" + name: "res2b_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2b_branch2b" + top: "res2b_branch2c" + name: "res2b_branch2c" + type: "Convolution" + convolution_param { + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res2b_branch2c" + top: "res2b_branch2c" + name: "bn2b_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res2b_branch2c" + top: "res2b_branch2c" + name: "scale2b_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2a" + bottom: "res2b_branch2c" + top: "res2b" + name: "res2b" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res2b" + top: "res2b" + name: "res2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2b" + top: "res2c_branch2a" + name: "res2c_branch2a" + type: "Convolution" + convolution_param { + + 
num_output: 64 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res2c_branch2a" + top: "res2c_branch2a" + name: "bn2c_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res2c_branch2a" + top: "res2c_branch2a" + name: "scale2c_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2c_branch2a" + top: "res2c_branch2a" + name: "res2c_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2c_branch2a" + top: "res2c_branch2b" + name: "res2c_branch2b" + type: "Convolution" + convolution_param { + num_output: 64 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res2c_branch2b" + top: "res2c_branch2b" + name: "bn2c_branch2b" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res2c_branch2b" + top: "res2c_branch2b" + name: "scale2c_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2c_branch2b" + top: "res2c_branch2b" + name: "res2c_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2c_branch2b" + top: "res2c_branch2c" + name: "res2c_branch2c" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res2c_branch2c" + top: "res2c_branch2c" 
+ name: "bn2c_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 0 } + } +} + +layer { + bottom: "res2c_branch2c" + top: "res2c_branch2c" + name: "scale2c_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2b" + bottom: "res2c_branch2c" + top: "res2c" + name: "res2c" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res2c" + top: "res2c" + name: "res2c_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res2c" + top: "res3a_branch1" + name: "res3a_branch1" + type: "Convolution" + convolution_param { + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 2 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res3a_branch1" + top: "res3a_branch1" + name: "bn3a_branch1" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res3a_branch1" + top: "res3a_branch1" + name: "scale3a_branch1" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res2c" + top: "res3a_branch2a" + name: "res3a_branch2a" + type: "Convolution" + convolution_param { + + num_output: 128 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res3a_branch2a" + top: "res3a_branch2a" + name: "bn3a_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res3a_branch2a" + top: "res3a_branch2a" + name: 
"scale3a_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3a_branch2a" + top: "res3a_branch2a" + name: "res3a_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3a_branch2a" + top: "res3a_branch2b" + name: "res3a_branch2b" + type: "Convolution" + convolution_param { + + num_output: 128 + kernel_size: 3 + pad: 1 + stride: 2 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res3a_branch2b" + top: "res3a_branch2b" + name: "bn3a_branch2b" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res3a_branch2b" + top: "res3a_branch2b" + name: "scale3a_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3a_branch2b" + top: "res3a_branch2b" + name: "res3a_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3a_branch2b" + top: "res3a_branch2c" + name: "res3a_branch2c" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res3a_branch2c" + top: "res3a_branch2c" + name: "bn3a_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res3a_branch2c" + top: "res3a_branch2c" + name: "scale3a_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3a_branch1" + bottom: "res3a_branch2c" + top: "res3a" + name: "res3a" + 
type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res3a" + top: "res3a" + name: "res3a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3a" + top: "res3b_branch2a" + name: "res3b_branch2a" + type: "Convolution" + convolution_param { + + num_output: 128 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res3b_branch2a" + top: "res3b_branch2a" + name: "bn3b_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res3b_branch2a" + top: "res3b_branch2a" + name: "scale3b_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3b_branch2a" + top: "res3b_branch2a" + name: "res3b_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3b_branch2a" + top: "res3b_branch2b" + name: "res3b_branch2b" + type: "Convolution" + convolution_param { + + num_output: 128 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res3b_branch2b" + top: "res3b_branch2b" + name: "bn3b_branch2b" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res3b_branch2b" + top: "res3b_branch2b" + name: "scale3b_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3b_branch2b" + top: "res3b_branch2b" + name: "res3b_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3b_branch2b" + top: "res3b_branch2c" + name: "res3b_branch2c" 
+ type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res3b_branch2c" + top: "res3b_branch2c" + name: "bn3b_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res3b_branch2c" + top: "res3b_branch2c" + name: "scale3b_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3a" + bottom: "res3b_branch2c" + top: "res3b" + name: "res3b" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res3b" + top: "res3b" + name: "res3b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3b" + top: "res3c_branch2a" + name: "res3c_branch2a" + type: "Convolution" + convolution_param { + + num_output: 128 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res3c_branch2a" + top: "res3c_branch2a" + name: "bn3c_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res3c_branch2a" + top: "res3c_branch2a" + name: "scale3c_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3c_branch2a" + top: "res3c_branch2a" + name: "res3c_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3c_branch2a" + top: "res3c_branch2b" + name: "res3c_branch2b" + type: "Convolution" + convolution_param { + + num_output: 128 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + 
weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res3c_branch2b" + top: "res3c_branch2b" + name: "bn3c_branch2b" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res3c_branch2b" + top: "res3c_branch2b" + name: "scale3c_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3c_branch2b" + top: "res3c_branch2b" + name: "res3c_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3c_branch2b" + top: "res3c_branch2c" + name: "res3c_branch2c" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res3c_branch2c" + top: "res3c_branch2c" + name: "bn3c_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res3c_branch2c" + top: "res3c_branch2c" + name: "scale3c_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3b" + bottom: "res3c_branch2c" + top: "res3c" + name: "res3c" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res3c" + top: "res3c" + name: "res3c_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3c" + top: "res3d_branch2a" + name: "res3d_branch2a" + type: "Convolution" + convolution_param { + num_output: 128 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res3d_branch2a" + 
top: "res3d_branch2a" + name: "bn3d_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res3d_branch2a" + top: "res3d_branch2a" + name: "scale3d_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3d_branch2a" + top: "res3d_branch2a" + name: "res3d_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3d_branch2a" + top: "res3d_branch2b" + name: "res3d_branch2b" + type: "Convolution" + convolution_param { + num_output: 128 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res3d_branch2b" + top: "res3d_branch2b" + name: "bn3d_branch2b" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res3d_branch2b" + top: "res3d_branch2b" + name: "scale3d_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3d_branch2b" + top: "res3d_branch2b" + name: "res3d_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3d_branch2b" + top: "res3d_branch2c" + name: "res3d_branch2c" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res3d_branch2c" + top: "res3d_branch2c" + name: "bn3d_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 0 } + } +} + +layer { 
+ bottom: "res3d_branch2c" + top: "res3d_branch2c" + name: "scale3d_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3c" + bottom: "res3d_branch2c" + top: "res3d" + name: "res3d" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res3d" + top: "res3d" + name: "res3d_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res3d" + top: "res4a_branch1" + name: "res4a_branch1" + type: "Convolution" + convolution_param { + + num_output: 1024 + kernel_size: 1 + pad: 0 + stride: 2 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4a_branch1" + top: "res4a_branch1" + name: "bn4a_branch1" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4a_branch1" + top: "res4a_branch1" + name: "scale4a_branch1" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res3d" + top: "res4a_branch2a" + name: "res4a_branch2a" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4a_branch2a" + top: "res4a_branch2a" + name: "bn4a_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4a_branch2a" + top: "res4a_branch2a" + name: "scale4a_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4a_branch2a" + top: "res4a_branch2a" + name: 
"res4a_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4a_branch2a" + top: "res4a_branch2b" + name: "res4a_branch2b" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 3 + pad: 1 + stride: 2 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4a_branch2b" + top: "res4a_branch2b" + name: "bn4a_branch2b" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4a_branch2b" + top: "res4a_branch2b" + name: "scale4a_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4a_branch2b" + top: "res4a_branch2b" + name: "res4a_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4a_branch2b" + top: "res4a_branch2c" + name: "res4a_branch2c" + type: "Convolution" + convolution_param { + + num_output: 1024 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4a_branch2c" + top: "res4a_branch2c" + name: "bn4a_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4a_branch2c" + top: "res4a_branch2c" + name: "scale4a_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4a_branch1" + bottom: "res4a_branch2c" + top: "res4a" + name: "res4a" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res4a" + top: "res4a" + name: "res4a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4a" + top: 
"res4b_branch2a" + name: "res4b_branch2a" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4b_branch2a" + top: "res4b_branch2a" + name: "bn4b_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4b_branch2a" + top: "res4b_branch2a" + name: "scale4b_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4b_branch2a" + top: "res4b_branch2a" + name: "res4b_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4b_branch2a" + top: "res4b_branch2b" + name: "res4b_branch2b" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4b_branch2b" + top: "res4b_branch2b" + name: "bn4b_branch2b" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4b_branch2b" + top: "res4b_branch2b" + name: "scale4b_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4b_branch2b" + top: "res4b_branch2b" + name: "res4b_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4b_branch2b" + top: "res4b_branch2c" + name: "res4b_branch2c" + type: "Convolution" + convolution_param { + + num_output: 1024 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: 
"constant" + value: 0 + } + } +} + +layer { + bottom: "res4b_branch2c" + top: "res4b_branch2c" + name: "bn4b_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4b_branch2c" + top: "res4b_branch2c" + name: "scale4b_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4a" + bottom: "res4b_branch2c" + top: "res4b" + name: "res4b" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res4b" + top: "res4b" + name: "res4b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4b" + top: "res4c_branch2a" + name: "res4c_branch2a" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4c_branch2a" + top: "res4c_branch2a" + name: "bn4c_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4c_branch2a" + top: "res4c_branch2a" + name: "scale4c_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4c_branch2a" + top: "res4c_branch2a" + name: "res4c_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4c_branch2a" + top: "res4c_branch2b" + name: "res4c_branch2b" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4c_branch2b" + top: "res4c_branch2b" + name: "bn4c_branch2b" + type: 
"BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4c_branch2b" + top: "res4c_branch2b" + name: "scale4c_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4c_branch2b" + top: "res4c_branch2b" + name: "res4c_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4c_branch2b" + top: "res4c_branch2c" + name: "res4c_branch2c" + type: "Convolution" + convolution_param { + + num_output: 1024 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4c_branch2c" + top: "res4c_branch2c" + name: "bn4c_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4c_branch2c" + top: "res4c_branch2c" + name: "scale4c_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4b" + bottom: "res4c_branch2c" + top: "res4c" + name: "res4c" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res4c" + top: "res4c" + name: "res4c_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4c" + top: "res4d_branch2a" + name: "res4d_branch2a" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4d_branch2a" + top: "res4d_branch2a" + name: "bn4d_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 
0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4d_branch2a" + top: "res4d_branch2a" + name: "scale4d_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4d_branch2a" + top: "res4d_branch2a" + name: "res4d_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4d_branch2a" + top: "res4d_branch2b" + name: "res4d_branch2b" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4d_branch2b" + top: "res4d_branch2b" + name: "bn4d_branch2b" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4d_branch2b" + top: "res4d_branch2b" + name: "scale4d_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4d_branch2b" + top: "res4d_branch2b" + name: "res4d_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4d_branch2b" + top: "res4d_branch2c" + name: "res4d_branch2c" + type: "Convolution" + convolution_param { + + num_output: 1024 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4d_branch2c" + top: "res4d_branch2c" + name: "bn4d_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4d_branch2c" + top: "res4d_branch2c" + name: "scale4d_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true 
+ } +} + +layer { + bottom: "res4c" + bottom: "res4d_branch2c" + top: "res4d" + name: "res4d" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res4d" + top: "res4d" + name: "res4d_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4d" + top: "res4e_branch2a" + name: "res4e_branch2a" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4e_branch2a" + top: "res4e_branch2a" + name: "bn4e_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4e_branch2a" + top: "res4e_branch2a" + name: "scale4e_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4e_branch2a" + top: "res4e_branch2a" + name: "res4e_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4e_branch2a" + top: "res4e_branch2b" + name: "res4e_branch2b" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4e_branch2b" + top: "res4e_branch2b" + name: "bn4e_branch2b" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4e_branch2b" + top: "res4e_branch2b" + name: "scale4e_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4e_branch2b" + top: "res4e_branch2b" + name: "res4e_branch2b_relu" + type: "ReLU" + relu_param { 
+ + } +} + +layer { + bottom: "res4e_branch2b" + top: "res4e_branch2c" + name: "res4e_branch2c" + type: "Convolution" + convolution_param { + + num_output: 1024 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4e_branch2c" + top: "res4e_branch2c" + name: "bn4e_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4e_branch2c" + top: "res4e_branch2c" + name: "scale4e_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4d" + bottom: "res4e_branch2c" + top: "res4e" + name: "res4e" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res4e" + top: "res4e" + name: "res4e_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4e" + top: "res4f_branch2a" + name: "res4f_branch2a" + type: "Convolution" + convolution_param { + + num_output: 256 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4f_branch2a" + top: "res4f_branch2a" + name: "bn4f_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4f_branch2a" + top: "res4f_branch2a" + name: "scale4f_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4f_branch2a" + top: "res4f_branch2a" + name: "res4f_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4f_branch2a" + top: "res4f_branch2b" + name: "res4f_branch2b" + type: "Convolution" + 
convolution_param { + + num_output: 256 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4f_branch2b" + top: "res4f_branch2b" + name: "bn4f_branch2b" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res4f_branch2b" + top: "res4f_branch2b" + name: "scale4f_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4f_branch2b" + top: "res4f_branch2b" + name: "res4f_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4f_branch2b" + top: "res4f_branch2c" + name: "res4f_branch2c" + type: "Convolution" + convolution_param { + + num_output: 1024 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res4f_branch2c" + top: "res4f_branch2c" + name: "bn4f_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 0 } + } +} + +layer { + bottom: "res4f_branch2c" + top: "res4f_branch2c" + name: "scale4f_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4e" + bottom: "res4f_branch2c" + top: "res4f" + name: "res4f" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res4f" + top: "res4f" + name: "res4f_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res4f" + top: "res5a_branch1" + name: "res5a_branch1" + type: "Convolution" + convolution_param { + + num_output: 2048 + kernel_size: 1 + pad: 0 + stride: 2 + bias_term: false + weight_filler { + type: "msra" 
+ } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res5a_branch1" + top: "res5a_branch1" + name: "bn5a_branch1" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res5a_branch1" + top: "res5a_branch1" + name: "scale5a_branch1" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res4f" + top: "res5a_branch2a" + name: "res5a_branch2a" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res5a_branch2a" + top: "res5a_branch2a" + name: "bn5a_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res5a_branch2a" + top: "res5a_branch2a" + name: "scale5a_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5a_branch2a" + top: "res5a_branch2a" + name: "res5a_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5a_branch2a" + top: "res5a_branch2b" + name: "res5a_branch2b" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 3 + pad: 1 + stride: 2 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res5a_branch2b" + top: "res5a_branch2b" + name: "bn5a_branch2b" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res5a_branch2b" + top: 
"res5a_branch2b" + name: "scale5a_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5a_branch2b" + top: "res5a_branch2b" + name: "res5a_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5a_branch2b" + top: "res5a_branch2c" + name: "res5a_branch2c" + type: "Convolution" + convolution_param { + + num_output: 2048 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res5a_branch2c" + top: "res5a_branch2c" + name: "bn5a_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res5a_branch2c" + top: "res5a_branch2c" + name: "scale5a_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5a_branch1" + bottom: "res5a_branch2c" + top: "res5a" + name: "res5a" + type: "Eltwise" + eltwise_param { + + } +} + +layer { + bottom: "res5a" + top: "res5a" + name: "res5a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5a" + top: "res5b_branch2a" + name: "res5b_branch2a" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res5b_branch2a" + top: "res5b_branch2a" + name: "bn5b_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res5b_branch2a" + top: "res5b_branch2a" + name: "scale5b_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + 
scale_param { + bias_term: true + } +} + +layer { + bottom: "res5b_branch2a" + top: "res5b_branch2a" + name: "res5b_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5b_branch2a" + top: "res5b_branch2b" + name: "res5b_branch2b" + type: "Convolution" + convolution_param { + + num_output: 512 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res5b_branch2b" + top: "res5b_branch2b" + name: "bn5b_branch2b" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res5b_branch2b" + top: "res5b_branch2b" + name: "scale5b_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5b_branch2b" + top: "res5b_branch2b" + name: "res5b_branch2b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5b_branch2b" + top: "res5b_branch2c" + name: "res5b_branch2c" + type: "Convolution" + convolution_param { + + num_output: 2048 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res5b_branch2c" + top: "res5b_branch2c" + name: "bn5b_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res5b_branch2c" + top: "res5b_branch2c" + name: "scale5b_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5a" + bottom: "res5b_branch2c" + top: "res5b" + name: "res5b" + type: "Eltwise" + eltwise_param { + } +} + +layer { + bottom: "res5b" + top: "res5b" + name: 
"res5b_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5b" + top: "res5c_branch2a" + name: "res5c_branch2a" + type: "Convolution" + convolution_param { + num_output: 512 + kernel_size: 1 + pad: 0 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res5c_branch2a" + top: "res5c_branch2a" + name: "bn5c_branch2a" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res5c_branch2a" + top: "res5c_branch2a" + name: "scale5c_branch2a" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5c_branch2a" + top: "res5c_branch2a" + name: "res5c_branch2a_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5c_branch2a" + top: "res5c_branch2b" + name: "res5c_branch2b" + type: "Convolution" + convolution_param { + num_output: 512 + kernel_size: 3 + pad: 1 + stride: 1 + bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res5c_branch2b" + top: "res5c_branch2b" + name: "bn5c_branch2b" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 1 } + } +} + +layer { + bottom: "res5c_branch2b" + top: "res5c_branch2b" + name: "scale5c_branch2b" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5c_branch2b" + top: "res5c_branch2b" + name: "res5c_branch2b_relu" + type: "ReLU" + relu_param { + } +} + +layer { + bottom: "res5c_branch2b" + top: "res5c_branch2c" + name: "res5c_branch2c" + type: "Convolution" + convolution_param { + num_output: 2048 + kernel_size: 1 + pad: 0 + stride: 1 
+ bias_term: false + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "res5c_branch2c" + top: "res5c_branch2c" + name: "bn5c_branch2c" + type: "BatchNorm" + param { lr_mult: 0 } + param { lr_mult: 0 } + param { lr_mult: 0 } + batch_norm_param { + moving_average_fraction: 0.9 + filler { value: 0 } + } +} + +layer { + bottom: "res5c_branch2c" + top: "res5c_branch2c" + name: "scale5c_branch2c" + type: "Scale" + param { decay_mult: 0 } + param { decay_mult: 0 } + scale_param { + bias_term: true + } +} + +layer { + bottom: "res5b" + bottom: "res5c_branch2c" + top: "res5c" + name: "res5c" + type: "Eltwise" + eltwise_param { + } +} + +layer { + bottom: "res5c" + top: "res5c" + name: "res5c_relu" + type: "ReLU" + relu_param { + + } +} + +layer { + bottom: "res5c" + top: "pool5" + name: "pool5" + type: "Pooling" + pooling_param { + kernel_size: 7 + stride: 1 + pool: AVE + } +} + +layer { + bottom: "pool5" + top: "fc1000" + name: "fc1000" + type: "InnerProduct" + inner_product_param { + num_output: 1000 + weight_filler { + type: "gaussian" + std: 0.01 + } + bias_filler { + type: "constant" + value: 0 + } + } +} + +layer { + bottom: "fc1000" + bottom: "label" + top: "loss" + name: "prob" + type: "SoftmaxWithLoss" +} +layer { + name: "loss3/top-1" + type: "Accuracy" + bottom: "fc1000" + bottom: "label" + top: "loss3/top-1" +} +layer { + name: "loss3/top-5" + type: "Accuracy" + bottom: "fc1000" + bottom: "label" + top: "loss3/top-5" + accuracy_param { + top_k: 5 + } +} diff --git a/models/intel_optimized_models/resnet_50/solver.prototxt b/models/intel_optimized_models/resnet_50/solver.prototxt new file mode 100644 index 00000000000..4574a306f71 --- /dev/null +++ b/models/intel_optimized_models/resnet_50/solver.prototxt @@ -0,0 +1,17 @@ +#This solver is described by Computer Vision Group Jena (CVGJ) in [ImageNet pre-trained models with batch normalization] (https://arxiv.org/pdf/1612.01452.pdf) +net: 
"models/intel_optimized_models/resnet_50/train_val.prototxt" +test_iter: 5000 +test_interval: 15000 +base_lr: 0.1 +display: 20 +iter_size: 2 +max_iter: 320000 +lr_policy: "poly" +power: 1 +momentum: 0.9 +weight_decay: 0.0001 +snapshot: 30000 +snapshot_prefix: "models/intel_optimized_models/resnet_50/caffe-resnet50" +test_initialization: false +solver_mode: CPU + diff --git a/models/intel_optimized_models/resnet_50/train_val.prototxt b/models/intel_optimized_models/resnet_50/train_val.prototxt new file mode 100644 index 00000000000..6aadf5ca51a --- /dev/null +++ b/models/intel_optimized_models/resnet_50/train_val.prototxt @@ -0,0 +1,2329 @@ +#This is Intel(R) optimized (in terms of time to train) version of topology described in the [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) publication. +# +#Top-5 and Top-1 results achieved with this topology: +#Top-5: 92% +#Top-1: 73.9% +#Training was performed using server equipped with Intel(R) Xeon Phi(TM) CPU 7250 processor. 
+ +layer { +name: "data" +type: "Data" +top: "data" +top: "label" +include { + phase: TRAIN +} +transform_param { + scale: 0.0078125 + mirror: true + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 +} + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 128 + backend: LMDB + shuffle: true + } + +} +layer { +name: "data" +type: "Data" +top: "data" +top: "label" +include { + phase: TEST +} +transform_param { + scale: 0.0078125 + mirror: false + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 +} + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb/" + batch_size: 10 + backend: LMDB + } + +} + +layer { +name: "conv1" +type: "Convolution" +bottom: "data" +top: "conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +param { + lr_mult: 2.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 64 + pad: 3 + kernel_size: 7 + stride: 2 + weight_filler { + type: "msra" + variance_norm: FAN_OUT + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "conv1_bn" +type: "BatchNorm" +bottom: "conv1" +top: "conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "conv1_relu" +type: "ReLU" +bottom: "conv1_pcs_arm_sim" +top: "conv1_pcs_arm_sim" + +} +layer { +name: "conv1_pool" +type: "Pooling" +bottom: "conv1_pcs_arm_sim" +top: "conv1_pool" +pooling_param { + kernel_size: 3 + stride: 2 +} + +} +layer { +name: "layer_64_1_conv1" +type: "Convolution" +bottom: "conv1_pool" +top: "layer_64_1_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 64 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_1_bn2" +type: "BatchNorm" +bottom: "layer_64_1_conv1" +top: "layer_64_1_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_1_relu2" +type: "ReLU" +bottom: "layer_64_1_conv1_pcs_arm_sim" +top: 
"layer_64_1_conv1_pcs_arm_sim" + +} +layer { +name: "layer_64_1_conv2" +type: "Convolution" +bottom: "layer_64_1_conv1_pcs_arm_sim" +top: "layer_64_1_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 64 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_1_bn3" +type: "BatchNorm" +bottom: "layer_64_1_conv2" +top: "layer_64_1_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_1_relu3" +type: "ReLU" +bottom: "layer_64_1_conv2_pcs_arm_sim" +top: "layer_64_1_conv2_pcs_arm_sim" + +} +layer { +name: "layer_64_1_conv3" +type: "Convolution" +bottom: "layer_64_1_conv2_pcs_arm_sim" +top: "layer_64_1_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_1_conv_expand" +type: "Convolution" +bottom: "layer_64_1_conv1_pcs_arm_sim" +top: "layer_64_1_conv_expand" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_1_sum" +type: "Eltwise" +bottom: "layer_64_1_conv3" +bottom: "layer_64_1_conv_expand" +top: "layer_64_1_sum" + +} +layer { +name: "layer_64_2_bn1" +type: "BatchNorm" +bottom: "layer_64_1_sum" +top: "layer_64_2_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_2_relu1" +type: "ReLU" +bottom: "layer_64_2_bn1_pcs_arm_sim" +top: "layer_64_2_bn1_pcs_arm_sim" + +} +layer { +name: "layer_64_2_conv1" +type: "Convolution" +bottom: "layer_64_2_bn1_pcs_arm_sim" +top: "layer_64_2_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + 
num_output: 64 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_2_bn2" +type: "BatchNorm" +bottom: "layer_64_2_conv1" +top: "layer_64_2_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_2_relu2" +type: "ReLU" +bottom: "layer_64_2_conv1_pcs_arm_sim" +top: "layer_64_2_conv1_pcs_arm_sim" + +} +layer { +name: "layer_64_2_conv2" +type: "Convolution" +bottom: "layer_64_2_conv1_pcs_arm_sim" +top: "layer_64_2_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 64 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_2_bn3" +type: "BatchNorm" +bottom: "layer_64_2_conv2" +top: "layer_64_2_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_2_relu3" +type: "ReLU" +bottom: "layer_64_2_conv2_pcs_arm_sim" +top: "layer_64_2_conv2_pcs_arm_sim" + +} +layer { +name: "layer_64_2_conv3" +type: "Convolution" +bottom: "layer_64_2_conv2_pcs_arm_sim" +top: "layer_64_2_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_2_sum" +type: "Eltwise" +bottom: "layer_64_2_conv3" +bottom: "layer_64_1_sum" +top: "layer_64_2_sum" + +} +layer { +name: "layer_64_3_bn1" +type: "BatchNorm" +bottom: "layer_64_2_sum" +top: "layer_64_3_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_3_relu1" +type: "ReLU" +bottom: "layer_64_3_bn1_pcs_arm_sim" +top: "layer_64_3_bn1_pcs_arm_sim" + +} +layer { +name: "layer_64_3_conv1" +type: "Convolution" +bottom: "layer_64_3_bn1_pcs_arm_sim" +top: "layer_64_3_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 
+} +convolution_param { + num_output: 64 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_3_bn2" +type: "BatchNorm" +bottom: "layer_64_3_conv1" +top: "layer_64_3_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_3_relu2" +type: "ReLU" +bottom: "layer_64_3_conv1_pcs_arm_sim" +top: "layer_64_3_conv1_pcs_arm_sim" + +} +layer { +name: "layer_64_3_conv2" +type: "Convolution" +bottom: "layer_64_3_conv1_pcs_arm_sim" +top: "layer_64_3_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 64 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_3_bn3" +type: "BatchNorm" +bottom: "layer_64_3_conv2" +top: "layer_64_3_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_64_3_relu3" +type: "ReLU" +bottom: "layer_64_3_conv2_pcs_arm_sim" +top: "layer_64_3_conv2_pcs_arm_sim" + +} +layer { +name: "layer_64_3_conv3" +type: "Convolution" +bottom: "layer_64_3_conv2_pcs_arm_sim" +top: "layer_64_3_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_64_3_sum" +type: "Eltwise" +bottom: "layer_64_3_conv3" +bottom: "layer_64_2_sum" +top: "layer_64_3_sum" + +} +layer { +name: "layer_128_1_bn1" +type: "BatchNorm" +bottom: "layer_64_3_sum" +top: "layer_128_1_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_1_relu1" +type: "ReLU" +bottom: "layer_128_1_bn1_pcs_arm_sim" +top: "layer_128_1_bn1_pcs_arm_sim" + +} +layer { +name: "layer_128_1_conv1" +type: "Convolution" +bottom: "layer_128_1_bn1_pcs_arm_sim" +top: "layer_128_1_conv1" +param { 
+ lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_1_bn2" +type: "BatchNorm" +bottom: "layer_128_1_conv1" +top: "layer_128_1_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_1_relu2" +type: "ReLU" +bottom: "layer_128_1_conv1_pcs_arm_sim" +top: "layer_128_1_conv1_pcs_arm_sim" + +} +layer { +name: "layer_128_1_conv2" +type: "Convolution" +bottom: "layer_128_1_conv1_pcs_arm_sim" +top: "layer_128_1_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_1_bn3" +type: "BatchNorm" +bottom: "layer_128_1_conv2" +top: "layer_128_1_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_1_relu3" +type: "ReLU" +bottom: "layer_128_1_conv2_pcs_arm_sim" +top: "layer_128_1_conv2_pcs_arm_sim" + +} +layer { +name: "layer_128_1_conv3" +type: "Convolution" +bottom: "layer_128_1_conv2_pcs_arm_sim" +top: "layer_128_1_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_1_conv_expand" +type: "Convolution" +bottom: "layer_128_1_bn1_pcs_arm_sim" +top: "layer_128_1_conv_expand" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 2 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_1_sum" +type: "Eltwise" +bottom: "layer_128_1_conv3" +bottom: 
"layer_128_1_conv_expand" +top: "layer_128_1_sum" + +} +layer { +name: "layer_128_2_bn1" +type: "BatchNorm" +bottom: "layer_128_1_sum" +top: "layer_128_2_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_2_relu1" +type: "ReLU" +bottom: "layer_128_2_bn1_pcs_arm_sim" +top: "layer_128_2_bn1_pcs_arm_sim" + +} +layer { +name: "layer_128_2_conv1" +type: "Convolution" +bottom: "layer_128_2_bn1_pcs_arm_sim" +top: "layer_128_2_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_2_bn2" +type: "BatchNorm" +bottom: "layer_128_2_conv1" +top: "layer_128_2_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_2_relu2" +type: "ReLU" +bottom: "layer_128_2_conv1_pcs_arm_sim" +top: "layer_128_2_conv1_pcs_arm_sim" + +} +layer { +name: "layer_128_2_conv2" +type: "Convolution" +bottom: "layer_128_2_conv1_pcs_arm_sim" +top: "layer_128_2_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_2_bn3" +type: "BatchNorm" +bottom: "layer_128_2_conv2" +top: "layer_128_2_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_2_relu3" +type: "ReLU" +bottom: "layer_128_2_conv2_pcs_arm_sim" +top: "layer_128_2_conv2_pcs_arm_sim" + +} +layer { +name: "layer_128_2_conv3" +type: "Convolution" +bottom: "layer_128_2_conv2_pcs_arm_sim" +top: "layer_128_2_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: 
"layer_128_2_sum" +type: "Eltwise" +bottom: "layer_128_2_conv3" +bottom: "layer_128_1_sum" +top: "layer_128_2_sum" + +} +layer { +name: "layer_128_3_bn1" +type: "BatchNorm" +bottom: "layer_128_2_sum" +top: "layer_128_3_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_3_relu1" +type: "ReLU" +bottom: "layer_128_3_bn1_pcs_arm_sim" +top: "layer_128_3_bn1_pcs_arm_sim" + +} +layer { +name: "layer_128_3_conv1" +type: "Convolution" +bottom: "layer_128_3_bn1_pcs_arm_sim" +top: "layer_128_3_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_3_bn2" +type: "BatchNorm" +bottom: "layer_128_3_conv1" +top: "layer_128_3_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_3_relu2" +type: "ReLU" +bottom: "layer_128_3_conv1_pcs_arm_sim" +top: "layer_128_3_conv1_pcs_arm_sim" + +} +layer { +name: "layer_128_3_conv2" +type: "Convolution" +bottom: "layer_128_3_conv1_pcs_arm_sim" +top: "layer_128_3_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_3_bn3" +type: "BatchNorm" +bottom: "layer_128_3_conv2" +top: "layer_128_3_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_3_relu3" +type: "ReLU" +bottom: "layer_128_3_conv2_pcs_arm_sim" +top: "layer_128_3_conv2_pcs_arm_sim" + +} +layer { +name: "layer_128_3_conv3" +type: "Convolution" +bottom: "layer_128_3_conv2_pcs_arm_sim" +top: "layer_128_3_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + 
type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_3_sum" +type: "Eltwise" +bottom: "layer_128_3_conv3" +bottom: "layer_128_2_sum" +top: "layer_128_3_sum" + +} +layer { +name: "layer_128_4_bn1" +type: "BatchNorm" +bottom: "layer_128_3_sum" +top: "layer_128_4_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_4_relu1" +type: "ReLU" +bottom: "layer_128_4_bn1_pcs_arm_sim" +top: "layer_128_4_bn1_pcs_arm_sim" + +} +layer { +name: "layer_128_4_conv1" +type: "Convolution" +bottom: "layer_128_4_bn1_pcs_arm_sim" +top: "layer_128_4_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_4_bn2" +type: "BatchNorm" +bottom: "layer_128_4_conv1" +top: "layer_128_4_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_4_relu2" +type: "ReLU" +bottom: "layer_128_4_conv1_pcs_arm_sim" +top: "layer_128_4_conv1_pcs_arm_sim" + +} +layer { +name: "layer_128_4_conv2" +type: "Convolution" +bottom: "layer_128_4_conv1_pcs_arm_sim" +top: "layer_128_4_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 128 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_4_bn3" +type: "BatchNorm" +bottom: "layer_128_4_conv2" +top: "layer_128_4_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_128_4_relu3" +type: "ReLU" +bottom: "layer_128_4_conv2_pcs_arm_sim" +top: "layer_128_4_conv2_pcs_arm_sim" + +} +layer { +name: "layer_128_4_conv3" +type: "Convolution" +bottom: "layer_128_4_conv2_pcs_arm_sim" +top: "layer_128_4_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + 
stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_128_4_sum" +type: "Eltwise" +bottom: "layer_128_4_conv3" +bottom: "layer_128_3_sum" +top: "layer_128_4_sum" + +} +layer { +name: "layer_256_1_bn1" +type: "BatchNorm" +bottom: "layer_128_4_sum" +top: "layer_256_1_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_1_relu1" +type: "ReLU" +bottom: "layer_256_1_bn1_pcs_arm_sim" +top: "layer_256_1_bn1_pcs_arm_sim" + +} +layer { +name: "layer_256_1_conv1" +type: "Convolution" +bottom: "layer_256_1_bn1_pcs_arm_sim" +top: "layer_256_1_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_1_bn2" +type: "BatchNorm" +bottom: "layer_256_1_conv1" +top: "layer_256_1_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_1_relu2" +type: "ReLU" +bottom: "layer_256_1_conv1_pcs_arm_sim" +top: "layer_256_1_conv1_pcs_arm_sim" + +} +layer { +name: "layer_256_1_conv2" +type: "Convolution" +bottom: "layer_256_1_conv1_pcs_arm_sim" +top: "layer_256_1_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_1_bn3" +type: "BatchNorm" +bottom: "layer_256_1_conv2" +top: "layer_256_1_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_1_relu3" +type: "ReLU" +bottom: "layer_256_1_conv2_pcs_arm_sim" +top: "layer_256_1_conv2_pcs_arm_sim" + +} +layer { +name: "layer_256_1_conv3" +type: "Convolution" +bottom: "layer_256_1_conv2_pcs_arm_sim" +top: "layer_256_1_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + 
num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_1_conv_expand" +type: "Convolution" +bottom: "layer_256_1_bn1_pcs_arm_sim" +top: "layer_256_1_conv_expand" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 2 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_1_sum" +type: "Eltwise" +bottom: "layer_256_1_conv3" +bottom: "layer_256_1_conv_expand" +top: "layer_256_1_sum" + +} +layer { +name: "layer_256_2_bn1" +type: "BatchNorm" +bottom: "layer_256_1_sum" +top: "layer_256_2_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_2_relu1" +type: "ReLU" +bottom: "layer_256_2_bn1_pcs_arm_sim" +top: "layer_256_2_bn1_pcs_arm_sim" + +} +layer { +name: "layer_256_2_conv1" +type: "Convolution" +bottom: "layer_256_2_bn1_pcs_arm_sim" +top: "layer_256_2_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_2_bn2" +type: "BatchNorm" +bottom: "layer_256_2_conv1" +top: "layer_256_2_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_2_relu2" +type: "ReLU" +bottom: "layer_256_2_conv1_pcs_arm_sim" +top: "layer_256_2_conv1_pcs_arm_sim" + +} +layer { +name: "layer_256_2_conv2" +type: "Convolution" +bottom: "layer_256_2_conv1_pcs_arm_sim" +top: "layer_256_2_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_2_bn3" 
+type: "BatchNorm" +bottom: "layer_256_2_conv2" +top: "layer_256_2_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_2_relu3" +type: "ReLU" +bottom: "layer_256_2_conv2_pcs_arm_sim" +top: "layer_256_2_conv2_pcs_arm_sim" + +} +layer { +name: "layer_256_2_conv3" +type: "Convolution" +bottom: "layer_256_2_conv2_pcs_arm_sim" +top: "layer_256_2_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_2_sum" +type: "Eltwise" +bottom: "layer_256_2_conv3" +bottom: "layer_256_1_sum" +top: "layer_256_2_sum" + +} +layer { +name: "layer_256_3_bn1" +type: "BatchNorm" +bottom: "layer_256_2_sum" +top: "layer_256_3_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_3_relu1" +type: "ReLU" +bottom: "layer_256_3_bn1_pcs_arm_sim" +top: "layer_256_3_bn1_pcs_arm_sim" + +} +layer { +name: "layer_256_3_conv1" +type: "Convolution" +bottom: "layer_256_3_bn1_pcs_arm_sim" +top: "layer_256_3_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_3_bn2" +type: "BatchNorm" +bottom: "layer_256_3_conv1" +top: "layer_256_3_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_3_relu2" +type: "ReLU" +bottom: "layer_256_3_conv1_pcs_arm_sim" +top: "layer_256_3_conv1_pcs_arm_sim" + +} +layer { +name: "layer_256_3_conv2" +type: "Convolution" +bottom: "layer_256_3_conv1_pcs_arm_sim" +top: "layer_256_3_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" 
+ value: 0.0 + } +} + +} +layer { +name: "layer_256_3_bn3" +type: "BatchNorm" +bottom: "layer_256_3_conv2" +top: "layer_256_3_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_3_relu3" +type: "ReLU" +bottom: "layer_256_3_conv2_pcs_arm_sim" +top: "layer_256_3_conv2_pcs_arm_sim" + +} +layer { +name: "layer_256_3_conv3" +type: "Convolution" +bottom: "layer_256_3_conv2_pcs_arm_sim" +top: "layer_256_3_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_3_sum" +type: "Eltwise" +bottom: "layer_256_3_conv3" +bottom: "layer_256_2_sum" +top: "layer_256_3_sum" + +} +layer { +name: "layer_256_4_bn1" +type: "BatchNorm" +bottom: "layer_256_3_sum" +top: "layer_256_4_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_4_relu1" +type: "ReLU" +bottom: "layer_256_4_bn1_pcs_arm_sim" +top: "layer_256_4_bn1_pcs_arm_sim" + +} +layer { +name: "layer_256_4_conv1" +type: "Convolution" +bottom: "layer_256_4_bn1_pcs_arm_sim" +top: "layer_256_4_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_4_bn2" +type: "BatchNorm" +bottom: "layer_256_4_conv1" +top: "layer_256_4_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_4_relu2" +type: "ReLU" +bottom: "layer_256_4_conv1_pcs_arm_sim" +top: "layer_256_4_conv1_pcs_arm_sim" + +} +layer { +name: "layer_256_4_conv2" +type: "Convolution" +bottom: "layer_256_4_conv1_pcs_arm_sim" +top: "layer_256_4_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + 
weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_4_bn3" +type: "BatchNorm" +bottom: "layer_256_4_conv2" +top: "layer_256_4_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_4_relu3" +type: "ReLU" +bottom: "layer_256_4_conv2_pcs_arm_sim" +top: "layer_256_4_conv2_pcs_arm_sim" + +} +layer { +name: "layer_256_4_conv3" +type: "Convolution" +bottom: "layer_256_4_conv2_pcs_arm_sim" +top: "layer_256_4_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_4_sum" +type: "Eltwise" +bottom: "layer_256_4_conv3" +bottom: "layer_256_3_sum" +top: "layer_256_4_sum" + +} +layer { +name: "layer_256_5_bn1" +type: "BatchNorm" +bottom: "layer_256_4_sum" +top: "layer_256_5_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_5_relu1" +type: "ReLU" +bottom: "layer_256_5_bn1_pcs_arm_sim" +top: "layer_256_5_bn1_pcs_arm_sim" + +} +layer { +name: "layer_256_5_conv1" +type: "Convolution" +bottom: "layer_256_5_bn1_pcs_arm_sim" +top: "layer_256_5_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_5_bn2" +type: "BatchNorm" +bottom: "layer_256_5_conv1" +top: "layer_256_5_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_5_relu2" +type: "ReLU" +bottom: "layer_256_5_conv1_pcs_arm_sim" +top: "layer_256_5_conv1_pcs_arm_sim" + +} +layer { +name: "layer_256_5_conv2" +type: "Convolution" +bottom: "layer_256_5_conv1_pcs_arm_sim" +top: "layer_256_5_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 
+ bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_5_bn3" +type: "BatchNorm" +bottom: "layer_256_5_conv2" +top: "layer_256_5_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_5_relu3" +type: "ReLU" +bottom: "layer_256_5_conv2_pcs_arm_sim" +top: "layer_256_5_conv2_pcs_arm_sim" + +} +layer { +name: "layer_256_5_conv3" +type: "Convolution" +bottom: "layer_256_5_conv2_pcs_arm_sim" +top: "layer_256_5_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_5_sum" +type: "Eltwise" +bottom: "layer_256_5_conv3" +bottom: "layer_256_4_sum" +top: "layer_256_5_sum" + +} +layer { +name: "layer_256_6_bn1" +type: "BatchNorm" +bottom: "layer_256_5_sum" +top: "layer_256_6_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_6_relu1" +type: "ReLU" +bottom: "layer_256_6_bn1_pcs_arm_sim" +top: "layer_256_6_bn1_pcs_arm_sim" + +} +layer { +name: "layer_256_6_conv1" +type: "Convolution" +bottom: "layer_256_6_bn1_pcs_arm_sim" +top: "layer_256_6_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_6_bn2" +type: "BatchNorm" +bottom: "layer_256_6_conv1" +top: "layer_256_6_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_6_relu2" +type: "ReLU" +bottom: "layer_256_6_conv1_pcs_arm_sim" +top: "layer_256_6_conv1_pcs_arm_sim" + +} +layer { +name: "layer_256_6_conv2" +type: "Convolution" +bottom: "layer_256_6_conv1_pcs_arm_sim" +top: "layer_256_6_conv2" +param { + lr_mult: 1.0 
+ decay_mult: 1.0 +} +convolution_param { + num_output: 256 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_6_bn3" +type: "BatchNorm" +bottom: "layer_256_6_conv2" +top: "layer_256_6_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_256_6_relu3" +type: "ReLU" +bottom: "layer_256_6_conv2_pcs_arm_sim" +top: "layer_256_6_conv2_pcs_arm_sim" + +} +layer { +name: "layer_256_6_conv3" +type: "Convolution" +bottom: "layer_256_6_conv2_pcs_arm_sim" +top: "layer_256_6_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 1024 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_256_6_sum" +type: "Eltwise" +bottom: "layer_256_6_conv3" +bottom: "layer_256_5_sum" +top: "layer_256_6_sum" + +} +layer { +name: "layer_512_1_bn1" +type: "BatchNorm" +bottom: "layer_256_6_sum" +top: "layer_512_1_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_1_relu1" +type: "ReLU" +bottom: "layer_512_1_bn1_pcs_arm_sim" +top: "layer_512_1_bn1_pcs_arm_sim" + +} +layer { +name: "layer_512_1_conv1" +type: "Convolution" +bottom: "layer_512_1_bn1_pcs_arm_sim" +top: "layer_512_1_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_1_bn2" +type: "BatchNorm" +bottom: "layer_512_1_conv1" +top: "layer_512_1_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_1_relu2" +type: "ReLU" +bottom: "layer_512_1_conv1_pcs_arm_sim" +top: "layer_512_1_conv1_pcs_arm_sim" + +} +layer { +name: "layer_512_1_conv2" +type: "Convolution" +bottom: 
"layer_512_1_conv1_pcs_arm_sim" +top: "layer_512_1_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_1_bn3" +type: "BatchNorm" +bottom: "layer_512_1_conv2" +top: "layer_512_1_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_1_relu3" +type: "ReLU" +bottom: "layer_512_1_conv2_pcs_arm_sim" +top: "layer_512_1_conv2_pcs_arm_sim" + +} +layer { +name: "layer_512_1_conv3" +type: "Convolution" +bottom: "layer_512_1_conv2_pcs_arm_sim" +top: "layer_512_1_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 2048 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_1_conv_expand" +type: "Convolution" +bottom: "layer_512_1_bn1_pcs_arm_sim" +top: "layer_512_1_conv_expand" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 2048 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 2 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_1_sum" +type: "Eltwise" +bottom: "layer_512_1_conv3" +bottom: "layer_512_1_conv_expand" +top: "layer_512_1_sum" + +} +layer { +name: "layer_512_2_bn1" +type: "BatchNorm" +bottom: "layer_512_1_sum" +top: "layer_512_2_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_2_relu1" +type: "ReLU" +bottom: "layer_512_2_bn1_pcs_arm_sim" +top: "layer_512_2_bn1_pcs_arm_sim" + +} +layer { +name: "layer_512_2_conv1" +type: "Convolution" +bottom: "layer_512_2_bn1_pcs_arm_sim" +top: "layer_512_2_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + 
weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_2_bn2" +type: "BatchNorm" +bottom: "layer_512_2_conv1" +top: "layer_512_2_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_2_relu2" +type: "ReLU" +bottom: "layer_512_2_conv1_pcs_arm_sim" +top: "layer_512_2_conv1_pcs_arm_sim" + +} +layer { +name: "layer_512_2_conv2" +type: "Convolution" +bottom: "layer_512_2_conv1_pcs_arm_sim" +top: "layer_512_2_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_2_bn3" +type: "BatchNorm" +bottom: "layer_512_2_conv2" +top: "layer_512_2_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_2_relu3" +type: "ReLU" +bottom: "layer_512_2_conv2_pcs_arm_sim" +top: "layer_512_2_conv2_pcs_arm_sim" + +} +layer { +name: "layer_512_2_conv3" +type: "Convolution" +bottom: "layer_512_2_conv2_pcs_arm_sim" +top: "layer_512_2_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 2048 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_2_sum" +type: "Eltwise" +bottom: "layer_512_2_conv3" +bottom: "layer_512_1_sum" +top: "layer_512_2_sum" + +} +layer { +name: "layer_512_3_bn1" +type: "BatchNorm" +bottom: "layer_512_2_sum" +top: "layer_512_3_bn1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_3_relu1" +type: "ReLU" +bottom: "layer_512_3_bn1_pcs_arm_sim" +top: "layer_512_3_bn1_pcs_arm_sim" + +} +layer { +name: "layer_512_3_conv1" +type: "Convolution" +bottom: "layer_512_3_bn1_pcs_arm_sim" +top: "layer_512_3_conv1" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 
+ bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_3_bn2" +type: "BatchNorm" +bottom: "layer_512_3_conv1" +top: "layer_512_3_conv1_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_3_relu2" +type: "ReLU" +bottom: "layer_512_3_conv1_pcs_arm_sim" +top: "layer_512_3_conv1_pcs_arm_sim" + +} +layer { +name: "layer_512_3_conv2" +type: "Convolution" +bottom: "layer_512_3_conv1_pcs_arm_sim" +top: "layer_512_3_conv2" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 512 + bias_term: false + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_3_bn3" +type: "BatchNorm" +bottom: "layer_512_3_conv2" +top: "layer_512_3_conv2_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "layer_512_3_relu3" +type: "ReLU" +bottom: "layer_512_3_conv2_pcs_arm_sim" +top: "layer_512_3_conv2_pcs_arm_sim" + +} +layer { +name: "layer_512_3_conv3" +type: "Convolution" +bottom: "layer_512_3_conv2_pcs_arm_sim" +top: "layer_512_3_conv3" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +convolution_param { + num_output: 2048 + bias_term: false + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "msra" + } + bias_filler { + type: "constant" + value: 0.0 + } +} + +} +layer { +name: "layer_512_3_sum" +type: "Eltwise" +bottom: "layer_512_3_conv3" +bottom: "layer_512_2_sum" +top: "layer_512_3_sum" + +} +layer { +name: "last_bn" +type: "BatchNorm" +bottom: "layer_512_3_sum" +top: "layer_512_3_sum_pcs_arm_sim" + batch_norm_param { + } +} +layer { +name: "last_relu" +type: "ReLU" +bottom: "layer_512_3_sum_pcs_arm_sim" +top: "layer_512_3_sum_pcs_arm_sim" + +} +layer { +name: "global_pool" +type: "Pooling" +bottom: "layer_512_3_sum_pcs_arm_sim" +top: "global_pool" +pooling_param { + pool: AVE + global_pooling: true +} + 
+} +layer { +name: "score" +type: "InnerProduct" +bottom: "global_pool" +top: "score" +param { + lr_mult: 1.0 + decay_mult: 1.0 +} +param { + lr_mult: 2.0 + decay_mult: 1.0 +} +inner_product_param { + num_output: 1000 +} + +} +layer { +name: "loss" +type: "SoftmaxWithLoss" +bottom: "score" +bottom: "label" +top: "loss" + +} +#layer { +#name: "accuracy" +#type: "Accuracy" +#bottom: "score" +#bottom: "label" +#top: "accuracy" +#include { +# phase: TEST +#} +layer { + name: "loss3/top-1" + type: "Accuracy" + bottom: "score" + bottom: "label" + top: "loss3/top-1" + include { + phase: TEST + } +} +layer { + name: "loss3/top-5" + type: "Accuracy" + bottom: "score" + bottom: "label" + top: "loss3/top-5" + include { + phase: TEST + } + accuracy_param { + top_k: 5 + } +} + +#} diff --git a/models/intel_optimized_models/ssd/AlexNet/VOC0712/SSD_300x300/deploy.prototxt b/models/intel_optimized_models/ssd/AlexNet/VOC0712/SSD_300x300/deploy.prototxt new file mode 100644 index 00000000000..d37915e2689 --- /dev/null +++ b/models/intel_optimized_models/ssd/AlexNet/VOC0712/SSD_300x300/deploy.prototxt @@ -0,0 +1,1423 @@ +name: "AlexNet_VOC0712_SSD_300x300_deploy" +input: "data" +input_shape { + dim: 1 + dim: 3 + dim: 300 + dim: 300 +} +layer { + engine: "MKL2017" + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 11 + stride: 4 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu1" + type: "ReLU" + bottom: "conv1" + top: "conv1" +} +layer { + engine: "MKL2017" + name: "norm1" + type: "LRN" + bottom: "conv1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + k: 2 + } +} +layer { + engine: "MKL2017" + name: "pool1" + type: "Pooling" + bottom: "norm1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 
2 + } +} +layer { + engine: "MKL2017" + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 2 + kernel_size: 5 + group: 2 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu2" + type: "ReLU" + bottom: "conv2" + top: "conv2" +} +layer { + engine: "MKL2017" + name: "norm2" + type: "LRN" + bottom: "conv2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + k: 2 + } +} +layer { + engine: "MKL2017" + name: "pool2" + type: "Pooling" + bottom: "norm2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + engine: "MKL2017" + name: "conv3" + type: "Convolution" + bottom: "pool2" + top: "conv3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu3" + type: "ReLU" + bottom: "conv3" + top: "conv3" +} +layer { + engine: "MKL2017" + name: "conv4" + type: "Convolution" + bottom: "conv3" + top: "conv4" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu4" + type: "ReLU" + bottom: "conv4" + top: "conv4" +} +layer { + engine: "MKL2017" + name: "conv5" + type: "Convolution" + bottom: "conv4" + top: "conv5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + group: 2 + stride: 
1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu5" + type: "ReLU" + bottom: "conv5" + top: "conv5" +} +layer { + engine: "MKL2017" + name: "pool5" + type: "Pooling" + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + engine: "MKL2017" + name: "fc6_conv" + type: "Convolution" + bottom: "pool5" + top: "fc6_conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + pad: 5 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 5 + } +} +layer { + engine: "MKL2017" + name: "relu6" + type: "ReLU" + bottom: "fc6_conv" + top: "fc6_conv" +} +layer { + engine: "MKL2017" + name: "fc7_conv" + type: "Convolution" + bottom: "fc6_conv" + top: "fc7_conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu7" + type: "ReLU" + bottom: "fc7_conv" + top: "fc7_conv" +} +layer { + engine: "MKL2017" + name: "conv6_1" + type: "Convolution" + bottom: "fc7_conv" + top: "conv6_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv6_1_relu" + type: "ReLU" + bottom: "conv6_1" + top: "conv6_1" +} +layer { + engine: "MKL2017" + name: "conv6_2" + type: "Convolution" + bottom: "conv6_1" + top: "conv6_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 
1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv6_2_relu" + type: "ReLU" + bottom: "conv6_2" + top: "conv6_2" +} +layer { + engine: "MKL2017" + name: "conv7_1" + type: "Convolution" + bottom: "conv6_2" + top: "conv7_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv7_1_relu" + type: "ReLU" + bottom: "conv7_1" + top: "conv7_1" +} +layer { + engine: "MKL2017" + name: "conv7_2" + type: "Convolution" + bottom: "conv7_1" + top: "conv7_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv7_2_relu" + type: "ReLU" + bottom: "conv7_2" + top: "conv7_2" +} +layer { + engine: "MKL2017" + name: "conv8_1" + type: "Convolution" + bottom: "conv7_2" + top: "conv8_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv8_1_relu" + type: "ReLU" + bottom: "conv8_1" + top: "conv8_1" +} +layer { + engine: "MKL2017" + name: "conv8_2" + type: "Convolution" + bottom: "conv8_1" + top: "conv8_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + 
value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv8_2_relu" + type: "ReLU" + bottom: "conv8_2" + top: "conv8_2" +} +layer { + engine: "MKL2017" + name: "conv9_1" + type: "Convolution" + bottom: "conv8_2" + top: "conv9_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv9_1_relu" + type: "ReLU" + bottom: "conv9_1" + top: "conv9_1" +} +layer { + engine: "MKL2017" + name: "conv9_2" + type: "Convolution" + bottom: "conv9_1" + top: "conv9_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv9_2_relu" + type: "ReLU" + bottom: "conv9_2" + top: "conv9_2" +} +layer { + name: "conv2_norm" + type: "Normalize" + bottom: "conv2" + top: "conv2_norm" + norm_param { + across_spatial: false + scale_filler { + type: "constant" + value: 20 + } + channel_shared: false + } +} +layer { + name: "conv2_norm_mbox_loc" + type: "Convolution" + bottom: "conv2_norm" + top: "conv2_norm_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv2_norm_mbox_loc_perm" + type: "Permute" + bottom: "conv2_norm_mbox_loc" + top: "conv2_norm_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv2_norm_mbox_loc_flat" + type: "Flatten" + bottom: "conv2_norm_mbox_loc_perm" + top: "conv2_norm_mbox_loc_flat" + 
flatten_param { + axis: 1 + } +} +layer { + name: "conv2_norm_mbox_conf" + type: "Convolution" + bottom: "conv2_norm" + top: "conv2_norm_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv2_norm_mbox_conf_perm" + type: "Permute" + bottom: "conv2_norm_mbox_conf" + top: "conv2_norm_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv2_norm_mbox_conf_flat" + type: "Flatten" + bottom: "conv2_norm_mbox_conf_perm" + top: "conv2_norm_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv2_norm_mbox_priorbox" + type: "PriorBox" + bottom: "conv2_norm" + bottom: "data" + top: "conv2_norm_mbox_priorbox" + prior_box_param { + min_size: 30.0 + max_size: 60.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 8 + offset: 0.5 + } +} +layer { + engine: "MKL2017" + name: "fc6_conv_mbox_loc" + type: "Convolution" + bottom: "fc6_conv" + top: "fc6_conv_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc6_conv_mbox_loc_perm" + type: "Permute" + bottom: "fc6_conv_mbox_loc" + top: "fc6_conv_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc6_conv_mbox_loc_flat" + type: "Flatten" + bottom: "fc6_conv_mbox_loc_perm" + top: "fc6_conv_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc6_conv_mbox_conf" + type: "Convolution" + bottom: "fc6_conv" + top: "fc6_conv_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } 
+ param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc6_conv_mbox_conf_perm" + type: "Permute" + bottom: "fc6_conv_mbox_conf" + top: "fc6_conv_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc6_conv_mbox_conf_flat" + type: "Flatten" + bottom: "fc6_conv_mbox_conf_perm" + top: "fc6_conv_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc6_conv_mbox_priorbox" + type: "PriorBox" + bottom: "fc6_conv" + bottom: "data" + top: "fc6_conv_mbox_priorbox" + prior_box_param { + min_size: 60.0 + max_size: 111.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 16 + offset: 0.5 + } +} +layer { + engine: "MKL2017" + name: "conv6_2_mbox_loc" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_loc_perm" + type: "Permute" + bottom: "conv6_2_mbox_loc" + top: "conv6_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv6_2_mbox_loc_perm" + top: "conv6_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_conf" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + 
value: 0 + } + } +} +layer { + name: "conv6_2_mbox_conf_perm" + type: "Permute" + bottom: "conv6_2_mbox_conf" + top: "conv6_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv6_2_mbox_conf_perm" + top: "conv6_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv6_2" + bottom: "data" + top: "conv6_2_mbox_priorbox" + prior_box_param { + min_size: 111.0 + max_size: 162.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 32 + offset: 0.5 + } +} +layer { + name: "conv7_2_mbox_loc" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_loc_perm" + type: "Permute" + bottom: "conv7_2_mbox_loc" + top: "conv7_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv7_2_mbox_loc_perm" + top: "conv7_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + engine: "MKL2017" + name: "conv7_2_mbox_conf" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_conf_perm" + type: "Permute" + bottom: "conv7_2_mbox_conf" + top: "conv7_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } 
+} +layer { + name: "conv7_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv7_2_mbox_conf_perm" + top: "conv7_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv7_2" + bottom: "data" + top: "conv7_2_mbox_priorbox" + prior_box_param { + min_size: 162.0 + max_size: 213.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 64 + offset: 0.5 + } +} +layer { + engine: "MKL2017" + name: "conv8_2_mbox_loc" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_loc_perm" + type: "Permute" + bottom: "conv8_2_mbox_loc" + top: "conv8_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv8_2_mbox_loc_perm" + top: "conv8_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + engine: "MKL2017" + name: "conv8_2_mbox_conf" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_conf_perm" + type: "Permute" + bottom: "conv8_2_mbox_conf" + top: "conv8_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv8_2_mbox_conf_perm" + top: "conv8_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: 
"conv8_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv8_2" + bottom: "data" + top: "conv8_2_mbox_priorbox" + prior_box_param { + min_size: 213.0 + max_size: 264.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 100 + offset: 0.5 + } +} +layer { + name: "conv9_2_mbox_loc" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_loc_perm" + type: "Permute" + bottom: "conv9_2_mbox_loc" + top: "conv9_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv9_2_mbox_loc_perm" + top: "conv9_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + engine: "MKL2017" + name: "conv9_2_mbox_conf" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_conf_perm" + type: "Permute" + bottom: "conv9_2_mbox_conf" + top: "conv9_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv9_2_mbox_conf_perm" + top: "conv9_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv9_2" + bottom: "data" + top: "conv9_2_mbox_priorbox" + prior_box_param { + min_size: 264.0 + max_size: 315.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 
0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 300 + offset: 0.5 + } +} +layer { + name: "mbox_loc" + type: "Concat" + bottom: "conv2_norm_mbox_loc_flat" + bottom: "fc6_conv_mbox_loc_flat" + bottom: "conv6_2_mbox_loc_flat" + bottom: "conv7_2_mbox_loc_flat" + bottom: "conv8_2_mbox_loc_flat" + bottom: "conv9_2_mbox_loc_flat" + top: "mbox_loc" + concat_param { + axis: 1 + } + engine: "CAFFE" +} +layer { + name: "mbox_conf" + type: "Concat" + bottom: "conv2_norm_mbox_conf_flat" + bottom: "fc6_conv_mbox_conf_flat" + bottom: "conv6_2_mbox_conf_flat" + bottom: "conv7_2_mbox_conf_flat" + bottom: "conv8_2_mbox_conf_flat" + bottom: "conv9_2_mbox_conf_flat" + top: "mbox_conf" + concat_param { + axis: 1 + } + engine: "CAFFE" +} +layer { + name: "mbox_priorbox" + type: "Concat" + bottom: "conv2_norm_mbox_priorbox" + bottom: "fc6_conv_mbox_priorbox" + bottom: "conv6_2_mbox_priorbox" + bottom: "conv7_2_mbox_priorbox" + bottom: "conv8_2_mbox_priorbox" + bottom: "conv9_2_mbox_priorbox" + top: "mbox_priorbox" + concat_param { + axis: 2 + } + engine: "CAFFE" +} +layer { + name: "mbox_conf_reshape" + type: "Reshape" + bottom: "mbox_conf" + top: "mbox_conf_reshape" + reshape_param { + shape { + dim: 0 + dim: -1 + dim: 21 + } + } +} +layer { + name: "mbox_conf_softmax" + type: "Softmax" + bottom: "mbox_conf_reshape" + top: "mbox_conf_softmax" + softmax_param { + axis: 2 + } +} +layer { + name: "mbox_conf_flatten" + type: "Flatten" + bottom: "mbox_conf_softmax" + top: "mbox_conf_flatten" + flatten_param { + axis: 1 + } +} +layer { + name: "detection_out" + type: "DetectionOutput" + bottom: "mbox_loc" + bottom: "mbox_conf_flatten" + bottom: "mbox_priorbox" + top: "detection_out" + include { + phase: TEST + } + detection_output_param { + num_classes: 21 + share_location: true + background_label_id: 0 + nms_param { + nms_threshold: 0.45 + top_k: 400 + } + save_output_param { + output_directory: "data/ssd_out/VOC2007/SSD_300x300" + output_name_prefix: "comp4_det_test_" + 
output_format: "VOC" + label_map_file: "data/VOC0712/labelmap_voc.prototxt" + name_size_file: "data/VOC0712/test_name_size.txt" + num_test_image: 4952 + } + code_type: CENTER_SIZE + keep_top_k: 200 + confidence_threshold: 0.01 + } +} + diff --git a/models/intel_optimized_models/ssd/AlexNet/VOC0712/SSD_300x300/solver.prototxt b/models/intel_optimized_models/ssd/AlexNet/VOC0712/SSD_300x300/solver.prototxt new file mode 100644 index 00000000000..2d0ce34656b --- /dev/null +++ b/models/intel_optimized_models/ssd/AlexNet/VOC0712/SSD_300x300/solver.prototxt @@ -0,0 +1,27 @@ +train_net: "examples/ssd/AlexNet/VOC0712/SSD_300x300/train.prototxt" +test_net: "examples/ssd/AlexNet/VOC0712/SSD_300x300/test.prototxt" +test_iter: 619 +test_interval: 10000 +base_lr: 0.00025 +display: 10 +max_iter: 120000 +lr_policy: "multistep" +gamma: 0.1 +momentum: 0.9 +weight_decay: 0.0005 +snapshot: 80000 +snapshot_prefix: "examples/ssd/AlexNet/VOC0712/SSD_300x300/AlexNet_VOC0712_SSD_300x300" +solver_mode: CPU +device_id: 0 +debug_info: false +snapshot_after_train: true +test_initialization: false +average_loss: 10 +stepvalue: 80000 +stepvalue: 100000 +stepvalue: 120000 +iter_size: 1 +type: "SGD" +eval_type: "detection" +ap_version: "11point" + diff --git a/models/intel_optimized_models/ssd/AlexNet/VOC0712/SSD_300x300/test.prototxt b/models/intel_optimized_models/ssd/AlexNet/VOC0712/SSD_300x300/test.prototxt new file mode 100644 index 00000000000..0eac716a082 --- /dev/null +++ b/models/intel_optimized_models/ssd/AlexNet/VOC0712/SSD_300x300/test.prototxt @@ -0,0 +1,1464 @@ +name: "AlexNet_VOC0712_SSD_300x300_test" +layer { + name: "data" + type: "AnnotatedData" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mean_value: 104 + mean_value: 117 + mean_value: 123 + resize_param { + prob: 1 + resize_mode: WARP + height: 300 + width: 300 + interp_mode: LINEAR + } + } + data_param { + source: "examples/VOC0712/VOC0712_test_lmdb" + batch_size: 8 + backend: LMDB + } + 
annotated_data_param { + batch_sampler { + } + label_map_file: "data/VOC0712/labelmap_voc.prototxt" + } +} +layer { + engine: "MKL2017" + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 11 + stride: 4 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu1" + type: "ReLU" + bottom: "conv1" + top: "conv1" +} +layer { + engine: "MKL2017" + name: "norm1" + type: "LRN" + bottom: "conv1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + k: 2 + } +} +layer { + engine: "MKL2017" + name: "pool1" + type: "Pooling" + bottom: "norm1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + engine: "MKL2017" + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 2 + kernel_size: 5 + group: 2 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu2" + type: "ReLU" + bottom: "conv2" + top: "conv2" +} +layer { + engine: "MKL2017" + name: "norm2" + type: "LRN" + bottom: "conv2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + k: 2 + } +} +layer { + engine: "MKL2017" + name: "pool2" + type: "Pooling" + bottom: "norm2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + engine: "MKL2017" + name: "conv3" + type: "Convolution" + bottom: "pool2" + top: "conv3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + 
type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu3" + type: "ReLU" + bottom: "conv3" + top: "conv3" +} +layer { + engine: "MKL2017" + name: "conv4" + type: "Convolution" + bottom: "conv3" + top: "conv4" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu4" + type: "ReLU" + bottom: "conv4" + top: "conv4" +} +layer { + engine: "MKL2017" + name: "conv5" + type: "Convolution" + bottom: "conv4" + top: "conv5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + group: 2 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu5" + type: "ReLU" + bottom: "conv5" + top: "conv5" +} +layer { + engine: "MKL2017" + name: "pool5" + type: "Pooling" + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + engine: "MKL2017" + name: "fc6_conv" + type: "Convolution" + bottom: "pool5" + top: "fc6_conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + pad: 5 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 5 + } +} +layer { + engine: "MKL2017" + name: "relu6" + type: "ReLU" + bottom: "fc6_conv" + top: "fc6_conv" +} +layer { + engine: "MKL2017" + name: "fc7_conv" + type: "Convolution" + bottom: "fc6_conv" + top: "fc7_conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + kernel_size: 1 + weight_filler { + type: "xavier" 
+ } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu7" + type: "ReLU" + bottom: "fc7_conv" + top: "fc7_conv" +} +layer { + engine: "MKL2017" + name: "conv6_1" + type: "Convolution" + bottom: "fc7_conv" + top: "conv6_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv6_1_relu" + type: "ReLU" + bottom: "conv6_1" + top: "conv6_1" +} +layer { + engine: "MKL2017" + name: "conv6_2" + type: "Convolution" + bottom: "conv6_1" + top: "conv6_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv6_2_relu" + type: "ReLU" + bottom: "conv6_2" + top: "conv6_2" +} +layer { + engine: "MKL2017" + name: "conv7_1" + type: "Convolution" + bottom: "conv6_2" + top: "conv7_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv7_1_relu" + type: "ReLU" + bottom: "conv7_1" + top: "conv7_1" +} +layer { + engine: "MKL2017" + name: "conv7_2" + type: "Convolution" + bottom: "conv7_1" + top: "conv7_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: 
"conv7_2_relu" + type: "ReLU" + bottom: "conv7_2" + top: "conv7_2" +} +layer { + engine: "MKL2017" + name: "conv8_1" + type: "Convolution" + bottom: "conv7_2" + top: "conv8_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv8_1_relu" + type: "ReLU" + bottom: "conv8_1" + top: "conv8_1" +} +layer { + engine: "MKL2017" + name: "conv8_2" + type: "Convolution" + bottom: "conv8_1" + top: "conv8_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv8_2_relu" + type: "ReLU" + bottom: "conv8_2" + top: "conv8_2" +} +layer { + engine: "MKL2017" + name: "conv9_1" + type: "Convolution" + bottom: "conv8_2" + top: "conv9_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv9_1_relu" + type: "ReLU" + bottom: "conv9_1" + top: "conv9_1" +} +layer { + engine: "MKL2017" + name: "conv9_2" + type: "Convolution" + bottom: "conv9_1" + top: "conv9_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv9_2_relu" + type: "ReLU" + bottom: "conv9_2" + top: "conv9_2" +} +layer { + name: "conv2_norm" + type: 
"Normalize" + bottom: "conv2" + top: "conv2_norm" + norm_param { + across_spatial: false + scale_filler { + type: "constant" + value: 20 + } + channel_shared: false + } +} +layer { + name: "conv2_norm_mbox_loc" + type: "Convolution" + bottom: "conv2_norm" + top: "conv2_norm_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv2_norm_mbox_loc_perm" + type: "Permute" + bottom: "conv2_norm_mbox_loc" + top: "conv2_norm_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv2_norm_mbox_loc_flat" + type: "Flatten" + bottom: "conv2_norm_mbox_loc_perm" + top: "conv2_norm_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv2_norm_mbox_conf" + type: "Convolution" + bottom: "conv2_norm" + top: "conv2_norm_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv2_norm_mbox_conf_perm" + type: "Permute" + bottom: "conv2_norm_mbox_conf" + top: "conv2_norm_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv2_norm_mbox_conf_flat" + type: "Flatten" + bottom: "conv2_norm_mbox_conf_perm" + top: "conv2_norm_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv2_norm_mbox_priorbox" + type: "PriorBox" + bottom: "conv2_norm" + bottom: "data" + top: "conv2_norm_mbox_priorbox" + prior_box_param { + min_size: 30.0 + max_size: 60.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 8 + offset: 0.5 + } +} +layer { + 
engine: "MKL2017" + name: "fc6_conv_mbox_loc" + type: "Convolution" + bottom: "fc6_conv" + top: "fc6_conv_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc6_conv_mbox_loc_perm" + type: "Permute" + bottom: "fc6_conv_mbox_loc" + top: "fc6_conv_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc6_conv_mbox_loc_flat" + type: "Flatten" + bottom: "fc6_conv_mbox_loc_perm" + top: "fc6_conv_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc6_conv_mbox_conf" + type: "Convolution" + bottom: "fc6_conv" + top: "fc6_conv_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc6_conv_mbox_conf_perm" + type: "Permute" + bottom: "fc6_conv_mbox_conf" + top: "fc6_conv_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc6_conv_mbox_conf_flat" + type: "Flatten" + bottom: "fc6_conv_mbox_conf_perm" + top: "fc6_conv_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc6_conv_mbox_priorbox" + type: "PriorBox" + bottom: "fc6_conv" + bottom: "data" + top: "fc6_conv_mbox_priorbox" + prior_box_param { + min_size: 60.0 + max_size: 111.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 16 + offset: 0.5 + } +} +layer { + engine: "MKL2017" + name: "conv6_2_mbox_loc" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + 
decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_loc_perm" + type: "Permute" + bottom: "conv6_2_mbox_loc" + top: "conv6_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv6_2_mbox_loc_perm" + top: "conv6_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_conf" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_conf_perm" + type: "Permute" + bottom: "conv6_2_mbox_conf" + top: "conv6_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv6_2_mbox_conf_perm" + top: "conv6_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv6_2" + bottom: "data" + top: "conv6_2_mbox_priorbox" + prior_box_param { + min_size: 111.0 + max_size: 162.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 32 + offset: 0.5 + } +} +layer { + name: "conv7_2_mbox_loc" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: 
"conv7_2_mbox_loc_perm" + type: "Permute" + bottom: "conv7_2_mbox_loc" + top: "conv7_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv7_2_mbox_loc_perm" + top: "conv7_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + engine: "MKL2017" + name: "conv7_2_mbox_conf" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_conf_perm" + type: "Permute" + bottom: "conv7_2_mbox_conf" + top: "conv7_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv7_2_mbox_conf_perm" + top: "conv7_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv7_2" + bottom: "data" + top: "conv7_2_mbox_priorbox" + prior_box_param { + min_size: 162.0 + max_size: 213.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 64 + offset: 0.5 + } +} +layer { + engine: "MKL2017" + name: "conv8_2_mbox_loc" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_loc_perm" + type: "Permute" + bottom: "conv8_2_mbox_loc" + top: "conv8_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: 
"conv8_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv8_2_mbox_loc_perm" + top: "conv8_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + engine: "MKL2017" + name: "conv8_2_mbox_conf" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_conf_perm" + type: "Permute" + bottom: "conv8_2_mbox_conf" + top: "conv8_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv8_2_mbox_conf_perm" + top: "conv8_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv8_2" + bottom: "data" + top: "conv8_2_mbox_priorbox" + prior_box_param { + min_size: 213.0 + max_size: 264.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 100 + offset: 0.5 + } +} +layer { + name: "conv9_2_mbox_loc" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_loc_perm" + type: "Permute" + bottom: "conv9_2_mbox_loc" + top: "conv9_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv9_2_mbox_loc_perm" + top: "conv9_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + engine: "MKL2017" + name: "conv9_2_mbox_conf" + type: "Convolution" + 
bottom: "conv9_2" + top: "conv9_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_conf_perm" + type: "Permute" + bottom: "conv9_2_mbox_conf" + top: "conv9_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv9_2_mbox_conf_perm" + top: "conv9_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv9_2" + bottom: "data" + top: "conv9_2_mbox_priorbox" + prior_box_param { + min_size: 264.0 + max_size: 315.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 300 + offset: 0.5 + } +} +layer { + name: "mbox_loc" + type: "Concat" + bottom: "conv2_norm_mbox_loc_flat" + bottom: "fc6_conv_mbox_loc_flat" + bottom: "conv6_2_mbox_loc_flat" + bottom: "conv7_2_mbox_loc_flat" + bottom: "conv8_2_mbox_loc_flat" + bottom: "conv9_2_mbox_loc_flat" + top: "mbox_loc" + concat_param { + axis: 1 + } + engine: "CAFFE" +} +layer { + name: "mbox_conf" + type: "Concat" + bottom: "conv2_norm_mbox_conf_flat" + bottom: "fc6_conv_mbox_conf_flat" + bottom: "conv6_2_mbox_conf_flat" + bottom: "conv7_2_mbox_conf_flat" + bottom: "conv8_2_mbox_conf_flat" + bottom: "conv9_2_mbox_conf_flat" + top: "mbox_conf" + concat_param { + axis: 1 + } + engine: "CAFFE" +} +layer { + name: "mbox_priorbox" + type: "Concat" + bottom: "conv2_norm_mbox_priorbox" + bottom: "fc6_conv_mbox_priorbox" + bottom: "conv6_2_mbox_priorbox" + bottom: "conv7_2_mbox_priorbox" + bottom: "conv8_2_mbox_priorbox" + bottom: "conv9_2_mbox_priorbox" + top: "mbox_priorbox" + concat_param { + axis: 2 + } + engine: "CAFFE" +} +layer { + name: 
"mbox_conf_reshape" + type: "Reshape" + bottom: "mbox_conf" + top: "mbox_conf_reshape" + reshape_param { + shape { + dim: 0 + dim: -1 + dim: 21 + } + } +} +layer { + name: "mbox_conf_softmax" + type: "Softmax" + bottom: "mbox_conf_reshape" + top: "mbox_conf_softmax" + softmax_param { + axis: 2 + } +} +layer { + name: "mbox_conf_flatten" + type: "Flatten" + bottom: "mbox_conf_softmax" + top: "mbox_conf_flatten" + flatten_param { + axis: 1 + } +} +layer { + name: "detection_out" + type: "DetectionOutput" + bottom: "mbox_loc" + bottom: "mbox_conf_flatten" + bottom: "mbox_priorbox" + top: "detection_out" + include { + phase: TEST + } + detection_output_param { + num_classes: 21 + share_location: true + background_label_id: 0 + nms_param { + nms_threshold: 0.45 + top_k: 400 + } + save_output_param { + output_directory: "data/ssd_out/VOC2007/SSD_300x300" + output_name_prefix: "comp4_det_test_" + output_format: "VOC" + label_map_file: "data/VOC0712/labelmap_voc.prototxt" + name_size_file: "data/VOC0712/test_name_size.txt" + num_test_image: 4952 + } + code_type: CENTER_SIZE + keep_top_k: 200 + confidence_threshold: 0.01 + } +} +layer { + name: "detection_eval" + type: "DetectionEvaluate" + bottom: "detection_out" + bottom: "label" + top: "detection_eval" + include { + phase: TEST + } + detection_evaluate_param { + num_classes: 21 + background_label_id: 0 + overlap_threshold: 0.5 + evaluate_difficult_gt: false + name_size_file: "data/VOC0712/test_name_size.txt" + } +} + diff --git a/models/intel_optimized_models/ssd/AlexNet/VOC0712/SSD_300x300/train.prototxt b/models/intel_optimized_models/ssd/AlexNet/VOC0712/SSD_300x300/train.prototxt new file mode 100644 index 00000000000..d8bac9b7f86 --- /dev/null +++ b/models/intel_optimized_models/ssd/AlexNet/VOC0712/SSD_300x300/train.prototxt @@ -0,0 +1,1526 @@ +name: "AlexNet_VOC0712_SSD_300x300_train" +layer { + name: "data" + type: "AnnotatedData" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + 
mirror: true + mean_value: 104 + mean_value: 117 + mean_value: 123 + resize_param { + prob: 1 + resize_mode: WARP + height: 300 + width: 300 + interp_mode: LINEAR + interp_mode: AREA + interp_mode: NEAREST + interp_mode: CUBIC + interp_mode: LANCZOS4 + } + emit_constraint { + emit_type: CENTER + } + distort_param { + brightness_prob: 0.5 + brightness_delta: 32 + contrast_prob: 0.5 + contrast_lower: 0.5 + contrast_upper: 1.5 + hue_prob: 0.5 + hue_delta: 18 + saturation_prob: 0.5 + saturation_lower: 0.5 + saturation_upper: 1.5 + random_order_prob: 0.0 + } + expand_param { + prob: 0.5 + max_expand_ratio: 4.0 + } + } + data_param { + source: "examples/VOC0712/VOC0712_trainval_lmdb" + batch_size: 256 + backend: LMDB + } + annotated_data_param { + batch_sampler { + max_sample: 1 + max_trials: 1 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + min_jaccard_overlap: 0.1 + } + max_sample: 1 + max_trials: 50 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + min_jaccard_overlap: 0.3 + } + max_sample: 1 + max_trials: 50 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + min_jaccard_overlap: 0.5 + } + max_sample: 1 + max_trials: 50 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + min_jaccard_overlap: 0.7 + } + max_sample: 1 + max_trials: 50 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + min_jaccard_overlap: 0.9 + } + max_sample: 1 + max_trials: 50 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + max_jaccard_overlap: 1.0 
+ } + max_sample: 1 + max_trials: 50 + } + label_map_file: "data/VOC0712/labelmap_voc.prototxt" + } +} +layer { + engine: "MKL2017" + name: "conv1" + type: "Convolution" + bottom: "data" + top: "conv1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 96 + kernel_size: 11 + stride: 4 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu1" + type: "ReLU" + bottom: "conv1" + top: "conv1" +} +layer { + engine: "MKL2017" + name: "norm1" + type: "LRN" + bottom: "conv1" + top: "norm1" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + k: 2 + } +} +layer { + engine: "MKL2017" + name: "pool1" + type: "Pooling" + bottom: "norm1" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + engine: "MKL2017" + name: "conv2" + type: "Convolution" + bottom: "pool1" + top: "conv2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 2 + kernel_size: 5 + group: 2 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu2" + type: "ReLU" + bottom: "conv2" + top: "conv2" +} +layer { + engine: "MKL2017" + name: "norm2" + type: "LRN" + bottom: "conv2" + top: "norm2" + lrn_param { + local_size: 5 + alpha: 0.0001 + beta: 0.75 + k: 2 + } +} +layer { + engine: "MKL2017" + name: "pool2" + type: "Pooling" + bottom: "norm2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 2 + } +} +layer { + engine: "MKL2017" + name: "conv3" + type: "Convolution" + bottom: "pool2" + top: "conv3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: 
"constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu3" + type: "ReLU" + bottom: "conv3" + top: "conv3" +} +layer { + engine: "MKL2017" + name: "conv4" + type: "Convolution" + bottom: "conv3" + top: "conv4" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 384 + pad: 1 + kernel_size: 3 + group: 2 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu4" + type: "ReLU" + bottom: "conv4" + top: "conv4" +} +layer { + engine: "MKL2017" + name: "conv5" + type: "Convolution" + bottom: "conv4" + top: "conv5" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + group: 2 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu5" + type: "ReLU" + bottom: "conv5" + top: "conv5" +} +layer { + engine: "MKL2017" + name: "pool5" + type: "Pooling" + bottom: "conv5" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + engine: "MKL2017" + name: "fc6_conv" + type: "Convolution" + bottom: "pool5" + top: "fc6_conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + pad: 5 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 5 + } +} +layer { + engine: "MKL2017" + name: "relu6" + type: "ReLU" + bottom: "fc6_conv" + top: "fc6_conv" +} +layer { + engine: "MKL2017" + name: "fc7_conv" + type: "Convolution" + bottom: "fc6_conv" + top: "fc7_conv" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + kernel_size: 1 + weight_filler { + type: "xavier" + } + 
bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu7" + type: "ReLU" + bottom: "fc7_conv" + top: "fc7_conv" +} +layer { + engine: "MKL2017" + name: "conv6_1" + type: "Convolution" + bottom: "fc7_conv" + top: "conv6_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv6_1_relu" + type: "ReLU" + bottom: "conv6_1" + top: "conv6_1" +} +layer { + engine: "MKL2017" + name: "conv6_2" + type: "Convolution" + bottom: "conv6_1" + top: "conv6_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv6_2_relu" + type: "ReLU" + bottom: "conv6_2" + top: "conv6_2" +} +layer { + engine: "MKL2017" + name: "conv7_1" + type: "Convolution" + bottom: "conv6_2" + top: "conv7_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv7_1_relu" + type: "ReLU" + bottom: "conv7_1" + top: "conv7_1" +} +layer { + engine: "MKL2017" + name: "conv7_2" + type: "Convolution" + bottom: "conv7_1" + top: "conv7_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv7_2_relu" + 
type: "ReLU" + bottom: "conv7_2" + top: "conv7_2" +} +layer { + engine: "MKL2017" + name: "conv8_1" + type: "Convolution" + bottom: "conv7_2" + top: "conv8_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv8_1_relu" + type: "ReLU" + bottom: "conv8_1" + top: "conv8_1" +} +layer { + engine: "MKL2017" + name: "conv8_2" + type: "Convolution" + bottom: "conv8_1" + top: "conv8_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv8_2_relu" + type: "ReLU" + bottom: "conv8_2" + top: "conv8_2" +} +layer { + engine: "MKL2017" + name: "conv9_1" + type: "Convolution" + bottom: "conv8_2" + top: "conv9_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv9_1_relu" + type: "ReLU" + bottom: "conv9_1" + top: "conv9_1" +} +layer { + engine: "MKL2017" + name: "conv9_2" + type: "Convolution" + bottom: "conv9_1" + top: "conv9_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv9_2_relu" + type: "ReLU" + bottom: "conv9_2" + top: "conv9_2" +} +layer { + name: "conv2_norm" + type: "Normalize" + 
bottom: "conv2" + top: "conv2_norm" + norm_param { + across_spatial: false + scale_filler { + type: "constant" + value: 20 + } + channel_shared: false + } +} +layer { + name: "conv2_norm_mbox_loc" + type: "Convolution" + bottom: "conv2_norm" + top: "conv2_norm_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv2_norm_mbox_loc_perm" + type: "Permute" + bottom: "conv2_norm_mbox_loc" + top: "conv2_norm_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv2_norm_mbox_loc_flat" + type: "Flatten" + bottom: "conv2_norm_mbox_loc_perm" + top: "conv2_norm_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv2_norm_mbox_conf" + type: "Convolution" + bottom: "conv2_norm" + top: "conv2_norm_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv2_norm_mbox_conf_perm" + type: "Permute" + bottom: "conv2_norm_mbox_conf" + top: "conv2_norm_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv2_norm_mbox_conf_flat" + type: "Flatten" + bottom: "conv2_norm_mbox_conf_perm" + top: "conv2_norm_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv2_norm_mbox_priorbox" + type: "PriorBox" + bottom: "conv2_norm" + bottom: "data" + top: "conv2_norm_mbox_priorbox" + prior_box_param { + min_size: 30.0 + max_size: 60.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 8 + offset: 0.5 + } +} +layer { + engine: "MKL2017" 
+ name: "fc6_conv_mbox_loc" + type: "Convolution" + bottom: "fc6_conv" + top: "fc6_conv_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc6_conv_mbox_loc_perm" + type: "Permute" + bottom: "fc6_conv_mbox_loc" + top: "fc6_conv_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc6_conv_mbox_loc_flat" + type: "Flatten" + bottom: "fc6_conv_mbox_loc_perm" + top: "fc6_conv_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc6_conv_mbox_conf" + type: "Convolution" + bottom: "fc6_conv" + top: "fc6_conv_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc6_conv_mbox_conf_perm" + type: "Permute" + bottom: "fc6_conv_mbox_conf" + top: "fc6_conv_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc6_conv_mbox_conf_flat" + type: "Flatten" + bottom: "fc6_conv_mbox_conf_perm" + top: "fc6_conv_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc6_conv_mbox_priorbox" + type: "PriorBox" + bottom: "fc6_conv" + bottom: "data" + top: "fc6_conv_mbox_priorbox" + prior_box_param { + min_size: 60.0 + max_size: 111.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 16 + offset: 0.5 + } +} +layer { + engine: "MKL2017" + name: "conv6_2_mbox_loc" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_loc_perm" + type: "Permute" + bottom: "conv6_2_mbox_loc" + top: "conv6_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv6_2_mbox_loc_perm" + top: "conv6_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_conf" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_conf_perm" + type: "Permute" + bottom: "conv6_2_mbox_conf" + top: "conv6_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv6_2_mbox_conf_perm" + top: "conv6_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv6_2" + bottom: "data" + top: "conv6_2_mbox_priorbox" + prior_box_param { + min_size: 111.0 + max_size: 162.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 32 + offset: 0.5 + } +} +layer { + name: "conv7_2_mbox_loc" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_loc_perm" + type: 
"Permute" + bottom: "conv7_2_mbox_loc" + top: "conv7_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv7_2_mbox_loc_perm" + top: "conv7_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + engine: "MKL2017" + name: "conv7_2_mbox_conf" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_conf_perm" + type: "Permute" + bottom: "conv7_2_mbox_conf" + top: "conv7_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv7_2_mbox_conf_perm" + top: "conv7_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv7_2" + bottom: "data" + top: "conv7_2_mbox_priorbox" + prior_box_param { + min_size: 162.0 + max_size: 213.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 64 + offset: 0.5 + } +} +layer { + engine: "MKL2017" + name: "conv8_2_mbox_loc" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_loc_perm" + type: "Permute" + bottom: "conv8_2_mbox_loc" + top: "conv8_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_loc_flat" + type: 
"Flatten" + bottom: "conv8_2_mbox_loc_perm" + top: "conv8_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + engine: "MKL2017" + name: "conv8_2_mbox_conf" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_conf_perm" + type: "Permute" + bottom: "conv8_2_mbox_conf" + top: "conv8_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv8_2_mbox_conf_perm" + top: "conv8_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv8_2" + bottom: "data" + top: "conv8_2_mbox_priorbox" + prior_box_param { + min_size: 213.0 + max_size: 264.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 100 + offset: 0.5 + } +} +layer { + name: "conv9_2_mbox_loc" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_loc_perm" + type: "Permute" + bottom: "conv9_2_mbox_loc" + top: "conv9_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv9_2_mbox_loc_perm" + top: "conv9_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + engine: "MKL2017" + name: "conv9_2_mbox_conf" + type: "Convolution" + bottom: "conv9_2" + top: 
"conv9_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_conf_perm" + type: "Permute" + bottom: "conv9_2_mbox_conf" + top: "conv9_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv9_2_mbox_conf_perm" + top: "conv9_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv9_2" + bottom: "data" + top: "conv9_2_mbox_priorbox" + prior_box_param { + min_size: 264.0 + max_size: 315.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 300 + offset: 0.5 + } +} +layer { + name: "mbox_loc" + type: "Concat" + bottom: "conv2_norm_mbox_loc_flat" + bottom: "fc6_conv_mbox_loc_flat" + bottom: "conv6_2_mbox_loc_flat" + bottom: "conv7_2_mbox_loc_flat" + bottom: "conv8_2_mbox_loc_flat" + bottom: "conv9_2_mbox_loc_flat" + top: "mbox_loc" + concat_param { + axis: 1 + } + engine: "CAFFE" +} +layer { + name: "mbox_conf" + type: "Concat" + bottom: "conv2_norm_mbox_conf_flat" + bottom: "fc6_conv_mbox_conf_flat" + bottom: "conv6_2_mbox_conf_flat" + bottom: "conv7_2_mbox_conf_flat" + bottom: "conv8_2_mbox_conf_flat" + bottom: "conv9_2_mbox_conf_flat" + top: "mbox_conf" + concat_param { + axis: 1 + } + engine: "CAFFE" +} +layer { + name: "mbox_priorbox" + type: "Concat" + bottom: "conv2_norm_mbox_priorbox" + bottom: "fc6_conv_mbox_priorbox" + bottom: "conv6_2_mbox_priorbox" + bottom: "conv7_2_mbox_priorbox" + bottom: "conv8_2_mbox_priorbox" + bottom: "conv9_2_mbox_priorbox" + top: "mbox_priorbox" + concat_param { + axis: 2 + } + engine: "CAFFE" +} +layer { + name: "mbox_loss" + type: "MultiBoxLoss" + 
bottom: "mbox_loc" + bottom: "mbox_conf" + bottom: "mbox_priorbox" + bottom: "label" + top: "mbox_loss" + include { + phase: TRAIN + } + propagate_down: true + propagate_down: true + propagate_down: false + propagate_down: false + loss_param { + normalization: VALID + } + multibox_loss_param { + loc_loss_type: SMOOTH_L1 + conf_loss_type: SOFTMAX + loc_weight: 1.0 + num_classes: 21 + share_location: true + match_type: PER_PREDICTION + overlap_threshold: 0.5 + use_prior_for_matching: true + background_label_id: 0 + use_difficult_gt: true + neg_pos_ratio: 3.0 + neg_overlap: 0.5 + code_type: CENTER_SIZE + ignore_cross_boundary_bbox: false + mining_type: MAX_NEGATIVE + } +} + diff --git a/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/deploy.prototxt b/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/deploy.prototxt new file mode 100644 index 00000000000..dfe14b5161e --- /dev/null +++ b/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/deploy.prototxt @@ -0,0 +1,1632 @@ +name: "VGG_VOC0712_SSD_300x300_deploy" +input: "data" +input_shape { + dim: 1 + dim: 3 + dim: 300 + dim: 300 +} +layer { + name: "conv1_1" + type: "Convolution" + bottom: "data" + top: "conv1_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_1" + type: "ReLU" + bottom: "conv1_1" + top: "conv1_1" +} +layer { + name: "conv1_2" + type: "Convolution" + bottom: "conv1_1" + top: "conv1_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_2" + type: "ReLU" + bottom: "conv1_2" + top: "conv1_2" +} +layer { + name: "pool1" + type: "Pooling" 
+ bottom: "conv1_2" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_1" + type: "Convolution" + bottom: "pool1" + top: "conv2_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_1" + type: "ReLU" + bottom: "conv2_1" + top: "conv2_1" +} +layer { + name: "conv2_2" + type: "Convolution" + bottom: "conv2_1" + top: "conv2_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_2" + type: "ReLU" + bottom: "conv2_2" + top: "conv2_2" +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2_2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv3_1" + type: "Convolution" + bottom: "pool2" + top: "conv3_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_1" + type: "ReLU" + bottom: "conv3_1" + top: "conv3_1" +} +layer { + name: "conv3_2" + type: "Convolution" + bottom: "conv3_1" + top: "conv3_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_2" + type: "ReLU" + bottom: "conv3_2" + top: "conv3_2" +} +layer { + name: "conv3_3" + type: "Convolution" + bottom: "conv3_2" + top: "conv3_3" + param 
{ + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_3" + type: "ReLU" + bottom: "conv3_3" + top: "conv3_3" +} +layer { + name: "pool3" + type: "Pooling" + bottom: "conv3_3" + top: "pool3" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv4_1" + type: "Convolution" + bottom: "pool3" + top: "conv4_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_1" + type: "ReLU" + bottom: "conv4_1" + top: "conv4_1" +} +layer { + name: "conv4_2" + type: "Convolution" + bottom: "conv4_1" + top: "conv4_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_2" + type: "ReLU" + bottom: "conv4_2" + top: "conv4_2" +} +layer { + name: "conv4_3" + type: "Convolution" + bottom: "conv4_2" + top: "conv4_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_3" + type: "ReLU" + bottom: "conv4_3" + top: "conv4_3" +} +layer { + name: "pool4" + type: "Pooling" + bottom: "conv4_3" + top: "pool4" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv5_1" + type: "Convolution" + bottom: "pool4" + top: "conv5_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + 
lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_1" + type: "ReLU" + bottom: "conv5_1" + top: "conv5_1" +} +layer { + name: "conv5_2" + type: "Convolution" + bottom: "conv5_1" + top: "conv5_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_2" + type: "ReLU" + bottom: "conv5_2" + top: "conv5_2" +} +layer { + name: "conv5_3" + type: "Convolution" + bottom: "conv5_2" + top: "conv5_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_3" + type: "ReLU" + bottom: "conv5_3" + top: "conv5_3" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5_3" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "fc6" + type: "Convolution" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + pad: 6 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 6 + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "fc7" + type: "Convolution" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: 
"constant" + value: 0 + } + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + name: "conv6_1" + type: "Convolution" + bottom: "fc7" + top: "conv6_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_1_relu" + type: "ReLU" + bottom: "conv6_1" + top: "conv6_1" +} +layer { + name: "conv6_2" + type: "Convolution" + bottom: "conv6_1" + top: "conv6_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_relu" + type: "ReLU" + bottom: "conv6_2" + top: "conv6_2" +} +layer { + name: "conv7_1" + type: "Convolution" + bottom: "conv6_2" + top: "conv7_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_1_relu" + type: "ReLU" + bottom: "conv7_1" + top: "conv7_1" +} +layer { + name: "conv7_2" + type: "Convolution" + bottom: "conv7_1" + top: "conv7_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_relu" + type: "ReLU" + bottom: "conv7_2" + top: "conv7_2" +} +layer { + name: "conv8_1" + type: "Convolution" + bottom: "conv7_2" + top: "conv8_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 
0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_1_relu" + type: "ReLU" + bottom: "conv8_1" + top: "conv8_1" +} +layer { + name: "conv8_2" + type: "Convolution" + bottom: "conv8_1" + top: "conv8_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_relu" + type: "ReLU" + bottom: "conv8_2" + top: "conv8_2" +} +layer { + name: "conv9_1" + type: "Convolution" + bottom: "conv8_2" + top: "conv9_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_1_relu" + type: "ReLU" + bottom: "conv9_1" + top: "conv9_1" +} +layer { + name: "conv9_2" + type: "Convolution" + bottom: "conv9_1" + top: "conv9_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_relu" + type: "ReLU" + bottom: "conv9_2" + top: "conv9_2" +} +layer { + name: "conv4_3_norm" + type: "Normalize" + bottom: "conv4_3" + top: "conv4_3_norm" + norm_param { + across_spatial: false + scale_filler { + type: "constant" + value: 20 + } + channel_shared: false + } +} +layer { + name: "conv4_3_norm_mbox_loc" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_loc_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_loc" + top: "conv4_3_norm_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_loc_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_loc_perm" + top: "conv4_3_norm_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_conf_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_conf" + top: "conv4_3_norm_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_conf_perm" + top: "conv4_3_norm_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_priorbox" + type: "PriorBox" + bottom: "conv4_3_norm" + bottom: "data" + top: "conv4_3_norm_mbox_priorbox" + prior_box_param { + min_size: 30.0 + max_size: 60.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 8 + offset: 0.5 + } +} +layer { + name: "fc7_mbox_loc" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} 
+layer { + name: "fc7_mbox_loc_perm" + type: "Permute" + bottom: "fc7_mbox_loc" + top: "fc7_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_loc_flat" + type: "Flatten" + bottom: "fc7_mbox_loc_perm" + top: "fc7_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_conf" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc7_mbox_conf_perm" + type: "Permute" + bottom: "fc7_mbox_conf" + top: "fc7_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_conf_flat" + type: "Flatten" + bottom: "fc7_mbox_conf_perm" + top: "fc7_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_priorbox" + type: "PriorBox" + bottom: "fc7" + bottom: "data" + top: "fc7_mbox_priorbox" + prior_box_param { + min_size: 60.0 + max_size: 111.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 16 + offset: 0.5 + } +} +layer { + name: "conv6_2_mbox_loc" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_loc_perm" + type: "Permute" + bottom: "conv6_2_mbox_loc" + top: "conv6_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv6_2_mbox_loc_perm" + top: 
"conv6_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_conf" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_conf_perm" + type: "Permute" + bottom: "conv6_2_mbox_conf" + top: "conv6_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv6_2_mbox_conf_perm" + top: "conv6_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv6_2" + bottom: "data" + top: "conv6_2_mbox_priorbox" + prior_box_param { + min_size: 111.0 + max_size: 162.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 32 + offset: 0.5 + } +} +layer { + name: "conv7_2_mbox_loc" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_loc_perm" + type: "Permute" + bottom: "conv7_2_mbox_loc" + top: "conv7_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv7_2_mbox_loc_perm" + top: "conv7_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_conf" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + 
lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_conf_perm" + type: "Permute" + bottom: "conv7_2_mbox_conf" + top: "conv7_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv7_2_mbox_conf_perm" + top: "conv7_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv7_2" + bottom: "data" + top: "conv7_2_mbox_priorbox" + prior_box_param { + min_size: 162.0 + max_size: 213.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 64 + offset: 0.5 + } +} +layer { + name: "conv8_2_mbox_loc" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_loc_perm" + type: "Permute" + bottom: "conv8_2_mbox_loc" + top: "conv8_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv8_2_mbox_loc_perm" + top: "conv8_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_conf" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: 
"conv8_2_mbox_conf_perm" + type: "Permute" + bottom: "conv8_2_mbox_conf" + top: "conv8_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv8_2_mbox_conf_perm" + top: "conv8_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv8_2" + bottom: "data" + top: "conv8_2_mbox_priorbox" + prior_box_param { + min_size: 213.0 + max_size: 264.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 100 + offset: 0.5 + } +} +layer { + name: "conv9_2_mbox_loc" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_loc_perm" + type: "Permute" + bottom: "conv9_2_mbox_loc" + top: "conv9_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv9_2_mbox_loc_perm" + top: "conv9_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_conf" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_conf_perm" + type: "Permute" + bottom: "conv9_2_mbox_conf" + top: "conv9_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_conf_flat" + type: "Flatten" + bottom: 
"conv9_2_mbox_conf_perm" + top: "conv9_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv9_2" + bottom: "data" + top: "conv9_2_mbox_priorbox" + prior_box_param { + min_size: 264.0 + max_size: 315.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 300 + offset: 0.5 + } +} +layer { + name: "mbox_loc" + type: "Concat" + bottom: "conv4_3_norm_mbox_loc_flat" + bottom: "fc7_mbox_loc_flat" + bottom: "conv6_2_mbox_loc_flat" + bottom: "conv7_2_mbox_loc_flat" + bottom: "conv8_2_mbox_loc_flat" + bottom: "conv9_2_mbox_loc_flat" + top: "mbox_loc" + concat_param { + axis: 1 + } + engine: "CAFFE" +} +layer { + name: "mbox_conf" + type: "Concat" + bottom: "conv4_3_norm_mbox_conf_flat" + bottom: "fc7_mbox_conf_flat" + bottom: "conv6_2_mbox_conf_flat" + bottom: "conv7_2_mbox_conf_flat" + bottom: "conv8_2_mbox_conf_flat" + bottom: "conv9_2_mbox_conf_flat" + top: "mbox_conf" + concat_param { + axis: 1 + } + engine: "CAFFE" +} +layer { + name: "mbox_priorbox" + type: "Concat" + bottom: "conv4_3_norm_mbox_priorbox" + bottom: "fc7_mbox_priorbox" + bottom: "conv6_2_mbox_priorbox" + bottom: "conv7_2_mbox_priorbox" + bottom: "conv8_2_mbox_priorbox" + bottom: "conv9_2_mbox_priorbox" + top: "mbox_priorbox" + concat_param { + axis: 2 + } + engine: "CAFFE" +} +layer { + name: "mbox_conf_reshape" + type: "Reshape" + bottom: "mbox_conf" + top: "mbox_conf_reshape" + reshape_param { + shape { + dim: 0 + dim: -1 + dim: 21 + } + } +} +layer { + name: "mbox_conf_softmax" + type: "Softmax" + bottom: "mbox_conf_reshape" + top: "mbox_conf_softmax" + softmax_param { + axis: 2 + } +} +layer { + name: "mbox_conf_flatten" + type: "Flatten" + bottom: "mbox_conf_softmax" + top: "mbox_conf_flatten" + flatten_param { + axis: 1 + } +} +layer { + name: "detection_out" + type: "DetectionOutput" + bottom: "mbox_loc" + bottom: "mbox_conf_flatten" + bottom: "mbox_priorbox" + 
top: "detection_out" + include { + phase: TEST + } + detection_output_param { + num_classes: 21 + share_location: true + background_label_id: 0 + nms_param { + nms_threshold: 0.45 + top_k: 400 + } + save_output_param { + output_directory: "data/ssd_out/VOC2007/SSD_300x300" + output_name_prefix: "comp4_det_test_" + output_format: "VOC" + label_map_file: "data/VOC0712/labelmap_voc.prototxt" + name_size_file: "data/VOC0712/test_name_size.txt" + num_test_image: 4952 + } + code_type: CENTER_SIZE + keep_top_k: 200 + confidence_threshold: 0.01 + } +} + diff --git a/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/deploy_mkl2017.prototxt b/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/deploy_mkl2017.prototxt new file mode 100644 index 00000000000..6d03b44f3b6 --- /dev/null +++ b/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/deploy_mkl2017.prototxt @@ -0,0 +1,1629 @@ +name: "VGG_VOC0712_SSD_300x300_deploy" +input: "data" +input_shape { + dim: 1 + dim: 3 + dim: 300 + dim: 300 +} +layer { + engine: "MKL2017" + name: "conv1_1" + type: "Convolution" + bottom: "data" + top: "conv1_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu1_1" + type: "ReLU" + bottom: "conv1_1" + top: "conv1_1" +} +layer { + engine: "MKL2017" + name: "conv1_2" + type: "Convolution" + bottom: "conv1_1" + top: "conv1_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu1_2" + type: "ReLU" + bottom: "conv1_2" + top: "conv1_2" +} +layer { + engine: "MKL2017" + name: "pool1" + type: "Pooling" + 
bottom: "conv1_2" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + engine: "MKL2017" + name: "conv2_1" + type: "Convolution" + bottom: "pool1" + top: "conv2_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu2_1" + type: "ReLU" + bottom: "conv2_1" + top: "conv2_1" +} +layer { + engine: "MKL2017" + name: "conv2_2" + type: "Convolution" + bottom: "conv2_1" + top: "conv2_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu2_2" + type: "ReLU" + bottom: "conv2_2" + top: "conv2_2" +} +layer { + engine: "MKL2017" + name: "pool2" + type: "Pooling" + bottom: "conv2_2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + engine: "MKL2017" + name: "conv3_1" + type: "Convolution" + bottom: "pool2" + top: "conv3_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu3_1" + type: "ReLU" + bottom: "conv3_1" + top: "conv3_1" +} +layer { + engine: "MKL2017" + name: "conv3_2" + type: "Convolution" + bottom: "conv3_1" + top: "conv3_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: 
"MKL2017" + name: "relu3_2" + type: "ReLU" + bottom: "conv3_2" + top: "conv3_2" +} +layer { + engine: "MKL2017" + name: "conv3_3" + type: "Convolution" + bottom: "conv3_2" + top: "conv3_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu3_3" + type: "ReLU" + bottom: "conv3_3" + top: "conv3_3" +} +layer { + engine: "MKL2017" + name: "pool3" + type: "Pooling" + bottom: "conv3_3" + top: "pool3" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + engine: "MKL2017" + name: "conv4_1" + type: "Convolution" + bottom: "pool3" + top: "conv4_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu4_1" + type: "ReLU" + bottom: "conv4_1" + top: "conv4_1" +} +layer { + engine: "MKL2017" + name: "conv4_2" + type: "Convolution" + bottom: "conv4_1" + top: "conv4_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu4_2" + type: "ReLU" + bottom: "conv4_2" + top: "conv4_2" +} +layer { + engine: "MKL2017" + name: "conv4_3" + type: "Convolution" + bottom: "conv4_2" + top: "conv4_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + 
name: "relu4_3" + type: "ReLU" + bottom: "conv4_3" + top: "conv4_3" +} +layer { + engine: "MKL2017" + name: "pool4" + type: "Pooling" + bottom: "conv4_3" + top: "pool4" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + engine: "MKL2017" + name: "conv5_1" + type: "Convolution" + bottom: "pool4" + top: "conv5_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + engine: "MKL2017" + name: "relu5_1" + type: "ReLU" + bottom: "conv5_1" + top: "conv5_1" +} +layer { + engine: "MKL2017" + name: "conv5_2" + type: "Convolution" + bottom: "conv5_1" + top: "conv5_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + engine: "MKL2017" + name: "relu5_2" + type: "ReLU" + bottom: "conv5_2" + top: "conv5_2" +} +layer { + engine: "MKL2017" + name: "conv5_3" + type: "Convolution" + bottom: "conv5_2" + top: "conv5_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + engine: "MKL2017" + name: "relu5_3" + type: "ReLU" + bottom: "conv5_3" + top: "conv5_3" +} +layer { + engine: "MKL2017" + name: "pool5" + type: "Pooling" + bottom: "conv5_3" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + engine: "MKL2017" + name: "fc6" + type: "Convolution" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 1024 + pad: 6 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 6 + } +} +layer { + engine: "MKL2017" + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + engine: "MKL2017" + name: "fc7" + type: "Convolution" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + engine: "MKL2017" + name: "conv6_1" + type: "Convolution" + bottom: "fc7" + top: "conv6_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv6_1_relu" + type: "ReLU" + bottom: "conv6_1" + top: "conv6_1" +} +layer { + engine: "MKL2017" + name: "conv6_2" + type: "Convolution" + bottom: "conv6_1" + top: "conv6_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv6_2_relu" + type: "ReLU" + bottom: "conv6_2" + top: "conv6_2" +} +layer { + engine: "MKL2017" + name: "conv7_1" + type: "Convolution" + bottom: "conv6_2" + top: "conv7_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } 
+} +layer { + engine: "MKL2017" + name: "conv7_1_relu" + type: "ReLU" + bottom: "conv7_1" + top: "conv7_1" +} +layer { + engine: "MKL2017" + name: "conv7_2" + type: "Convolution" + bottom: "conv7_1" + top: "conv7_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv7_2_relu" + type: "ReLU" + bottom: "conv7_2" + top: "conv7_2" +} +layer { + engine: "MKL2017" + name: "conv8_1" + type: "Convolution" + bottom: "conv7_2" + top: "conv8_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv8_1_relu" + type: "ReLU" + bottom: "conv8_1" + top: "conv8_1" +} +layer { + engine: "MKL2017" + name: "conv8_2" + type: "Convolution" + bottom: "conv8_1" + top: "conv8_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv8_2_relu" + type: "ReLU" + bottom: "conv8_2" + top: "conv8_2" +} +layer { + engine: "MKL2017" + name: "conv9_1" + type: "Convolution" + bottom: "conv8_2" + top: "conv9_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv9_1_relu" + type: "ReLU" + bottom: "conv9_1" + top: "conv9_1" 
+} +layer { + engine: "MKL2017" + name: "conv9_2" + type: "Convolution" + bottom: "conv9_1" + top: "conv9_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKL2017" + name: "conv9_2_relu" + type: "ReLU" + bottom: "conv9_2" + top: "conv9_2" +} +layer { + name: "conv4_3_norm" + type: "Normalize" + bottom: "conv4_3" + top: "conv4_3_norm" + norm_param { + across_spatial: false + scale_filler { + type: "constant" + value: 20 + } + channel_shared: false + } +} +layer { + name: "conv4_3_norm_mbox_loc" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_loc_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_loc" + top: "conv4_3_norm_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_loc_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_loc_perm" + top: "conv4_3_norm_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + engine: "CAFFE" +} +layer { + name: "conv4_3_norm_mbox_conf_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_conf" + top: "conv4_3_norm_mbox_conf_perm" + permute_param { 
+ order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_conf_perm" + top: "conv4_3_norm_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_priorbox" + type: "PriorBox" + bottom: "conv4_3_norm" + bottom: "data" + top: "conv4_3_norm_mbox_priorbox" + prior_box_param { + min_size: 30.0 + max_size: 60.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 8 + offset: 0.5 + } +} +layer { + engine: "MKL2017" + name: "fc7_mbox_loc" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc7_mbox_loc_perm" + type: "Permute" + bottom: "fc7_mbox_loc" + top: "fc7_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_loc_flat" + type: "Flatten" + bottom: "fc7_mbox_loc_perm" + top: "fc7_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_conf" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + engine: "CAFFE" +} +layer { + name: "fc7_mbox_conf_perm" + type: "Permute" + bottom: "fc7_mbox_conf" + top: "fc7_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_conf_flat" + type: "Flatten" + bottom: "fc7_mbox_conf_perm" + top: "fc7_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_priorbox" + 
type: "PriorBox" + bottom: "fc7" + bottom: "data" + top: "fc7_mbox_priorbox" + prior_box_param { + min_size: 60.0 + max_size: 111.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 16 + offset: 0.5 + } +} +layer { + engine: "MKL2017" + name: "conv6_2_mbox_loc" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_loc_perm" + type: "Permute" + bottom: "conv6_2_mbox_loc" + top: "conv6_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv6_2_mbox_loc_perm" + top: "conv6_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_conf" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + engine: "CAFFE" +} +layer { + name: "conv6_2_mbox_conf_perm" + type: "Permute" + bottom: "conv6_2_mbox_conf" + top: "conv6_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv6_2_mbox_conf_perm" + top: "conv6_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv6_2" + bottom: "data" + top: "conv6_2_mbox_priorbox" + prior_box_param { + min_size: 111.0 + max_size: 162.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: 
false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 32 + offset: 0.5 + } +} +layer { + name: "conv7_2_mbox_loc" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_loc_perm" + type: "Permute" + bottom: "conv7_2_mbox_loc" + top: "conv7_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv7_2_mbox_loc_perm" + top: "conv7_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + engine: "MKL2017" + name: "conv7_2_mbox_conf" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_conf_perm" + type: "Permute" + bottom: "conv7_2_mbox_conf" + top: "conv7_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv7_2_mbox_conf_perm" + top: "conv7_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv7_2" + bottom: "data" + top: "conv7_2_mbox_priorbox" + prior_box_param { + min_size: 162.0 + max_size: 213.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 64 + offset: 0.5 + } +} +layer { + engine: "MKL2017" + name: "conv8_2_mbox_loc" + type: "Convolution" + bottom: "conv8_2" + top: 
"conv8_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_loc_perm" + type: "Permute" + bottom: "conv8_2_mbox_loc" + top: "conv8_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv8_2_mbox_loc_perm" + top: "conv8_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + engine: "MKL2017" + name: "conv8_2_mbox_conf" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_conf_perm" + type: "Permute" + bottom: "conv8_2_mbox_conf" + top: "conv8_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv8_2_mbox_conf_perm" + top: "conv8_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv8_2" + bottom: "data" + top: "conv8_2_mbox_priorbox" + prior_box_param { + min_size: 213.0 + max_size: 264.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 100 + offset: 0.5 + } +} +layer { + name: "conv9_2_mbox_loc" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + 
bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_loc_perm" + type: "Permute" + bottom: "conv9_2_mbox_loc" + top: "conv9_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv9_2_mbox_loc_perm" + top: "conv9_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + engine: "MKL2017" + name: "conv9_2_mbox_conf" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_conf_perm" + type: "Permute" + bottom: "conv9_2_mbox_conf" + top: "conv9_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv9_2_mbox_conf_perm" + top: "conv9_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv9_2" + bottom: "data" + top: "conv9_2_mbox_priorbox" + prior_box_param { + min_size: 264.0 + max_size: 315.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 300 + offset: 0.5 + } +} +layer { + name: "mbox_loc" + type: "Concat" + bottom: "conv4_3_norm_mbox_loc_flat" + bottom: "fc7_mbox_loc_flat" + bottom: "conv6_2_mbox_loc_flat" + bottom: "conv7_2_mbox_loc_flat" + bottom: "conv8_2_mbox_loc_flat" + bottom: "conv9_2_mbox_loc_flat" + top: "mbox_loc" + concat_param { + axis: 1 + } + engine: "CAFFE" +} +layer { + name: "mbox_conf" + type: "Concat" + bottom: "conv4_3_norm_mbox_conf_flat" + bottom: "fc7_mbox_conf_flat" + bottom: "conv6_2_mbox_conf_flat" + bottom: "conv7_2_mbox_conf_flat" + bottom: 
"conv8_2_mbox_conf_flat" + bottom: "conv9_2_mbox_conf_flat" + top: "mbox_conf" + concat_param { + axis: 1 + } + engine: "CAFFE" +} +layer { + name: "mbox_priorbox" + type: "Concat" + bottom: "conv4_3_norm_mbox_priorbox" + bottom: "fc7_mbox_priorbox" + bottom: "conv6_2_mbox_priorbox" + bottom: "conv7_2_mbox_priorbox" + bottom: "conv8_2_mbox_priorbox" + bottom: "conv9_2_mbox_priorbox" + top: "mbox_priorbox" + concat_param { + axis: 2 + } + engine: "CAFFE" +} diff --git a/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/deploy_mkldnn.prototxt b/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/deploy_mkldnn.prototxt new file mode 100644 index 00000000000..7d34d1aa44e --- /dev/null +++ b/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/deploy_mkldnn.prototxt @@ -0,0 +1,1629 @@ +name: "VGG_VOC0712_SSD_300x300_deploy" +input: "data" +input_shape { + dim: 1 + dim: 3 + dim: 300 + dim: 300 +} +layer { + engine: "MKLDNN" + name: "conv1_1" + type: "Convolution" + bottom: "data" + top: "conv1_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "relu1_1" + type: "ReLU" + bottom: "conv1_1" + top: "conv1_1" +} +layer { + engine: "MKLDNN" + name: "conv1_2" + type: "Convolution" + bottom: "conv1_1" + top: "conv1_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "relu1_2" + type: "ReLU" + bottom: "conv1_2" + top: "conv1_2" +} +layer { + engine: "MKLDNN" + name: "pool1" + type: "Pooling" + bottom: "conv1_2" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} 
+layer { + engine: "MKLDNN" + name: "conv2_1" + type: "Convolution" + bottom: "pool1" + top: "conv2_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "relu2_1" + type: "ReLU" + bottom: "conv2_1" + top: "conv2_1" +} +layer { + engine: "MKLDNN" + name: "conv2_2" + type: "Convolution" + bottom: "conv2_1" + top: "conv2_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "relu2_2" + type: "ReLU" + bottom: "conv2_2" + top: "conv2_2" +} +layer { + engine: "MKLDNN" + name: "pool2" + type: "Pooling" + bottom: "conv2_2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + engine: "MKLDNN" + name: "conv3_1" + type: "Convolution" + bottom: "pool2" + top: "conv3_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "relu3_1" + type: "ReLU" + bottom: "conv3_1" + top: "conv3_1" +} +layer { + engine: "MKLDNN" + name: "conv3_2" + type: "Convolution" + bottom: "conv3_1" + top: "conv3_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "relu3_2" + type: "ReLU" + bottom: "conv3_2" + top: "conv3_2" +} +layer { + engine: 
"MKLDNN" + name: "conv3_3" + type: "Convolution" + bottom: "conv3_2" + top: "conv3_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "relu3_3" + type: "ReLU" + bottom: "conv3_3" + top: "conv3_3" +} +layer { + engine: "MKLDNN" + name: "pool3" + type: "Pooling" + bottom: "conv3_3" + top: "pool3" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + engine: "MKLDNN" + name: "conv4_1" + type: "Convolution" + bottom: "pool3" + top: "conv4_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "relu4_1" + type: "ReLU" + bottom: "conv4_1" + top: "conv4_1" +} +layer { + engine: "MKLDNN" + name: "conv4_2" + type: "Convolution" + bottom: "conv4_1" + top: "conv4_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "relu4_2" + type: "ReLU" + bottom: "conv4_2" + top: "conv4_2" +} +layer { + engine: "MKLDNN" + name: "conv4_3" + type: "Convolution" + bottom: "conv4_2" + top: "conv4_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "relu4_3" + type: "ReLU" + bottom: "conv4_3" + top: "conv4_3" +} +layer { + engine: "MKLDNN" + name: 
"pool4" + type: "Pooling" + bottom: "conv4_3" + top: "pool4" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + engine: "MKLDNN" + name: "conv5_1" + type: "Convolution" + bottom: "pool4" + top: "conv5_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + engine: "MKLDNN" + name: "relu5_1" + type: "ReLU" + bottom: "conv5_1" + top: "conv5_1" +} +layer { + engine: "MKLDNN" + name: "conv5_2" + type: "Convolution" + bottom: "conv5_1" + top: "conv5_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + engine: "MKLDNN" + name: "relu5_2" + type: "ReLU" + bottom: "conv5_2" + top: "conv5_2" +} +layer { + engine: "MKLDNN" + name: "conv5_3" + type: "Convolution" + bottom: "conv5_2" + top: "conv5_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + engine: "MKLDNN" + name: "relu5_3" + type: "ReLU" + bottom: "conv5_3" + top: "conv5_3" +} +layer { + engine: "MKLDNN" + name: "pool5" + type: "Pooling" + bottom: "conv5_3" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + engine: "MKLDNN" + name: "fc6" + type: "Convolution" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + pad: 6 + kernel_size: 3 + weight_filler { + type: "xavier" + } + 
bias_filler { + type: "constant" + value: 0 + } + dilation: 6 + } +} +layer { + engine: "MKLDNN" + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + engine: "MKLDNN" + name: "fc7" + type: "Convolution" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + engine: "MKLDNN" + name: "conv6_1" + type: "Convolution" + bottom: "fc7" + top: "conv6_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "conv6_1_relu" + type: "ReLU" + bottom: "conv6_1" + top: "conv6_1" +} +layer { + engine: "MKLDNN" + name: "conv6_2" + type: "Convolution" + bottom: "conv6_1" + top: "conv6_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "conv6_2_relu" + type: "ReLU" + bottom: "conv6_2" + top: "conv6_2" +} +layer { + engine: "MKLDNN" + name: "conv7_1" + type: "Convolution" + bottom: "conv6_2" + top: "conv7_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "conv7_1_relu" + type: "ReLU" + bottom: "conv7_1" + top: "conv7_1" +} +layer 
{ + engine: "MKLDNN" + name: "conv7_2" + type: "Convolution" + bottom: "conv7_1" + top: "conv7_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "conv7_2_relu" + type: "ReLU" + bottom: "conv7_2" + top: "conv7_2" +} +layer { + engine: "MKLDNN" + name: "conv8_1" + type: "Convolution" + bottom: "conv7_2" + top: "conv8_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "conv8_1_relu" + type: "ReLU" + bottom: "conv8_1" + top: "conv8_1" +} +layer { + engine: "MKLDNN" + name: "conv8_2" + type: "Convolution" + bottom: "conv8_1" + top: "conv8_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "conv8_2_relu" + type: "ReLU" + bottom: "conv8_2" + top: "conv8_2" +} +layer { + engine: "MKLDNN" + name: "conv9_1" + type: "Convolution" + bottom: "conv8_2" + top: "conv9_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "conv9_1_relu" + type: "ReLU" + bottom: "conv9_1" + top: "conv9_1" +} +layer { + engine: "MKLDNN" + name: "conv9_2" + type: "Convolution" + bottom: "conv9_1" + top: "conv9_2" + param { + 
lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + engine: "MKLDNN" + name: "conv9_2_relu" + type: "ReLU" + bottom: "conv9_2" + top: "conv9_2" +} +layer { + name: "conv4_3_norm" + type: "Normalize" + bottom: "conv4_3" + top: "conv4_3_norm" + norm_param { + across_spatial: false + scale_filler { + type: "constant" + value: 20 + } + channel_shared: false + } +} +layer { + name: "conv4_3_norm_mbox_loc" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_loc_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_loc" + top: "conv4_3_norm_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_loc_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_loc_perm" + top: "conv4_3_norm_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + engine: "CAFFE" +} +layer { + name: "conv4_3_norm_mbox_conf_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_conf" + top: "conv4_3_norm_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf_flat" + type: "Flatten" + 
bottom: "conv4_3_norm_mbox_conf_perm" + top: "conv4_3_norm_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_priorbox" + type: "PriorBox" + bottom: "conv4_3_norm" + bottom: "data" + top: "conv4_3_norm_mbox_priorbox" + prior_box_param { + min_size: 30.0 + max_size: 60.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 8 + offset: 0.5 + } +} +layer { + engine: "MKLDNN" + name: "fc7_mbox_loc" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc7_mbox_loc_perm" + type: "Permute" + bottom: "fc7_mbox_loc" + top: "fc7_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_loc_flat" + type: "Flatten" + bottom: "fc7_mbox_loc_perm" + top: "fc7_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_conf" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + engine: "CAFFE" +} +layer { + name: "fc7_mbox_conf_perm" + type: "Permute" + bottom: "fc7_mbox_conf" + top: "fc7_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_conf_flat" + type: "Flatten" + bottom: "fc7_mbox_conf_perm" + top: "fc7_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_priorbox" + type: "PriorBox" + bottom: "fc7" + bottom: "data" + top: "fc7_mbox_priorbox" + prior_box_param { + min_size: 60.0 + 
max_size: 111.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 16 + offset: 0.5 + } +} +layer { + engine: "MKLDNN" + name: "conv6_2_mbox_loc" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_loc_perm" + type: "Permute" + bottom: "conv6_2_mbox_loc" + top: "conv6_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv6_2_mbox_loc_perm" + top: "conv6_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_conf" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } + engine: "CAFFE" +} +layer { + name: "conv6_2_mbox_conf_perm" + type: "Permute" + bottom: "conv6_2_mbox_conf" + top: "conv6_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv6_2_mbox_conf_perm" + top: "conv6_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv6_2" + bottom: "data" + top: "conv6_2_mbox_priorbox" + prior_box_param { + min_size: 111.0 + max_size: 162.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 32 + offset: 0.5 + } +} +layer { + 
name: "conv7_2_mbox_loc" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_loc_perm" + type: "Permute" + bottom: "conv7_2_mbox_loc" + top: "conv7_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv7_2_mbox_loc_perm" + top: "conv7_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + engine: "MKLDNN" + name: "conv7_2_mbox_conf" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_conf_perm" + type: "Permute" + bottom: "conv7_2_mbox_conf" + top: "conv7_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv7_2_mbox_conf_perm" + top: "conv7_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv7_2" + bottom: "data" + top: "conv7_2_mbox_priorbox" + prior_box_param { + min_size: 162.0 + max_size: 213.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 64 + offset: 0.5 + } +} +layer { + engine: "MKLDNN" + name: "conv8_2_mbox_loc" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_loc_perm" + type: "Permute" + bottom: "conv8_2_mbox_loc" + top: "conv8_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv8_2_mbox_loc_perm" + top: "conv8_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + engine: "MKLDNN" + name: "conv8_2_mbox_conf" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_conf_perm" + type: "Permute" + bottom: "conv8_2_mbox_conf" + top: "conv8_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv8_2_mbox_conf_perm" + top: "conv8_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv8_2" + bottom: "data" + top: "conv8_2_mbox_priorbox" + prior_box_param { + min_size: 213.0 + max_size: 264.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 100 + offset: 0.5 + } +} +layer { + name: "conv9_2_mbox_loc" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_loc_perm" + type: 
"Permute" + bottom: "conv9_2_mbox_loc" + top: "conv9_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv9_2_mbox_loc_perm" + top: "conv9_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + engine: "MKLDNN" + name: "conv9_2_mbox_conf" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_conf_perm" + type: "Permute" + bottom: "conv9_2_mbox_conf" + top: "conv9_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv9_2_mbox_conf_perm" + top: "conv9_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv9_2" + bottom: "data" + top: "conv9_2_mbox_priorbox" + prior_box_param { + min_size: 264.0 + max_size: 315.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 300 + offset: 0.5 + } +} +layer { + name: "mbox_loc" + type: "Concat" + bottom: "conv4_3_norm_mbox_loc_flat" + bottom: "fc7_mbox_loc_flat" + bottom: "conv6_2_mbox_loc_flat" + bottom: "conv7_2_mbox_loc_flat" + bottom: "conv8_2_mbox_loc_flat" + bottom: "conv9_2_mbox_loc_flat" + top: "mbox_loc" + concat_param { + axis: 1 + } + engine: "CAFFE" +} +layer { + name: "mbox_conf" + type: "Concat" + bottom: "conv4_3_norm_mbox_conf_flat" + bottom: "fc7_mbox_conf_flat" + bottom: "conv6_2_mbox_conf_flat" + bottom: "conv7_2_mbox_conf_flat" + bottom: "conv8_2_mbox_conf_flat" + bottom: "conv9_2_mbox_conf_flat" + top: "mbox_conf" + concat_param { + axis: 1 + } + engine: 
"CAFFE" +} +layer { + name: "mbox_priorbox" + type: "Concat" + bottom: "conv4_3_norm_mbox_priorbox" + bottom: "fc7_mbox_priorbox" + bottom: "conv6_2_mbox_priorbox" + bottom: "conv7_2_mbox_priorbox" + bottom: "conv8_2_mbox_priorbox" + bottom: "conv9_2_mbox_priorbox" + top: "mbox_priorbox" + concat_param { + axis: 2 + } + engine: "CAFFE" +} diff --git a/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/solver.prototxt b/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/solver.prototxt new file mode 100644 index 00000000000..c33a9558c05 --- /dev/null +++ b/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/solver.prototxt @@ -0,0 +1,28 @@ +train_net: "models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/train.prototxt" +test_net: "models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/test.prototxt" +test_iter: 619 +test_interval: 10000 +base_lr: 0.001 +display: 10 +max_iter: 120000 +lr_policy: "multistep" +gamma: 0.1 +momentum: 0.9 +weight_decay: 0.0005 +snapshot: 80000 +snapshot_prefix: "models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/VGG_VOC0712_SSD_300x300" +solver_mode: CPU +device_id: 0 +debug_info: false +snapshot_after_train: true +test_initialization: false +average_loss: 10 +stepvalue: 80000 +stepvalue: 100000 +stepvalue: 120000 +iter_size: 1 +type: "SGD" +eval_type: "detection" +ap_version: "11point" + + diff --git a/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/test.prototxt b/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/test.prototxt new file mode 100644 index 00000000000..38152e56a63 --- /dev/null +++ b/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/test.prototxt @@ -0,0 +1,1673 @@ +name: "VGG_VOC0712_SSD_300x300_test" +layer { + name: "data" + type: "AnnotatedData" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mean_value: 104 + mean_value: 117 + mean_value: 123 + resize_param { + prob: 1 + resize_mode: WARP + height: 
300 + width: 300 + interp_mode: LINEAR + } + } + data_param { + source: "examples/VOC0712/VOC0712_test_lmdb" + batch_size: 8 + backend: LMDB + } + annotated_data_param { + batch_sampler { + } + label_map_file: "data/VOC0712/labelmap_voc.prototxt" + } +} +layer { + name: "conv1_1" + type: "Convolution" + bottom: "data" + top: "conv1_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_1" + type: "ReLU" + bottom: "conv1_1" + top: "conv1_1" +} +layer { + name: "conv1_2" + type: "Convolution" + bottom: "conv1_1" + top: "conv1_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_2" + type: "ReLU" + bottom: "conv1_2" + top: "conv1_2" +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1_2" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_1" + type: "Convolution" + bottom: "pool1" + top: "conv2_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_1" + type: "ReLU" + bottom: "conv2_1" + top: "conv2_1" +} +layer { + name: "conv2_2" + type: "Convolution" + bottom: "conv2_1" + top: "conv2_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_2" 
+ type: "ReLU" + bottom: "conv2_2" + top: "conv2_2" +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2_2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv3_1" + type: "Convolution" + bottom: "pool2" + top: "conv3_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_1" + type: "ReLU" + bottom: "conv3_1" + top: "conv3_1" +} +layer { + name: "conv3_2" + type: "Convolution" + bottom: "conv3_1" + top: "conv3_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_2" + type: "ReLU" + bottom: "conv3_2" + top: "conv3_2" +} +layer { + name: "conv3_3" + type: "Convolution" + bottom: "conv3_2" + top: "conv3_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_3" + type: "ReLU" + bottom: "conv3_3" + top: "conv3_3" +} +layer { + name: "pool3" + type: "Pooling" + bottom: "conv3_3" + top: "pool3" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv4_1" + type: "Convolution" + bottom: "pool3" + top: "conv4_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_1" + type: "ReLU" + bottom: "conv4_1" + top: "conv4_1" 
+} +layer { + name: "conv4_2" + type: "Convolution" + bottom: "conv4_1" + top: "conv4_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_2" + type: "ReLU" + bottom: "conv4_2" + top: "conv4_2" +} +layer { + name: "conv4_3" + type: "Convolution" + bottom: "conv4_2" + top: "conv4_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_3" + type: "ReLU" + bottom: "conv4_3" + top: "conv4_3" +} +layer { + name: "pool4" + type: "Pooling" + bottom: "conv4_3" + top: "pool4" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv5_1" + type: "Convolution" + bottom: "pool4" + top: "conv5_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_1" + type: "ReLU" + bottom: "conv5_1" + top: "conv5_1" +} +layer { + name: "conv5_2" + type: "Convolution" + bottom: "conv5_1" + top: "conv5_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_2" + type: "ReLU" + bottom: "conv5_2" + top: "conv5_2" +} +layer { + name: "conv5_3" + type: "Convolution" + bottom: "conv5_2" + top: "conv5_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + 
decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_3" + type: "ReLU" + bottom: "conv5_3" + top: "conv5_3" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5_3" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "fc6" + type: "Convolution" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + pad: 6 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 6 + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "fc7" + type: "Convolution" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + name: "conv6_1" + type: "Convolution" + bottom: "fc7" + top: "conv6_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_1_relu" + type: "ReLU" + bottom: "conv6_1" + top: "conv6_1" +} +layer { + name: "conv6_2" + type: "Convolution" + bottom: "conv6_1" + top: "conv6_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } 
+} +layer { + name: "conv6_2_relu" + type: "ReLU" + bottom: "conv6_2" + top: "conv6_2" +} +layer { + name: "conv7_1" + type: "Convolution" + bottom: "conv6_2" + top: "conv7_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_1_relu" + type: "ReLU" + bottom: "conv7_1" + top: "conv7_1" +} +layer { + name: "conv7_2" + type: "Convolution" + bottom: "conv7_1" + top: "conv7_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_relu" + type: "ReLU" + bottom: "conv7_2" + top: "conv7_2" +} +layer { + name: "conv8_1" + type: "Convolution" + bottom: "conv7_2" + top: "conv8_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_1_relu" + type: "ReLU" + bottom: "conv8_1" + top: "conv8_1" +} +layer { + name: "conv8_2" + type: "Convolution" + bottom: "conv8_1" + top: "conv8_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_relu" + type: "ReLU" + bottom: "conv8_2" + top: "conv8_2" +} +layer { + name: "conv9_1" + type: "Convolution" + bottom: "conv8_2" + top: "conv9_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_1_relu" + type: "ReLU" + bottom: "conv9_1" + top: "conv9_1" +} +layer { + name: "conv9_2" + type: "Convolution" + bottom: "conv9_1" + top: "conv9_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_relu" + type: "ReLU" + bottom: "conv9_2" + top: "conv9_2" +} +layer { + name: "conv4_3_norm" + type: "Normalize" + bottom: "conv4_3" + top: "conv4_3_norm" + norm_param { + across_spatial: false + scale_filler { + type: "constant" + value: 20 + } + channel_shared: false + } +} +layer { + name: "conv4_3_norm_mbox_loc" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_loc_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_loc" + top: "conv4_3_norm_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_loc_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_loc_perm" + top: "conv4_3_norm_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler 
{ + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_conf_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_conf" + top: "conv4_3_norm_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_conf_perm" + top: "conv4_3_norm_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_priorbox" + type: "PriorBox" + bottom: "conv4_3_norm" + bottom: "data" + top: "conv4_3_norm_mbox_priorbox" + prior_box_param { + min_size: 30.0 + max_size: 60.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 8 + offset: 0.5 + } +} +layer { + name: "fc7_mbox_loc" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc7_mbox_loc_perm" + type: "Permute" + bottom: "fc7_mbox_loc" + top: "fc7_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_loc_flat" + type: "Flatten" + bottom: "fc7_mbox_loc_perm" + top: "fc7_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_conf" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc7_mbox_conf_perm" + type: "Permute" + bottom: "fc7_mbox_conf" + top: "fc7_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: 
"fc7_mbox_conf_flat" + type: "Flatten" + bottom: "fc7_mbox_conf_perm" + top: "fc7_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_priorbox" + type: "PriorBox" + bottom: "fc7" + bottom: "data" + top: "fc7_mbox_priorbox" + prior_box_param { + min_size: 60.0 + max_size: 111.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 16 + offset: 0.5 + } +} +layer { + name: "conv6_2_mbox_loc" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_loc_perm" + type: "Permute" + bottom: "conv6_2_mbox_loc" + top: "conv6_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv6_2_mbox_loc_perm" + top: "conv6_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_conf" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_conf_perm" + type: "Permute" + bottom: "conv6_2_mbox_conf" + top: "conv6_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv6_2_mbox_conf_perm" + top: "conv6_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv6_2" + bottom: "data" + top: 
"conv6_2_mbox_priorbox" + prior_box_param { + min_size: 111.0 + max_size: 162.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 32 + offset: 0.5 + } +} +layer { + name: "conv7_2_mbox_loc" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_loc_perm" + type: "Permute" + bottom: "conv7_2_mbox_loc" + top: "conv7_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv7_2_mbox_loc_perm" + top: "conv7_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_conf" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_conf_perm" + type: "Permute" + bottom: "conv7_2_mbox_conf" + top: "conv7_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv7_2_mbox_conf_perm" + top: "conv7_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv7_2" + bottom: "data" + top: "conv7_2_mbox_priorbox" + prior_box_param { + min_size: 162.0 + max_size: 213.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 64 + offset: 
0.5 + } +} +layer { + name: "conv8_2_mbox_loc" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_loc_perm" + type: "Permute" + bottom: "conv8_2_mbox_loc" + top: "conv8_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv8_2_mbox_loc_perm" + top: "conv8_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_conf" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_conf_perm" + type: "Permute" + bottom: "conv8_2_mbox_conf" + top: "conv8_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv8_2_mbox_conf_perm" + top: "conv8_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv8_2" + bottom: "data" + top: "conv8_2_mbox_priorbox" + prior_box_param { + min_size: 213.0 + max_size: 264.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 100 + offset: 0.5 + } +} +layer { + name: "conv9_2_mbox_loc" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + 
pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_loc_perm" + type: "Permute" + bottom: "conv9_2_mbox_loc" + top: "conv9_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv9_2_mbox_loc_perm" + top: "conv9_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_conf" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_conf_perm" + type: "Permute" + bottom: "conv9_2_mbox_conf" + top: "conv9_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv9_2_mbox_conf_perm" + top: "conv9_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv9_2" + bottom: "data" + top: "conv9_2_mbox_priorbox" + prior_box_param { + min_size: 264.0 + max_size: 315.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 300 + offset: 0.5 + } +} +layer { + name: "mbox_loc" + type: "Concat" + bottom: "conv4_3_norm_mbox_loc_flat" + bottom: "fc7_mbox_loc_flat" + bottom: "conv6_2_mbox_loc_flat" + bottom: "conv7_2_mbox_loc_flat" + bottom: "conv8_2_mbox_loc_flat" + bottom: "conv9_2_mbox_loc_flat" + top: "mbox_loc" + concat_param { + axis: 1 + } + engine: "CAFFE" +} +layer { + name: "mbox_conf" + type: "Concat" + bottom: "conv4_3_norm_mbox_conf_flat" + bottom: "fc7_mbox_conf_flat" + bottom: "conv6_2_mbox_conf_flat" + 
bottom: "conv7_2_mbox_conf_flat" + bottom: "conv8_2_mbox_conf_flat" + bottom: "conv9_2_mbox_conf_flat" + top: "mbox_conf" + concat_param { + axis: 1 + } + engine: "CAFFE" +} +layer { + name: "mbox_priorbox" + type: "Concat" + bottom: "conv4_3_norm_mbox_priorbox" + bottom: "fc7_mbox_priorbox" + bottom: "conv6_2_mbox_priorbox" + bottom: "conv7_2_mbox_priorbox" + bottom: "conv8_2_mbox_priorbox" + bottom: "conv9_2_mbox_priorbox" + top: "mbox_priorbox" + concat_param { + axis: 2 + } + engine: "CAFFE" +} +layer { + name: "mbox_conf_reshape" + type: "Reshape" + bottom: "mbox_conf" + top: "mbox_conf_reshape" + reshape_param { + shape { + dim: 0 + dim: -1 + dim: 21 + } + } +} +layer { + name: "mbox_conf_softmax" + type: "Softmax" + bottom: "mbox_conf_reshape" + top: "mbox_conf_softmax" + softmax_param { + axis: 2 + } +} +layer { + name: "mbox_conf_flatten" + type: "Flatten" + bottom: "mbox_conf_softmax" + top: "mbox_conf_flatten" + flatten_param { + axis: 1 + } +} +layer { + name: "detection_out" + type: "DetectionOutput" + bottom: "mbox_loc" + bottom: "mbox_conf_flatten" + bottom: "mbox_priorbox" + top: "detection_out" + include { + phase: TEST + } + detection_output_param { + num_classes: 21 + share_location: true + background_label_id: 0 + nms_param { + nms_threshold: 0.45 + top_k: 400 + } + save_output_param { + output_directory: "data/ssd_out/VOC2007/SSD_300x300" + output_name_prefix: "comp4_det_test_" + output_format: "VOC" + label_map_file: "data/VOC0712/labelmap_voc.prototxt" + name_size_file: "data/VOC0712/test_name_size.txt" + num_test_image: 4952 + } + code_type: CENTER_SIZE + keep_top_k: 200 + confidence_threshold: 0.01 + } +} +layer { + name: "detection_eval" + type: "DetectionEvaluate" + bottom: "detection_out" + bottom: "label" + top: "detection_eval" + include { + phase: TEST + } + detection_evaluate_param { + num_classes: 21 + background_label_id: 0 + overlap_threshold: 0.5 + evaluate_difficult_gt: false + name_size_file: "data/VOC0712/test_name_size.txt" + 
} +} + diff --git a/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/train.prototxt b/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/train.prototxt new file mode 100644 index 00000000000..451a5bad913 --- /dev/null +++ b/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300/train.prototxt @@ -0,0 +1,1735 @@ +name: "VGG_VOC0712_SSD_300x300_train" +layer { + name: "data" + type: "AnnotatedData" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + mean_value: 104 + mean_value: 117 + mean_value: 123 + resize_param { + prob: 1 + resize_mode: WARP + height: 300 + width: 300 + interp_mode: LINEAR + interp_mode: AREA + interp_mode: NEAREST + interp_mode: CUBIC + interp_mode: LANCZOS4 + } + emit_constraint { + emit_type: CENTER + } + distort_param { + brightness_prob: 0.5 + brightness_delta: 32 + contrast_prob: 0.5 + contrast_lower: 0.5 + contrast_upper: 1.5 + hue_prob: 0.5 + hue_delta: 18 + saturation_prob: 0.5 + saturation_lower: 0.5 + saturation_upper: 1.5 + random_order_prob: 0.0 + } + expand_param { + prob: 0.5 + max_expand_ratio: 4.0 + } + } + data_param { + source: "examples/VOC0712/VOC0712_trainval_lmdb" + batch_size: 32 + backend: LMDB + } + annotated_data_param { + batch_sampler { + max_sample: 1 + max_trials: 1 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + min_jaccard_overlap: 0.1 + } + max_sample: 1 + max_trials: 50 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + min_jaccard_overlap: 0.3 + } + max_sample: 1 + max_trials: 50 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + min_jaccard_overlap: 0.5 + } + max_sample: 1 + max_trials: 50 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + 
min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + min_jaccard_overlap: 0.7 + } + max_sample: 1 + max_trials: 50 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + min_jaccard_overlap: 0.9 + } + max_sample: 1 + max_trials: 50 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + max_jaccard_overlap: 1.0 + } + max_sample: 1 + max_trials: 50 + } + label_map_file: "data/VOC0712/labelmap_voc.prototxt" + } +} +layer { + name: "conv1_1" + type: "Convolution" + bottom: "data" + top: "conv1_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_1" + type: "ReLU" + bottom: "conv1_1" + top: "conv1_1" +} +layer { + name: "conv1_2" + type: "Convolution" + bottom: "conv1_1" + top: "conv1_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_2" + type: "ReLU" + bottom: "conv1_2" + top: "conv1_2" +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1_2" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_1" + type: "Convolution" + bottom: "pool1" + top: "conv2_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_1" + type: "ReLU" + bottom: "conv2_1" + top: "conv2_1" +} 
+layer { + name: "conv2_2" + type: "Convolution" + bottom: "conv2_1" + top: "conv2_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_2" + type: "ReLU" + bottom: "conv2_2" + top: "conv2_2" +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2_2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv3_1" + type: "Convolution" + bottom: "pool2" + top: "conv3_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_1" + type: "ReLU" + bottom: "conv3_1" + top: "conv3_1" +} +layer { + name: "conv3_2" + type: "Convolution" + bottom: "conv3_1" + top: "conv3_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_2" + type: "ReLU" + bottom: "conv3_2" + top: "conv3_2" +} +layer { + name: "conv3_3" + type: "Convolution" + bottom: "conv3_2" + top: "conv3_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_3" + type: "ReLU" + bottom: "conv3_3" + top: "conv3_3" +} +layer { + name: "pool3" + type: "Pooling" + bottom: "conv3_3" + top: "pool3" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv4_1" + type: "Convolution" 
+ bottom: "pool3" + top: "conv4_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_1" + type: "ReLU" + bottom: "conv4_1" + top: "conv4_1" +} +layer { + name: "conv4_2" + type: "Convolution" + bottom: "conv4_1" + top: "conv4_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_2" + type: "ReLU" + bottom: "conv4_2" + top: "conv4_2" +} +layer { + name: "conv4_3" + type: "Convolution" + bottom: "conv4_2" + top: "conv4_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_3" + type: "ReLU" + bottom: "conv4_3" + top: "conv4_3" +} +layer { + name: "pool4" + type: "Pooling" + bottom: "conv4_3" + top: "pool4" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv5_1" + type: "Convolution" + bottom: "pool4" + top: "conv5_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_1" + type: "ReLU" + bottom: "conv5_1" + top: "conv5_1" +} +layer { + name: "conv5_2" + type: "Convolution" + bottom: "conv5_1" + top: "conv5_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + 
kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_2" + type: "ReLU" + bottom: "conv5_2" + top: "conv5_2" +} +layer { + name: "conv5_3" + type: "Convolution" + bottom: "conv5_2" + top: "conv5_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_3" + type: "ReLU" + bottom: "conv5_3" + top: "conv5_3" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5_3" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "fc6" + type: "Convolution" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + pad: 6 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 6 + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "fc7" + type: "Convolution" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + name: "conv6_1" + type: "Convolution" + bottom: "fc7" + top: "conv6_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_1_relu" + type: "ReLU" + bottom: "conv6_1" + 
top: "conv6_1" +} +layer { + name: "conv6_2" + type: "Convolution" + bottom: "conv6_1" + top: "conv6_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_relu" + type: "ReLU" + bottom: "conv6_2" + top: "conv6_2" +} +layer { + name: "conv7_1" + type: "Convolution" + bottom: "conv6_2" + top: "conv7_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_1_relu" + type: "ReLU" + bottom: "conv7_1" + top: "conv7_1" +} +layer { + name: "conv7_2" + type: "Convolution" + bottom: "conv7_1" + top: "conv7_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_relu" + type: "ReLU" + bottom: "conv7_2" + top: "conv7_2" +} +layer { + name: "conv8_1" + type: "Convolution" + bottom: "conv7_2" + top: "conv8_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_1_relu" + type: "ReLU" + bottom: "conv8_1" + top: "conv8_1" +} +layer { + name: "conv8_2" + type: "Convolution" + bottom: "conv8_1" + top: "conv8_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 
+ weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_relu" + type: "ReLU" + bottom: "conv8_2" + top: "conv8_2" +} +layer { + name: "conv9_1" + type: "Convolution" + bottom: "conv8_2" + top: "conv9_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_1_relu" + type: "ReLU" + bottom: "conv9_1" + top: "conv9_1" +} +layer { + name: "conv9_2" + type: "Convolution" + bottom: "conv9_1" + top: "conv9_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_relu" + type: "ReLU" + bottom: "conv9_2" + top: "conv9_2" +} +layer { + name: "conv4_3_norm" + type: "Normalize" + bottom: "conv4_3" + top: "conv4_3_norm" + norm_param { + across_spatial: false + scale_filler { + type: "constant" + value: 20 + } + channel_shared: false + } +} +layer { + name: "conv4_3_norm_mbox_loc" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_loc_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_loc" + top: "conv4_3_norm_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_loc_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_loc_perm" + top: "conv4_3_norm_mbox_loc_flat" + 
flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_conf_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_conf" + top: "conv4_3_norm_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_conf_perm" + top: "conv4_3_norm_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_priorbox" + type: "PriorBox" + bottom: "conv4_3_norm" + bottom: "data" + top: "conv4_3_norm_mbox_priorbox" + prior_box_param { + min_size: 30.0 + max_size: 60.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 8 + offset: 0.5 + } +} +layer { + name: "fc7_mbox_loc" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc7_mbox_loc_perm" + type: "Permute" + bottom: "fc7_mbox_loc" + top: "fc7_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_loc_flat" + type: "Flatten" + bottom: "fc7_mbox_loc_perm" + top: "fc7_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_conf" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc7_mbox_conf_perm" + type: "Permute" + bottom: "fc7_mbox_conf" + top: "fc7_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_conf_flat" + type: "Flatten" + bottom: "fc7_mbox_conf_perm" + top: "fc7_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_priorbox" + type: "PriorBox" + bottom: "fc7" + bottom: "data" + top: "fc7_mbox_priorbox" + prior_box_param { + min_size: 60.0 + max_size: 111.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 16 + offset: 0.5 + } +} +layer { + name: "conv6_2_mbox_loc" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_loc_perm" + type: "Permute" + bottom: "conv6_2_mbox_loc" + top: "conv6_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv6_2_mbox_loc_perm" + top: "conv6_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_conf" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_conf_perm" + type: "Permute" + bottom: "conv6_2_mbox_conf" + 
top: "conv6_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv6_2_mbox_conf_perm" + top: "conv6_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv6_2" + bottom: "data" + top: "conv6_2_mbox_priorbox" + prior_box_param { + min_size: 111.0 + max_size: 162.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 32 + offset: 0.5 + } +} +layer { + name: "conv7_2_mbox_loc" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_loc_perm" + type: "Permute" + bottom: "conv7_2_mbox_loc" + top: "conv7_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv7_2_mbox_loc_perm" + top: "conv7_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_conf" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_conf_perm" + type: "Permute" + bottom: "conv7_2_mbox_conf" + top: "conv7_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv7_2_mbox_conf_perm" + top: "conv7_2_mbox_conf_flat" + 
flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv7_2" + bottom: "data" + top: "conv7_2_mbox_priorbox" + prior_box_param { + min_size: 162.0 + max_size: 213.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 64 + offset: 0.5 + } +} +layer { + name: "conv8_2_mbox_loc" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_loc_perm" + type: "Permute" + bottom: "conv8_2_mbox_loc" + top: "conv8_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv8_2_mbox_loc_perm" + top: "conv8_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_conf" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_conf_perm" + type: "Permute" + bottom: "conv8_2_mbox_conf" + top: "conv8_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv8_2_mbox_conf_perm" + top: "conv8_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv8_2" + bottom: "data" + top: "conv8_2_mbox_priorbox" + prior_box_param { + min_size: 213.0 + max_size: 264.0 + 
aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 100 + offset: 0.5 + } +} +layer { + name: "conv9_2_mbox_loc" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_loc_perm" + type: "Permute" + bottom: "conv9_2_mbox_loc" + top: "conv9_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv9_2_mbox_loc_perm" + top: "conv9_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_conf" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_conf_perm" + type: "Permute" + bottom: "conv9_2_mbox_conf" + top: "conv9_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv9_2_mbox_conf_perm" + top: "conv9_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv9_2" + bottom: "data" + top: "conv9_2_mbox_priorbox" + prior_box_param { + min_size: 264.0 + max_size: 315.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 300 + offset: 0.5 + } +} +layer { + name: "mbox_loc" + type: "Concat" + bottom: "conv4_3_norm_mbox_loc_flat" + bottom: 
"fc7_mbox_loc_flat" + bottom: "conv6_2_mbox_loc_flat" + bottom: "conv7_2_mbox_loc_flat" + bottom: "conv8_2_mbox_loc_flat" + bottom: "conv9_2_mbox_loc_flat" + top: "mbox_loc" + concat_param { + axis: 1 + } + engine: "CAFFE" +} +layer { + name: "mbox_conf" + type: "Concat" + bottom: "conv4_3_norm_mbox_conf_flat" + bottom: "fc7_mbox_conf_flat" + bottom: "conv6_2_mbox_conf_flat" + bottom: "conv7_2_mbox_conf_flat" + bottom: "conv8_2_mbox_conf_flat" + bottom: "conv9_2_mbox_conf_flat" + top: "mbox_conf" + concat_param { + axis: 1 + } + engine: "CAFFE" +} +layer { + name: "mbox_priorbox" + type: "Concat" + bottom: "conv4_3_norm_mbox_priorbox" + bottom: "fc7_mbox_priorbox" + bottom: "conv6_2_mbox_priorbox" + bottom: "conv7_2_mbox_priorbox" + bottom: "conv8_2_mbox_priorbox" + bottom: "conv9_2_mbox_priorbox" + top: "mbox_priorbox" + concat_param { + axis: 2 + } + engine: "CAFFE" +} +layer { + name: "mbox_loss" + type: "MultiBoxLoss" + bottom: "mbox_loc" + bottom: "mbox_conf" + bottom: "mbox_priorbox" + bottom: "label" + top: "mbox_loss" + include { + phase: TRAIN + } + propagate_down: true + propagate_down: true + propagate_down: false + propagate_down: false + loss_param { + normalization: VALID + } + multibox_loss_param { + loc_loss_type: SMOOTH_L1 + conf_loss_type: SOFTMAX + loc_weight: 1.0 + num_classes: 21 + share_location: true + match_type: PER_PREDICTION + overlap_threshold: 0.5 + use_prior_for_matching: true + background_label_id: 0 + use_difficult_gt: true + neg_pos_ratio: 3.0 + neg_overlap: 0.5 + code_type: CENTER_SIZE + ignore_cross_boundary_bbox: false + mining_type: MAX_NEGATIVE + } +} + diff --git a/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300_webcam/test.prototxt b/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300_webcam/test.prototxt new file mode 100644 index 00000000000..eaff4c56e38 --- /dev/null +++ b/models/intel_optimized_models/ssd/VGGNet/VOC0712/SSD_300x300_webcam/test.prototxt @@ -0,0 +1,1668 @@ +name: 
"VGG_VOC0712_SSD_300x300_test" +layer { + name: "data" + type: "VideoData" + top: "data" + transform_param { + mean_value: 104 + mean_value: 117 + mean_value: 123 + resize_param { + prob: 1 + resize_mode: WARP + height: 300 + width: 300 + interp_mode: LINEAR + } + } + data_param { + batch_size: 1 + } + video_data_param { + video_type: WEBCAM + device_id: 0 + skip_frames: 0 + } +} +layer { + name: "conv1_1" + type: "Convolution" + bottom: "data" + top: "conv1_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_1" + type: "ReLU" + bottom: "conv1_1" + top: "conv1_1" +} +layer { + name: "conv1_2" + type: "Convolution" + bottom: "conv1_1" + top: "conv1_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_2" + type: "ReLU" + bottom: "conv1_2" + top: "conv1_2" +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1_2" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_1" + type: "Convolution" + bottom: "pool1" + top: "conv2_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_1" + type: "ReLU" + bottom: "conv2_1" + top: "conv2_1" +} +layer { + name: "conv2_2" + type: "Convolution" + bottom: "conv2_1" + top: "conv2_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + 
kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_2" + type: "ReLU" + bottom: "conv2_2" + top: "conv2_2" +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2_2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv3_1" + type: "Convolution" + bottom: "pool2" + top: "conv3_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_1" + type: "ReLU" + bottom: "conv3_1" + top: "conv3_1" +} +layer { + name: "conv3_2" + type: "Convolution" + bottom: "conv3_1" + top: "conv3_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_2" + type: "ReLU" + bottom: "conv3_2" + top: "conv3_2" +} +layer { + name: "conv3_3" + type: "Convolution" + bottom: "conv3_2" + top: "conv3_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_3" + type: "ReLU" + bottom: "conv3_3" + top: "conv3_3" +} +layer { + name: "pool3" + type: "Pooling" + bottom: "conv3_3" + top: "pool3" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv4_1" + type: "Convolution" + bottom: "pool3" + top: "conv4_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + 
} + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_1" + type: "ReLU" + bottom: "conv4_1" + top: "conv4_1" +} +layer { + name: "conv4_2" + type: "Convolution" + bottom: "conv4_1" + top: "conv4_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_2" + type: "ReLU" + bottom: "conv4_2" + top: "conv4_2" +} +layer { + name: "conv4_3" + type: "Convolution" + bottom: "conv4_2" + top: "conv4_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_3" + type: "ReLU" + bottom: "conv4_3" + top: "conv4_3" +} +layer { + name: "pool4" + type: "Pooling" + bottom: "conv4_3" + top: "pool4" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv5_1" + type: "Convolution" + bottom: "pool4" + top: "conv5_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_1" + type: "ReLU" + bottom: "conv5_1" + top: "conv5_1" +} +layer { + name: "conv5_2" + type: "Convolution" + bottom: "conv5_1" + top: "conv5_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_2" + type: "ReLU" + bottom: "conv5_2" + top: "conv5_2" +} +layer { + name: 
"conv5_3" + type: "Convolution" + bottom: "conv5_2" + top: "conv5_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_3" + type: "ReLU" + bottom: "conv5_3" + top: "conv5_3" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5_3" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "fc6" + type: "Convolution" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + pad: 6 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 6 + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "fc7" + type: "Convolution" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + name: "conv6_1" + type: "Convolution" + bottom: "fc7" + top: "conv6_1" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_1_relu" + type: "ReLU" + bottom: "conv6_1" + top: "conv6_1" +} +layer { + name: "conv6_2" + type: "Convolution" + bottom: "conv6_1" + top: "conv6_2" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + 
num_output: 512 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_relu" + type: "ReLU" + bottom: "conv6_2" + top: "conv6_2" +} +layer { + name: "conv7_1" + type: "Convolution" + bottom: "conv6_2" + top: "conv7_1" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_1_relu" + type: "ReLU" + bottom: "conv7_1" + top: "conv7_1" +} +layer { + name: "conv7_2" + type: "Convolution" + bottom: "conv7_1" + top: "conv7_2" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_relu" + type: "ReLU" + bottom: "conv7_2" + top: "conv7_2" +} +layer { + name: "conv8_1" + type: "Convolution" + bottom: "conv7_2" + top: "conv8_1" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_1_relu" + type: "ReLU" + bottom: "conv8_1" + top: "conv8_1" +} +layer { + name: "conv8_2" + type: "Convolution" + bottom: "conv8_1" + top: "conv8_2" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_relu" + type: "ReLU" + bottom: "conv8_2" + top: "conv8_2" +} +layer { + 
name: "conv9_1" + type: "Convolution" + bottom: "conv8_2" + top: "conv9_1" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_1_relu" + type: "ReLU" + bottom: "conv9_1" + top: "conv9_1" +} +layer { + name: "conv9_2" + type: "Convolution" + bottom: "conv9_1" + top: "conv9_2" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_relu" + type: "ReLU" + bottom: "conv9_2" + top: "conv9_2" +} +layer { + name: "conv4_3_norm" + type: "Normalize" + bottom: "conv4_3" + top: "conv4_3_norm" + norm_param { + across_spatial: false + scale_filler { + type: "constant" + value: 20 + } + channel_shared: false + } +} +layer { + name: "conv4_3_norm_mbox_loc" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_loc" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_loc_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_loc" + top: "conv4_3_norm_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_loc_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_loc_perm" + top: "conv4_3_norm_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_conf" + param { + lr_mult: 1.0 + decay_mult: 1 
+ } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_conf_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_conf" + top: "conv4_3_norm_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_conf_perm" + top: "conv4_3_norm_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_priorbox" + type: "PriorBox" + bottom: "conv4_3_norm" + bottom: "data" + top: "conv4_3_norm_mbox_priorbox" + prior_box_param { + min_size: 30.0 + max_size: 60.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 8 + offset: 0.5 + } +} +layer { + name: "fc7_mbox_loc" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_loc" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc7_mbox_loc_perm" + type: "Permute" + bottom: "fc7_mbox_loc" + top: "fc7_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_loc_flat" + type: "Flatten" + bottom: "fc7_mbox_loc_perm" + top: "fc7_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_conf" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_conf" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: 
"fc7_mbox_conf_perm" + type: "Permute" + bottom: "fc7_mbox_conf" + top: "fc7_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_conf_flat" + type: "Flatten" + bottom: "fc7_mbox_conf_perm" + top: "fc7_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_priorbox" + type: "PriorBox" + bottom: "fc7" + bottom: "data" + top: "fc7_mbox_priorbox" + prior_box_param { + min_size: 60.0 + max_size: 111.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 16 + offset: 0.5 + } +} +layer { + name: "conv6_2_mbox_loc" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_loc" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_loc_perm" + type: "Permute" + bottom: "conv6_2_mbox_loc" + top: "conv6_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv6_2_mbox_loc_perm" + top: "conv6_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_conf" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_conf" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_conf_perm" + type: "Permute" + bottom: "conv6_2_mbox_conf" + top: "conv6_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_conf_flat" + type: "Flatten" + bottom: 
"conv6_2_mbox_conf_perm" + top: "conv6_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv6_2" + bottom: "data" + top: "conv6_2_mbox_priorbox" + prior_box_param { + min_size: 111.0 + max_size: 162.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 32 + offset: 0.5 + } +} +layer { + name: "conv7_2_mbox_loc" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_loc" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_loc_perm" + type: "Permute" + bottom: "conv7_2_mbox_loc" + top: "conv7_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv7_2_mbox_loc_perm" + top: "conv7_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_conf" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_conf" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 126 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_conf_perm" + type: "Permute" + bottom: "conv7_2_mbox_conf" + top: "conv7_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv7_2_mbox_conf_perm" + top: "conv7_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv7_2" + bottom: "data" + top: "conv7_2_mbox_priorbox" 
+ prior_box_param { + min_size: 162.0 + max_size: 213.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 64 + offset: 0.5 + } +} +layer { + name: "conv8_2_mbox_loc" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_loc" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_loc_perm" + type: "Permute" + bottom: "conv8_2_mbox_loc" + top: "conv8_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv8_2_mbox_loc_perm" + top: "conv8_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_conf" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_conf" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_conf_perm" + type: "Permute" + bottom: "conv8_2_mbox_conf" + top: "conv8_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv8_2_mbox_conf_perm" + top: "conv8_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv8_2" + bottom: "data" + top: "conv8_2_mbox_priorbox" + prior_box_param { + min_size: 213.0 + max_size: 264.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 100 + offset: 0.5 + } +} +layer { + name: 
"conv9_2_mbox_loc" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_loc" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_loc_perm" + type: "Permute" + bottom: "conv9_2_mbox_loc" + top: "conv9_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv9_2_mbox_loc_perm" + top: "conv9_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_conf" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_conf" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 84 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_conf_perm" + type: "Permute" + bottom: "conv9_2_mbox_conf" + top: "conv9_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv9_2_mbox_conf_perm" + top: "conv9_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv9_2" + bottom: "data" + top: "conv9_2_mbox_priorbox" + prior_box_param { + min_size: 264.0 + max_size: 315.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 300 + offset: 0.5 + } +} +layer { + name: "mbox_loc" + type: "Concat" + bottom: "conv4_3_norm_mbox_loc_flat" + bottom: "fc7_mbox_loc_flat" + bottom: "conv6_2_mbox_loc_flat" + bottom: "conv7_2_mbox_loc_flat" + bottom: "conv8_2_mbox_loc_flat" + bottom: "conv9_2_mbox_loc_flat" + 
top: "mbox_loc" + concat_param { + axis: 1 + engine: CAFFE + } +} +layer { + name: "mbox_conf" + type: "Concat" + bottom: "conv4_3_norm_mbox_conf_flat" + bottom: "fc7_mbox_conf_flat" + bottom: "conv6_2_mbox_conf_flat" + bottom: "conv7_2_mbox_conf_flat" + bottom: "conv8_2_mbox_conf_flat" + bottom: "conv9_2_mbox_conf_flat" + top: "mbox_conf" + concat_param { + axis: 1 + engine: CAFFE + } +} +layer { + name: "mbox_priorbox" + type: "Concat" + bottom: "conv4_3_norm_mbox_priorbox" + bottom: "fc7_mbox_priorbox" + bottom: "conv6_2_mbox_priorbox" + bottom: "conv7_2_mbox_priorbox" + bottom: "conv8_2_mbox_priorbox" + bottom: "conv9_2_mbox_priorbox" + top: "mbox_priorbox" + concat_param { + axis: 2 + engine: CAFFE + } +} +layer { + name: "mbox_conf_reshape" + type: "Reshape" + bottom: "mbox_conf" + top: "mbox_conf_reshape" + reshape_param { + shape { + dim: 0 + dim: -1 + dim: 21 + } + } +} +layer { + name: "mbox_conf_softmax" + type: "Softmax" + bottom: "mbox_conf_reshape" + top: "mbox_conf_softmax" + softmax_param { + axis: 2 + } +} +layer { + name: "mbox_conf_flatten" + type: "Flatten" + bottom: "mbox_conf_softmax" + top: "mbox_conf_flatten" + flatten_param { + axis: 1 + } +} +layer { + name: "detection_out" + type: "DetectionOutput" + bottom: "mbox_loc" + bottom: "mbox_conf_flatten" + bottom: "mbox_priorbox" + bottom: "data" + top: "detection_out" + include { + phase: TEST + } + transform_param { + mean_value: 104 + mean_value: 117 + mean_value: 123 + resize_param { + prob: 1 + resize_mode: WARP + height: 720 + width: 960 + interp_mode: LINEAR + } + } + detection_output_param { + num_classes: 21 + share_location: true + background_label_id: 0 + nms_param { + nms_threshold: 0.45 + top_k: 400 + } + save_output_param { + label_map_file: "data/VOC0712/labelmap_voc.prototxt" + } + code_type: CENTER_SIZE + keep_top_k: 200 + confidence_threshold: 0.01 + visualize: true + visualize_threshold: 0.6 + } +} +layer { + name: "slience" + type: "Silence" + bottom: "detection_out" + 
include { + phase: TEST + } +} + diff --git a/models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300/deploy.prototxt b/models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300/deploy.prototxt new file mode 100644 index 00000000000..fb56a59f1b5 --- /dev/null +++ b/models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300/deploy.prototxt @@ -0,0 +1,1632 @@ +name: "VGG_coco_SSD_300x300_deploy" +input: "data" +input_shape { + dim: 1 + dim: 3 + dim: 300 + dim: 300 +} +layer { + name: "conv1_1" + type: "Convolution" + bottom: "data" + top: "conv1_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_1" + type: "ReLU" + bottom: "conv1_1" + top: "conv1_1" +} +layer { + name: "conv1_2" + type: "Convolution" + bottom: "conv1_1" + top: "conv1_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_2" + type: "ReLU" + bottom: "conv1_2" + top: "conv1_2" +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1_2" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_1" + type: "Convolution" + bottom: "pool1" + top: "conv2_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_1" + type: "ReLU" + bottom: "conv2_1" + top: "conv2_1" +} +layer { + name: "conv2_2" + type: "Convolution" + bottom: "conv2_1" + top: "conv2_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + 
lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_2" + type: "ReLU" + bottom: "conv2_2" + top: "conv2_2" +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2_2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv3_1" + type: "Convolution" + bottom: "pool2" + top: "conv3_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_1" + type: "ReLU" + bottom: "conv3_1" + top: "conv3_1" +} +layer { + name: "conv3_2" + type: "Convolution" + bottom: "conv3_1" + top: "conv3_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_2" + type: "ReLU" + bottom: "conv3_2" + top: "conv3_2" +} +layer { + name: "conv3_3" + type: "Convolution" + bottom: "conv3_2" + top: "conv3_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_3" + type: "ReLU" + bottom: "conv3_3" + top: "conv3_3" +} +layer { + name: "pool3" + type: "Pooling" + bottom: "conv3_3" + top: "pool3" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv4_1" + type: "Convolution" + bottom: "pool3" + top: "conv4_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param 
{ + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_1" + type: "ReLU" + bottom: "conv4_1" + top: "conv4_1" +} +layer { + name: "conv4_2" + type: "Convolution" + bottom: "conv4_1" + top: "conv4_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_2" + type: "ReLU" + bottom: "conv4_2" + top: "conv4_2" +} +layer { + name: "conv4_3" + type: "Convolution" + bottom: "conv4_2" + top: "conv4_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_3" + type: "ReLU" + bottom: "conv4_3" + top: "conv4_3" +} +layer { + name: "pool4" + type: "Pooling" + bottom: "conv4_3" + top: "pool4" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv5_1" + type: "Convolution" + bottom: "pool4" + top: "conv5_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_1" + type: "ReLU" + bottom: "conv5_1" + top: "conv5_1" +} +layer { + name: "conv5_2" + type: "Convolution" + bottom: "conv5_1" + top: "conv5_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: 
"relu5_2" + type: "ReLU" + bottom: "conv5_2" + top: "conv5_2" +} +layer { + name: "conv5_3" + type: "Convolution" + bottom: "conv5_2" + top: "conv5_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_3" + type: "ReLU" + bottom: "conv5_3" + top: "conv5_3" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5_3" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "fc6" + type: "Convolution" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + pad: 6 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 6 + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "fc7" + type: "Convolution" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + name: "conv6_1" + type: "Convolution" + bottom: "fc7" + top: "conv6_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_1_relu" + type: "ReLU" + bottom: "conv6_1" + top: "conv6_1" +} +layer { + name: "conv6_2" + type: "Convolution" + bottom: "conv6_1" + top: "conv6_2" + param { + lr_mult: 1 + decay_mult: 1 
+ } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_relu" + type: "ReLU" + bottom: "conv6_2" + top: "conv6_2" +} +layer { + name: "conv7_1" + type: "Convolution" + bottom: "conv6_2" + top: "conv7_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_1_relu" + type: "ReLU" + bottom: "conv7_1" + top: "conv7_1" +} +layer { + name: "conv7_2" + type: "Convolution" + bottom: "conv7_1" + top: "conv7_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_relu" + type: "ReLU" + bottom: "conv7_2" + top: "conv7_2" +} +layer { + name: "conv8_1" + type: "Convolution" + bottom: "conv7_2" + top: "conv8_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_1_relu" + type: "ReLU" + bottom: "conv8_1" + top: "conv8_1" +} +layer { + name: "conv8_2" + type: "Convolution" + bottom: "conv8_1" + top: "conv8_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_relu" + type: "ReLU" 
+ bottom: "conv8_2" + top: "conv8_2" +} +layer { + name: "conv9_1" + type: "Convolution" + bottom: "conv8_2" + top: "conv9_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_1_relu" + type: "ReLU" + bottom: "conv9_1" + top: "conv9_1" +} +layer { + name: "conv9_2" + type: "Convolution" + bottom: "conv9_1" + top: "conv9_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_relu" + type: "ReLU" + bottom: "conv9_2" + top: "conv9_2" +} +layer { + name: "conv4_3_norm" + type: "Normalize" + bottom: "conv4_3" + top: "conv4_3_norm" + norm_param { + across_spatial: false + scale_filler { + type: "constant" + value: 20 + } + channel_shared: false + } +} +layer { + name: "conv4_3_norm_mbox_loc" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_loc_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_loc" + top: "conv4_3_norm_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_loc_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_loc_perm" + top: "conv4_3_norm_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_conf" + 
param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 324 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_conf_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_conf" + top: "conv4_3_norm_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_conf_perm" + top: "conv4_3_norm_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_priorbox" + type: "PriorBox" + bottom: "conv4_3_norm" + bottom: "data" + top: "conv4_3_norm_mbox_priorbox" + prior_box_param { + min_size: 21.0 + max_size: 45.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 8 + offset: 0.5 + } +} +layer { + name: "fc7_mbox_loc" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc7_mbox_loc_perm" + type: "Permute" + bottom: "fc7_mbox_loc" + top: "fc7_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_loc_flat" + type: "Flatten" + bottom: "fc7_mbox_loc_perm" + top: "fc7_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_conf" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 486 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } 
+ } +} +layer { + name: "fc7_mbox_conf_perm" + type: "Permute" + bottom: "fc7_mbox_conf" + top: "fc7_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_conf_flat" + type: "Flatten" + bottom: "fc7_mbox_conf_perm" + top: "fc7_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_priorbox" + type: "PriorBox" + bottom: "fc7" + bottom: "data" + top: "fc7_mbox_priorbox" + prior_box_param { + min_size: 45.0 + max_size: 99.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 16 + offset: 0.5 + } +} +layer { + name: "conv6_2_mbox_loc" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_loc_perm" + type: "Permute" + bottom: "conv6_2_mbox_loc" + top: "conv6_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv6_2_mbox_loc_perm" + top: "conv6_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_conf" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 486 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_conf_perm" + type: "Permute" + bottom: "conv6_2_mbox_conf" + top: "conv6_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_conf_flat" + type: "Flatten" + 
bottom: "conv6_2_mbox_conf_perm" + top: "conv6_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv6_2" + bottom: "data" + top: "conv6_2_mbox_priorbox" + prior_box_param { + min_size: 99.0 + max_size: 153.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 32 + offset: 0.5 + } +} +layer { + name: "conv7_2_mbox_loc" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_loc_perm" + type: "Permute" + bottom: "conv7_2_mbox_loc" + top: "conv7_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv7_2_mbox_loc_perm" + top: "conv7_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_conf" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 486 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_conf_perm" + type: "Permute" + bottom: "conv7_2_mbox_conf" + top: "conv7_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv7_2_mbox_conf_perm" + top: "conv7_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv7_2" + bottom: "data" + top: "conv7_2_mbox_priorbox" 
+ prior_box_param { + min_size: 153.0 + max_size: 207.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 64 + offset: 0.5 + } +} +layer { + name: "conv8_2_mbox_loc" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_loc_perm" + type: "Permute" + bottom: "conv8_2_mbox_loc" + top: "conv8_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv8_2_mbox_loc_perm" + top: "conv8_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_conf" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 324 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_conf_perm" + type: "Permute" + bottom: "conv8_2_mbox_conf" + top: "conv8_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv8_2_mbox_conf_perm" + top: "conv8_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv8_2" + bottom: "data" + top: "conv8_2_mbox_priorbox" + prior_box_param { + min_size: 207.0 + max_size: 261.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 100 + offset: 0.5 + } +} +layer { + name: 
"conv9_2_mbox_loc" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_loc_perm" + type: "Permute" + bottom: "conv9_2_mbox_loc" + top: "conv9_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv9_2_mbox_loc_perm" + top: "conv9_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_conf" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 324 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_conf_perm" + type: "Permute" + bottom: "conv9_2_mbox_conf" + top: "conv9_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv9_2_mbox_conf_perm" + top: "conv9_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv9_2" + bottom: "data" + top: "conv9_2_mbox_priorbox" + prior_box_param { + min_size: 261.0 + max_size: 315.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 300 + offset: 0.5 + } +} +layer { + name: "mbox_loc" + type: "Concat" + bottom: "conv4_3_norm_mbox_loc_flat" + bottom: "fc7_mbox_loc_flat" + bottom: "conv6_2_mbox_loc_flat" + bottom: "conv7_2_mbox_loc_flat" + bottom: "conv8_2_mbox_loc_flat" + bottom: "conv9_2_mbox_loc_flat" + top: 
"mbox_loc" + concat_param { + axis: 1 + engine: CAFFE + } +} +layer { + name: "mbox_conf" + type: "Concat" + bottom: "conv4_3_norm_mbox_conf_flat" + bottom: "fc7_mbox_conf_flat" + bottom: "conv6_2_mbox_conf_flat" + bottom: "conv7_2_mbox_conf_flat" + bottom: "conv8_2_mbox_conf_flat" + bottom: "conv9_2_mbox_conf_flat" + top: "mbox_conf" + concat_param { + axis: 1 + engine: CAFFE + } +} +layer { + name: "mbox_priorbox" + type: "Concat" + bottom: "conv4_3_norm_mbox_priorbox" + bottom: "fc7_mbox_priorbox" + bottom: "conv6_2_mbox_priorbox" + bottom: "conv7_2_mbox_priorbox" + bottom: "conv8_2_mbox_priorbox" + bottom: "conv9_2_mbox_priorbox" + top: "mbox_priorbox" + concat_param { + axis: 2 + engine: CAFFE + } +} +layer { + name: "mbox_conf_reshape" + type: "Reshape" + bottom: "mbox_conf" + top: "mbox_conf_reshape" + reshape_param { + shape { + dim: 0 + dim: -1 + dim: 81 + } + } +} +layer { + name: "mbox_conf_softmax" + type: "Softmax" + bottom: "mbox_conf_reshape" + top: "mbox_conf_softmax" + softmax_param { + axis: 2 + } +} +layer { + name: "mbox_conf_flatten" + type: "Flatten" + bottom: "mbox_conf_softmax" + top: "mbox_conf_flatten" + flatten_param { + axis: 1 + } +} +layer { + name: "detection_out" + type: "DetectionOutput" + bottom: "mbox_loc" + bottom: "mbox_conf_flatten" + bottom: "mbox_priorbox" + top: "detection_out" + include { + phase: TEST + } + detection_output_param { + num_classes: 81 + share_location: true + background_label_id: 0 + nms_param { + nms_threshold: 0.45 + top_k: 400 + } + save_output_param { + output_directory: "data/ssd_out/mscoco/SSD_300x300" + output_name_prefix: "detections_minival_ssd300_results" + output_format: "COCO" + label_map_file: "data/coco/labelmap_coco.prototxt" + name_size_file: "data/coco/minival2014_name_size.txt" + num_test_image: 5000 + } + code_type: CENTER_SIZE + keep_top_k: 200 + confidence_threshold: 0.01 + } +} + diff --git a/models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300/solver.prototxt 
b/models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300/solver.prototxt new file mode 100644 index 00000000000..5f07abcc42d --- /dev/null +++ b/models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300/solver.prototxt @@ -0,0 +1,27 @@ +train_net: "models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300/train.prototxt" +test_net: "models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300/test.prototxt" +test_iter: 625 +test_interval: 10000 +base_lr: 0.001 +display: 10 +max_iter: 400000 +lr_policy: "multistep" +gamma: 0.1 +momentum: 0.9 +weight_decay: 0.0005 +snapshot: 40000 +snapshot_prefix: "models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300/VGG_coco_SSD_300x300" +solver_mode: CPU +device_id: 0 +debug_info: false +snapshot_after_train: true +test_initialization: false +average_loss: 10 +stepvalue: 280000 +stepvalue: 360000 +stepvalue: 400000 +iter_size: 1 +type: "SGD" +eval_type: "detection" +ap_version: "11point" + diff --git a/models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300/test.prototxt b/models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300/test.prototxt new file mode 100644 index 00000000000..97e83c7f0cf --- /dev/null +++ b/models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300/test.prototxt @@ -0,0 +1,1674 @@ +name: "VGG_coco_SSD_300x300_test" +layer { + name: "data" + type: "AnnotatedData" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mean_value: 104 + mean_value: 117 + mean_value: 123 + force_color: true + resize_param { + prob: 1 + resize_mode: WARP + height: 300 + width: 300 + interp_mode: LINEAR + } + } + data_param { + source: "examples/coco/coco_minival_lmdb" + batch_size: 8 + backend: LMDB + } + annotated_data_param { + batch_sampler { + } + label_map_file: "data/coco/labelmap_coco.prototxt" + } +} +layer { + name: "conv1_1" + type: "Convolution" + bottom: "data" + top: "conv1_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + 
num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_1" + type: "ReLU" + bottom: "conv1_1" + top: "conv1_1" +} +layer { + name: "conv1_2" + type: "Convolution" + bottom: "conv1_1" + top: "conv1_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_2" + type: "ReLU" + bottom: "conv1_2" + top: "conv1_2" +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1_2" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_1" + type: "Convolution" + bottom: "pool1" + top: "conv2_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_1" + type: "ReLU" + bottom: "conv2_1" + top: "conv2_1" +} +layer { + name: "conv2_2" + type: "Convolution" + bottom: "conv2_1" + top: "conv2_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_2" + type: "ReLU" + bottom: "conv2_2" + top: "conv2_2" +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2_2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv3_1" + type: "Convolution" + bottom: "pool2" + top: "conv3_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + 
weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_1" + type: "ReLU" + bottom: "conv3_1" + top: "conv3_1" +} +layer { + name: "conv3_2" + type: "Convolution" + bottom: "conv3_1" + top: "conv3_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_2" + type: "ReLU" + bottom: "conv3_2" + top: "conv3_2" +} +layer { + name: "conv3_3" + type: "Convolution" + bottom: "conv3_2" + top: "conv3_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_3" + type: "ReLU" + bottom: "conv3_3" + top: "conv3_3" +} +layer { + name: "pool3" + type: "Pooling" + bottom: "conv3_3" + top: "pool3" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv4_1" + type: "Convolution" + bottom: "pool3" + top: "conv4_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_1" + type: "ReLU" + bottom: "conv4_1" + top: "conv4_1" +} +layer { + name: "conv4_2" + type: "Convolution" + bottom: "conv4_1" + top: "conv4_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_2" + type: "ReLU" + bottom: "conv4_2" + top: "conv4_2" +} +layer { + name: 
"conv4_3" + type: "Convolution" + bottom: "conv4_2" + top: "conv4_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_3" + type: "ReLU" + bottom: "conv4_3" + top: "conv4_3" +} +layer { + name: "pool4" + type: "Pooling" + bottom: "conv4_3" + top: "pool4" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv5_1" + type: "Convolution" + bottom: "pool4" + top: "conv5_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_1" + type: "ReLU" + bottom: "conv5_1" + top: "conv5_1" +} +layer { + name: "conv5_2" + type: "Convolution" + bottom: "conv5_1" + top: "conv5_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_2" + type: "ReLU" + bottom: "conv5_2" + top: "conv5_2" +} +layer { + name: "conv5_3" + type: "Convolution" + bottom: "conv5_2" + top: "conv5_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_3" + type: "ReLU" + bottom: "conv5_3" + top: "conv5_3" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5_3" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + 
name: "fc6" + type: "Convolution" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + pad: 6 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 6 + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "fc7" + type: "Convolution" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + name: "conv6_1" + type: "Convolution" + bottom: "fc7" + top: "conv6_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_1_relu" + type: "ReLU" + bottom: "conv6_1" + top: "conv6_1" +} +layer { + name: "conv6_2" + type: "Convolution" + bottom: "conv6_1" + top: "conv6_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_relu" + type: "ReLU" + bottom: "conv6_2" + top: "conv6_2" +} +layer { + name: "conv7_1" + type: "Convolution" + bottom: "conv6_2" + top: "conv7_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} 
+layer { + name: "conv7_1_relu" + type: "ReLU" + bottom: "conv7_1" + top: "conv7_1" +} +layer { + name: "conv7_2" + type: "Convolution" + bottom: "conv7_1" + top: "conv7_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_relu" + type: "ReLU" + bottom: "conv7_2" + top: "conv7_2" +} +layer { + name: "conv8_1" + type: "Convolution" + bottom: "conv7_2" + top: "conv8_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_1_relu" + type: "ReLU" + bottom: "conv8_1" + top: "conv8_1" +} +layer { + name: "conv8_2" + type: "Convolution" + bottom: "conv8_1" + top: "conv8_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_relu" + type: "ReLU" + bottom: "conv8_2" + top: "conv8_2" +} +layer { + name: "conv9_1" + type: "Convolution" + bottom: "conv8_2" + top: "conv9_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_1_relu" + type: "ReLU" + bottom: "conv9_1" + top: "conv9_1" +} +layer { + name: "conv9_2" + type: "Convolution" + bottom: "conv9_1" + top: "conv9_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_relu" + type: "ReLU" + bottom: "conv9_2" + top: "conv9_2" +} +layer { + name: "conv4_3_norm" + type: "Normalize" + bottom: "conv4_3" + top: "conv4_3_norm" + norm_param { + across_spatial: false + scale_filler { + type: "constant" + value: 20 + } + channel_shared: false + } +} +layer { + name: "conv4_3_norm_mbox_loc" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_loc_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_loc" + top: "conv4_3_norm_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_loc_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_loc_perm" + top: "conv4_3_norm_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 324 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_conf_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_conf" + top: "conv4_3_norm_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_conf_perm" + top: "conv4_3_norm_mbox_conf_flat" + flatten_param { + axis: 1 + } +} 
+layer { + name: "conv4_3_norm_mbox_priorbox" + type: "PriorBox" + bottom: "conv4_3_norm" + bottom: "data" + top: "conv4_3_norm_mbox_priorbox" + prior_box_param { + min_size: 21.0 + max_size: 45.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 8 + offset: 0.5 + } +} +layer { + name: "fc7_mbox_loc" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc7_mbox_loc_perm" + type: "Permute" + bottom: "fc7_mbox_loc" + top: "fc7_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_loc_flat" + type: "Flatten" + bottom: "fc7_mbox_loc_perm" + top: "fc7_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_conf" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 486 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc7_mbox_conf_perm" + type: "Permute" + bottom: "fc7_mbox_conf" + top: "fc7_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_conf_flat" + type: "Flatten" + bottom: "fc7_mbox_conf_perm" + top: "fc7_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_priorbox" + type: "PriorBox" + bottom: "fc7" + bottom: "data" + top: "fc7_mbox_priorbox" + prior_box_param { + min_size: 45.0 + max_size: 99.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 
16 + offset: 0.5 + } +} +layer { + name: "conv6_2_mbox_loc" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_loc_perm" + type: "Permute" + bottom: "conv6_2_mbox_loc" + top: "conv6_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv6_2_mbox_loc_perm" + top: "conv6_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_conf" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 486 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_conf_perm" + type: "Permute" + bottom: "conv6_2_mbox_conf" + top: "conv6_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv6_2_mbox_conf_perm" + top: "conv6_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv6_2" + bottom: "data" + top: "conv6_2_mbox_priorbox" + prior_box_param { + min_size: 99.0 + max_size: 153.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 32 + offset: 0.5 + } +} +layer { + name: "conv7_2_mbox_loc" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_loc_perm" + type: "Permute" + bottom: "conv7_2_mbox_loc" + top: "conv7_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv7_2_mbox_loc_perm" + top: "conv7_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_conf" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 486 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_conf_perm" + type: "Permute" + bottom: "conv7_2_mbox_conf" + top: "conv7_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv7_2_mbox_conf_perm" + top: "conv7_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv7_2" + bottom: "data" + top: "conv7_2_mbox_priorbox" + prior_box_param { + min_size: 153.0 + max_size: 207.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 64 + offset: 0.5 + } +} +layer { + name: "conv8_2_mbox_loc" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_loc_perm" + type: 
"Permute" + bottom: "conv8_2_mbox_loc" + top: "conv8_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv8_2_mbox_loc_perm" + top: "conv8_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_conf" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 324 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_conf_perm" + type: "Permute" + bottom: "conv8_2_mbox_conf" + top: "conv8_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv8_2_mbox_conf_perm" + top: "conv8_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv8_2" + bottom: "data" + top: "conv8_2_mbox_priorbox" + prior_box_param { + min_size: 207.0 + max_size: 261.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 100 + offset: 0.5 + } +} +layer { + name: "conv9_2_mbox_loc" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_loc_perm" + type: "Permute" + bottom: "conv9_2_mbox_loc" + top: "conv9_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv9_2_mbox_loc_perm" + top: 
"conv9_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_conf" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 324 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_conf_perm" + type: "Permute" + bottom: "conv9_2_mbox_conf" + top: "conv9_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv9_2_mbox_conf_perm" + top: "conv9_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv9_2" + bottom: "data" + top: "conv9_2_mbox_priorbox" + prior_box_param { + min_size: 261.0 + max_size: 315.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 300 + offset: 0.5 + } +} +layer { + name: "mbox_loc" + type: "Concat" + bottom: "conv4_3_norm_mbox_loc_flat" + bottom: "fc7_mbox_loc_flat" + bottom: "conv6_2_mbox_loc_flat" + bottom: "conv7_2_mbox_loc_flat" + bottom: "conv8_2_mbox_loc_flat" + bottom: "conv9_2_mbox_loc_flat" + top: "mbox_loc" + concat_param { + axis: 1 + engine: CAFFE + } +} +layer { + name: "mbox_conf" + type: "Concat" + bottom: "conv4_3_norm_mbox_conf_flat" + bottom: "fc7_mbox_conf_flat" + bottom: "conv6_2_mbox_conf_flat" + bottom: "conv7_2_mbox_conf_flat" + bottom: "conv8_2_mbox_conf_flat" + bottom: "conv9_2_mbox_conf_flat" + top: "mbox_conf" + concat_param { + axis: 1 + engine: CAFFE + } +} +layer { + name: "mbox_priorbox" + type: "Concat" + bottom: "conv4_3_norm_mbox_priorbox" + bottom: "fc7_mbox_priorbox" + bottom: "conv6_2_mbox_priorbox" + bottom: "conv7_2_mbox_priorbox" + bottom: "conv8_2_mbox_priorbox" + bottom: 
"conv9_2_mbox_priorbox" + top: "mbox_priorbox" + concat_param { + axis: 2 + engine: CAFFE + } +} +layer { + name: "mbox_conf_reshape" + type: "Reshape" + bottom: "mbox_conf" + top: "mbox_conf_reshape" + reshape_param { + shape { + dim: 0 + dim: -1 + dim: 81 + } + } +} +layer { + name: "mbox_conf_softmax" + type: "Softmax" + bottom: "mbox_conf_reshape" + top: "mbox_conf_softmax" + softmax_param { + axis: 2 + } +} +layer { + name: "mbox_conf_flatten" + type: "Flatten" + bottom: "mbox_conf_softmax" + top: "mbox_conf_flatten" + flatten_param { + axis: 1 + } +} +layer { + name: "detection_out" + type: "DetectionOutput" + bottom: "mbox_loc" + bottom: "mbox_conf_flatten" + bottom: "mbox_priorbox" + top: "detection_out" + include { + phase: TEST + } + detection_output_param { + num_classes: 81 + share_location: true + background_label_id: 0 + nms_param { + nms_threshold: 0.45 + top_k: 400 + } + save_output_param { + output_directory: "data/ssd_out/mscoco/SSD_300x300" + output_name_prefix: "detections_minival_ssd300_results" + output_format: "COCO" + label_map_file: "data/coco/labelmap_coco.prototxt" + name_size_file: "data/coco/minival2014_name_size.txt" + num_test_image: 5000 + } + code_type: CENTER_SIZE + keep_top_k: 200 + confidence_threshold: 0.01 + } +} +layer { + name: "detection_eval" + type: "DetectionEvaluate" + bottom: "detection_out" + bottom: "label" + top: "detection_eval" + include { + phase: TEST + } + detection_evaluate_param { + num_classes: 81 + background_label_id: 0 + overlap_threshold: 0.5 + evaluate_difficult_gt: false + name_size_file: "data/coco/minival2014_name_size.txt" + } +} + diff --git a/models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300/train.prototxt b/models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300/train.prototxt new file mode 100644 index 00000000000..ef92f1da947 --- /dev/null +++ b/models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300/train.prototxt @@ -0,0 +1,1736 @@ +name: "VGG_coco_SSD_300x300_train" +layer { + 
name: "data" + type: "AnnotatedData" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + mean_value: 104 + mean_value: 117 + mean_value: 123 + force_color: true + resize_param { + prob: 1 + resize_mode: WARP + height: 300 + width: 300 + interp_mode: LINEAR + interp_mode: AREA + interp_mode: NEAREST + interp_mode: CUBIC + interp_mode: LANCZOS4 + } + emit_constraint { + emit_type: CENTER + } + distort_param { + brightness_prob: 0.5 + brightness_delta: 32 + contrast_prob: 0.5 + contrast_lower: 0.5 + contrast_upper: 1.5 + hue_prob: 0.5 + hue_delta: 18 + saturation_prob: 0.5 + saturation_lower: 0.5 + saturation_upper: 1.5 + random_order_prob: 0.0 + } + expand_param { + prob: 0.5 + max_expand_ratio: 4.0 + } + } + data_param { + source: "examples/coco/coco_train_lmdb" + batch_size: 32 + backend: LMDB + } + annotated_data_param { + batch_sampler { + max_sample: 1 + max_trials: 1 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + min_jaccard_overlap: 0.1 + } + max_sample: 1 + max_trials: 50 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + min_jaccard_overlap: 0.3 + } + max_sample: 1 + max_trials: 50 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + min_jaccard_overlap: 0.5 + } + max_sample: 1 + max_trials: 50 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + min_jaccard_overlap: 0.7 + } + max_sample: 1 + max_trials: 50 + } + batch_sampler { + sampler { + min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + min_jaccard_overlap: 0.9 + } + max_sample: 1 + max_trials: 50 + } + batch_sampler { + sampler { + 
min_scale: 0.3 + max_scale: 1.0 + min_aspect_ratio: 0.5 + max_aspect_ratio: 2.0 + } + sample_constraint { + max_jaccard_overlap: 1.0 + } + max_sample: 1 + max_trials: 50 + } + label_map_file: "data/coco/labelmap_coco.prototxt" + } +} +layer { + name: "conv1_1" + type: "Convolution" + bottom: "data" + top: "conv1_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_1" + type: "ReLU" + bottom: "conv1_1" + top: "conv1_1" +} +layer { + name: "conv1_2" + type: "Convolution" + bottom: "conv1_1" + top: "conv1_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_2" + type: "ReLU" + bottom: "conv1_2" + top: "conv1_2" +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1_2" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_1" + type: "Convolution" + bottom: "pool1" + top: "conv2_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_1" + type: "ReLU" + bottom: "conv2_1" + top: "conv2_1" +} +layer { + name: "conv2_2" + type: "Convolution" + bottom: "conv2_1" + top: "conv2_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_2" + type: "ReLU" + 
bottom: "conv2_2" + top: "conv2_2" +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2_2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv3_1" + type: "Convolution" + bottom: "pool2" + top: "conv3_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_1" + type: "ReLU" + bottom: "conv3_1" + top: "conv3_1" +} +layer { + name: "conv3_2" + type: "Convolution" + bottom: "conv3_1" + top: "conv3_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_2" + type: "ReLU" + bottom: "conv3_2" + top: "conv3_2" +} +layer { + name: "conv3_3" + type: "Convolution" + bottom: "conv3_2" + top: "conv3_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_3" + type: "ReLU" + bottom: "conv3_3" + top: "conv3_3" +} +layer { + name: "pool3" + type: "Pooling" + bottom: "conv3_3" + top: "pool3" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv4_1" + type: "Convolution" + bottom: "pool3" + top: "conv4_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_1" + type: "ReLU" + bottom: "conv4_1" + top: "conv4_1" +} +layer { + 
name: "conv4_2" + type: "Convolution" + bottom: "conv4_1" + top: "conv4_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_2" + type: "ReLU" + bottom: "conv4_2" + top: "conv4_2" +} +layer { + name: "conv4_3" + type: "Convolution" + bottom: "conv4_2" + top: "conv4_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_3" + type: "ReLU" + bottom: "conv4_3" + top: "conv4_3" +} +layer { + name: "pool4" + type: "Pooling" + bottom: "conv4_3" + top: "pool4" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv5_1" + type: "Convolution" + bottom: "pool4" + top: "conv5_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_1" + type: "ReLU" + bottom: "conv5_1" + top: "conv5_1" +} +layer { + name: "conv5_2" + type: "Convolution" + bottom: "conv5_1" + top: "conv5_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_2" + type: "ReLU" + bottom: "conv5_2" + top: "conv5_2" +} +layer { + name: "conv5_3" + type: "Convolution" + bottom: "conv5_2" + top: "conv5_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + 
convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_3" + type: "ReLU" + bottom: "conv5_3" + top: "conv5_3" +} +layer { + name: "pool5" + type: "Pooling" + bottom: "conv5_3" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "fc6" + type: "Convolution" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + pad: 6 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 6 + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "fc7" + type: "Convolution" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + name: "conv6_1" + type: "Convolution" + bottom: "fc7" + top: "conv6_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_1_relu" + type: "ReLU" + bottom: "conv6_1" + top: "conv6_1" +} +layer { + name: "conv6_2" + type: "Convolution" + bottom: "conv6_1" + top: "conv6_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: 
"conv6_2_relu" + type: "ReLU" + bottom: "conv6_2" + top: "conv6_2" +} +layer { + name: "conv7_1" + type: "Convolution" + bottom: "conv6_2" + top: "conv7_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_1_relu" + type: "ReLU" + bottom: "conv7_1" + top: "conv7_1" +} +layer { + name: "conv7_2" + type: "Convolution" + bottom: "conv7_1" + top: "conv7_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_relu" + type: "ReLU" + bottom: "conv7_2" + top: "conv7_2" +} +layer { + name: "conv8_1" + type: "Convolution" + bottom: "conv7_2" + top: "conv8_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_1_relu" + type: "ReLU" + bottom: "conv8_1" + top: "conv8_1" +} +layer { + name: "conv8_2" + type: "Convolution" + bottom: "conv8_1" + top: "conv8_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_relu" + type: "ReLU" + bottom: "conv8_2" + top: "conv8_2" +} +layer { + name: "conv9_1" + type: "Convolution" + bottom: "conv8_2" + top: "conv9_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + 
num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_1_relu" + type: "ReLU" + bottom: "conv9_1" + top: "conv9_1" +} +layer { + name: "conv9_2" + type: "Convolution" + bottom: "conv9_1" + top: "conv9_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_relu" + type: "ReLU" + bottom: "conv9_2" + top: "conv9_2" +} +layer { + name: "conv4_3_norm" + type: "Normalize" + bottom: "conv4_3" + top: "conv4_3_norm" + norm_param { + across_spatial: false + scale_filler { + type: "constant" + value: 20 + } + channel_shared: false + } +} +layer { + name: "conv4_3_norm_mbox_loc" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_loc_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_loc" + top: "conv4_3_norm_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_loc_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_loc_perm" + top: "conv4_3_norm_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 324 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" 
+ value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_conf_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_conf" + top: "conv4_3_norm_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_conf_perm" + top: "conv4_3_norm_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_priorbox" + type: "PriorBox" + bottom: "conv4_3_norm" + bottom: "data" + top: "conv4_3_norm_mbox_priorbox" + prior_box_param { + min_size: 21.0 + max_size: 45.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 8 + offset: 0.5 + } +} +layer { + name: "fc7_mbox_loc" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc7_mbox_loc_perm" + type: "Permute" + bottom: "fc7_mbox_loc" + top: "fc7_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_loc_flat" + type: "Flatten" + bottom: "fc7_mbox_loc_perm" + top: "fc7_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_conf" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 486 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc7_mbox_conf_perm" + type: "Permute" + bottom: "fc7_mbox_conf" + top: "fc7_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_conf_flat" + type: 
"Flatten" + bottom: "fc7_mbox_conf_perm" + top: "fc7_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_priorbox" + type: "PriorBox" + bottom: "fc7" + bottom: "data" + top: "fc7_mbox_priorbox" + prior_box_param { + min_size: 45.0 + max_size: 99.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 16 + offset: 0.5 + } +} +layer { + name: "conv6_2_mbox_loc" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_loc_perm" + type: "Permute" + bottom: "conv6_2_mbox_loc" + top: "conv6_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv6_2_mbox_loc_perm" + top: "conv6_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_conf" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 486 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_conf_perm" + type: "Permute" + bottom: "conv6_2_mbox_conf" + top: "conv6_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv6_2_mbox_conf_perm" + top: "conv6_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv6_2" + bottom: "data" + top: "conv6_2_mbox_priorbox" + 
prior_box_param { + min_size: 99.0 + max_size: 153.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 32 + offset: 0.5 + } +} +layer { + name: "conv7_2_mbox_loc" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_loc_perm" + type: "Permute" + bottom: "conv7_2_mbox_loc" + top: "conv7_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv7_2_mbox_loc_perm" + top: "conv7_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_conf" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 486 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_conf_perm" + type: "Permute" + bottom: "conv7_2_mbox_conf" + top: "conv7_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv7_2_mbox_conf_perm" + top: "conv7_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv7_2" + bottom: "data" + top: "conv7_2_mbox_priorbox" + prior_box_param { + min_size: 153.0 + max_size: 207.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 64 + offset: 0.5 + } +} +layer { + 
name: "conv8_2_mbox_loc" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_loc_perm" + type: "Permute" + bottom: "conv8_2_mbox_loc" + top: "conv8_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv8_2_mbox_loc_perm" + top: "conv8_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_conf" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 324 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_conf_perm" + type: "Permute" + bottom: "conv8_2_mbox_conf" + top: "conv8_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv8_2_mbox_conf_perm" + top: "conv8_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv8_2" + bottom: "data" + top: "conv8_2_mbox_priorbox" + prior_box_param { + min_size: 207.0 + max_size: 261.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 100 + offset: 0.5 + } +} +layer { + name: "conv9_2_mbox_loc" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_loc" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 
+ stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_loc_perm" + type: "Permute" + bottom: "conv9_2_mbox_loc" + top: "conv9_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv9_2_mbox_loc_perm" + top: "conv9_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_conf" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_conf" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 324 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_conf_perm" + type: "Permute" + bottom: "conv9_2_mbox_conf" + top: "conv9_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv9_2_mbox_conf_perm" + top: "conv9_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv9_2" + bottom: "data" + top: "conv9_2_mbox_priorbox" + prior_box_param { + min_size: 261.0 + max_size: 315.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 300 + offset: 0.5 + } +} +layer { + name: "mbox_loc" + type: "Concat" + bottom: "conv4_3_norm_mbox_loc_flat" + bottom: "fc7_mbox_loc_flat" + bottom: "conv6_2_mbox_loc_flat" + bottom: "conv7_2_mbox_loc_flat" + bottom: "conv8_2_mbox_loc_flat" + bottom: "conv9_2_mbox_loc_flat" + top: "mbox_loc" + concat_param { + axis: 1 + engine: CAFFE + } +} +layer { + name: "mbox_conf" + type: "Concat" + bottom: "conv4_3_norm_mbox_conf_flat" + bottom: "fc7_mbox_conf_flat" + bottom: "conv6_2_mbox_conf_flat" + bottom: 
"conv7_2_mbox_conf_flat" + bottom: "conv8_2_mbox_conf_flat" + bottom: "conv9_2_mbox_conf_flat" + top: "mbox_conf" + concat_param { + axis: 1 + engine: CAFFE + } +} +layer { + name: "mbox_priorbox" + type: "Concat" + bottom: "conv4_3_norm_mbox_priorbox" + bottom: "fc7_mbox_priorbox" + bottom: "conv6_2_mbox_priorbox" + bottom: "conv7_2_mbox_priorbox" + bottom: "conv8_2_mbox_priorbox" + bottom: "conv9_2_mbox_priorbox" + top: "mbox_priorbox" + concat_param { + axis: 2 + engine: CAFFE + } +} +layer { + name: "mbox_loss" + type: "MultiBoxLoss" + bottom: "mbox_loc" + bottom: "mbox_conf" + bottom: "mbox_priorbox" + bottom: "label" + top: "mbox_loss" + include { + phase: TRAIN + } + propagate_down: true + propagate_down: true + propagate_down: false + propagate_down: false + loss_param { + normalization: VALID + } + multibox_loss_param { + loc_loss_type: SMOOTH_L1 + conf_loss_type: SOFTMAX + loc_weight: 1.0 + num_classes: 81 + share_location: true + match_type: PER_PREDICTION + overlap_threshold: 0.5 + use_prior_for_matching: true + background_label_id: 0 + use_difficult_gt: false + neg_pos_ratio: 3.0 + neg_overlap: 0.5 + code_type: CENTER_SIZE + ignore_cross_boundary_bbox: false + mining_type: MAX_NEGATIVE + } +} + diff --git a/models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300_webcam/test.prototxt b/models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300_webcam/test.prototxt new file mode 100644 index 00000000000..c78dcccb38f --- /dev/null +++ b/models/intel_optimized_models/ssd/VGGNet/coco/SSD_300x300_webcam/test.prototxt @@ -0,0 +1,1668 @@ +name: "VGG_coco_SSD_300x300_test" +layer { + name: "data" + type: "VideoData" + top: "data" + transform_param { + mean_value: 104 + mean_value: 117 + mean_value: 123 + resize_param { + prob: 1 + resize_mode: WARP + height: 300 + width: 300 + interp_mode: LINEAR + } + } + data_param { + batch_size: 1 + } + video_data_param { + video_type: WEBCAM + device_id: 0 + skip_frames: 0 + } +} +layer { + name: "conv1_1" + type: 
"Convolution" + bottom: "data" + top: "conv1_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_1" + type: "ReLU" + bottom: "conv1_1" + top: "conv1_1" +} +layer { + name: "conv1_2" + type: "Convolution" + bottom: "conv1_1" + top: "conv1_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 64 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu1_2" + type: "ReLU" + bottom: "conv1_2" + top: "conv1_2" +} +layer { + name: "pool1" + type: "Pooling" + bottom: "conv1_2" + top: "pool1" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv2_1" + type: "Convolution" + bottom: "pool1" + top: "conv2_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_1" + type: "ReLU" + bottom: "conv2_1" + top: "conv2_1" +} +layer { + name: "conv2_2" + type: "Convolution" + bottom: "conv2_1" + top: "conv2_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu2_2" + type: "ReLU" + bottom: "conv2_2" + top: "conv2_2" +} +layer { + name: "pool2" + type: "Pooling" + bottom: "conv2_2" + top: "pool2" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv3_1" + type: "Convolution" + bottom: "pool2" + top: "conv3_1" + 
param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_1" + type: "ReLU" + bottom: "conv3_1" + top: "conv3_1" +} +layer { + name: "conv3_2" + type: "Convolution" + bottom: "conv3_1" + top: "conv3_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_2" + type: "ReLU" + bottom: "conv3_2" + top: "conv3_2" +} +layer { + name: "conv3_3" + type: "Convolution" + bottom: "conv3_2" + top: "conv3_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu3_3" + type: "ReLU" + bottom: "conv3_3" + top: "conv3_3" +} +layer { + name: "pool3" + type: "Pooling" + bottom: "conv3_3" + top: "pool3" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv4_1" + type: "Convolution" + bottom: "pool3" + top: "conv4_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_1" + type: "ReLU" + bottom: "conv4_1" + top: "conv4_1" +} +layer { + name: "conv4_2" + type: "Convolution" + bottom: "conv4_1" + top: "conv4_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + 
bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_2" + type: "ReLU" + bottom: "conv4_2" + top: "conv4_2" +} +layer { + name: "conv4_3" + type: "Convolution" + bottom: "conv4_2" + top: "conv4_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu4_3" + type: "ReLU" + bottom: "conv4_3" + top: "conv4_3" +} +layer { + name: "pool4" + type: "Pooling" + bottom: "conv4_3" + top: "pool4" + pooling_param { + pool: MAX + kernel_size: 2 + stride: 2 + } +} +layer { + name: "conv5_1" + type: "Convolution" + bottom: "pool4" + top: "conv5_1" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_1" + type: "ReLU" + bottom: "conv5_1" + top: "conv5_1" +} +layer { + name: "conv5_2" + type: "Convolution" + bottom: "conv5_1" + top: "conv5_2" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_2" + type: "ReLU" + bottom: "conv5_2" + top: "conv5_2" +} +layer { + name: "conv5_3" + type: "Convolution" + bottom: "conv5_2" + top: "conv5_3" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 1 + } +} +layer { + name: "relu5_3" + type: "ReLU" + bottom: "conv5_3" + top: "conv5_3" +} +layer { + 
name: "pool5" + type: "Pooling" + bottom: "conv5_3" + top: "pool5" + pooling_param { + pool: MAX + kernel_size: 3 + stride: 1 + pad: 1 + } +} +layer { + name: "fc6" + type: "Convolution" + bottom: "pool5" + top: "fc6" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + pad: 6 + kernel_size: 3 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + dilation: 6 + } +} +layer { + name: "relu6" + type: "ReLU" + bottom: "fc6" + top: "fc6" +} +layer { + name: "fc7" + type: "Convolution" + bottom: "fc6" + top: "fc7" + param { + lr_mult: 1 + decay_mult: 1 + } + param { + lr_mult: 2 + decay_mult: 0 + } + convolution_param { + num_output: 1024 + kernel_size: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "relu7" + type: "ReLU" + bottom: "fc7" + top: "fc7" +} +layer { + name: "conv6_1" + type: "Convolution" + bottom: "fc7" + top: "conv6_1" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_1_relu" + type: "ReLU" + bottom: "conv6_1" + top: "conv6_1" +} +layer { + name: "conv6_2" + type: "Convolution" + bottom: "conv6_1" + top: "conv6_2" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 512 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_relu" + type: "ReLU" + bottom: "conv6_2" + top: "conv6_2" +} +layer { + name: "conv7_1" + type: "Convolution" + bottom: "conv6_2" + top: "conv7_1" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + 
convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_1_relu" + type: "ReLU" + bottom: "conv7_1" + top: "conv7_1" +} +layer { + name: "conv7_2" + type: "Convolution" + bottom: "conv7_1" + top: "conv7_2" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 1 + kernel_size: 3 + stride: 2 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_relu" + type: "ReLU" + bottom: "conv7_2" + top: "conv7_2" +} +layer { + name: "conv8_1" + type: "Convolution" + bottom: "conv7_2" + top: "conv8_1" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_1_relu" + type: "ReLU" + bottom: "conv8_1" + top: "conv8_1" +} +layer { + name: "conv8_2" + type: "Convolution" + bottom: "conv8_1" + top: "conv8_2" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_relu" + type: "ReLU" + bottom: "conv8_2" + top: "conv8_2" +} +layer { + name: "conv9_1" + type: "Convolution" + bottom: "conv8_2" + top: "conv9_1" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 128 + pad: 0 + kernel_size: 1 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_1_relu" + type: "ReLU" + bottom: "conv9_1" + top: 
"conv9_1" +} +layer { + name: "conv9_2" + type: "Convolution" + bottom: "conv9_1" + top: "conv9_2" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 256 + pad: 0 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_relu" + type: "ReLU" + bottom: "conv9_2" + top: "conv9_2" +} +layer { + name: "conv4_3_norm" + type: "Normalize" + bottom: "conv4_3" + top: "conv4_3_norm" + norm_param { + across_spatial: false + scale_filler { + type: "constant" + value: 20 + } + channel_shared: false + } +} +layer { + name: "conv4_3_norm_mbox_loc" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_loc" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_loc_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_loc" + top: "conv4_3_norm_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_loc_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_loc_perm" + top: "conv4_3_norm_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf" + type: "Convolution" + bottom: "conv4_3_norm" + top: "conv4_3_norm_mbox_conf" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 324 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv4_3_norm_mbox_conf_perm" + type: "Permute" + bottom: "conv4_3_norm_mbox_conf" + top: "conv4_3_norm_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + 
order: 1 + } +} +layer { + name: "conv4_3_norm_mbox_conf_flat" + type: "Flatten" + bottom: "conv4_3_norm_mbox_conf_perm" + top: "conv4_3_norm_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv4_3_norm_mbox_priorbox" + type: "PriorBox" + bottom: "conv4_3_norm" + bottom: "data" + top: "conv4_3_norm_mbox_priorbox" + prior_box_param { + min_size: 30.0 + max_size: 60.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 8 + offset: 0.5 + } +} +layer { + name: "fc7_mbox_loc" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_loc" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc7_mbox_loc_perm" + type: "Permute" + bottom: "fc7_mbox_loc" + top: "fc7_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_loc_flat" + type: "Flatten" + bottom: "fc7_mbox_loc_perm" + top: "fc7_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_conf" + type: "Convolution" + bottom: "fc7" + top: "fc7_mbox_conf" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 486 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "fc7_mbox_conf_perm" + type: "Permute" + bottom: "fc7_mbox_conf" + top: "fc7_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "fc7_mbox_conf_flat" + type: "Flatten" + bottom: "fc7_mbox_conf_perm" + top: "fc7_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "fc7_mbox_priorbox" + type: "PriorBox" + bottom: "fc7" + bottom: "data" + top: 
"fc7_mbox_priorbox" + prior_box_param { + min_size: 60.0 + max_size: 111.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 16 + offset: 0.5 + } +} +layer { + name: "conv6_2_mbox_loc" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_loc" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_loc_perm" + type: "Permute" + bottom: "conv6_2_mbox_loc" + top: "conv6_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv6_2_mbox_loc_perm" + top: "conv6_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_conf" + type: "Convolution" + bottom: "conv6_2" + top: "conv6_2_mbox_conf" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 486 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv6_2_mbox_conf_perm" + type: "Permute" + bottom: "conv6_2_mbox_conf" + top: "conv6_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv6_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv6_2_mbox_conf_perm" + top: "conv6_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv6_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv6_2" + bottom: "data" + top: "conv6_2_mbox_priorbox" + prior_box_param { + min_size: 111.0 + max_size: 162.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 32 + 
offset: 0.5 + } +} +layer { + name: "conv7_2_mbox_loc" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_loc" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 24 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_loc_perm" + type: "Permute" + bottom: "conv7_2_mbox_loc" + top: "conv7_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv7_2_mbox_loc_perm" + top: "conv7_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_conf" + type: "Convolution" + bottom: "conv7_2" + top: "conv7_2_mbox_conf" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 486 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv7_2_mbox_conf_perm" + type: "Permute" + bottom: "conv7_2_mbox_conf" + top: "conv7_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv7_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv7_2_mbox_conf_perm" + top: "conv7_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv7_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv7_2" + bottom: "data" + top: "conv7_2_mbox_priorbox" + prior_box_param { + min_size: 162.0 + max_size: 213.0 + aspect_ratio: 2 + aspect_ratio: 3 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 64 + offset: 0.5 + } +} +layer { + name: "conv8_2_mbox_loc" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_loc" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + 
convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_loc_perm" + type: "Permute" + bottom: "conv8_2_mbox_loc" + top: "conv8_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv8_2_mbox_loc_perm" + top: "conv8_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_conf" + type: "Convolution" + bottom: "conv8_2" + top: "conv8_2_mbox_conf" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 324 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv8_2_mbox_conf_perm" + type: "Permute" + bottom: "conv8_2_mbox_conf" + top: "conv8_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv8_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv8_2_mbox_conf_perm" + top: "conv8_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv8_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv8_2" + bottom: "data" + top: "conv8_2_mbox_priorbox" + prior_box_param { + min_size: 213.0 + max_size: 264.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 100 + offset: 0.5 + } +} +layer { + name: "conv9_2_mbox_loc" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_loc" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 16 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_loc_perm" + type: "Permute" + 
bottom: "conv9_2_mbox_loc" + top: "conv9_2_mbox_loc_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_loc_flat" + type: "Flatten" + bottom: "conv9_2_mbox_loc_perm" + top: "conv9_2_mbox_loc_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_conf" + type: "Convolution" + bottom: "conv9_2" + top: "conv9_2_mbox_conf" + param { + lr_mult: 1.0 + decay_mult: 1 + } + param { + lr_mult: 2.0 + decay_mult: 0 + } + convolution_param { + num_output: 324 + pad: 1 + kernel_size: 3 + stride: 1 + weight_filler { + type: "xavier" + } + bias_filler { + type: "constant" + value: 0 + } + } +} +layer { + name: "conv9_2_mbox_conf_perm" + type: "Permute" + bottom: "conv9_2_mbox_conf" + top: "conv9_2_mbox_conf_perm" + permute_param { + order: 0 + order: 2 + order: 3 + order: 1 + } +} +layer { + name: "conv9_2_mbox_conf_flat" + type: "Flatten" + bottom: "conv9_2_mbox_conf_perm" + top: "conv9_2_mbox_conf_flat" + flatten_param { + axis: 1 + } +} +layer { + name: "conv9_2_mbox_priorbox" + type: "PriorBox" + bottom: "conv9_2" + bottom: "data" + top: "conv9_2_mbox_priorbox" + prior_box_param { + min_size: 264.0 + max_size: 315.0 + aspect_ratio: 2 + flip: true + clip: false + variance: 0.1 + variance: 0.1 + variance: 0.2 + variance: 0.2 + step: 300 + offset: 0.5 + } +} +layer { + name: "mbox_loc" + type: "Concat" + bottom: "conv4_3_norm_mbox_loc_flat" + bottom: "fc7_mbox_loc_flat" + bottom: "conv6_2_mbox_loc_flat" + bottom: "conv7_2_mbox_loc_flat" + bottom: "conv8_2_mbox_loc_flat" + bottom: "conv9_2_mbox_loc_flat" + top: "mbox_loc" + concat_param { + axis: 1 + engine: CAFFE + } +} +layer { + name: "mbox_conf" + type: "Concat" + bottom: "conv4_3_norm_mbox_conf_flat" + bottom: "fc7_mbox_conf_flat" + bottom: "conv6_2_mbox_conf_flat" + bottom: "conv7_2_mbox_conf_flat" + bottom: "conv8_2_mbox_conf_flat" + bottom: "conv9_2_mbox_conf_flat" + top: "mbox_conf" + concat_param { + axis: 1 + engine: CAFFE + } +} +layer { + name: 
"mbox_priorbox" + type: "Concat" + bottom: "conv4_3_norm_mbox_priorbox" + bottom: "fc7_mbox_priorbox" + bottom: "conv6_2_mbox_priorbox" + bottom: "conv7_2_mbox_priorbox" + bottom: "conv8_2_mbox_priorbox" + bottom: "conv9_2_mbox_priorbox" + top: "mbox_priorbox" + concat_param { + axis: 2 + engine: CAFFE + } +} +layer { + name: "mbox_conf_reshape" + type: "Reshape" + bottom: "mbox_conf" + top: "mbox_conf_reshape" + reshape_param { + shape { + dim: 0 + dim: -1 + dim: 81 + } + } +} +layer { + name: "mbox_conf_softmax" + type: "Softmax" + bottom: "mbox_conf_reshape" + top: "mbox_conf_softmax" + softmax_param { + axis: 2 + } +} +layer { + name: "mbox_conf_flatten" + type: "Flatten" + bottom: "mbox_conf_softmax" + top: "mbox_conf_flatten" + flatten_param { + axis: 1 + } +} +layer { + name: "detection_out" + type: "DetectionOutput" + bottom: "mbox_loc" + bottom: "mbox_conf_flatten" + bottom: "mbox_priorbox" + bottom: "data" + top: "detection_out" + include { + phase: TEST + } + transform_param { + mean_value: 104 + mean_value: 117 + mean_value: 123 + resize_param { + prob: 1 + resize_mode: WARP + height: 720 + width: 960 + interp_mode: LINEAR + } + } + detection_output_param { + num_classes: 81 + share_location: true + background_label_id: 0 + nms_param { + nms_threshold: 0.45 + top_k: 400 + } + save_output_param { + label_map_file: "data/coco/labelmap_coco.prototxt" + } + code_type: CENTER_SIZE + keep_top_k: 200 + confidence_threshold: 0.01 + visualize: true + visualize_threshold: 0.6 + } +} +layer { + name: "slience" + type: "Silence" + bottom: "detection_out" + include { + phase: TEST + } +} + diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index bf492a24b1c..c53299d265b 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -3,13 +3,13 @@ if(NOT HAVE_PYTHON) return() endif() -include_directories(${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR} ${Boost_INCLUDE_DIRS}) file(GLOB_RECURSE python_srcs ${PROJECT_SOURCE_DIR}/python/*.cpp) add_library(pycaffe 
SHARED ${python_srcs}) -target_link_libraries(pycaffe ${Caffe_LINK} ${PYTHON_LIBRARIES} ${Boost_LIBRARIES}) -set_target_properties(pycaffe PROPERTIES PREFIX "" OUTPUT_NAME "_caffe") caffe_default_properties(pycaffe) +set_target_properties(pycaffe PROPERTIES PREFIX "" OUTPUT_NAME "_caffe") +target_include_directories(pycaffe PUBLIC ${PYTHON_INCLUDE_DIRS} ${NUMPY_INCLUDE_DIR}) +target_link_libraries(pycaffe PUBLIC ${Caffe_LINK} ${PYTHON_LIBRARIES}) if(UNIX OR APPLE) set(__linkname "${PROJECT_SOURCE_DIR}/python/caffe/_caffe.so") diff --git a/python/caffe/__init__.py b/python/caffe/__init__.py old mode 100644 new mode 100755 index 35868a403a3..34c939a5b80 --- a/python/caffe/__init__.py +++ b/python/caffe/__init__.py @@ -1,5 +1,41 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# from .pycaffe import Net, SGDSolver, NesterovSolver, AdaGradSolver, RMSPropSolver, AdaDeltaSolver, AdamSolver -from ._caffe import set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list, set_random_seed +from ._caffe import init_log, log, set_mode_cpu, set_mode_gpu, set_device, Layer, get_solver, layer_type_list, set_random_seed from ._caffe import __version__ from .proto.caffe_pb2 import TRAIN, TEST from .classifier import Classifier diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp index bdee75acd6c..b9dc23e245f 100644 --- a/python/caffe/_caffe.cpp +++ b/python/caffe/_caffe.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include // NOLINT(build/include_alpha) // Produce deprecation warnings (needs to come before arrayobject.h inclusion). 
@@ -51,6 +88,23 @@ const int NPY_DTYPE = NPY_FLOAT32; void set_mode_cpu() { Caffe::set_mode(Caffe::CPU); } void set_mode_gpu() { Caffe::set_mode(Caffe::GPU); } +void InitLog() { + ::google::InitGoogleLogging(""); + ::google::InstallFailureSignalHandler(); +} +void InitLogLevel(int level) { + FLAGS_minloglevel = level; + InitLog(); +} +void InitLogLevelPipe(int level, bool stderr) { + FLAGS_minloglevel = level; + FLAGS_logtostderr = stderr; + InitLog(); +} +void Log(const string& s) { + LOG(INFO) << s; +} + void set_random_seed(unsigned int seed) { Caffe::set_random_seed(seed); } // For convenience, check that input files can be opened, and raise an @@ -91,9 +145,16 @@ void CheckContiguousArray(PyArrayObject* arr, string name, // Net constructor shared_ptr > Net_Init(string network_file, int phase, const int level, const bp::object& stages, - const bp::object& weights) { + const bp::object& weights, + const bp::object& engine) { CheckFile(network_file); + // Extract engine if specified + string engine_str = ""; + if (!engine.is_none()) { + engine_str = bp::extract(engine); + } + // Convert stages from list to vector vector stages_vector; if (!stages.is_none()) { @@ -104,7 +165,7 @@ shared_ptr > Net_Init(string network_file, int phase, // Initialize net shared_ptr > net(new Net(network_file, - static_cast(phase), level, &stages_vector)); + static_cast(phase), level, &stages_vector, NULL, engine_str)); // Load weights if (!weights.is_none()) { @@ -283,6 +344,10 @@ BOOST_PYTHON_MODULE(_caffe) { bp::scope().attr("__version__") = AS_STRING(CAFFE_VERSION); // Caffe utility functions + bp::def("init_log", &InitLog); + bp::def("init_log", &InitLogLevel); + bp::def("init_log", &InitLogLevelPipe); + bp::def("log", &Log); bp::def("set_mode_cpu", &set_mode_cpu); bp::def("set_mode_gpu", &set_mode_gpu); bp::def("set_random_seed", &set_random_seed); @@ -296,13 +361,15 @@ BOOST_PYTHON_MODULE(_caffe) { .def("__init__", bp::make_constructor(&Net_Init, bp::default_call_policies(), 
(bp::arg("network_file"), "phase", bp::arg("level")=0, bp::arg("stages")=bp::object(), - bp::arg("weights")=bp::object()))) + bp::arg("weights")=bp::object(), + bp::arg("engine")=bp::object()))) // Legacy constructor .def("__init__", bp::make_constructor(&Net_Init_Load)) .def("_forward", &Net::ForwardFromTo) .def("_backward", &Net::BackwardFromTo) .def("reshape", &Net::Reshape) - .def("clear_param_diffs", &Net::ClearParamDiffs) + .def("clear_param_diffs", static_cast::*)(void)>( + &Net::ClearParamDiffs)) // The cast is to select a particular overload. .def("copy_from", static_cast::*)(const string)>( &Net::CopyTrainedLayersFrom)) diff --git a/python/caffe/classifier.py b/python/caffe/classifier.py old mode 100644 new mode 100755 index ea29fed86f9..4eb5e718086 --- a/python/caffe/classifier.py +++ b/python/caffe/classifier.py @@ -1,4 +1,40 @@ #!/usr/bin/env python +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# """ Classifier is an image classifier specialization of Net. """ diff --git a/python/caffe/coord_map.py b/python/caffe/coord_map.py old mode 100644 new mode 100755 index a3413cfa855..6ac29f4fb02 --- a/python/caffe/coord_map.py +++ b/python/caffe/coord_map.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# """ Determine spatial relationships between layers to relate their coordinates. 
Coordinates are mapped from input-to-output (forward), but can diff --git a/python/caffe/detector.py b/python/caffe/detector.py old mode 100644 new mode 100755 index ef1f91730bf..489213e76ef --- a/python/caffe/detector.py +++ b/python/caffe/detector.py @@ -1,4 +1,40 @@ #!/usr/bin/env python +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# """ Do windowed detection by classifying a number of images/crops at once, optionally using the selective search window proposal method. diff --git a/python/caffe/draw.py b/python/caffe/draw.py old mode 100644 new mode 100755 index 9eecf6d7b46..1f3ab6f7ded --- a/python/caffe/draw.py +++ b/python/caffe/draw.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# """ Caffe network visualization: draw the NetParameter protobuffer. 
@@ -91,11 +127,11 @@ def get_layer_label(layer, rankdir): separator, layer.type, separator, - layer.convolution_param.kernel_size[0] if len(layer.convolution_param.kernel_size._values) else 1, + layer.convolution_param.kernel_size[0] if len(layer.convolution_param.kernel_size) else 1, separator, - layer.convolution_param.stride[0] if len(layer.convolution_param.stride._values) else 1, + layer.convolution_param.stride[0] if len(layer.convolution_param.stride) else 1, separator, - layer.convolution_param.pad[0] if len(layer.convolution_param.pad._values) else 0) + layer.convolution_param.pad[0] if len(layer.convolution_param.pad) else 0) elif layer.type == 'Pooling': pooling_types_dict = get_pooling_types_dict() node_label = '"%s%s(%s %s)%skernel size: %d%sstride: %d%spad: %d"' %\ diff --git a/python/caffe/io.py b/python/caffe/io.py old mode 100644 new mode 100755 index e1759beb587..0df78e7f395 --- a/python/caffe/io.py +++ b/python/caffe/io.py @@ -1,3 +1,45 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +""" +Use AGG matplotlib backend, so no X11 display is needed. +skimage.io imports matplotlib.pyplot so backend needs to be set before. 
+""" +import matplotlib +matplotlib.use('AGG') import numpy as np import skimage.io from scipy.ndimage import zoom @@ -75,7 +117,7 @@ def array_to_datum(arr, label=None): if arr.dtype == np.uint8: datum.data = arr.tostring() else: - datum.float_data.extend(arr.flat) + datum.float_data.extend(arr.astype(float).flat) if label is not None: datum.label = label return datum @@ -256,7 +298,12 @@ def set_mean(self, in_, mean): if len(ms) != 3: raise ValueError('Mean shape invalid') if ms != self.inputs[in_][1:]: - raise ValueError('Mean shape incompatible with input shape.') + print(self.inputs[in_]) + in_shape = self.inputs[in_][1:] + m_min, m_max = mean.min(), mean.max() + normal_mean = (mean - m_min) / (m_max - m_min) + mean = resize_image(normal_mean.transpose((1,2,0)),in_shape[1:]).transpose((2,0,1)) * (m_max - m_min) + m_min + #raise ValueError('Mean shape incompatible with input shape.') self.mean[in_] = mean def set_input_scale(self, in_, scale): @@ -302,6 +349,22 @@ def load_image(filename, color=True): img = img[:, :, :3] return img +def flip_image(im, scale=128, is_flow=False): + """ + Flip image. 
+ + Take + im: (H x W x K) ndarray + scale: scale needed for flipping + is_flow: indicates if image is flow image + + Give + im: flipped image with shape (HxWxK) + """ + im = im[:, ::-1, :] # flip for mirrors + if is_flow: #if using a flow input, should flip first channel which corresponds to x-flow + im[:,:,0] = scale-im[:,:,0] + return im def resize_image(im, new_dims, interp_order=1): """ diff --git a/python/caffe/model_libs.py b/python/caffe/model_libs.py new file mode 100644 index 00000000000..28c1b01f41e --- /dev/null +++ b/python/caffe/model_libs.py @@ -0,0 +1,1016 @@ +import os + +import caffe +from caffe import layers as L +from caffe import params as P +from caffe.proto import caffe_pb2 + +def check_if_exist(path): + return os.path.exists(path) + +def make_if_not_exist(path): + if not os.path.exists(path): + os.makedirs(path) + +def UnpackVariable(var, num): + assert num > 0 + if type(var) is list and len(var) == num: + return var + else: + ret = [] + if type(var) is list: + assert len(var) == 1 + for i in xrange(0, num): + ret.append(var[0]) + else: + for i in xrange(0, num): + ret.append(var) + return ret + +def ConvBNLayer(net, from_layer, out_layer, use_bn, use_relu, num_output, + kernel_size, pad, stride, dilation=1, use_scale=True, lr_mult=1, + conv_prefix='', conv_postfix='', bn_prefix='', bn_postfix='_bn', + scale_prefix='', scale_postfix='_scale', bias_prefix='', bias_postfix='_bias', + **bn_params): + if use_bn: + # parameters for convolution layer with batchnorm. + kwargs = { + 'param': [dict(lr_mult=lr_mult, decay_mult=1)], + 'weight_filler': dict(type='gaussian', std=0.01), + 'bias_term': False, + } + eps = bn_params.get('eps', 0.001) + moving_average_fraction = bn_params.get('moving_average_fraction', 0.999) + use_global_stats = bn_params.get('use_global_stats', False) + # parameters for batchnorm layer. 
+ bn_kwargs = { + 'param': [ + dict(lr_mult=0, decay_mult=0), + dict(lr_mult=0, decay_mult=0), + dict(lr_mult=0, decay_mult=0)], + 'eps': eps, + 'moving_average_fraction': moving_average_fraction, + } + bn_lr_mult = lr_mult + if use_global_stats: + # only specify if use_global_stats is explicitly provided; + # otherwise, use_global_stats_ = this->phase_ == TEST; + bn_kwargs = { + 'param': [ + dict(lr_mult=0, decay_mult=0), + dict(lr_mult=0, decay_mult=0), + dict(lr_mult=0, decay_mult=0)], + 'eps': eps, + 'use_global_stats': use_global_stats, + } + # not updating scale/bias parameters + bn_lr_mult = 0 + # parameters for scale bias layer after batchnorm. + if use_scale: + sb_kwargs = { + 'bias_term': True, + 'param': [ + dict(lr_mult=bn_lr_mult, decay_mult=0), + dict(lr_mult=bn_lr_mult, decay_mult=0)], + 'filler': dict(type='constant', value=1.0), + 'bias_filler': dict(type='constant', value=0.0), + } + else: + bias_kwargs = { + 'param': [dict(lr_mult=bn_lr_mult, decay_mult=0)], + 'filler': dict(type='constant', value=0.0), + } + else: + kwargs = { + 'param': [ + dict(lr_mult=lr_mult, decay_mult=1), + dict(lr_mult=2 * lr_mult, decay_mult=0)], + 'weight_filler': dict(type='xavier'), + 'bias_filler': dict(type='constant', value=0) + } + + conv_name = '{}{}{}'.format(conv_prefix, out_layer, conv_postfix) + [kernel_h, kernel_w] = UnpackVariable(kernel_size, 2) + [pad_h, pad_w] = UnpackVariable(pad, 2) + [stride_h, stride_w] = UnpackVariable(stride, 2) + if kernel_h == kernel_w: + net[conv_name] = L.Convolution(net[from_layer], num_output=num_output, + kernel_size=kernel_h, pad=pad_h, stride=stride_h, **kwargs) + else: + net[conv_name] = L.Convolution(net[from_layer], num_output=num_output, + kernel_h=kernel_h, kernel_w=kernel_w, pad_h=pad_h, pad_w=pad_w, + stride_h=stride_h, stride_w=stride_w, **kwargs) + if dilation > 1: + net.update(conv_name, {'dilation': dilation}) + if use_bn: + bn_name = '{}{}{}'.format(bn_prefix, out_layer, bn_postfix) + net[bn_name] = 
L.BatchNorm(net[conv_name], in_place=True, **bn_kwargs) + if use_scale: + sb_name = '{}{}{}'.format(scale_prefix, out_layer, scale_postfix) + net[sb_name] = L.Scale(net[bn_name], in_place=True, **sb_kwargs) + else: + bias_name = '{}{}{}'.format(bias_prefix, out_layer, bias_postfix) + net[bias_name] = L.Bias(net[bn_name], in_place=True, **bias_kwargs) + if use_relu: + relu_name = '{}_relu'.format(conv_name) + net[relu_name] = L.ReLU(net[conv_name], in_place=True) + +def ResBody(net, from_layer, block_name, out2a, out2b, out2c, stride, use_branch1, dilation=1, **bn_param): + # ResBody(net, 'pool1', '2a', 64, 64, 256, 1, True) + + conv_prefix = 'res{}_'.format(block_name) + conv_postfix = '' + bn_prefix = 'bn{}_'.format(block_name) + bn_postfix = '' + scale_prefix = 'scale{}_'.format(block_name) + scale_postfix = '' + use_scale = True + + if use_branch1: + branch_name = 'branch1' + ConvBNLayer(net, from_layer, branch_name, use_bn=True, use_relu=False, + num_output=out2c, kernel_size=1, pad=0, stride=stride, use_scale=use_scale, + conv_prefix=conv_prefix, conv_postfix=conv_postfix, + bn_prefix=bn_prefix, bn_postfix=bn_postfix, + scale_prefix=scale_prefix, scale_postfix=scale_postfix, **bn_param) + branch1 = '{}{}'.format(conv_prefix, branch_name) + else: + branch1 = from_layer + + branch_name = 'branch2a' + ConvBNLayer(net, from_layer, branch_name, use_bn=True, use_relu=True, + num_output=out2a, kernel_size=1, pad=0, stride=stride, use_scale=use_scale, + conv_prefix=conv_prefix, conv_postfix=conv_postfix, + bn_prefix=bn_prefix, bn_postfix=bn_postfix, + scale_prefix=scale_prefix, scale_postfix=scale_postfix, **bn_param) + out_name = '{}{}'.format(conv_prefix, branch_name) + + branch_name = 'branch2b' + if dilation == 1: + ConvBNLayer(net, out_name, branch_name, use_bn=True, use_relu=True, + num_output=out2b, kernel_size=3, pad=1, stride=1, use_scale=use_scale, + conv_prefix=conv_prefix, conv_postfix=conv_postfix, + bn_prefix=bn_prefix, bn_postfix=bn_postfix, + 
scale_prefix=scale_prefix, scale_postfix=scale_postfix, **bn_param) + else: + pad = int((3 + (dilation - 1) * 2) - 1) / 2 + ConvBNLayer(net, out_name, branch_name, use_bn=True, use_relu=True, + num_output=out2b, kernel_size=3, pad=pad, stride=1, use_scale=use_scale, + dilation=dilation, conv_prefix=conv_prefix, conv_postfix=conv_postfix, + bn_prefix=bn_prefix, bn_postfix=bn_postfix, + scale_prefix=scale_prefix, scale_postfix=scale_postfix, **bn_param) + out_name = '{}{}'.format(conv_prefix, branch_name) + + branch_name = 'branch2c' + ConvBNLayer(net, out_name, branch_name, use_bn=True, use_relu=False, + num_output=out2c, kernel_size=1, pad=0, stride=1, use_scale=use_scale, + conv_prefix=conv_prefix, conv_postfix=conv_postfix, + bn_prefix=bn_prefix, bn_postfix=bn_postfix, + scale_prefix=scale_prefix, scale_postfix=scale_postfix, **bn_param) + branch2 = '{}{}'.format(conv_prefix, branch_name) + + res_name = 'res{}'.format(block_name) + net[res_name] = L.Eltwise(net[branch1], net[branch2]) + relu_name = '{}_relu'.format(res_name) + net[relu_name] = L.ReLU(net[res_name], in_place=True) + + +def InceptionTower(net, from_layer, tower_name, layer_params, **bn_param): + use_scale = False + for param in layer_params: + tower_layer = '{}/{}'.format(tower_name, param['name']) + del param['name'] + if 'pool' in tower_layer: + net[tower_layer] = L.Pooling(net[from_layer], **param) + else: + param.update(bn_param) + ConvBNLayer(net, from_layer, tower_layer, use_bn=True, use_relu=True, + use_scale=use_scale, **param) + from_layer = tower_layer + return net[from_layer] + +def CreateAnnotatedDataLayer(source, batch_size=32, backend=P.Data.LMDB, + output_label=True, train=True, label_map_file='', anno_type=None, + transform_param={}, batch_sampler=[{}]): + if train: + kwargs = { + 'include': dict(phase=caffe_pb2.Phase.Value('TRAIN')), + 'transform_param': transform_param, + } + else: + kwargs = { + 'include': dict(phase=caffe_pb2.Phase.Value('TEST')), + 'transform_param': 
transform_param, + } + ntop = 1 + if output_label: + ntop = 2 + annotated_data_param = { + 'label_map_file': label_map_file, + 'batch_sampler': batch_sampler, + } + if anno_type is not None: + annotated_data_param.update({'anno_type': anno_type}) + return L.AnnotatedData(name="data", annotated_data_param=annotated_data_param, + data_param=dict(batch_size=batch_size, backend=backend, source=source), + ntop=ntop, **kwargs) + + +def ZFNetBody(net, from_layer, need_fc=True, fully_conv=False, reduced=False, + dilated=False, dropout=True, need_fc8=False, freeze_layers=[]): + kwargs = { + 'param': [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)], + 'weight_filler': dict(type='xavier'), + 'bias_filler': dict(type='constant', value=0)} + + assert from_layer in net.keys() + net.conv1 = L.Convolution(net[from_layer], num_output=96, pad=3, kernel_size=7, stride=2, **kwargs) + net.relu1 = L.ReLU(net.conv1, in_place=True) + + net.norm1 = L.LRN(net.relu1, local_size=3, alpha=0.00005, beta=0.75, + norm_region=P.LRN.WITHIN_CHANNEL, engine=P.LRN.CAFFE) + + net.pool1 = L.Pooling(net.norm1, pool=P.Pooling.MAX, pad=1, kernel_size=3, stride=2) + + net.conv2 = L.Convolution(net.pool1, num_output=256, pad=2, kernel_size=5, stride=2, **kwargs) + net.relu2 = L.ReLU(net.conv2, in_place=True) + + net.norm2 = L.LRN(net.relu2, local_size=3, alpha=0.00005, beta=0.75, + norm_region=P.LRN.WITHIN_CHANNEL, engine=P.LRN.CAFFE) + + net.pool2 = L.Pooling(net.norm2, pool=P.Pooling.MAX, pad=1, kernel_size=3, stride=2) + + net.conv3 = L.Convolution(net.pool2, num_output=384, pad=1, kernel_size=3, **kwargs) + net.relu3 = L.ReLU(net.conv3, in_place=True) + net.conv4 = L.Convolution(net.relu3, num_output=384, pad=1, kernel_size=3, **kwargs) + net.relu4 = L.ReLU(net.conv4, in_place=True) + net.conv5 = L.Convolution(net.relu4, num_output=256, pad=1, kernel_size=3, **kwargs) + net.relu5 = L.ReLU(net.conv5, in_place=True) + + if need_fc: + if dilated: + name = 'pool5' + net[name] = 
L.Pooling(net.relu5, pool=P.Pooling.MAX, pad=1, kernel_size=3, stride=1) + else: + name = 'pool5' + net[name] = L.Pooling(net.relu5, pool=P.Pooling.MAX, pad=1, kernel_size=3, stride=2) + + if fully_conv: + if dilated: + if reduced: + net.fc6 = L.Convolution(net[name], num_output=1024, pad=5, kernel_size=3, dilation=5, **kwargs) + else: + net.fc6 = L.Convolution(net[name], num_output=4096, pad=5, kernel_size=6, dilation=2, **kwargs) + else: + if reduced: + net.fc6 = L.Convolution(net[name], num_output=1024, pad=2, kernel_size=3, dilation=2, **kwargs) + else: + net.fc6 = L.Convolution(net[name], num_output=4096, pad=2, kernel_size=6, **kwargs) + + net.relu6 = L.ReLU(net.fc6, in_place=True) + if dropout: + net.drop6 = L.Dropout(net.relu6, dropout_ratio=0.5, in_place=True) + + if reduced: + net.fc7 = L.Convolution(net.relu6, num_output=1024, kernel_size=1, **kwargs) + else: + net.fc7 = L.Convolution(net.relu6, num_output=4096, kernel_size=1, **kwargs) + net.relu7 = L.ReLU(net.fc7, in_place=True) + if dropout: + net.drop7 = L.Dropout(net.relu7, dropout_ratio=0.5, in_place=True) + else: + net.fc6 = L.InnerProduct(net.pool5, num_output=4096) + net.relu6 = L.ReLU(net.fc6, in_place=True) + if dropout: + net.drop6 = L.Dropout(net.relu6, dropout_ratio=0.5, in_place=True) + net.fc7 = L.InnerProduct(net.relu6, num_output=4096) + net.relu7 = L.ReLU(net.fc7, in_place=True) + if dropout: + net.drop7 = L.Dropout(net.relu7, dropout_ratio=0.5, in_place=True) + if need_fc8: + from_layer = net.keys()[-1] + if fully_conv: + net.fc8 = L.Convolution(net[from_layer], num_output=1000, kernel_size=1, **kwargs) + else: + net.fc8 = L.InnerProduct(net[from_layer], num_output=1000) + net.prob = L.Softmax(net.fc8) + + # Update freeze layers. 
+ kwargs['param'] = [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)] + layers = net.keys() + for freeze_layer in freeze_layers: + if freeze_layer in layers: + net.update(freeze_layer, kwargs) + + return net + +def AlexNetBody(net, from_layer, need_fc=True, fully_conv=False, reduced=False, + dilated=False, dropout=True, need_fc8=False, freeze_layers=[]): + kwargs = { + 'param': [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)], + 'weight_filler': dict(type='xavier'), + 'bias_filler': dict(type='constant', value=0)} + + assert from_layer in net.keys() + net.conv1 = L.Convolution(net[from_layer], num_output=96, kernel_size=11, stride=4, **kwargs) + net.relu1 = L.ReLU(net.conv1, in_place=True) + net.norm1 = L.LRN(net.relu1, k=2, local_size=5, alpha=0.0001, beta=0.75) + net.pool1 = L.Pooling(net.norm1, pool=P.Pooling.MAX, kernel_size=3, stride=2) + + net.conv2 = L.Convolution(net.pool1, num_output=256, pad=2, stride=1, kernel_size=5, group=2, **kwargs) + net.relu2 = L.ReLU(net.conv2, in_place=True) + net.norm2 = L.LRN(net.relu2, k=2, local_size=5, alpha=0.0001, beta=0.75) + net.pool2 = L.Pooling(net.norm2, pool=P.Pooling.MAX, kernel_size=3, stride=2) + + net.conv3 = L.Convolution(net.pool2, num_output=384, pad=1, stride=1, kernel_size=3, **kwargs) + net.relu3 = L.ReLU(net.conv3, in_place=True) + net.conv4 = L.Convolution(net.relu3, num_output=384, pad=1, stride=1, kernel_size=3, group=2, **kwargs) + net.relu4 = L.ReLU(net.conv4, in_place=True) + net.conv5 = L.Convolution(net.relu4, num_output=256, pad=1, stride=1, kernel_size=3, group=2, **kwargs) + net.relu5 = L.ReLU(net.conv5, in_place=True) + + if need_fc: + if dilated: + name = 'pool5' + net[name] = L.Pooling(net.relu5, pool=P.Pooling.MAX, pad=1, kernel_size=3, stride=1) + else: + name = 'pool5' + net[name] = L.Pooling(net.relu5, pool=P.Pooling.MAX, pad=1, kernel_size=3, stride=2) + + if fully_conv: + if dilated: + if reduced: + net.fc6_conv = L.Convolution(net[name], num_output=1024, 
pad=5, kernel_size=3, dilation=5, **kwargs) + else: + net.fc6_conv = L.Convolution(net[name], num_output=4096, pad=5, kernel_size=6, dilation=2, **kwargs) + else: + if reduced: + net.fc6_conv = L.Convolution(net[name], num_output=1024, pad=2, kernel_size=3, dilation=2, **kwargs) + else: + net.fc6_conv = L.Convolution(net[name], num_output=4096, pad=2, kernel_size=6, **kwargs) + + net.relu6 = L.ReLU(net.fc6_conv, in_place=True) + if dropout: + net.drop6 = L.Dropout(net.relu6, dropout_ratio=0.5, in_place=True) + + if reduced: + net.fc7_conv = L.Convolution(net.relu6, num_output=1024, kernel_size=1, **kwargs) + else: + net.fc7_conv = L.Convolution(net.relu6, num_output=4096, kernel_size=1, **kwargs) + net.relu7 = L.ReLU(net.fc7_conv, in_place=True) + if dropout: + net.drop7 = L.Dropout(net.relu7, dropout_ratio=0.5, in_place=True) + else: + net.fc6 = L.InnerProduct(net.pool5, num_output=4096) + net.relu6 = L.ReLU(net.fc6, in_place=True) + if dropout: + net.drop6 = L.Dropout(net.relu6, dropout_ratio=0.5, in_place=True) + net.fc7 = L.InnerProduct(net.relu6, num_output=4096) + net.relu7 = L.ReLU(net.fc7, in_place=True) + if dropout: + net.drop7 = L.Dropout(net.relu7, dropout_ratio=0.5, in_place=True) + + if need_fc8: + from_layer = net.keys()[-1] + if fully_conv: + net.fc8_conv = L.Convolution(net[from_layer], num_output=1000, kernel_size=1, **kwargs) + else: + net.fc8 = L.InnerProduct(net[from_layer], num_output=1000) + net.prob = L.Softmax(net.fc8) + + + # Update freeze layers. 
+ kwargs['param'] = [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)] + layers = net.keys() + for freeze_layer in freeze_layers: + if freeze_layer in layers: + net.update(freeze_layer, kwargs) + + return net + +def VGGNetBody(net, from_layer, need_fc=True, fully_conv=False, reduced=False, + dilated=False, nopool=False, dropout=True, freeze_layers=[], dilate_pool4=False): + kwargs = { + 'param': [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)], + 'weight_filler': dict(type='xavier'), + 'bias_filler': dict(type='constant', value=0)} + + assert from_layer in net.keys() + net.conv1_1 = L.Convolution(net[from_layer], num_output=64, pad=1, kernel_size=3, **kwargs) + + net.relu1_1 = L.ReLU(net.conv1_1, in_place=True) + net.conv1_2 = L.Convolution(net.relu1_1, num_output=64, pad=1, kernel_size=3, **kwargs) + net.relu1_2 = L.ReLU(net.conv1_2, in_place=True) + + if nopool: + name = 'conv1_3' + net[name] = L.Convolution(net.relu1_2, num_output=64, pad=1, kernel_size=3, stride=2, **kwargs) + else: + name = 'pool1' + net.pool1 = L.Pooling(net.relu1_2, pool=P.Pooling.MAX, kernel_size=2, stride=2) + + net.conv2_1 = L.Convolution(net[name], num_output=128, pad=1, kernel_size=3, **kwargs) + net.relu2_1 = L.ReLU(net.conv2_1, in_place=True) + net.conv2_2 = L.Convolution(net.relu2_1, num_output=128, pad=1, kernel_size=3, **kwargs) + net.relu2_2 = L.ReLU(net.conv2_2, in_place=True) + + if nopool: + name = 'conv2_3' + net[name] = L.Convolution(net.relu2_2, num_output=128, pad=1, kernel_size=3, stride=2, **kwargs) + else: + name = 'pool2' + net[name] = L.Pooling(net.relu2_2, pool=P.Pooling.MAX, kernel_size=2, stride=2) + + net.conv3_1 = L.Convolution(net[name], num_output=256, pad=1, kernel_size=3, **kwargs) + net.relu3_1 = L.ReLU(net.conv3_1, in_place=True) + net.conv3_2 = L.Convolution(net.relu3_1, num_output=256, pad=1, kernel_size=3, **kwargs) + net.relu3_2 = L.ReLU(net.conv3_2, in_place=True) + net.conv3_3 = L.Convolution(net.relu3_2, num_output=256, 
pad=1, kernel_size=3, **kwargs) + net.relu3_3 = L.ReLU(net.conv3_3, in_place=True) + + if nopool: + name = 'conv3_4' + net[name] = L.Convolution(net.relu3_3, num_output=256, pad=1, kernel_size=3, stride=2, **kwargs) + else: + name = 'pool3' + net[name] = L.Pooling(net.relu3_3, pool=P.Pooling.MAX, kernel_size=2, stride=2) + + net.conv4_1 = L.Convolution(net[name], num_output=512, pad=1, kernel_size=3, **kwargs) + net.relu4_1 = L.ReLU(net.conv4_1, in_place=True) + net.conv4_2 = L.Convolution(net.relu4_1, num_output=512, pad=1, kernel_size=3, **kwargs) + net.relu4_2 = L.ReLU(net.conv4_2, in_place=True) + net.conv4_3 = L.Convolution(net.relu4_2, num_output=512, pad=1, kernel_size=3, **kwargs) + net.relu4_3 = L.ReLU(net.conv4_3, in_place=True) + + if nopool: + name = 'conv4_4' + net[name] = L.Convolution(net.relu4_3, num_output=512, pad=1, kernel_size=3, stride=2, **kwargs) + else: + name = 'pool4' + if dilate_pool4: + net[name] = L.Pooling(net.relu4_3, pool=P.Pooling.MAX, kernel_size=3, stride=1, pad=1) + dilation = 2 + else: + net[name] = L.Pooling(net.relu4_3, pool=P.Pooling.MAX, kernel_size=2, stride=2) + dilation = 1 + + kernel_size = 3 + pad = int((kernel_size + (dilation - 1) * (kernel_size - 1)) - 1) / 2 + net.conv5_1 = L.Convolution(net[name], num_output=512, pad=pad, kernel_size=kernel_size, dilation=dilation, **kwargs) + net.relu5_1 = L.ReLU(net.conv5_1, in_place=True) + net.conv5_2 = L.Convolution(net.relu5_1, num_output=512, pad=pad, kernel_size=kernel_size, dilation=dilation, **kwargs) + net.relu5_2 = L.ReLU(net.conv5_2, in_place=True) + net.conv5_3 = L.Convolution(net.relu5_2, num_output=512, pad=pad, kernel_size=kernel_size, dilation=dilation, **kwargs) + net.relu5_3 = L.ReLU(net.conv5_3, in_place=True) + + if need_fc: + if dilated: + if nopool: + name = 'conv5_4' + net[name] = L.Convolution(net.relu5_3, num_output=512, pad=1, kernel_size=3, stride=1, **kwargs) + else: + name = 'pool5' + net[name] = L.Pooling(net.relu5_3, pool=P.Pooling.MAX, pad=1, 
kernel_size=3, stride=1) + else: + if nopool: + name = 'conv5_4' + net[name] = L.Convolution(net.relu5_3, num_output=512, pad=1, kernel_size=3, stride=2, **kwargs) + else: + name = 'pool5' + net[name] = L.Pooling(net.relu5_3, pool=P.Pooling.MAX, kernel_size=2, stride=2) + + if fully_conv: + if dilated: + if reduced: + dilation = dilation * 6 + kernel_size = 3 + num_output = 1024 + else: + dilation = dilation * 2 + kernel_size = 7 + num_output = 4096 + else: + if reduced: + dilation = dilation * 3 + kernel_size = 3 + num_output = 1024 + else: + kernel_size = 7 + num_output = 4096 + pad = int((kernel_size + (dilation - 1) * (kernel_size - 1)) - 1) / 2 + net.fc6 = L.Convolution(net[name], num_output=num_output, pad=pad, kernel_size=kernel_size, dilation=dilation, **kwargs) + + net.relu6 = L.ReLU(net.fc6, in_place=True) + if dropout: + net.drop6 = L.Dropout(net.relu6, dropout_ratio=0.5, in_place=True) + + if reduced: + net.fc7 = L.Convolution(net.relu6, num_output=1024, kernel_size=1, **kwargs) + else: + net.fc7 = L.Convolution(net.relu6, num_output=4096, kernel_size=1, **kwargs) + net.relu7 = L.ReLU(net.fc7, in_place=True) + if dropout: + net.drop7 = L.Dropout(net.relu7, dropout_ratio=0.5, in_place=True) + else: + net.fc6 = L.InnerProduct(net.pool5, num_output=4096) + net.relu6 = L.ReLU(net.fc6, in_place=True) + if dropout: + net.drop6 = L.Dropout(net.relu6, dropout_ratio=0.5, in_place=True) + net.fc7 = L.InnerProduct(net.relu6, num_output=4096) + net.relu7 = L.ReLU(net.fc7, in_place=True) + if dropout: + net.drop7 = L.Dropout(net.relu7, dropout_ratio=0.5, in_place=True) + + # Update freeze layers. 
+ kwargs['param'] = [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)] + layers = net.keys() + for freeze_layer in freeze_layers: + if freeze_layer in layers: + net.update(freeze_layer, kwargs) + + return net + + +def ResNet101Body(net, from_layer, use_pool5=True, use_dilation_conv5=False, **bn_param): + conv_prefix = '' + conv_postfix = '' + bn_prefix = 'bn_' + bn_postfix = '' + scale_prefix = 'scale_' + scale_postfix = '' + ConvBNLayer(net, from_layer, 'conv1', use_bn=True, use_relu=True, + num_output=64, kernel_size=7, pad=3, stride=2, + conv_prefix=conv_prefix, conv_postfix=conv_postfix, + bn_prefix=bn_prefix, bn_postfix=bn_postfix, + scale_prefix=scale_prefix, scale_postfix=scale_postfix, **bn_param) + + net.pool1 = L.Pooling(net.conv1, pool=P.Pooling.MAX, kernel_size=3, stride=2) + + ResBody(net, 'pool1', '2a', out2a=64, out2b=64, out2c=256, stride=1, use_branch1=True, **bn_param) + ResBody(net, 'res2a', '2b', out2a=64, out2b=64, out2c=256, stride=1, use_branch1=False, **bn_param) + ResBody(net, 'res2b', '2c', out2a=64, out2b=64, out2c=256, stride=1, use_branch1=False, **bn_param) + + ResBody(net, 'res2c', '3a', out2a=128, out2b=128, out2c=512, stride=2, use_branch1=True, **bn_param) + + from_layer = 'res3a' + for i in xrange(1, 4): + block_name = '3b{}'.format(i) + ResBody(net, from_layer, block_name, out2a=128, out2b=128, out2c=512, stride=1, use_branch1=False, **bn_param) + from_layer = 'res{}'.format(block_name) + + ResBody(net, from_layer, '4a', out2a=256, out2b=256, out2c=1024, stride=2, use_branch1=True, **bn_param) + + from_layer = 'res4a' + for i in xrange(1, 23): + block_name = '4b{}'.format(i) + ResBody(net, from_layer, block_name, out2a=256, out2b=256, out2c=1024, stride=1, use_branch1=False, **bn_param) + from_layer = 'res{}'.format(block_name) + + stride = 2 + dilation = 1 + if use_dilation_conv5: + stride = 1 + dilation = 2 + + ResBody(net, from_layer, '5a', out2a=512, out2b=512, out2c=2048, stride=stride, use_branch1=True, 
dilation=dilation, **bn_param) + ResBody(net, 'res5a', '5b', out2a=512, out2b=512, out2c=2048, stride=1, use_branch1=False, dilation=dilation, **bn_param) + ResBody(net, 'res5b', '5c', out2a=512, out2b=512, out2c=2048, stride=1, use_branch1=False, dilation=dilation, **bn_param) + + if use_pool5: + net.pool5 = L.Pooling(net.res5c, pool=P.Pooling.AVE, global_pooling=True) + + return net + + +def ResNet152Body(net, from_layer, use_pool5=True, use_dilation_conv5=False, **bn_param): + conv_prefix = '' + conv_postfix = '' + bn_prefix = 'bn_' + bn_postfix = '' + scale_prefix = 'scale_' + scale_postfix = '' + ConvBNLayer(net, from_layer, 'conv1', use_bn=True, use_relu=True, + num_output=64, kernel_size=7, pad=3, stride=2, + conv_prefix=conv_prefix, conv_postfix=conv_postfix, + bn_prefix=bn_prefix, bn_postfix=bn_postfix, + scale_prefix=scale_prefix, scale_postfix=scale_postfix, **bn_param) + + net.pool1 = L.Pooling(net.conv1, pool=P.Pooling.MAX, kernel_size=3, stride=2) + + ResBody(net, 'pool1', '2a', out2a=64, out2b=64, out2c=256, stride=1, use_branch1=True, **bn_param) + ResBody(net, 'res2a', '2b', out2a=64, out2b=64, out2c=256, stride=1, use_branch1=False, **bn_param) + ResBody(net, 'res2b', '2c', out2a=64, out2b=64, out2c=256, stride=1, use_branch1=False, **bn_param) + + ResBody(net, 'res2c', '3a', out2a=128, out2b=128, out2c=512, stride=2, use_branch1=True, **bn_param) + + from_layer = 'res3a' + for i in xrange(1, 8): + block_name = '3b{}'.format(i) + ResBody(net, from_layer, block_name, out2a=128, out2b=128, out2c=512, stride=1, use_branch1=False, **bn_param) + from_layer = 'res{}'.format(block_name) + + ResBody(net, from_layer, '4a', out2a=256, out2b=256, out2c=1024, stride=2, use_branch1=True, **bn_param) + + from_layer = 'res4a' + for i in xrange(1, 36): + block_name = '4b{}'.format(i) + ResBody(net, from_layer, block_name, out2a=256, out2b=256, out2c=1024, stride=1, use_branch1=False, **bn_param) + from_layer = 'res{}'.format(block_name) + + stride = 2 + dilation 
= 1 + if use_dilation_conv5: + stride = 1 + dilation = 2 + + ResBody(net, from_layer, '5a', out2a=512, out2b=512, out2c=2048, stride=stride, use_branch1=True, dilation=dilation, **bn_param) + ResBody(net, 'res5a', '5b', out2a=512, out2b=512, out2c=2048, stride=1, use_branch1=False, dilation=dilation, **bn_param) + ResBody(net, 'res5b', '5c', out2a=512, out2b=512, out2c=2048, stride=1, use_branch1=False, dilation=dilation, **bn_param) + + if use_pool5: + net.pool5 = L.Pooling(net.res5c, pool=P.Pooling.AVE, global_pooling=True) + + return net + + +def InceptionV3Body(net, from_layer, output_pred=False, **bn_param): + # scale is fixed to 1, thus we ignore it. + use_scale = False + + out_layer = 'conv' + ConvBNLayer(net, from_layer, out_layer, use_bn=True, use_relu=True, + num_output=32, kernel_size=3, pad=0, stride=2, use_scale=use_scale, + **bn_param) + from_layer = out_layer + + out_layer = 'conv_1' + ConvBNLayer(net, from_layer, out_layer, use_bn=True, use_relu=True, + num_output=32, kernel_size=3, pad=0, stride=1, use_scale=use_scale, + **bn_param) + from_layer = out_layer + + out_layer = 'conv_2' + ConvBNLayer(net, from_layer, out_layer, use_bn=True, use_relu=True, + num_output=64, kernel_size=3, pad=1, stride=1, use_scale=use_scale, + **bn_param) + from_layer = out_layer + + out_layer = 'pool' + net[out_layer] = L.Pooling(net[from_layer], pool=P.Pooling.MAX, + kernel_size=3, stride=2, pad=0) + from_layer = out_layer + + out_layer = 'conv_3' + ConvBNLayer(net, from_layer, out_layer, use_bn=True, use_relu=True, + num_output=80, kernel_size=1, pad=0, stride=1, use_scale=use_scale, + **bn_param) + from_layer = out_layer + + out_layer = 'conv_4' + ConvBNLayer(net, from_layer, out_layer, use_bn=True, use_relu=True, + num_output=192, kernel_size=3, pad=0, stride=1, use_scale=use_scale, + **bn_param) + from_layer = out_layer + + out_layer = 'pool_1' + net[out_layer] = L.Pooling(net[from_layer], pool=P.Pooling.MAX, + kernel_size=3, stride=2, pad=0) + from_layer = 
out_layer + + # inceptions with 1x1, 3x3, 5x5 convolutions + for inception_id in xrange(0, 3): + if inception_id == 0: + out_layer = 'mixed' + tower_2_conv_num_output = 32 + else: + out_layer = 'mixed_{}'.format(inception_id) + tower_2_conv_num_output = 64 + towers = [] + tower_name = '{}'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='conv', num_output=64, kernel_size=1, pad=0, stride=1), + ], **bn_param) + towers.append(tower) + tower_name = '{}/tower'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='conv', num_output=48, kernel_size=1, pad=0, stride=1), + dict(name='conv_1', num_output=64, kernel_size=5, pad=2, stride=1), + ], **bn_param) + towers.append(tower) + tower_name = '{}/tower_1'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='conv', num_output=64, kernel_size=1, pad=0, stride=1), + dict(name='conv_1', num_output=96, kernel_size=3, pad=1, stride=1), + dict(name='conv_2', num_output=96, kernel_size=3, pad=1, stride=1), + ], **bn_param) + towers.append(tower) + tower_name = '{}/tower_2'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='pool', pool=P.Pooling.AVE, kernel_size=3, pad=1, stride=1), + dict(name='conv', num_output=tower_2_conv_num_output, kernel_size=1, pad=0, stride=1), + ], **bn_param) + towers.append(tower) + out_layer = '{}/join'.format(out_layer) + net[out_layer] = L.Concat(*towers, axis=1) + from_layer = out_layer + + # inceptions with 1x1, 3x3(in sequence) convolutions + out_layer = 'mixed_3' + towers = [] + tower_name = '{}'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='conv', num_output=384, kernel_size=3, pad=0, stride=2), + ], **bn_param) + towers.append(tower) + tower_name = '{}/tower'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='conv', num_output=64, kernel_size=1, pad=0, stride=1), + 
dict(name='conv_1', num_output=96, kernel_size=3, pad=1, stride=1), + dict(name='conv_2', num_output=96, kernel_size=3, pad=0, stride=2), + ], **bn_param) + towers.append(tower) + tower_name = '{}'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='pool', pool=P.Pooling.MAX, kernel_size=3, pad=0, stride=2), + ], **bn_param) + towers.append(tower) + out_layer = '{}/join'.format(out_layer) + net[out_layer] = L.Concat(*towers, axis=1) + from_layer = out_layer + + # inceptions with 1x1, 7x1, 1x7 convolutions + for inception_id in xrange(4, 8): + if inception_id == 4: + num_output = 128 + elif inception_id == 5 or inception_id == 6: + num_output = 160 + elif inception_id == 7: + num_output = 192 + out_layer = 'mixed_{}'.format(inception_id) + towers = [] + tower_name = '{}'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='conv', num_output=192, kernel_size=1, pad=0, stride=1), + ], **bn_param) + towers.append(tower) + tower_name = '{}/tower'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='conv', num_output=num_output, kernel_size=1, pad=0, stride=1), + dict(name='conv_1', num_output=num_output, kernel_size=[1, 7], pad=[0, 3], stride=[1, 1]), + dict(name='conv_2', num_output=192, kernel_size=[7, 1], pad=[3, 0], stride=[1, 1]), + ], **bn_param) + towers.append(tower) + tower_name = '{}/tower_1'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='conv', num_output=num_output, kernel_size=1, pad=0, stride=1), + dict(name='conv_1', num_output=num_output, kernel_size=[7, 1], pad=[3, 0], stride=[1, 1]), + dict(name='conv_2', num_output=num_output, kernel_size=[1, 7], pad=[0, 3], stride=[1, 1]), + dict(name='conv_3', num_output=num_output, kernel_size=[7, 1], pad=[3, 0], stride=[1, 1]), + dict(name='conv_4', num_output=192, kernel_size=[1, 7], pad=[0, 3], stride=[1, 1]), + ], **bn_param) + towers.append(tower) + tower_name = 
'{}/tower_2'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='pool', pool=P.Pooling.AVE, kernel_size=3, pad=1, stride=1), + dict(name='conv', num_output=192, kernel_size=1, pad=0, stride=1), + ], **bn_param) + towers.append(tower) + out_layer = '{}/join'.format(out_layer) + net[out_layer] = L.Concat(*towers, axis=1) + from_layer = out_layer + + # inceptions with 1x1, 3x3, 1x7, 7x1 filters + out_layer = 'mixed_8' + towers = [] + tower_name = '{}/tower'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='conv', num_output=192, kernel_size=1, pad=0, stride=1), + dict(name='conv_1', num_output=320, kernel_size=3, pad=0, stride=2), + ], **bn_param) + towers.append(tower) + tower_name = '{}/tower_1'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='conv', num_output=192, kernel_size=1, pad=0, stride=1), + dict(name='conv_1', num_output=192, kernel_size=[1, 7], pad=[0, 3], stride=[1, 1]), + dict(name='conv_2', num_output=192, kernel_size=[7, 1], pad=[3, 0], stride=[1, 1]), + dict(name='conv_3', num_output=192, kernel_size=3, pad=0, stride=2), + ], **bn_param) + towers.append(tower) + tower_name = '{}'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='pool', pool=P.Pooling.MAX, kernel_size=3, pad=0, stride=2), + ], **bn_param) + towers.append(tower) + out_layer = '{}/join'.format(out_layer) + net[out_layer] = L.Concat(*towers, axis=1) + from_layer = out_layer + + for inception_id in xrange(9, 11): + num_output = 384 + num_output2 = 448 + if inception_id == 9: + pool = P.Pooling.AVE + else: + pool = P.Pooling.MAX + out_layer = 'mixed_{}'.format(inception_id) + towers = [] + tower_name = '{}'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='conv', num_output=320, kernel_size=1, pad=0, stride=1), + ], **bn_param) + towers.append(tower) + + tower_name = '{}/tower'.format(out_layer) + tower = 
InceptionTower(net, from_layer, tower_name, [ + dict(name='conv', num_output=num_output, kernel_size=1, pad=0, stride=1), + ], **bn_param) + subtowers = [] + subtower_name = '{}/mixed'.format(tower_name) + subtower = InceptionTower(net, '{}/conv'.format(tower_name), subtower_name, [ + dict(name='conv', num_output=num_output, kernel_size=[1, 3], pad=[0, 1], stride=[1, 1]), + ], **bn_param) + subtowers.append(subtower) + subtower = InceptionTower(net, '{}/conv'.format(tower_name), subtower_name, [ + dict(name='conv_1', num_output=num_output, kernel_size=[3, 1], pad=[1, 0], stride=[1, 1]), + ], **bn_param) + subtowers.append(subtower) + net[subtower_name] = L.Concat(*subtowers, axis=1) + towers.append(net[subtower_name]) + + tower_name = '{}/tower_1'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='conv', num_output=num_output2, kernel_size=1, pad=0, stride=1), + dict(name='conv_1', num_output=num_output, kernel_size=3, pad=1, stride=1), + ], **bn_param) + subtowers = [] + subtower_name = '{}/mixed'.format(tower_name) + subtower = InceptionTower(net, '{}/conv_1'.format(tower_name), subtower_name, [ + dict(name='conv', num_output=num_output, kernel_size=[1, 3], pad=[0, 1], stride=[1, 1]), + ], **bn_param) + subtowers.append(subtower) + subtower = InceptionTower(net, '{}/conv_1'.format(tower_name), subtower_name, [ + dict(name='conv_1', num_output=num_output, kernel_size=[3, 1], pad=[1, 0], stride=[1, 1]), + ], **bn_param) + subtowers.append(subtower) + net[subtower_name] = L.Concat(*subtowers, axis=1) + towers.append(net[subtower_name]) + + tower_name = '{}/tower_2'.format(out_layer) + tower = InceptionTower(net, from_layer, tower_name, [ + dict(name='pool', pool=pool, kernel_size=3, pad=1, stride=1), + dict(name='conv', num_output=192, kernel_size=1, pad=0, stride=1), + ], **bn_param) + towers.append(tower) + out_layer = '{}/join'.format(out_layer) + net[out_layer] = L.Concat(*towers, axis=1) + from_layer = out_layer + + if 
output_pred: + net.pool_3 = L.Pooling(net[from_layer], pool=P.Pooling.AVE, kernel_size=8, pad=0, stride=1) + net.softmax = L.InnerProduct(net.pool_3, num_output=1008) + net.softmax_prob = L.Softmax(net.softmax) + + return net + +def CreateMultiBoxHead(net, data_layer="data", num_classes=[], from_layers=[], + use_objectness=False, normalizations=[], use_batchnorm=True, lr_mult=1, + use_scale=True, min_sizes=[], max_sizes=[], prior_variance = [0.1], + aspect_ratios=[], steps=[], img_height=0, img_width=0, share_location=True, + flip=True, clip=True, offset=0.5, inter_layer_depth=[], kernel_size=1, pad=0, + conf_postfix='', loc_postfix='', **bn_param): + assert num_classes, "must provide num_classes" + assert num_classes > 0, "num_classes must be positive number" + if normalizations: + assert len(from_layers) == len(normalizations), "from_layers and normalizations should have same length" + assert len(from_layers) == len(min_sizes), "from_layers and min_sizes should have same length" + if max_sizes: + assert len(from_layers) == len(max_sizes), "from_layers and max_sizes should have same length" + if aspect_ratios: + assert len(from_layers) == len(aspect_ratios), "from_layers and aspect_ratios should have same length" + if steps: + assert len(from_layers) == len(steps), "from_layers and steps should have same length" + net_layers = net.keys() + assert data_layer in net_layers, "data_layer is not in net's layers" + if inter_layer_depth: + assert len(from_layers) == len(inter_layer_depth), "from_layers and inter_layer_depth should have same length" + + num = len(from_layers) + priorbox_layers = [] + loc_layers = [] + conf_layers = [] + objectness_layers = [] + for i in range(0, num): + from_layer = from_layers[i] + + # Get the normalize value. 
+ if normalizations: + if normalizations[i] != -1: + norm_name = "{}_norm".format(from_layer) + net[norm_name] = L.Normalize(net[from_layer], scale_filler=dict(type="constant", value=normalizations[i]), + across_spatial=False, channel_shared=False) + from_layer = norm_name + + # Add intermediate layers. + if inter_layer_depth: + if inter_layer_depth[i] > 0: + inter_name = "{}_inter".format(from_layer) + ConvBNLayer(net, from_layer, inter_name, use_bn=use_batchnorm, use_relu=True, lr_mult=lr_mult, + num_output=inter_layer_depth[i], kernel_size=3, pad=1, stride=1, **bn_param) + from_layer = inter_name + + # Estimate number of priors per location given provided parameters. + min_size = min_sizes[i] + if type(min_size) is not list: + min_size = [min_size] + aspect_ratio = [] + if len(aspect_ratios) > i: + aspect_ratio = aspect_ratios[i] + if type(aspect_ratio) is not list: + aspect_ratio = [aspect_ratio] + max_size = [] + if len(max_sizes) > i: + max_size = max_sizes[i] + if type(max_size) is not list: + max_size = [max_size] + if max_size: + assert len(max_size) == len(min_size), "max_size and min_size should have same length." + if max_size: + num_priors_per_location = (2 + len(aspect_ratio)) * len(min_size) + else: + num_priors_per_location = (1 + len(aspect_ratio)) * len(min_size) + if flip: + num_priors_per_location += len(aspect_ratio) * len(min_size) + step = [] + if len(steps) > i: + step = steps[i] + + # Create location prediction layer. 
+ name = "{}_mbox_loc{}".format(from_layer, loc_postfix) + num_loc_output = num_priors_per_location * 4; + if not share_location: + num_loc_output *= num_classes + ConvBNLayer(net, from_layer, name, use_bn=use_batchnorm, use_relu=False, lr_mult=lr_mult, + num_output=num_loc_output, kernel_size=kernel_size, pad=pad, stride=1, **bn_param) + permute_name = "{}_perm".format(name) + net[permute_name] = L.Permute(net[name], order=[0, 2, 3, 1]) + flatten_name = "{}_flat".format(name) + net[flatten_name] = L.Flatten(net[permute_name], axis=1) + loc_layers.append(net[flatten_name]) + + # Create confidence prediction layer. + name = "{}_mbox_conf{}".format(from_layer, conf_postfix) + num_conf_output = num_priors_per_location * num_classes; + ConvBNLayer(net, from_layer, name, use_bn=use_batchnorm, use_relu=False, lr_mult=lr_mult, + num_output=num_conf_output, kernel_size=kernel_size, pad=pad, stride=1, **bn_param) + permute_name = "{}_perm".format(name) + net[permute_name] = L.Permute(net[name], order=[0, 2, 3, 1]) + flatten_name = "{}_flat".format(name) + net[flatten_name] = L.Flatten(net[permute_name], axis=1) + conf_layers.append(net[flatten_name]) + + # Create prior generation layer. + name = "{}_mbox_priorbox".format(from_layer) + net[name] = L.PriorBox(net[from_layer], net[data_layer], min_size=min_size, + clip=clip, variance=prior_variance, offset=offset) + if max_size: + net.update(name, {'max_size': max_size}) + if aspect_ratio: + net.update(name, {'aspect_ratio': aspect_ratio, 'flip': flip}) + if step: + net.update(name, {'step': step}) + if img_height != 0 and img_width != 0: + if img_height == img_width: + net.update(name, {'img_size': img_height}) + else: + net.update(name, {'img_h': img_height, 'img_w': img_width}) + priorbox_layers.append(net[name]) + + # Create objectness prediction layer. 
+ if use_objectness: + name = "{}_mbox_objectness".format(from_layer) + num_obj_output = num_priors_per_location * 2; + ConvBNLayer(net, from_layer, name, use_bn=use_batchnorm, use_relu=False, lr_mult=lr_mult, + num_output=num_obj_output, kernel_size=kernel_size, pad=pad, stride=1, **bn_param) + permute_name = "{}_perm".format(name) + net[permute_name] = L.Permute(net[name], order=[0, 2, 3, 1]) + flatten_name = "{}_flat".format(name) + net[flatten_name] = L.Flatten(net[permute_name], axis=1) + objectness_layers.append(net[flatten_name]) + + # Concatenate priorbox, loc, and conf layers. + mbox_layers = [] + name = "mbox_loc" + net[name] = L.Concat(*loc_layers, axis=1) + mbox_layers.append(net[name]) + name = "mbox_conf" + net[name] = L.Concat(*conf_layers, axis=1) + mbox_layers.append(net[name]) + name = "mbox_priorbox" + net[name] = L.Concat(*priorbox_layers, axis=2) + mbox_layers.append(net[name]) + if use_objectness: + name = "mbox_objectness" + net[name] = L.Concat(*objectness_layers, axis=1) + mbox_layers.append(net[name]) + + return mbox_layers diff --git a/python/caffe/net_spec.py b/python/caffe/net_spec.py old mode 100644 new mode 100755 index 5fb1f0b3fb1..10ee4d4f19c --- a/python/caffe/net_spec.py +++ b/python/caffe/net_spec.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# """Python net specification. 
This module provides a way to write nets directly in Python, using a natural, @@ -93,6 +129,9 @@ def to_proto(self): return to_proto(self) + def _update(self, params): + self.fn._update(params) + def _to_proto(self, layers, names, autonames): return self.fn._to_proto(layers, names, autonames) @@ -103,6 +142,10 @@ class Function(object): def __init__(self, type_name, inputs, params): self.type_name = type_name + for index, input in enumerate(inputs): + if not isinstance(input, Top): + raise TypeError('%s input %d is not a Top (type is %s)' % + (type_name, index, type(input))) self.inputs = inputs self.params = params self.ntop = self.params.get('ntop', 1) @@ -128,6 +171,9 @@ def _get_top_name(self, top, names, autonames): names[top] = top.fn.type_name + str(autonames[top.fn.type_name]) return names[top] + def _update(self, params): + self.params.update(params) + def _to_proto(self, layers, names, autonames): if self in layers: return @@ -181,6 +227,20 @@ def __setitem__(self, key, value): def __getitem__(self, item): return self.__getattr__(item) + def __delitem__(self, name): + del self.tops[name] + + def keys(self): + keys = [k for k, v in six.iteritems(self.tops)] + return keys + + def vals(self): + vals = [v for k, v in six.iteritems(self.tops)] + return vals + + def update(self, name, params): + self.tops[name]._update(params) + def to_proto(self): names = {v: k for k, v in six.iteritems(self.tops)} autonames = Counter() diff --git a/python/caffe/pycaffe.py b/python/caffe/pycaffe.py old mode 100644 new mode 100755 index 5bae18d9a4d..bc606148d0f --- a/python/caffe/pycaffe.py +++ b/python/caffe/pycaffe.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. 
+# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# """ Wrap the internal caffe C++ module (_caffe.so) with a clean, Pythonic interface. 
@@ -43,6 +79,16 @@ def _Net_blob_loss_weights(self): self._blob_loss_weights)) return self._blob_loss_weights_dict +@property +def _Net_layer_dict(self): + """ + An OrderedDict (bottom to top, i.e., input to output) of network + layers indexed by name + """ + if not hasattr(self, '_layer_dict'): + self._layer_dict = OrderedDict(zip(self._layer_names, self.layers)) + return self._layer_dict + @property def _Net_params(self): @@ -103,7 +149,7 @@ def _Net_forward(self, blobs=None, start=None, end=None, **kwargs): if end is not None: end_ind = list(self._layer_names).index(end) - outputs = set([end] + blobs) + outputs = set(self.top_names[end] + blobs) else: end_ind = len(self.layers) - 1 outputs = set(self.outputs + blobs) @@ -151,7 +197,7 @@ def _Net_backward(self, diffs=None, start=None, end=None, **kwargs): if end is not None: end_ind = list(self._layer_names).index(end) - outputs = set([end] + diffs) + outputs = set(self.bottom_names[end] + diffs) else: end_ind = 0 outputs = set(self.inputs + diffs) @@ -321,6 +367,7 @@ def get_id_name(self): # Attach methods to Net. Net.blobs = _Net_blobs Net.blob_loss_weights = _Net_blob_loss_weights +Net.layer_dict = _Net_layer_dict Net.params = _Net_params Net.forward = _Net_forward Net.backward = _Net_backward diff --git a/python/caffe/test/test_coord_map.py b/python/caffe/test/test_coord_map.py old mode 100644 new mode 100755 index 613260e25df..25048526d13 --- a/python/caffe/test/test_coord_map.py +++ b/python/caffe/test/test_coord_map.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# import unittest import numpy as np diff --git a/python/caffe/test/test_draw.py b/python/caffe/test/test_draw.py new file mode 100644 index 00000000000..835bb5df010 --- /dev/null +++ b/python/caffe/test/test_draw.py @@ -0,0 +1,37 @@ +import os +import unittest + +from google.protobuf import text_format + +import caffe.draw +from caffe.proto import caffe_pb2 + +def getFilenames(): + """Yields files in the source tree which are Net prototxts.""" + result = [] + + root_dir = os.path.abspath(os.path.join( + os.path.dirname(__file__), '..', '..', '..')) + assert os.path.exists(root_dir) + + for dirname in ('models', 'examples'): + dirname = os.path.join(root_dir, dirname) + assert os.path.exists(dirname) + for cwd, _, filenames in os.walk(dirname): + for filename in filenames: + filename = os.path.join(cwd, filename) + if filename.endswith('.prototxt') and 'solver' not in filename: + yield os.path.join(dirname, filename) + + +class TestDraw(unittest.TestCase): + def test_draw_net(self): + for filename in getFilenames(): + net = caffe_pb2.NetParameter() + with open(filename) as infile: + text_format.Merge(infile.read(), net) + caffe.draw.draw_net(net, 'LR') + + +if __name__ == "__main__": + unittest.main() diff --git a/python/caffe/test/test_io.py b/python/caffe/test/test_io.py old mode 100644 new mode 100755 index 4a16b5b9128..774c7b9d718 --- a/python/caffe/test/test_io.py +++ b/python/caffe/test/test_io.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# import numpy as np import unittest diff --git a/python/caffe/test/test_layer_type_list.py b/python/caffe/test/test_layer_type_list.py old mode 100644 new mode 100755 index 47f4cf6d008..d9eb7d67801 --- a/python/caffe/test/test_layer_type_list.py +++ b/python/caffe/test/test_layer_type_list.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# import unittest import caffe diff --git a/python/caffe/test/test_net.py b/python/caffe/test/test_net.py old mode 100644 new mode 100755 index e1090934d73..04198f06d67 --- a/python/caffe/test/test_net.py +++ b/python/caffe/test/test_net.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# import unittest import tempfile import os @@ -25,16 +61,31 @@ def simple_net_file(num_output): bias_filler { type: 'constant' value: 2 } } param { decay_mult: 1 } param { decay_mult: 0 } } - layer { type: 'InnerProduct' name: 'ip' bottom: 'conv' top: 'ip' + layer { type: 'InnerProduct' name: 'ip' bottom: 'conv' top: 'ip_blob' inner_product_param { num_output: """ + str(num_output) + """ weight_filler { type: 'gaussian' std: 2.5 } bias_filler { type: 'constant' value: -3 } } } - layer { type: 'SoftmaxWithLoss' name: 'loss' bottom: 'ip' bottom: 'label' + layer { type: 'SoftmaxWithLoss' name: 'loss' bottom: 'ip_blob' bottom: 'label' top: 'loss' }""") f.close() return f.name +class TestEngine(unittest.TestCase): + def setUp(self): + self.num_output = 13 + net_file = simple_net_file(self.num_output) + self.net = caffe.Net(net_file, caffe.TRAIN, engine='MKL2017') + # fill in valid labels + self.net.blobs['label'].data[...] 
= \ + np.random.randint(self.num_output, + size=self.net.blobs['label'].data.shape) + os.remove(net_file) + + def test_forward_backward(self): + self.net.forward() + self.net.backward() + class TestNet(unittest.TestCase): def setUp(self): self.num_output = 13 @@ -60,10 +111,35 @@ def test_memory(self): for bl in blobs: total += bl.data.sum() + bl.diff.sum() + def test_layer_dict(self): + layer_dict = self.net.layer_dict + self.assertEqual(list(layer_dict.keys()), list(self.net._layer_names)) + for i, name in enumerate(self.net._layer_names): + self.assertEqual(layer_dict[name].type, + self.net.layers[i].type) + def test_forward_backward(self): self.net.forward() self.net.backward() + def test_forward_start_end(self): + conv_blob=self.net.blobs['conv']; + ip_blob=self.net.blobs['ip_blob']; + sample_data=np.random.uniform(size=conv_blob.data.shape); + sample_data=sample_data.astype(np.float32); + conv_blob.data[:]=sample_data; + forward_blob=self.net.forward(start='ip',end='ip'); + self.assertIn('ip_blob',forward_blob); + + manual_forward=[]; + for i in range(0,conv_blob.data.shape[0]): + dot=np.dot(self.net.params['ip'][0].data, + conv_blob.data[i].reshape(-1)); + manual_forward.append(dot+self.net.params['ip'][1].data); + manual_forward=np.array(manual_forward); + + np.testing.assert_allclose(ip_blob.data,manual_forward,rtol=1e-3); + def test_clear_param_diffs(self): # Run a forward/backward step to have non-zero diffs self.net.forward() @@ -83,13 +159,13 @@ def test_top_bottom_names(self): self.assertEqual(self.net.top_names, OrderedDict([('data', ['data', 'label']), ('conv', ['conv']), - ('ip', ['ip']), + ('ip', ['ip_blob']), ('loss', ['loss'])])) self.assertEqual(self.net.bottom_names, OrderedDict([('data', []), ('conv', ['data']), ('ip', ['conv']), - ('loss', ['ip', 'label'])])) + ('loss', ['ip_blob', 'label'])])) def test_save_and_read(self): f = tempfile.NamedTemporaryFile(mode='w+', delete=False) @@ -173,12 +249,12 @@ class TestLevels(unittest.TestCase): """ 
def setUp(self): - self.f = tempfile.NamedTemporaryFile(mode='w+') + self.f = tempfile.NamedTemporaryFile(mode='w+', delete=False) self.f.write(self.TEST_NET) - self.f.flush() + self.f.close() def tearDown(self): - self.f.close() + os.remove(self.f.name) def check_net(self, net, blobs): net_blobs = [b for b in net.blobs.keys() if 'data' not in b] @@ -238,12 +314,12 @@ class TestStages(unittest.TestCase): """ def setUp(self): - self.f = tempfile.NamedTemporaryFile(mode='w+') + self.f = tempfile.NamedTemporaryFile(mode='w+', delete=False) self.f.write(self.TEST_NET) - self.f.flush() + self.f.close() def tearDown(self): - self.f.close() + os.remove(self.f.name) def check_net(self, net, blobs): net_blobs = [b for b in net.blobs.keys() if 'data' not in b] @@ -320,12 +396,12 @@ class TestAllInOne(unittest.TestCase): """ def setUp(self): - self.f = tempfile.NamedTemporaryFile(mode='w+') + self.f = tempfile.NamedTemporaryFile(mode='w+', delete=False) self.f.write(self.TEST_NET) - self.f.flush() + self.f.close() def tearDown(self): - self.f.close() + os.remove(self.f.name) def check_net(self, net, outputs): self.assertEqual(list(net.blobs['data'].shape), [1,1,10,10]) diff --git a/python/caffe/test/test_net_spec.py b/python/caffe/test/test_net_spec.py old mode 100644 new mode 100755 index fee3c0aaebe..36520c2a5b3 --- a/python/caffe/test/test_net_spec.py +++ b/python/caffe/test/test_net_spec.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# import unittest import tempfile import caffe @@ -79,3 +115,11 @@ def test_zero_tops(self): net_proto = silent_net() net = self.load_net(net_proto) self.assertEqual(len(net.forward()), 0) + + def test_type_error(self): + """Test that a TypeError is raised when a Function input isn't a Top.""" + data = L.DummyData(ntop=2) # data is a 2-tuple of Tops + r = r"^Silence input 0 is not a Top \(type is <(type|class) 'tuple'>\)$" + with self.assertRaisesRegexp(TypeError, r): + L.Silence(data, ntop=0) # should raise: data is a tuple, not a Top + L.Silence(*data, ntop=0) # shouldn't raise: each elt of data is a Top diff --git a/python/caffe/test/test_python_layer.py b/python/caffe/test/test_python_layer.py old mode 100644 new mode 100755 index 899514e90f1..b7b469627a5 --- a/python/caffe/test/test_python_layer.py +++ b/python/caffe/test/test_python_layer.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# import unittest import tempfile import os diff --git a/python/caffe/test/test_python_layer_with_param_str.py b/python/caffe/test/test_python_layer_with_param_str.py old mode 100644 new mode 100755 index c36048ae9f0..474e19aab22 --- a/python/caffe/test/test_python_layer_with_param_str.py +++ b/python/caffe/test/test_python_layer_with_param_str.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# import unittest import tempfile import os diff --git a/python/caffe/test/test_solver.py b/python/caffe/test/test_solver.py old mode 100644 new mode 100755 index f618fded8cd..8e0624d81c4 --- a/python/caffe/test/test_solver.py +++ b/python/caffe/test/test_solver.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# import unittest import tempfile import os diff --git a/python/classify.py b/python/classify.py index 4544c51b4c2..3d3dd1cc640 100755 --- a/python/classify.py +++ b/python/classify.py @@ -1,4 +1,40 @@ #!/usr/bin/env python +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# """ classify.py is an out-of-the-box image classifer callable from the command line. diff --git a/python/detect.py b/python/detect.py index 1aba964a9d8..5d6d6ce2e67 100755 --- a/python/detect.py +++ b/python/detect.py @@ -1,4 +1,40 @@ #!/usr/bin/env python +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# """ detector.py is an out-of-the-box windowed detector callable from the command line. diff --git a/python/draw_net.py b/python/draw_net.py index dfe70d26a71..7e7ce9a2329 100755 --- a/python/draw_net.py +++ b/python/draw_net.py @@ -1,4 +1,40 @@ #!/usr/bin/env python +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# """ Draw a graph of the net architecture. 
""" diff --git a/scripts/SystemTap/CMakeLists.txt b/scripts/SystemTap/CMakeLists.txt new file mode 100644 index 00000000000..4c58e6f1787 --- /dev/null +++ b/scripts/SystemTap/CMakeLists.txt @@ -0,0 +1,14 @@ +# Installation/customization of system taps scripts +if(USE_SYSTEMTAP) + # system Tap fails with OpenCV (no idea why) + if(USE_OPENCV) + message(FATAL "SystemTap is not working when OpenCV is supported!") + endif() + + # Copy profile.stp and put absolute path to binary and nn lib + file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/profile_selected_function.stp DESTINATION ${CMAKE_BINARY_DIR}) + file(READ ${CMAKE_BINARY_DIR}/profile_selected_function.stp PROFILE_STP_STRING) + string(REPLACE "[DIRNAME_OF_CAFFE_TOOL_BINARY]" "\"${CMAKE_BINARY_DIR}/tools/caffe\"" PROFILE_STP_STRING ${PROFILE_STP_STRING}) + string(REPLACE "[DIRNAME_OF_LIBCAFFE]" "\"${CMAKE_BINARY_DIR}/lib/libcaffe.so\"" PROFILE_STP_STRING ${PROFILE_STP_STRING}) + file(WRITE ${CMAKE_BINARY_DIR}/profile_selected_function.stp ${PROFILE_STP_STRING}) +endif() diff --git a/scripts/SystemTap/profile_selected_function.stp b/scripts/SystemTap/profile_selected_function.stp new file mode 100644 index 00000000000..7590fa3e7e9 --- /dev/null +++ b/scripts/SystemTap/profile_selected_function.stp @@ -0,0 +1,54 @@ +#Example of running: +#stap /scripts/profile.stp -v --dyninst -c /tools/caffe | c++filt + +global timings_start # Giving size of array here will cause error if size is exceeded , but allow avoid realocation of arrays +global timings +global counts +global histo + +global final_results # Container to combine data from many threads + +##@define function_name %( "*sync*" %) # Handy macro to define name of functions to be traced +# +@define make_entry_and_exit_probe(process_name,func_name) %( + +probe process(@process_name).function(@func_name).call +{ + timings_start[ppfunc(),tid()] = gettimeofday_us() + counts[ppfunc()] <<< 1 +} + +probe process(@process_name).function(@func_name).return +{ + timings[ppfunc(),tid()] += 
gettimeofday_us() - timings_start[ppfunc(),tid()]; + histo[ppfunc()] <<< gettimeofday_us() - timings_start[ppfunc(),tid()]; +} + +%) + +# For Caffe C++ we need mangled names put here (can use wildcards "*" for rescue) +@make_entry_and_exit_probe([DIRNAME_OF_CAFFE_TOOL_BINARY],"_Z4timev") + +# Tracing functions of shared lib, example of using wildcards for names of functions +@make_entry_and_exit_probe([DIRNAME_OF_LIBCAFFE],"*caffe_set*") # caffe_set + +probe end +{ + # Get per thread results and put them in final container + foreach([x,tid] in timings) + { + final_results[x] += timings[x,tid] + } + + # Presents results + printf("\n\nInclusive profiling results:\n") + foreach([x] in final_results-) { printf("%s, count: %d total time: %d us\n",x,@count(counts[x]),final_results[x])} + + foreach([x] in histo-) + { + printf("Histogram of func: %s:\n",[x]); + print(@hist_log(histo[x])) + } +} + + diff --git a/scripts/build_docs.sh b/scripts/build_docs.sh index 0e28bd71631..4652af0228b 100755 --- a/scripts/build_docs.sh +++ b/scripts/build_docs.sh @@ -1,4 +1,40 @@ #!/bin/bash +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# # Build documentation for display in web browser. 
PORT=${1:-4000} diff --git a/scripts/convert_inception_v3.py b/scripts/convert_inception_v3.py new file mode 100644 index 00000000000..30efb450634 --- /dev/null +++ b/scripts/convert_inception_v3.py @@ -0,0 +1,453 @@ +from __future__ import print_function +import os.path +import re +import sys +import tarfile +import time +from datetime import datetime + +# pylint: disable=unused-import,g-bad-import-order +import tensorflow.python.platform +from six.moves import urllib +import numpy as np +import tensorflow as tf +# pylint: enable=unused-import,g-bad-import-order + +from tensorflow.python.platform import gfile +import h5py +import math + +os.environ["GLOG_minloglevel"] ="3" +import caffe +from caffe.model_libs import * +from google.protobuf import text_format + +paddings = {'VALID': [0, 0], 'SAME': [1, 1]} + +FLAGS = tf.app.flags.FLAGS + +# classify_image_graph_def.pb: +# Binary representation of the GraphDef protocol buffer. +# imagenet_synset_to_human_label_map.txt: +# Map from synset ID to a human readable string. +# imagenet_2012_challenge_label_map_proto.pbtxt: +# Text representation of a protocol buffer mapping a label to synset ID. 
+tf.app.flags.DEFINE_string( + 'model_dir', '/tmp/imagenet', + """Path to classify_image_graph_def.pb, """ + """imagenet_synset_to_human_label_map.txt, and """ + """imagenet_2012_challenge_label_map_proto.pbtxt.""") +tf.app.flags.DEFINE_string('image_file', '', + """Absolute path to image file.""") +tf.app.flags.DEFINE_integer('num_top_predictions', 5, + """Display this many predictions.""") + +# pylint: disable=line-too-long +DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz' +# pylint: enable=line-too-long + +cur_dir = os.path.dirname(os.path.realpath(__file__)) +caffe_root = '{}/../'.format(cur_dir) +labelmap_file = caffe_root + 'data/ILSVRC2016/labelmap_ilsvrc_clsloc.prototxt' +file = open(labelmap_file, 'r') +labelmap = caffe_pb2.LabelMap() +text_format.Merge(str(file.read()), labelmap) + +def get_labelname(label): + num_labels = len(labelmap.item) + found = False + for i in xrange(0, num_labels): + if label == labelmap.item[i].label: + found = True + return labelmap.item[i].display_name + assert found == True + +def create_graph(): + """Creates a graph from saved GraphDef file and returns a saver.""" + # Creates graph from saved graph_def.pb. 
+ with gfile.FastGFile(os.path.join( + FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f: + graph_def = tf.GraphDef() + graph_def.ParseFromString(f.read()) + with tf.device('/cpu:0'): + _ = tf.import_graph_def(graph_def, name='') + +def make_padding(padding_name, conv_shape): + if padding_name == 'VALID': + return [0, 0] + elif padding_name == 'SAME': + return [int(math.ceil(conv_shape[0]/2)), int(math.ceil(conv_shape[1]/2))] + else: + sys.exit('Invalid padding name '+padding_name) + +def dump_inputlayer(sess, net, operation='create'): + if operation == 'create': + resize = sess.graph.get_tensor_by_name('ResizeBilinear/size:0').eval() + [height, width] = resize + sub = sess.graph.get_tensor_by_name('Sub/y:0').eval() + mean = sub + if not type(mean) is list: + mean = [float(mean)] + else: + mean = [int(x) for x in mean] + mul = sess.graph.get_tensor_by_name('Mul/y:0').eval() + scale = float(mul) + net['data'] = L.Input(shape=dict(dim=[1, 3, int(height), int(width)]), transform_param=dict(mean_value=mean, scale=scale)) + +def dump_convbn(sess, net, from_layer, out_layer, operation='create'): + conv = sess.graph.get_operation_by_name(out_layer + '/Conv2D') + + weights = sess.graph.get_tensor_by_name(out_layer + '/conv2d_params:0').eval() + padding = make_padding(conv.get_attr('padding'), weights.shape) + strides = conv.get_attr('strides') + + beta = sess.graph.get_tensor_by_name(out_layer + '/batchnorm/beta:0').eval() + gamma = sess.graph.get_tensor_by_name(out_layer + '/batchnorm/gamma:0').eval() + mean = sess.graph.get_tensor_by_name(out_layer + '/batchnorm/moving_mean:0').eval() + std = sess.graph.get_tensor_by_name(out_layer + '/batchnorm/moving_variance:0').eval() + + # TF weight matrix is of order: height x width x input_channels x output_channels + # make it to caffe format: output_channels x input_channels x height x width + weights = np.transpose(weights, (3, 2, 0, 1)) + + if operation == 'create': + assert from_layer in net.keys(), '{} not in 
net'.format(from_layer) + + [num_output, channels, kernel_h, kernel_w] = weights.shape + [pad_h, pad_w] = padding + [stride_h, stride_w] = strides[1:3] + std_eps = 0.001 + + # parameters for convolution layer with batchnorm. + conv_prefix = '' + conv_postfix = '' + kwargs = { + 'param': [dict(lr_mult=1, decay_mult=1)], + 'weight_filler': dict(type='gaussian', std=0.01), + 'bias_term': False, + } + conv_name = '{}{}{}'.format(conv_prefix, out_layer, conv_postfix) + if kernel_h != kernel_w: + net[conv_name] = L.Convolution(net[from_layer], num_output=num_output, + kernel_h=kernel_h, kernel_w=kernel_w, pad_h=pad_h, pad_w=pad_w, + stride_h=stride_h, stride_w=stride_w, **kwargs) + else: + net[conv_name] = L.Convolution(net[from_layer], num_output=num_output, + kernel_size=kernel_h, pad=pad_h, stride=stride_h, **kwargs) + + # parameters for batchnorm layer. + bn_prefix = '' + bn_postfix = '_bn' + bn_kwargs = { + 'param': [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)], + } + bn_name = '{}{}{}'.format(bn_prefix, conv_name, bn_postfix) + net[bn_name] = L.BatchNorm(net[conv_name], in_place=True, + batch_norm_param=dict(eps=std_eps), **bn_kwargs) + + # parameters for scale bias layer after batchnorm. + bias_prefix = '' + bias_postfix = '_bias' + bias_kwargs = { + 'param': [dict(lr_mult=1, decay_mult=0)], + 'filler': dict(type='constant', value=0.0), + } + bias_name = '{}{}{}'.format(bias_prefix, conv_name, bias_postfix) + net[bias_name] = L.Bias(net[bn_name], in_place=True, **bias_kwargs) + + # relu layer. + relu_name = '{}_relu'.format(conv_name) + net[relu_name] = L.ReLU(net[conv_name], in_place=True) + elif operation == 'save': + conv_prefix = '' + conv_postfix = '' + conv_name = '{}{}{}'.format(conv_prefix, out_layer, conv_postfix) + net.params[conv_name][0].data.flat = weights.flat + + # Copy bn parameters. 
+ bn_prefix = '' + bn_postfix = '_bn' + bn_name = '{}{}{}'.format(bn_prefix, conv_name, bn_postfix) + net.params[bn_name][0].data.flat = mean + net.params[bn_name][1].data.flat = std + net.params[bn_name][2].data.flat = 1. + + # Copy scale parameters. + bias_prefix = '' + bias_postfix = '_bias' + bias_name = '{}{}{}'.format(bias_prefix, conv_name, bias_postfix) + net.params[bias_name][0].data.flat = beta + +def dump_pool(sess, net, from_layer, out_layer, operation='create'): + pooling = sess.graph.get_operation_by_name(out_layer) + ismax = pooling.type=='MaxPool' and 1 or 0 + ksize = pooling.get_attr('ksize') + padding = make_padding(pooling.get_attr('padding'), ksize[1:3]) + strides = pooling.get_attr('strides') + + if operation == 'create': + if ismax: + pool = P.Pooling.MAX + else: + pool = P.Pooling.AVE + assert from_layer in net.keys() + [kernel_h, kernel_w] = ksize[1:3] + [pad_h, pad_w] = padding + [stride_h, stride_w] = strides[1:3] + if kernel_h != kernel_w: + net[out_layer] = L.Pooling(net[from_layer], pool=pool, + kernel_h=kernel_h, kernel_w=kernel_w, pad_h=pad_h, pad_w=pad_w, + stride_h=stride_h, stride_w=stride_w) + else: + net[out_layer] = L.Pooling(net[from_layer], pool=pool, + kernel_size=kernel_h, pad=pad_h, stride=stride_h) + +def dump_softmax(sess, net, from_layer, out_layer, operation='create'): + softmax_w = sess.graph.get_tensor_by_name('softmax/weights:0').eval() + softmax_b = sess.graph.get_tensor_by_name('softmax/biases:0').eval() + + softmax_w = np.transpose(softmax_w, (1, 0)) + + if operation == 'create': + assert from_layer in net.keys() + kwargs = { + 'param': [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)], + 'weight_filler': dict(type='xavier'), + 'bias_filler': dict(type='constant', value=0) + } + [num_output, channels] = softmax_w.shape + net[out_layer] = L.InnerProduct(net[from_layer], num_output=num_output, **kwargs) + prob_layer = '{}_prob'.format(out_layer) + net[prob_layer] = L.Softmax(net[out_layer]) + elif 
operation == 'save': + net.params[out_layer][0].data.flat = softmax_w.flat + net.params[out_layer][1].data.flat = softmax_b + +def dump_tower(sess, net, from_layer, tower_name, tower_layers, operation='create'): + for tower_layer in tower_layers: + tower_layer = '{}/{}'.format(tower_name, tower_layer) + if 'pool' in tower_layer: + dump_pool(sess, net, from_layer, tower_layer, operation) + else: + dump_convbn(sess, net, from_layer, tower_layer, operation) + from_layer = tower_layer + +def dump_inception(sess, net, inception_name, tower_names, operation='create', final=True): + if operation == 'create': + towers_layers = [] + for tower_name in tower_names: + tower_name = '{}/{}'.format(inception_name, tower_name) + assert tower_name in net.keys(), tower_name + towers_layers.append(net[tower_name]) + if final: + inception_name = '{}/join'.format(inception_name) + net[inception_name] = L.Concat(*towers_layers, axis=1) + +def run_inference_on_image(image): + if not gfile.Exists(image): + tf.logging.fatal('File does not exist %s', image) + image_data = gfile.FastGFile(image).read() + + # Creates graph from saved GraphDef. 
+ create_graph() + + # sess = tf.InteractiveSession(config=tf.ConfigProto( + # allow_soft_placement=True)) + sess = tf.InteractiveSession() + ops = sess.graph.get_operations() + for op in ops: + print(op.name) + # Run the graph until softmax + # start = datetime.now() + data_tensor = sess.graph.get_tensor_by_name('Mul:0') + softmax_tensor = sess.graph.get_tensor_by_name('softmax:0') + data = sess.run(data_tensor, {'DecodeJpeg/contents:0': image_data}) + predictions = sess.run(softmax_tensor, + {'DecodeJpeg/contents:0': image_data}) + # time_len = datetime.now() - start + # print(time_len.microseconds / 1000) + # print predictions indices and values + predictions = np.squeeze(predictions) + top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1] + for p in top_k: + print(get_labelname(p), predictions[p]) + sess.close() + + deploy_net_file = 'models/inception_v3/inception_v3_deploy.prototxt' + model_file = 'models/inception_v3/inception_v3.caffemodel' + net = caffe.Net(deploy_net_file, model_file, caffe.TEST) + net.blobs['data'].reshape(1, 3, 299, 299) + data = data.transpose(0, 3, 1, 2) + + net.blobs['data'].data.flat = data.flat + output = net.forward() + predictions = output['softmax_prob'] + predictions = np.squeeze(predictions) + top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1] + for p in top_k: + print(get_labelname(p), predictions[p]) + +def dump_model(operation='create', redo=False): + # Creates graph from saved GraphDef. + create_graph() + sess = tf.InteractiveSession() + + # Creates caffe model. 
+ deploy_net_file = 'models/inception_v3/inception_v3_deploy.prototxt' + model_file = 'models/inception_v3/inception_v3.caffemodel' + net = [] + + if operation == 'create' and (not os.path.exists(deploy_net_file) or redo): + net = caffe.NetSpec() + elif operation == 'save' and (not os.path.exists(model_file) or redo): + caffe.set_device(1) + caffe.set_mode_gpu() + net = caffe.Net(deploy_net_file, caffe.TEST) + else: + return + + # dump the preprocessing parameters + dump_inputlayer(sess, net, operation) + + # dump the filters + dump_convbn(sess, net, 'data', 'conv', operation) + dump_convbn(sess, net, 'conv', 'conv_1', operation) + dump_convbn(sess, net, 'conv_1', 'conv_2', operation) + dump_pool(sess, net, 'conv_2', 'pool', operation) + dump_convbn(sess, net, 'pool', 'conv_3', operation) + dump_convbn(sess, net, 'conv_3', 'conv_4', operation) + dump_pool(sess, net, 'conv_4', 'pool_1', operation) + + # inceptions with 1x1, 3x3, 5x5 convolutions + from_layer = 'pool_1' + for inception_id in xrange(0, 3): + if inception_id == 0: + out_layer = 'mixed' + else: + out_layer = 'mixed_{}'.format(inception_id) + dump_tower(sess, net, from_layer, out_layer, + ['conv'], operation) + dump_tower(sess, net, from_layer, '{}/tower'.format(out_layer), + ['conv', 'conv_1'], operation) + dump_tower(sess, net, from_layer, '{}/tower_1'.format(out_layer), + ['conv', 'conv_1', 'conv_2'], operation) + dump_tower(sess, net, from_layer, '{}/tower_2'.format(out_layer), + ['pool', 'conv'], operation) + dump_inception(sess, net, out_layer, + ['conv', 'tower/conv_1', 'tower_1/conv_2', 'tower_2/conv'], operation) + from_layer = '{}/join'.format(out_layer) + + # inceptions with 1x1, 3x3(in sequence) convolutions + out_layer = 'mixed_3' + dump_tower(sess, net, from_layer, out_layer, + ['conv'], operation) + dump_tower(sess, net, from_layer, '{}/tower'.format(out_layer), + ['conv', 'conv_1', 'conv_2'], operation) + dump_tower(sess, net, from_layer, out_layer, + ['pool'], operation) + 
dump_inception(sess, net, out_layer, + ['conv', 'tower/conv_2', 'pool'], operation) + from_layer = '{}/join'.format(out_layer) + + # inceptions with 1x1, 7x1, 1x7 convolutions + for inception_id in xrange(4, 8): + out_layer = 'mixed_{}'.format(inception_id) + dump_tower(sess, net, from_layer, out_layer, + ['conv'], operation) + dump_tower(sess, net, from_layer, '{}/tower'.format(out_layer), + ['conv', 'conv_1', 'conv_2'], operation) + dump_tower(sess, net, from_layer, '{}/tower_1'.format(out_layer), + ['conv', 'conv_1', 'conv_2', 'conv_3', 'conv_4'], operation) + dump_tower(sess, net, from_layer, '{}/tower_2'.format(out_layer), + ['pool', 'conv'], operation) + dump_inception(sess, net, out_layer, + ['conv', 'tower/conv_2', 'tower_1/conv_4', 'tower_2/conv'], operation) + from_layer = '{}/join'.format(out_layer) + + # inceptions with 1x1, 3x3, 1x7, 7x1 filters + out_layer = 'mixed_8' + dump_tower(sess, net, from_layer, '{}/tower'.format(out_layer), + ['conv', 'conv_1'], operation) + dump_tower(sess, net, from_layer, '{}/tower_1'.format(out_layer), + ['conv', 'conv_1', 'conv_2', 'conv_3'], operation) + dump_tower(sess, net, from_layer, out_layer, + ['pool'], operation) + dump_inception(sess, net, out_layer, + ['tower/conv_1', 'tower_1/conv_3', 'pool'], operation) + from_layer = '{}/join'.format(out_layer) + + for inception_id in xrange(9, 11): + out_layer = 'mixed_{}'.format(inception_id) + dump_tower(sess, net, from_layer, out_layer, + ['conv'], operation) + dump_tower(sess, net, from_layer, '{}/tower'.format(out_layer), + ['conv'], operation) + dump_tower(sess, net, '{}/tower/conv'.format(out_layer), + '{}/tower/mixed'.format(out_layer), ['conv'], operation) + dump_tower(sess, net, '{}/tower/conv'.format(out_layer), + '{}/tower/mixed'.format(out_layer), ['conv_1'], operation) + dump_inception(sess, net, '{}/tower/mixed'.format(out_layer), + ['conv', 'conv_1'], operation, False) + dump_tower(sess, net, from_layer, '{}/tower_1'.format(out_layer), + ['conv', 'conv_1'], 
operation) + dump_tower(sess, net, '{}/tower_1/conv_1'.format(out_layer), + '{}/tower_1/mixed'.format(out_layer), ['conv'], operation) + dump_tower(sess, net, '{}/tower_1/conv_1'.format(out_layer), + '{}/tower_1/mixed'.format(out_layer), ['conv_1'], operation) + dump_inception(sess, net, '{}/tower_1/mixed'.format(out_layer), + ['conv', 'conv_1'], operation, False) + dump_tower(sess, net, from_layer, '{}/tower_2'.format(out_layer), + ['pool', 'conv'], operation) + dump_inception(sess, net, out_layer, + ['conv', 'tower/mixed', 'tower_1/mixed', 'tower_2/conv'], operation) + from_layer = '{}/join'.format(out_layer) + + dump_pool(sess, net, from_layer, 'pool_3', operation) + dump_softmax(sess, net, 'pool_3', 'softmax', operation) + + if operation == 'create' and (not os.path.exists(deploy_net_file) or redo): + model_dir = os.path.dirname(deploy_net_file) + if not os.path.exists(model_dir): + os.makedirs(model_dir) + with open(deploy_net_file, 'w') as f: + print('name: "inception_v3_deploy"', file=f) + print(net.to_proto(), file=f) + elif operation == 'save' and (not os.path.exists(model_file) or redo): + net.save(model_file) + sess.close() + +def maybe_download_and_extract(): + """Download and extract model tar file.""" + dest_directory = FLAGS.model_dir + if not os.path.exists(dest_directory): + os.makedirs(dest_directory) + filename = DATA_URL.split('/')[-1] + filepath = os.path.join(dest_directory, filename) + if not os.path.exists(filepath): + def _progress(count, block_size, total_size): + sys.stdout.write('\r>> Downloading %s %.1f%%' % ( + filename, float(count * block_size) / float(total_size) * 100.0)) + sys.stdout.flush() + filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, + reporthook=_progress) + print() + statinfo = os.stat(filepath) + print('Successfully downloaded', filename, statinfo.st_size, 'bytes.') + modelfilepath = os.path.join(dest_directory, 'classify_image_graph_def.pb') + if not os.path.exists(modelfilepath): + tarfile.open(filepath, 
'r:gz').extractall(dest_directory) + +def main(_): + maybe_download_and_extract() + redo = True + operations = ['create', 'save'] + for operation in operations: + dump_model(operation, redo) + eval = True + if eval: + image = (FLAGS.image_file if FLAGS.image_file else + os.path.join(FLAGS.model_dir, 'cropped_panda.jpg')) + run_inference_on_image(image) + +if __name__ == '__main__': + tf.app.run() diff --git a/scripts/copy_notebook.py b/scripts/copy_notebook.py index e4c6385bef8..1514bd95d80 100755 --- a/scripts/copy_notebook.py +++ b/scripts/copy_notebook.py @@ -1,4 +1,40 @@ #!/usr/bin/env python +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# """ Takes as arguments: 1. the path to a JSON file (such as an IPython notebook). diff --git a/scripts/cpp_lint.py b/scripts/cpp_lint.py index 14c76ecd6bf..5bcaab337f8 100755 --- a/scripts/cpp_lint.py +++ b/scripts/cpp_lint.py @@ -1595,10 +1595,10 @@ def CheckCaffeAlternatives(filename, clean_lines, linenum, error): def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error): """Except the base classes, Caffe DataLayer should define DataLayerSetUp instead of LayerSetUp. - + The base DataLayers define common SetUp steps, the subclasses should not override them. - + Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. 
@@ -1608,9 +1608,11 @@ def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error): line = clean_lines.elided[linenum] ix = line.find('DataLayer::LayerSetUp') if ix >= 0 and ( + line.find('void AnnotatedDataLayer::LayerSetUp') != -1 or line.find('void DataLayer::LayerSetUp') != -1 or line.find('void ImageDataLayer::LayerSetUp') != -1 or line.find('void MemoryDataLayer::LayerSetUp') != -1 or + line.find('void VideoDataLayer::LayerSetUp') != -1 or line.find('void WindowDataLayer::LayerSetUp') != -1): error(filename, linenum, 'caffe/data_layer_setup', 2, 'Except the base classes, Caffe DataLayer should define' @@ -1620,9 +1622,11 @@ def CheckCaffeDataLayerSetUp(filename, clean_lines, linenum, error): ix = line.find('DataLayer::DataLayerSetUp') if ix >= 0 and ( line.find('void Base') == -1 and + line.find('void AnnotatedDataLayer::DataLayerSetUp') == -1 and line.find('void DataLayer::DataLayerSetUp') == -1 and line.find('void ImageDataLayer::DataLayerSetUp') == -1 and line.find('void MemoryDataLayer::DataLayerSetUp') == -1 and + line.find('void VideoDataLayer::DataLayerSetUp') == -1 and line.find('void WindowDataLayer::DataLayerSetUp') == -1): error(filename, linenum, 'caffe/data_layer_setup', 2, 'Except the base classes, Caffe DataLayer should define' diff --git a/scripts/create_annoset.py b/scripts/create_annoset.py new file mode 100644 index 00000000000..eed11ab9556 --- /dev/null +++ b/scripts/create_annoset.py @@ -0,0 +1,167 @@ +import argparse +import os +import shutil +import subprocess +import sys + +from caffe.proto import caffe_pb2 +from google.protobuf import text_format + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Create AnnotatedDatum database") + parser.add_argument("root", + help="The root directory which contains the images and annotations.") + parser.add_argument("listfile", + help="The file which contains image paths and annotation info.") + parser.add_argument("outdir", + help="The output directory which stores 
the database file.") + parser.add_argument("exampledir", + help="The directory to store the link of the database files.") + parser.add_argument("--redo", default = False, action = "store_true", + help="Recreate the database.") + parser.add_argument("--anno-type", default = "classification", + help="The type of annotation {classification, detection}.") + parser.add_argument("--label-type", default = "xml", + help="The type of label file format for detection {xml, json, txt}.") + parser.add_argument("--backend", default = "lmdb", + help="The backend {lmdb, leveldb} for storing the result") + parser.add_argument("--check-size", default = False, action = "store_true", + help="Check that all the datum have the same size.") + parser.add_argument("--encode-type", default = "", + help="What type should we encode the image as ('png','jpg',...).") + parser.add_argument("--encoded", default = False, action = "store_true", + help="The encoded image will be save in datum.") + parser.add_argument("--gray", default = False, action = "store_true", + help="Treat images as grayscale ones.") + parser.add_argument("--label-map-file", default = "", + help="A file with LabelMap protobuf message.") + parser.add_argument("--min-dim", default = 0, type = int, + help="Minimum dimension images are resized to.") + parser.add_argument("--max-dim", default = 0, type = int, + help="Maximum dimension images are resized to.") + parser.add_argument("--resize-height", default = 0, type = int, + help="Height images are resized to.") + parser.add_argument("--resize-width", default = 0, type = int, + help="Width images are resized to.") + parser.add_argument("--shuffle", default = False, action = "store_true", + help="Randomly shuffle the order of images and their labels.") + parser.add_argument("--check-label", default = False, action = "store_true", + help="Check that there is no duplicated name/label.") + + args = parser.parse_args() + root_dir = args.root + list_file = args.listfile + out_dir = 
args.outdir + example_dir = args.exampledir + + redo = args.redo + anno_type = args.anno_type + label_type = args.label_type + backend = args.backend + check_size = args.check_size + encode_type = args.encode_type + encoded = args.encoded + gray = args.gray + label_map_file = args.label_map_file + min_dim = args.min_dim + max_dim = args.max_dim + resize_height = args.resize_height + resize_width = args.resize_width + shuffle = args.shuffle + check_label = args.check_label + + # check if root directory exists + if not os.path.exists(root_dir): + print "root directory: {} does not exist".format(root_dir) + sys.exit() + # add "/" to root directory if needed + if root_dir[-1] != "/": + root_dir += "/" + # check if list file exists + if not os.path.exists(list_file): + print "list file: {} does not exist".format(list_file) + sys.exit() + # check list file format is correct + with open(list_file, "r") as lf: + for line in lf.readlines(): + img_file, anno = line.strip("\n").split(" ") + if not os.path.exists(root_dir + img_file): + print "image file: {} does not exist".format(root_dir + img_file) + if anno_type == "classification": + if not anno.isdigit(): + print "annotation: {} is not an integer".format(anno) + elif anno_type == "detection": + if not os.path.exists(root_dir + anno): + print "annotation file: {} does not exist".format(root_dir + anno) + sys.exit() + break + # check if label map file exist + if anno_type == "detection": + if not os.path.exists(label_map_file): + print "label map file: {} does not exist".format(label_map_file) + sys.exit() + label_map = caffe_pb2.LabelMap() + lmf = open(label_map_file, "r") + try: + text_format.Merge(str(lmf.read()), label_map) + except: + print "Cannot parse label map file: {}".format(label_map_file) + sys.exit() + out_parent_dir = os.path.dirname(out_dir) + if not os.path.exists(out_parent_dir): + os.makedirs(out_parent_dir) + if os.path.exists(out_dir) and not redo: + print "{} already exists and I do not hear 
redo".format(out_dir) + sys.exit() + if os.path.exists(out_dir): + shutil.rmtree(out_dir) + + # get caffe root directory + caffe_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + if anno_type == "detection": + cmd = "{}/build/tools/convert_annoset" \ + " --anno_type={}" \ + " --label_type={}" \ + " --label_map_file={}" \ + " --check_label={}" \ + " --min_dim={}" \ + " --max_dim={}" \ + " --resize_height={}" \ + " --resize_width={}" \ + " --backend={}" \ + " --shuffle={}" \ + " --check_size={}" \ + " --encode_type={}" \ + " --encoded={}" \ + " --gray={}" \ + " {} {} {}" \ + .format(caffe_root, anno_type, label_type, label_map_file, check_label, + min_dim, max_dim, resize_height, resize_width, backend, shuffle, + check_size, encode_type, encoded, gray, root_dir, list_file, out_dir) + elif anno_type == "classification": + cmd = "{}/build/tools/convert_annoset" \ + " --anno_type={}" \ + " --min_dim={}" \ + " --max_dim={}" \ + " --resize_height={}" \ + " --resize_width={}" \ + " --backend={}" \ + " --shuffle={}" \ + " --check_size={}" \ + " --encode_type={}" \ + " --encoded={}" \ + " --gray={}" \ + " {} {} {}" \ + .format(caffe_root, anno_type, min_dim, max_dim, resize_height, + resize_width, backend, shuffle, check_size, encode_type, encoded, + gray, root_dir, list_file, out_dir) + print cmd + process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) + output = process.communicate()[0] + + if not os.path.exists(example_dir): + os.makedirs(example_dir) + link_dir = os.path.join(example_dir, os.path.basename(out_dir)) + if os.path.exists(link_dir): + os.unlink(link_dir) + os.symlink(out_dir, link_dir) diff --git a/scripts/deploy_docs.sh b/scripts/deploy_docs.sh index fdf97f71d3a..ab562ca4f80 100755 --- a/scripts/deploy_docs.sh +++ b/scripts/deploy_docs.sh @@ -1,4 +1,40 @@ #!/bin/bash +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The 
Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# # Publish documentation to the gh-pages site. # The remote for pushing the docs (defaults to origin). 
diff --git a/scripts/download_model_binary.py b/scripts/download_model_binary.py index fcdbb5a91a2..e39c2b2abb0 100755 --- a/scripts/download_model_binary.py +++ b/scripts/download_model_binary.py @@ -1,4 +1,40 @@ #!/usr/bin/env python +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# import os import sys import time diff --git a/scripts/download_model_from_gist.sh b/scripts/download_model_from_gist.sh index 89527b7516f..7686e76a750 100755 --- a/scripts/download_model_from_gist.sh +++ b/scripts/download_model_from_gist.sh @@ -1,4 +1,40 @@ #!/usr/bin/env sh +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# GIST=$1 DIRNAME=${2:-./models} diff --git a/scripts/gather_examples.sh b/scripts/gather_examples.sh index 3fc726065ba..576ece21467 100755 --- a/scripts/gather_examples.sh +++ b/scripts/gather_examples.sh @@ -1,4 +1,40 @@ #!/bin/bash +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# # Assemble documentation for the project into one directory via symbolic links. # Find the docs dir, no matter where the script is called diff --git a/scripts/travis/build.sh b/scripts/travis/build.sh index bb9406f046c..e0b6c256e79 100755 --- a/scripts/travis/build.sh +++ b/scripts/travis/build.sh @@ -1,4 +1,40 @@ #!/bin/bash +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# # build the project BASEDIR=$(dirname $0) diff --git a/scripts/travis/configure-cmake.sh b/scripts/travis/configure-cmake.sh index 772f1e2ce8d..594562705f5 100644 --- a/scripts/travis/configure-cmake.sh +++ b/scripts/travis/configure-cmake.sh @@ -1,4 +1,40 @@ # CMake configuration +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# mkdir -p build cd build diff --git a/scripts/travis/configure-make.sh b/scripts/travis/configure-make.sh index ddc40fffa9d..9851b136a13 100644 --- a/scripts/travis/configure-make.sh +++ b/scripts/travis/configure-make.sh @@ -1,4 +1,40 @@ # raw Makefile configuration +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# LINE () { echo "$@" >> Makefile.config diff --git a/scripts/travis/configure.sh b/scripts/travis/configure.sh index ef740c8982e..56417569cd6 100755 --- a/scripts/travis/configure.sh +++ b/scripts/travis/configure.sh @@ -1,4 +1,40 @@ #!/bin/bash +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# # configure the project BASEDIR=$(dirname $0) diff --git a/scripts/travis/defaults.sh b/scripts/travis/defaults.sh index d69c0a7d964..80eaf1c2f50 100755 --- a/scripts/travis/defaults.sh +++ b/scripts/travis/defaults.sh @@ -1,4 +1,40 @@ #!/bin/bash +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# # set default environment variables set -e diff --git a/scripts/travis/install-deps.sh b/scripts/travis/install-deps.sh index ee16d36a7fc..4cc15dd14f1 100755 --- a/scripts/travis/install-deps.sh +++ b/scripts/travis/install-deps.sh @@ -1,4 +1,40 @@ #!/bin/bash +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. 
+# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# # install dependencies # (this script must be run as root) @@ -12,6 +48,7 @@ apt-get install -y --no-install-recommends \ libboost-python-dev \ libboost-system-dev \ libboost-thread-dev \ + libboost-regex-dev \ libgflags-dev \ libgoogle-glog-dev \ libhdf5-serial-dev \ diff --git a/scripts/travis/install-python-deps.sh b/scripts/travis/install-python-deps.sh index eeec302791f..0b9325dbe1c 100755 --- a/scripts/travis/install-python-deps.sh +++ b/scripts/travis/install-python-deps.sh @@ -1,4 +1,40 @@ #!/bin/bash +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# # install extra Python dependencies # (must come after setup-venv) diff --git a/scripts/travis/setup-venv.sh b/scripts/travis/setup-venv.sh index 81245f146da..07905471c6b 100755 --- a/scripts/travis/setup-venv.sh +++ b/scripts/travis/setup-venv.sh @@ -1,4 +1,40 @@ #!/bin/bash +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# # setup a Python virtualenv # (must come after install-deps) diff --git a/scripts/travis/test.sh b/scripts/travis/test.sh index fedd7e6b56e..653db9d6af1 100755 --- a/scripts/travis/test.sh +++ b/scripts/travis/test.sh @@ -1,4 +1,40 @@ #!/bin/bash +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. 
+# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# # test the project BASEDIR=$(dirname $0) diff --git a/scripts/upload_model_to_gist.sh b/scripts/upload_model_to_gist.sh index 3c4fd64e3fc..1106abb05eb 100755 --- a/scripts/upload_model_to_gist.sh +++ b/scripts/upload_model_to_gist.sh @@ -1,4 +1,40 @@ #!/bin/bash +# +# All modification made by Intel Corporation: © 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# # Check for valid directory DIRNAME=$1 diff --git a/src/caffe/CMakeLists.txt b/src/caffe/CMakeLists.txt index 8a80c940488..ba2f0c32df3 100644 --- a/src/caffe/CMakeLists.txt +++ b/src/caffe/CMakeLists.txt @@ -18,6 +18,10 @@ if(HAVE_CUDA) endif() add_library(caffe ${srcs}) +# Add dependency to force building MKLDNN when not given by ENV var +if(NOT DEFINED ENV{MKLDNNROOT}) +add_dependencies(caffe mkldnn) +endif() target_link_libraries(caffe proto ${Caffe_LINKER_LIBS}) caffe_default_properties(caffe) set_target_properties(caffe PROPERTIES diff --git a/src/caffe/blob.cpp b/src/caffe/blob.cpp index 4a34e4c5856..dd5546bde55 100644 --- a/src/caffe/blob.cpp +++ b/src/caffe/blob.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include @@ -24,20 +61,31 @@ void Blob::Reshape(const vector& shape) { CHECK_LE(shape.size(), kMaxBlobAxes); count_ = 1; shape_.resize(shape.size()); + +#ifndef CPU_ONLY if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) { shape_data_.reset(new SyncedMemory(shape.size() * sizeof(int))); } int* shape_data = static_cast(shape_data_->mutable_cpu_data()); +#endif + bool actual_reshaping = false; for (int i = 0; i < shape.size(); ++i) { CHECK_GE(shape[i], 0); if (count_ != 0) { CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX"; } count_ *= shape[i]; - shape_[i] = shape[i]; + if (shape_[i] != shape[i]) { + actual_reshaping = true; + shape_[i] = shape[i]; + } +#ifndef CPU_ONLY shape_data[i] = shape[i]; +#endif } - if (count_ > capacity_) { + // We restart sync objects when there was a change of shape or the + // requested count is bigger than current capacity + if ( (actual_reshaping == true) || (count_ > capacity_) ) { capacity_ = count_; data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype))); diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
capacity_(0) { + Reshape(shape); } +#ifndef CPU_ONLY template const int* Blob::gpu_shape() const { CHECK(shape_data_); return (const int*)shape_data_->gpu_data(); } +#endif template const Dtype* Blob::cpu_data() const { @@ -93,6 +144,12 @@ void Blob::set_cpu_data(Dtype* data) { } template +void Blob::set_cpu_diff(Dtype* diff) { + CHECK(diff); + diff_->set_cpu_data(diff); +} + +template const Dtype* Blob::gpu_data() const { CHECK(data_); return (const Dtype*)data_->gpu_data(); @@ -135,6 +192,57 @@ Dtype* Blob::mutable_gpu_diff() { } template +const Dtype* Blob::prv_data() const { + CHECK(data_); + return (const Dtype*)data_->prv_data(); +} + +template +Dtype* Blob::mutable_prv_data() { + CHECK(data_); + return static_cast(data_->mutable_prv_data()); +} + +template +const Dtype* Blob::prv_diff() const { + CHECK(diff_); + return (const Dtype*)diff_->prv_data(); +} + +template +Dtype* Blob::mutable_prv_diff() { + CHECK(diff_); + return static_cast(diff_->mutable_prv_data()); +} + + +template +void Blob::set_prv_data_descriptor(shared_ptr descriptor, + bool same_data) { + CHECK(data_); + data_->set_prv_descriptor(descriptor, same_data); +} + +template +void Blob::set_prv_diff_descriptor(shared_ptr descriptor, + bool same_data) { + CHECK(diff_); + diff_->set_prv_descriptor(descriptor, same_data); +} + +template +shared_ptr Blob::get_prv_data_descriptor() { + CHECK(data_); + return data_->prv_descriptor_; +} + +template +shared_ptr Blob::get_prv_diff_descriptor() { + CHECK(diff_); + return diff_->prv_descriptor_; +} + +template void Blob::ShareData(const Blob& other) { CHECK_EQ(count_, other.count()); data_ = other.data(); @@ -151,11 +259,23 @@ void Blob::ShareDiff(const Blob& other) { // Blob or Blob. template <> void Blob::Update() { NOT_IMPLEMENTED; } template <> void Blob::Update() { NOT_IMPLEMENTED; } +template <> void Blob::Update() { NOT_IMPLEMENTED; } template void Blob::Update() { // We will perform update based on where the data is located. 
switch (data_->head()) { + case SyncedMemory::SYNCED_PRV: + case SyncedMemory::HEAD_AT_PRV: + if ((diff_->head() == SyncedMemory::SYNCED_PRV) || + (diff_->head() == SyncedMemory::HEAD_AT_PRV)) { + CHECK_EQ(true, get_prv_data_descriptor()->layout_compare( + get_prv_diff_descriptor())); + caffe_axpy(prv_diff_count(), Dtype(-1), + static_cast(diff_->prv_data()), + static_cast(data_->mutable_prv_data())); + break; + } case SyncedMemory::HEAD_AT_CPU: // perform computation on CPU caffe_axpy(count_, Dtype(-1), @@ -188,10 +308,25 @@ template <> int Blob::asum_data() const { return 0; } +template <> bool Blob::asum_data() const { + NOT_IMPLEMENTED; + return 0; +} + template Dtype Blob::asum_data() const { if (!data_) { return 0; } switch (data_->head()) { + case SyncedMemory::SYNCED_PRV: + { + const Dtype* prv_ptr = prv_data(); + if (prv_ptr == NULL) + return caffe_cpu_asum(count_, cpu_data()); + else + return caffe_cpu_asum(prv_data_count(), prv_data()); + } + case SyncedMemory::HEAD_AT_PRV: + return caffe_cpu_asum(prv_data_count(), prv_data()); case SyncedMemory::HEAD_AT_CPU: return caffe_cpu_asum(count_, cpu_data()); case SyncedMemory::HEAD_AT_GPU: @@ -223,10 +358,18 @@ template <> int Blob::asum_diff() const { return 0; } +template <> bool Blob::asum_diff() const { + NOT_IMPLEMENTED; + return 0; +} + template Dtype Blob::asum_diff() const { if (!diff_) { return 0; } switch (diff_->head()) { + case SyncedMemory::SYNCED_PRV: + case SyncedMemory::HEAD_AT_PRV: + return caffe_cpu_asum( prv_diff_count(), prv_diff()); case SyncedMemory::HEAD_AT_CPU: return caffe_cpu_asum(count_, cpu_diff()); case SyncedMemory::HEAD_AT_GPU: @@ -258,12 +401,22 @@ template <> int Blob::sumsq_data() const { return 0; } +template <> bool Blob::sumsq_data() const { + NOT_IMPLEMENTED; + return 0; +} + template Dtype Blob::sumsq_data() const { Dtype sumsq; const Dtype* data; if (!data_) { return 0; } switch (data_->head()) { + case SyncedMemory::SYNCED_PRV: + case SyncedMemory::HEAD_AT_PRV: + data = 
prv_data(); + sumsq = caffe_cpu_dot(prv_data_count(), data, data); + break; case SyncedMemory::HEAD_AT_CPU: data = cpu_data(); sumsq = caffe_cpu_dot(count_, data, data); @@ -295,12 +448,22 @@ template <> int Blob::sumsq_diff() const { return 0; } +template <> bool Blob::sumsq_diff() const { + NOT_IMPLEMENTED; + return 0; +} + template Dtype Blob::sumsq_diff() const { Dtype sumsq; const Dtype* diff; if (!diff_) { return 0; } switch (diff_->head()) { + case SyncedMemory::SYNCED_PRV: + case SyncedMemory::HEAD_AT_PRV: + diff = prv_diff(); + sumsq = caffe_cpu_dot(prv_diff_count(), diff, diff); + break; case SyncedMemory::HEAD_AT_CPU: diff = cpu_diff(); sumsq = caffe_cpu_dot(count_, diff, diff); @@ -317,7 +480,7 @@ Dtype Blob::sumsq_diff() const { case SyncedMemory::UNINITIALIZED: return 0; default: - LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head(); + LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head(); } return sumsq; } @@ -330,11 +493,20 @@ template <> void Blob::scale_data(int scale_factor) { NOT_IMPLEMENTED; } +template <> void Blob::scale_data(bool scale_factor) { + NOT_IMPLEMENTED; +} + template void Blob::scale_data(Dtype scale_factor) { Dtype* data; if (!data_) { return; } switch (data_->head()) { + case SyncedMemory::SYNCED_PRV: + case SyncedMemory::HEAD_AT_PRV: + data = mutable_prv_data(); + caffe_scal(prv_data_count(), scale_factor, data); + break; case SyncedMemory::HEAD_AT_CPU: data = mutable_cpu_data(); caffe_scal(count_, scale_factor, data); @@ -363,11 +535,20 @@ template <> void Blob::scale_diff(int scale_factor) { NOT_IMPLEMENTED; } +template <> void Blob::scale_diff(bool scale_factor) { + NOT_IMPLEMENTED; +} + template void Blob::scale_diff(Dtype scale_factor) { Dtype* diff; if (!diff_) { return; } switch (diff_->head()) { + case SyncedMemory::SYNCED_PRV: + case SyncedMemory::HEAD_AT_PRV: + diff = mutable_prv_diff(); + caffe_scal(prv_diff_count(), scale_factor, diff); + break; case SyncedMemory::HEAD_AT_CPU: diff = 
mutable_cpu_diff(); caffe_scal(count_, scale_factor, diff); @@ -536,8 +717,9 @@ void Blob::ToProto(BlobProto* proto, bool write_diff) const { } INSTANTIATE_CLASS(Blob); +template class Blob; template class Blob; +template class Blob; template class Blob; } // namespace caffe - diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp index dee681654aa..3d1a483250e 100644 --- a/src/caffe/common.cpp +++ b/src/caffe/common.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include #include @@ -7,6 +44,10 @@ #include "caffe/common.hpp" #include "caffe/util/rng.hpp" +#ifdef _OPENMP +#include +#endif + namespace caffe { // Make sure each thread can have different values. @@ -21,6 +62,9 @@ Caffe& Caffe::Get() { // random seeding int64_t cluster_seedgen(void) { +#ifdef DETERMINISTIC + return 4013; +#else int64_t s, seed, pid; FILE* f = fopen("/dev/urandom", "rb"); if (f && fread(&seed, 1, sizeof(seed), f) == sizeof(seed)) { @@ -37,6 +81,7 @@ int64_t cluster_seedgen(void) { s = time(NULL); seed = std::abs(((s * 181) * ((pid - 83) * 359)) % 104729); return seed; +#endif } @@ -53,7 +98,7 @@ void GlobalInit(int* pargc, char*** pargv) { Caffe::Caffe() : random_generator_(), mode_(Caffe::CPU), - solver_count_(1), root_solver_(true) { } + solver_count_(1), root_solver_(true), iter_size_(1) { } Caffe::~Caffe() { } @@ -106,7 +151,7 @@ void* Caffe::RNG::generator() { Caffe::Caffe() : cublas_handle_(NULL), curand_generator_(NULL), random_generator_(), - mode_(Caffe::CPU), solver_count_(1), root_solver_(true) { + mode_(Caffe::CPU), solver_count_(1), root_solver_(true), iter_size_(1) { // Try to create a cublas handler, and report an error if failed (but we will // keep the program running as one might just want to run CPU code). 
if (cublasCreate(&cublas_handle_) != CUBLAS_STATUS_SUCCESS) { @@ -122,10 +167,16 @@ Caffe::Caffe() } Caffe::~Caffe() { +#ifdef _OPENMP + if (0 == omp_get_thread_num()) { +#endif if (cublas_handle_) CUBLAS_CHECK(cublasDestroy(cublas_handle_)); if (curand_generator_) { CURAND_CHECK(curandDestroyGenerator(curand_generator_)); } +#ifdef _OPENMP + } +#endif } void Caffe::set_random_seed(const unsigned int seed) { diff --git a/src/caffe/data_reader.cpp b/src/caffe/data_reader.cpp index 9f019bbfcb7..69e8f20f5ae 100644 --- a/src/caffe/data_reader.cpp +++ b/src/caffe/data_reader.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include #include @@ -7,7 +44,7 @@ #include "caffe/data_reader.hpp" #include "caffe/layers/data_layer.hpp" #include "caffe/proto/caffe.pb.h" - +#include "caffe/multinode/mlsl.hpp" namespace caffe { using boost::weak_ptr; @@ -44,12 +81,12 @@ DataReader::~DataReader() { DataReader::QueuePair::QueuePair(int size) { // Initialize the free queue with requested number of datums for (int i = 0; i < size; ++i) { - free_.push(new Datum()); + free_.push(new string("empty buffer")); } } DataReader::QueuePair::~QueuePair() { - Datum* datum; + string* datum; while (free_.try_pop(&datum)) { delete datum; } @@ -62,7 +99,7 @@ DataReader::QueuePair::~QueuePair() { DataReader::Body::Body(const LayerParameter& param) : param_(param), - new_queue_pairs_() { + new_queue_pairs_(), first_read_(true) { StartInternalThread(); } @@ -71,9 +108,13 @@ DataReader::Body::~Body() { } void DataReader::Body::InternalThreadEntry() { - shared_ptr db(db::GetDB(param_.data_param().backend())); - db->Open(param_.data_param().source(), db::READ); - shared_ptr cursor(db->NewCursor()); + const caffe::DataParameter *data_param = ¶m_.data_param(); + CHECK(data_param) << "Failed to obtain data_param"; + + shared_ptr dbw(data_param->shuffle() ? + static_cast(new DBShuffle(param_)): + static_cast(new DBSequential(param_))); + vector > qps; try { int solver_count = param_.phase() == TRAIN ? 
Caffe::solver_count() : 1; @@ -83,13 +124,13 @@ void DataReader::Body::InternalThreadEntry() { // so read one item, then wait for the next solver. for (int i = 0; i < solver_count; ++i) { shared_ptr qp(new_queue_pairs_.pop()); - read_one(cursor.get(), qp.get()); + read_one(dbw.get(), qp.get()); qps.push_back(qp); } // Main loop while (!must_stop()) { for (int i = 0; i < solver_count; ++i) { - read_one(cursor.get(), qps[i].get()); + read_one(dbw.get(), qps[i].get()); } // Check no additional readers have been created. This can happen if // more than one net is trained at a time per process, whether single @@ -102,13 +143,82 @@ void DataReader::Body::InternalThreadEntry() { } } -void DataReader::Body::read_one(db::Cursor* cursor, QueuePair* qp) { - Datum* datum = qp->free_.pop(); +void DataReader::Body::read_one(DBWrapper* dbw, QueuePair* qp) { + CHECK(dbw); + CHECK(qp); + +#ifdef USE_MLSL + string* data = qp->free_.pop(); + if(first_read_) { /* move each node’s file position to its node ID – this part can be moved to the initialization */ + for(int i=0;iNext(); + } + first_read_ = false; + } + *data = dbw->value(); + qp->full_.push(data); + for(int i=0;iNext(); + } +#else + string* data = qp->free_.pop(); // TODO deserialize in-place instead of copy?
- datum->ParseFromString(cursor->value()); - qp->full_.push(datum); + *data = dbw->value(); + qp->full_.push(data); + + dbw->Next(); +#endif +} + + - // go to the next iter +DataReader::DBWrapper::DBWrapper(const LayerParameter& param) { + db.reset(db::GetDB(param.data_param().backend())); + db->Open(param.data_param().source(), db::READ); + cursor.reset(db->NewCursor()); +} + +DataReader::DBShuffle::DBShuffle(const LayerParameter& param):DBWrapper(param) { + CHECK(param.data_param().backend() != DataParameter_DB_LEVELDB) + << "LevelDB doesn't support shuffle"; + while (cursor->valid()) { + image_pointers_.push_back(cursor->valuePointer()); + cursor->Next(); + } + CHECK(!image_pointers_.empty()); + current_image_ = image_pointers_.begin(); + + // randomly shuffle data + LOG(INFO) << "Shuffling data"; +#ifdef USE_MLSL + mn::Distribution * distrib = mn::get_distrib(); + float fetch_seed; + fetch_seed = static_cast(caffe_rng_rand() % 15); + distrib->bcast(&fetch_seed, 1); + LOG(INFO) << "Random seed for shuffling: " << fetch_seed; + prefetch_rng_.reset(new Caffe::RNG(static_cast(fetch_seed))); +#else + const unsigned int prefetch_rng_seed = caffe_rng_rand(); + prefetch_rng_.reset(new Caffe::RNG(prefetch_rng_seed)); +#endif + ShuffleImages(); +} + +void DataReader::DBShuffle::Next() { + current_image_++; + if (current_image_ == image_pointers_.end()) { + ShuffleImages(); + current_image_ = image_pointers_.begin(); + } +} + +void DataReader::DBShuffle::ShuffleImages() { + caffe::rng_t* prefetch_rng = + static_cast(prefetch_rng_->generator()); + shuffle(image_pointers_.begin(), image_pointers_.end(), prefetch_rng); +} + +void DataReader::DBSequential::Next() { cursor->Next(); if (!cursor->valid()) { DLOG(INFO) << "Restarting data prefetching from start."; @@ -116,4 +226,6 @@ void DataReader::Body::read_one(db::Cursor* cursor, QueuePair* qp) { } } + + } // namespace caffe diff --git a/src/caffe/data_transformer.cpp b/src/caffe/data_transformer.cpp index 
7189d67e289..1eadefd78bb 100644 --- a/src/caffe/data_transformer.cpp +++ b/src/caffe/data_transformer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifdef USE_OPENCV #include #endif // USE_OPENCV @@ -5,17 +42,18 @@ #include #include +#include "caffe/data_reader.hpp" #include "caffe/data_transformer.hpp" +#include "caffe/util/bbox_util.hpp" +#include "caffe/util/im_transforms.hpp" #include "caffe/util/io.hpp" -#include "caffe/util/math_functions.hpp" -#include "caffe/util/rng.hpp" namespace caffe { template DataTransformer::DataTransformer(const TransformationParameter& param, Phase phase) - : param_(param), phase_(phase) { + : param_(param), phase_(phase), data_reader_used(NULL) { // check if we want to use mean_file if (param_.has_mean_file()) { CHECK_EQ(param_.mean_value_size(), 0) << @@ -36,22 +74,132 @@ DataTransformer::DataTransformer(const TransformationParameter& param, mean_values_.push_back(param_.mean_value(c)); } } + if (param_.has_resize_param()) { + CHECK_GT(param_.resize_param().height(), 0); + CHECK_GT(param_.resize_param().width(), 0); + } + if (param_.has_expand_param()) { + CHECK_GT(param_.expand_param().max_expand_ratio(), 1.); + } +} + +template +void DataTransformer::Transform(const Datum& datum, Dtype* transformed_data, + NormalizedBBox* crop_bbox, RandNumbers& rand_num, + const bool do_mirror, const bool has_uint8, + const bool has_mean_file, const bool has_mean_values) +{ + int transform_func_id = (do_mirror << 2) + + (has_mean_file << 1) + + has_mean_values; + + if (!has_uint8) { + switch (transform_func_id) { + case 0: Transform(datum, transformed_data, + crop_bbox, 
rand_num); break; + case 1: Transform(datum, transformed_data, + crop_bbox, rand_num); break; + case 2: Transform(datum, transformed_data, + crop_bbox, rand_num); break; + case 3: Transform(datum, transformed_data, + crop_bbox, rand_num); break; + case 4: Transform(datum, transformed_data, + crop_bbox, rand_num); break; + case 5: Transform(datum, transformed_data, + crop_bbox, rand_num); break; + case 6: Transform(datum, transformed_data, + crop_bbox, rand_num); break; + case 7: Transform(datum, transformed_data, + crop_bbox, rand_num); break; + } + } else { + switch (transform_func_id) { + case 0: Transform(datum, transformed_data, + crop_bbox, rand_num); break; + case 1: Transform(datum, transformed_data, + crop_bbox, rand_num); break; + case 2: Transform(datum, transformed_data, + crop_bbox, rand_num); break; + case 3: Transform(datum, transformed_data, + crop_bbox, rand_num); break; + case 4: Transform(datum, transformed_data, + crop_bbox, rand_num); break; + case 5: Transform(datum, transformed_data, + crop_bbox, rand_num); break; + case 6: Transform(datum, transformed_data, + crop_bbox, rand_num); break; + case 7: Transform(datum, transformed_data, + crop_bbox, rand_num); break; + } + } +} + +namespace { + // Based on the path we're in (detection or classification), perform transformations on + // annotations. 
+ template + void call_annotation_handler(AnnotationHandler& anno_handler, const bool do_resize, const bool do_mirror) + { + anno_handler(do_resize, do_mirror); + } + + template<> + void call_annotation_handler(EmptyType&, const bool, const bool) + { + } } template +template void DataTransformer::Transform(const Datum& datum, - Dtype* transformed_data) { + Dtype* transformed_data, + NormalizedBBox* crop_bbox, + RandNumbers& rand_num, + AnnotationHandler anno_handler) +{ + const bool do_mirror = param_.mirror() && rand_num(2); const string& data = datum.data(); - const int datum_channels = datum.channels(); - const int datum_height = datum.height(); - const int datum_width = datum.width(); + const bool has_uint8 = data.size() > 0; + const bool has_mean_file = param_.has_mean_file(); + const bool has_mean_values = mean_values_.size() > 0; + + Transform(datum, transformed_data, crop_bbox, rand_num, + do_mirror, has_uint8, has_mean_file, has_mean_values); + + call_annotation_handler(anno_handler, /* do_resize */ true, do_mirror); +} + +template +template +void DataTransformer::Transform(const Datum& datum_in, + Dtype* transformed_data, + NormalizedBBox* crop_bbox, + RandNumbers& rand_num) { + const Datum *datum = &datum_in; + Datum resized_datum; + if (param_.has_random_resize_param()) { +#ifdef USE_OPENCV + RandomResizeImage(datum_in, &resized_datum); + datum = &resized_datum; +#else + LOG(FATAL) << "Random image resizing requires OpenCV; compile with USE_OPENCV."; +#endif + } else if (param_.has_random_aspect_ratio_param()) { +#ifdef USE_OPENCV + RandomAlterAspectRatio(datum_in, &resized_datum); + datum = &resized_datum; +#else + LOG(FATAL) << "Aspect ratio changes require OpenCV; compile with USE_OPENCV."; +#endif + } + const string& data = datum->data(); + const int datum_channels = datum->channels(); + const int datum_height = datum->height(); + const int datum_width = datum->width(); const int crop_size = param_.crop_size(); const Dtype scale = param_.scale(); - 
const bool do_mirror = param_.mirror() && Rand(2); - const bool has_mean_file = param_.has_mean_file(); - const bool has_uint8 = data.size() > 0; - const bool has_mean_values = mean_values_.size() > 0; CHECK_GT(datum_channels, 0); CHECK_GE(datum_height, crop_size); @@ -85,14 +233,20 @@ void DataTransformer::Transform(const Datum& datum, width = crop_size; // We only do random crop when we do training. if (phase_ == TRAIN) { - h_off = Rand(datum_height - crop_size + 1); - w_off = Rand(datum_width - crop_size + 1); + h_off = rand_num(datum_height - crop_size + 1); + w_off = rand_num(datum_width - crop_size + 1); } else { h_off = (datum_height - crop_size) / 2; w_off = (datum_width - crop_size) / 2; } } + // Return the normalized crop bbox. + crop_bbox->set_xmin(Dtype(w_off) / datum_width); + crop_bbox->set_ymin(Dtype(h_off) / datum_height); + crop_bbox->set_xmax(Dtype(w_off + width) / datum_width); + crop_bbox->set_ymax(Dtype(h_off + height) / datum_height); + Dtype datum_element; int top_index, data_index; for (int c = 0; c < datum_channels; ++c) { @@ -108,7 +262,7 @@ void DataTransformer::Transform(const Datum& datum, datum_element = static_cast(static_cast(data[data_index])); } else { - datum_element = datum.float_data(data_index); + datum_element = datum->float_data(data_index); } if (has_mean_file) { transformed_data[top_index] = @@ -126,10 +280,29 @@ void DataTransformer::Transform(const Datum& datum, } } +template +void DataTransformer::GenerateRandNumbers(PreclcRandomNumbers& rn, bool sample_bboxes) { + int count = (sample_bboxes ? 1 : 0) + (param_.mirror()? 1:0) + + ((phase_ == TRAIN && param_.crop_size())? 
2 : 0); + rn.FillRandomNumbers(count, rand_num_); +} template void DataTransformer::Transform(const Datum& datum, - Blob* transformed_blob) { + Dtype* transformed_data, + RandNumbers& rand_num) { + NormalizedBBox crop_bbox; + Transform(datum, transformed_data, &crop_bbox, rand_num); +} + +template +template +void DataTransformer::Transform(const Datum& datum, + Blob* transformed_blob, + NormalizedBBox* crop_bbox, + RandNumbers& rand_num, + AnnotationHandler anno_handler) +{ // If datum is encoded, decoded and transform the cv::image. if (datum.encoded()) { #ifdef USE_OPENCV @@ -143,7 +316,7 @@ void DataTransformer::Transform(const Datum& datum, cv_img = DecodeDatumToCVMatNative(datum); } // Transform the cv::image into blob. - return Transform(cv_img, transformed_blob); + return Transform(cv_img, transformed_blob, crop_bbox, rand_num, anno_handler); #else LOG(FATAL) << "Encoded datum requires OpenCV; compile with USE_OPENCV."; #endif // USE_OPENCV @@ -178,7 +351,14 @@ void DataTransformer::Transform(const Datum& datum, } Dtype* transformed_data = transformed_blob->mutable_cpu_data(); - Transform(datum, transformed_data); + Transform(datum, transformed_data, crop_bbox, rand_num, anno_handler); +} + +template +void DataTransformer::Transform(const Datum& datum, + Blob* transformed_blob, RandNumbers& rand_num) { + NormalizedBBox crop_bbox; + Transform(datum, transformed_blob, &crop_bbox, rand_num); } template @@ -201,6 +381,336 @@ void DataTransformer::Transform(const vector & datum_vector, } } +template +void DataTransformer::Transform(const AnnotatedDatum& anno_datum, + Blob* transformed_blob, + RepeatedPtrField* transformed_anno_group_all, + RandNumbers& rand_num) { + // Transform datum. + const Datum& datum = anno_datum.datum(); + NormalizedBBox crop_bbox; + + // We need to call TransformAnnotation after do_mirror is set, based on precalculated + // values from RNG. RNG generates only one value for do_mirror, so the variable + // can be set only once. 
Otherwise, RNG's queue will be empty. + auto transform_annotation = [&](const bool do_resize, const bool do_mirror) -> void { + TransformAnnotation(anno_datum, do_resize, crop_bbox, do_mirror, + transformed_anno_group_all); + }; + + Transform(datum, transformed_blob, &crop_bbox, rand_num, transform_annotation); +} + +template +void DataTransformer::Transform(const AnnotatedDatum& anno_datum, + Blob* transformed_blob, + vector* transformed_anno_vec, + RandNumbers& rand_num) { + RepeatedPtrField transformed_anno_group_all; + Transform(anno_datum, transformed_blob, &transformed_anno_group_all, rand_num); + + for (int g = 0; g < transformed_anno_group_all.size(); ++g) { + transformed_anno_vec->push_back(transformed_anno_group_all.Get(g)); + } +} + +template +void DataTransformer::Transform(const AnnotatedDatum& anno_datum, + Blob* transformed_blob, + vector* transformed_anno_vec) { + Transform(anno_datum, transformed_blob, transformed_anno_vec, rand_num_); +} + +template +//template +void DataTransformer::TransformAnnotation(const AnnotatedDatum& anno_datum, + const bool do_resize, + const NormalizedBBox& crop_bbox, + const bool do_mirror, + RepeatedPtrField* transformed_anno_group_all) { + const int img_height = anno_datum.datum().height(); + const int img_width = anno_datum.datum().width(); + if (anno_datum.type() == AnnotatedDatum_AnnotationType_BBOX) { + // Go through each AnnotationGroup. + for (int g = 0; g < anno_datum.annotation_group_size(); ++g) { + const AnnotationGroup& anno_group = anno_datum.annotation_group(g); + AnnotationGroup transformed_anno_group; + // Go through each Annotation. + bool has_valid_annotation = false; + for (int a = 0; a < anno_group.annotation_size(); ++a) { + const Annotation& anno = anno_group.annotation(a); + const NormalizedBBox& bbox = anno.bbox(); + // Adjust bounding box annotation. 
+ NormalizedBBox resize_bbox = bbox; + if (do_resize && param_.has_resize_param()) { + CHECK_GT(img_height, 0); + CHECK_GT(img_width, 0); + UpdateBBoxByResizePolicy(param_.resize_param(), img_width, img_height, + &resize_bbox); + } + if (param_.has_emit_constraint() && + !MeetEmitConstraint(crop_bbox, resize_bbox, + param_.emit_constraint())) { + continue; + } + NormalizedBBox proj_bbox; + if (ProjectBBox(crop_bbox, resize_bbox, &proj_bbox)) { + has_valid_annotation = true; + Annotation* transformed_anno = + transformed_anno_group.add_annotation(); + transformed_anno->set_instance_id(anno.instance_id()); + NormalizedBBox* transformed_bbox = transformed_anno->mutable_bbox(); + transformed_bbox->CopyFrom(proj_bbox); + if (do_mirror) { + Dtype temp = transformed_bbox->xmin(); + transformed_bbox->set_xmin(1 - transformed_bbox->xmax()); + transformed_bbox->set_xmax(1 - temp); + } + if (do_resize && param_.has_resize_param()) { + ExtrapolateBBox(param_.resize_param(), img_height, img_width, + crop_bbox, transformed_bbox); + } + } + } + // Save for output. + if (has_valid_annotation) { + transformed_anno_group.set_group_label(anno_group.group_label()); + transformed_anno_group_all->Add()->CopyFrom(transformed_anno_group); + } + } + } else { + LOG(FATAL) << "Unknown annotation type."; + } +} + +template +void DataTransformer::CropImage(const Datum& datum, + const NormalizedBBox& bbox, + Datum* crop_datum) { + // If datum is encoded, decode and crop the cv::image. + if (datum.encoded()) { +#ifdef USE_OPENCV + CHECK(!(param_.force_color() && param_.force_gray())) + << "cannot set both force_color and force_gray"; + cv::Mat cv_img; + if (param_.force_color() || param_.force_gray()) { + // If force_color then decode in color otherwise decode in gray. + cv_img = DecodeDatumToCVMat(datum, param_.force_color()); + } else { + cv_img = DecodeDatumToCVMatNative(datum); + } + // Crop the image. + cv::Mat crop_img; + CropImage(cv_img, bbox, &crop_img); + // Save the image into datum. 
+ EncodeCVMatToDatum(crop_img, "jpg", crop_datum); + crop_datum->set_label(datum.label()); + return; +#else + LOG(FATAL) << "Encoded datum requires OpenCV; compile with USE_OPENCV."; +#endif // USE_OPENCV + } else { + if (param_.force_color() || param_.force_gray()) { + LOG(ERROR) << "force_color and force_gray only for encoded datum"; + } + } + + const int datum_channels = datum.channels(); + const int datum_height = datum.height(); + const int datum_width = datum.width(); + + // Get the bbox dimension. + NormalizedBBox clipped_bbox; + ClipBBox(bbox, &clipped_bbox); + NormalizedBBox scaled_bbox; + ScaleBBox(clipped_bbox, datum_height, datum_width, &scaled_bbox); + const int w_off = static_cast(scaled_bbox.xmin()); + const int h_off = static_cast(scaled_bbox.ymin()); + const int width = static_cast(scaled_bbox.xmax() - scaled_bbox.xmin()); + const int height = static_cast(scaled_bbox.ymax() - scaled_bbox.ymin()); + + // Crop the image using bbox. + crop_datum->set_channels(datum_channels); + crop_datum->set_height(height); + crop_datum->set_width(width); + crop_datum->set_label(datum.label()); + crop_datum->clear_data(); + crop_datum->clear_float_data(); + crop_datum->set_encoded(false); + const int crop_datum_size = datum_channels * height * width; + const std::string& datum_buffer = datum.data(); + std::string buffer(crop_datum_size, ' '); + for (int h = h_off; h < h_off + height; ++h) { + for (int w = w_off; w < w_off + width; ++w) { + for (int c = 0; c < datum_channels; ++c) { + int datum_index = (c * datum_height + h) * datum_width + w; + int crop_datum_index = (c * height + h - h_off) * width + w - w_off; + buffer[crop_datum_index] = datum_buffer[datum_index]; + } + } + } + crop_datum->set_data(buffer); +} + +template +void DataTransformer::CropImage(const AnnotatedDatum& anno_datum, + const NormalizedBBox& bbox, + AnnotatedDatum* cropped_anno_datum) { + // Crop the datum. 
+ CropImage(anno_datum.datum(), bbox, cropped_anno_datum->mutable_datum()); + cropped_anno_datum->set_type(anno_datum.type()); + + // Transform the annotation according to crop_bbox. + const bool do_resize = false; + const bool do_mirror = false; + NormalizedBBox crop_bbox; + ClipBBox(bbox, &crop_bbox); + TransformAnnotation(anno_datum, do_resize, crop_bbox, do_mirror, + cropped_anno_datum->mutable_annotation_group()); +} + +template +void DataTransformer::ExpandImage(const Datum& datum, + const float expand_ratio, + NormalizedBBox* expand_bbox, + Datum* expand_datum) { + // If datum is encoded, decode and crop the cv::image. + if (datum.encoded()) { +#ifdef USE_OPENCV + CHECK(!(param_.force_color() && param_.force_gray())) + << "cannot set both force_color and force_gray"; + cv::Mat cv_img; + if (param_.force_color() || param_.force_gray()) { + // If force_color then decode in color otherwise decode in gray. + cv_img = DecodeDatumToCVMat(datum, param_.force_color()); + } else { + cv_img = DecodeDatumToCVMatNative(datum); + } + // Expand the image. + cv::Mat expand_img; + ExpandImage(cv_img, expand_ratio, expand_bbox, &expand_img); + // Save the image into datum. + EncodeCVMatToDatum(expand_img, "jpg", expand_datum); + expand_datum->set_label(datum.label()); + return; +#else + LOG(FATAL) << "Encoded datum requires OpenCV; compile with USE_OPENCV."; +#endif // USE_OPENCV + } else { + if (param_.force_color() || param_.force_gray()) { + LOG(ERROR) << "force_color and force_gray only for encoded datum"; + } + } + + const int datum_channels = datum.channels(); + const int datum_height = datum.height(); + const int datum_width = datum.width(); + + // Get the bbox dimension. 
+ int height = static_cast(datum_height * expand_ratio); + int width = static_cast(datum_width * expand_ratio); + float h_off, w_off; + caffe_rng_uniform(1, 0.f, static_cast(height - datum_height), &h_off); + caffe_rng_uniform(1, 0.f, static_cast(width - datum_width), &w_off); + h_off = floor(h_off); + w_off = floor(w_off); + expand_bbox->set_xmin(-w_off/datum_width); + expand_bbox->set_ymin(-h_off/datum_height); + expand_bbox->set_xmax((width - w_off)/datum_width); + expand_bbox->set_ymax((height - h_off)/datum_height); + + // Crop the image using bbox. + expand_datum->set_channels(datum_channels); + expand_datum->set_height(height); + expand_datum->set_width(width); + expand_datum->set_label(datum.label()); + expand_datum->clear_data(); + expand_datum->clear_float_data(); + expand_datum->set_encoded(false); + const int expand_datum_size = datum_channels * height * width; + const std::string& datum_buffer = datum.data(); + std::string buffer(expand_datum_size, ' '); + for (int h = h_off; h < h_off + datum_height; ++h) { + for (int w = w_off; w < w_off + datum_width; ++w) { + for (int c = 0; c < datum_channels; ++c) { + int datum_index = + (c * datum_height + h - h_off) * datum_width + w - w_off; + int expand_datum_index = (c * height + h) * width + w; + buffer[expand_datum_index] = datum_buffer[datum_index]; + } + } + } + expand_datum->set_data(buffer); +} + +template +void DataTransformer::ExpandImage(const AnnotatedDatum& anno_datum, + AnnotatedDatum* expanded_anno_datum) { + if (!param_.has_expand_param()) { + expanded_anno_datum->CopyFrom(anno_datum); + return; + } + const ExpansionParameter& expand_param = param_.expand_param(); + const float expand_prob = expand_param.prob(); + float prob; + caffe_rng_uniform(1, 0.f, 1.f, &prob); + if (prob > expand_prob) { + expanded_anno_datum->CopyFrom(anno_datum); + return; + } + const float max_expand_ratio = expand_param.max_expand_ratio(); + if (fabs(max_expand_ratio - 1.) 
< 1e-2) { + expanded_anno_datum->CopyFrom(anno_datum); + return; + } + float expand_ratio; + caffe_rng_uniform(1, 1.f, max_expand_ratio, &expand_ratio); + // Expand the datum. + NormalizedBBox expand_bbox; + ExpandImage(anno_datum.datum(), expand_ratio, &expand_bbox, + expanded_anno_datum->mutable_datum()); + expanded_anno_datum->set_type(anno_datum.type()); + + // Transform the annotation according to crop_bbox. + const bool do_resize = false; + const bool do_mirror = false; + TransformAnnotation(anno_datum, do_resize, expand_bbox, do_mirror, + expanded_anno_datum->mutable_annotation_group()); +} + +template +void DataTransformer::DistortImage(const Datum& datum, + Datum* distort_datum) { + if (!param_.has_distort_param()) { + distort_datum->CopyFrom(datum); + return; + } + // If datum is encoded, decode and crop the cv::image. + if (datum.encoded()) { +#ifdef USE_OPENCV + CHECK(!(param_.force_color() && param_.force_gray())) + << "cannot set both force_color and force_gray"; + cv::Mat cv_img; + if (param_.force_color() || param_.force_gray()) { + // If force_color then decode in color otherwise decode in gray. + cv_img = DecodeDatumToCVMat(datum, param_.force_color()); + } else { + cv_img = DecodeDatumToCVMatNative(datum); + } + // Distort the image. + cv::Mat distort_img = ApplyDistort(cv_img, param_.distort_param()); + // Save the image into datum. 
+ EncodeCVMatToDatum(distort_img, "jpg", distort_datum); + distort_datum->set_label(datum.label()); + return; +#else + LOG(FATAL) << "Encoded datum requires OpenCV; compile with USE_OPENCV."; +#endif // USE_OPENCV + } else { + LOG(ERROR) << "Only support encoded datum now"; + } +} + #ifdef USE_OPENCV template void DataTransformer::Transform(const vector & mat_vector, @@ -223,12 +733,67 @@ void DataTransformer::Transform(const vector & mat_vector, } template +template void DataTransformer::Transform(const cv::Mat& cv_img, - Blob* transformed_blob) { + Blob* transformed_blob, + NormalizedBBox* crop_bbox, + RandNumbers& rand_num, + AnnotationHandler anno_handler) +{ + const bool do_mirror = param_.mirror() && rand_num(2); + const bool has_mean_file = param_.has_mean_file(); + const bool has_mean_values = mean_values_.size() > 0; + + int transform_func_id = (do_mirror << 2) + + (has_mean_file << 1) + + has_mean_values; + + switch (transform_func_id) { + case 0: Transform(cv_img, transformed_blob, crop_bbox, rand_num); + break; + case 1: Transform(cv_img, transformed_blob, crop_bbox, rand_num); + break; + case 2: Transform(cv_img, transformed_blob, crop_bbox, rand_num); + break; + case 3: Transform(cv_img, transformed_blob, crop_bbox, rand_num); + break; + case 4: Transform(cv_img, transformed_blob, crop_bbox, rand_num); + break; + case 5: Transform(cv_img, transformed_blob, crop_bbox, rand_num); + break; + case 6: Transform(cv_img, transformed_blob, crop_bbox, rand_num); + break; + case 7: Transform(cv_img, transformed_blob, crop_bbox, rand_num); + break; + } + + // const bool do_resize = true; + call_annotation_handler(anno_handler, /* do_resize*/ true, do_mirror); +} + +template +template +void DataTransformer::Transform(const cv::Mat& cv_img_in, + Blob* transformed_blob, NormalizedBBox* crop_bbox, RandNumbers& rand_num) { + const cv::Mat *cv_img = &cv_img_in; + cv::Mat resized_img; + if (param_.has_random_resize_param()) { +#ifdef USE_OPENCV + 
RandomResizeImage(cv_img_in, &resized_img); + cv_img = &resized_img; +#else + LOG(FATAL) << "Random image resizing requires OpenCV; compile with USE_OPENCV."; +#endif + } else if (param_.has_random_aspect_ratio_param()) { +#ifdef USE_OPENCV + RandomAlterAspectRatio(cv_img_in, &resized_img); + cv_img = &resized_img; +#else + LOG(FATAL) << "Aspect ratio changes require OpenCV; compile with USE_OPENCV."; +#endif + } const int crop_size = param_.crop_size(); - const int img_channels = cv_img.channels(); - const int img_height = cv_img.rows; - const int img_width = cv_img.cols; + const int img_channels = cv_img->channels(); // Check dimensions. const int channels = transformed_blob->channels(); @@ -237,31 +802,22 @@ void DataTransformer::Transform(const cv::Mat& cv_img, const int num = transformed_blob->num(); CHECK_EQ(channels, img_channels); - CHECK_LE(height, img_height); - CHECK_LE(width, img_width); CHECK_GE(num, 1); - CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; + CHECK(cv_img->depth() == CV_8U) << "Image data type must be unsigned byte"; const Dtype scale = param_.scale(); - const bool do_mirror = param_.mirror() && Rand(2); - const bool has_mean_file = param_.has_mean_file(); - const bool has_mean_values = mean_values_.size() > 0; CHECK_GT(img_channels, 0); - CHECK_GE(img_height, crop_size); - CHECK_GE(img_width, crop_size); Dtype* mean = NULL; if (has_mean_file) { CHECK_EQ(img_channels, data_mean_.channels()); - CHECK_EQ(img_height, data_mean_.height()); - CHECK_EQ(img_width, data_mean_.width()); mean = data_mean_.mutable_cpu_data(); } if (has_mean_values) { CHECK(mean_values_.size() == 1 || mean_values_.size() == img_channels) << - "Specify either 1 mean_value or as many as channels: " << img_channels; + "Specify either 1 mean_value or as many as channels: " << img_channels; if (img_channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity for (int c = 1; c < img_channels; ++c) { @@ -269,28 +825,48 @@ 
void DataTransformer::Transform(const cv::Mat& cv_img, } } } + cv::Mat cv_resized_img, cv_noised_img; + if (param_.has_resize_param()) { + cv_resized_img = ApplyResize(*cv_img, param_.resize_param()); + } else { + cv_resized_img = *cv_img; + } + if (param_.has_noise_param()) { + cv_noised_img = ApplyNoise(cv_resized_img, param_.noise_param()); + } else { + cv_noised_img = cv_resized_img; + } + int img_height = cv_noised_img.rows; + int img_width = cv_noised_img.cols; + CHECK_GE(img_height, crop_size); + CHECK_GE(img_width, crop_size); int h_off = 0; int w_off = 0; - cv::Mat cv_cropped_img = cv_img; + cv::Mat cv_cropped_img = *cv_img; if (crop_size) { CHECK_EQ(crop_size, height); CHECK_EQ(crop_size, width); // We only do random crop when we do training. if (phase_ == TRAIN) { - h_off = Rand(img_height - crop_size + 1); - w_off = Rand(img_width - crop_size + 1); + h_off = rand_num(img_height - crop_size + 1); + w_off = rand_num(img_width - crop_size + 1); } else { h_off = (img_height - crop_size) / 2; w_off = (img_width - crop_size) / 2; } cv::Rect roi(w_off, h_off, crop_size, crop_size); - cv_cropped_img = cv_img(roi); + cv_cropped_img = (*cv_img)(roi); } else { - CHECK_EQ(img_height, height); - CHECK_EQ(img_width, width); + cv_cropped_img = cv_noised_img; } + // Return the normalized crop bbox. 
+ crop_bbox->set_xmin(Dtype(w_off) / img_width); + crop_bbox->set_ymin(Dtype(h_off) / img_height); + crop_bbox->set_xmax(Dtype(w_off + width) / img_width); + crop_bbox->set_ymax(Dtype(h_off + height) / img_height); + CHECK(cv_cropped_img.data); Dtype* transformed_data = transformed_blob->mutable_cpu_data(); @@ -305,7 +881,7 @@ void DataTransformer::Transform(const cv::Mat& cv_img, } else { top_index = (c * height + h) * width + w; } - // int top_index = (c * height + h) * width + w; + // int top_index = (c * height + h) * width + w; Dtype pixel = static_cast(ptr[img_index++]); if (has_mean_file) { int mean_index = (c * img_height + h_off + h) * img_width + w_off + w; @@ -323,6 +899,276 @@ void DataTransformer::Transform(const cv::Mat& cv_img, } } } + +template +void DataTransformer::TransformInv(const Dtype* data, cv::Mat* cv_img, + const int height, const int width, + const int channels) { + const Dtype scale = param_.scale(); + const bool has_mean_file = param_.has_mean_file(); + const bool has_mean_values = mean_values_.size() > 0; + + Dtype* mean = NULL; + if (has_mean_file) { + CHECK_EQ(channels, data_mean_.channels()); + CHECK_EQ(height, data_mean_.height()); + CHECK_EQ(width, data_mean_.width()); + mean = data_mean_.mutable_cpu_data(); + } + if (has_mean_values) { + CHECK(mean_values_.size() == 1 || mean_values_.size() == channels) << + "Specify either 1 mean_value or as many as channels: " << channels; + if (channels > 1 && mean_values_.size() == 1) { + // Replicate the mean_value for simplicity + for (int c = 1; c < channels; ++c) { + mean_values_.push_back(mean_values_[0]); + } + } + } + + const int img_type = channels == 3 ? 
CV_8UC3 : CV_8UC1; + cv::Mat orig_img(height, width, img_type, cv::Scalar(0, 0, 0)); + for (int h = 0; h < height; ++h) { + uchar* ptr = orig_img.ptr(h); + int img_idx = 0; + for (int w = 0; w < width; ++w) { + for (int c = 0; c < channels; ++c) { + int idx = (c * height + h) * width + w; + if (has_mean_file) { + ptr[img_idx++] = static_cast(data[idx] / scale + mean[idx]); + } else { + if (has_mean_values) { + ptr[img_idx++] = + static_cast(data[idx] / scale + mean_values_[c]); + } else { + ptr[img_idx++] = static_cast(data[idx] / scale); + } + } + } + } + } + + if (param_.has_resize_param()) { + *cv_img = ApplyResize(orig_img, param_.resize_param()); + } else { + *cv_img = orig_img; + } +} + +template +void DataTransformer::TransformInv(const Blob* blob, + vector* cv_imgs) { + const int channels = blob->channels(); + const int height = blob->height(); + const int width = blob->width(); + const int num = blob->num(); + CHECK_GE(num, 1); + const Dtype* image_data = blob->cpu_data(); + + for (int i = 0; i < num; ++i) { + cv::Mat cv_img; + TransformInv(image_data, &cv_img, height, width, channels); + cv_imgs->push_back(cv_img); + image_data += blob->offset(1); + } +} + +template +void DataTransformer::Transform(const cv::Mat& cv_img, + Blob* transformed_blob, + RandNumbers& rand_num) { + NormalizedBBox crop_bbox; + Transform(cv_img, transformed_blob, &crop_bbox, rand_num); +} + +template +void DataTransformer::CropImage(const cv::Mat& img, + const NormalizedBBox& bbox, + cv::Mat* crop_img) { + const int img_height = img.rows; + const int img_width = img.cols; + + // Get the bbox dimension. + NormalizedBBox clipped_bbox; + ClipBBox(bbox, &clipped_bbox); + NormalizedBBox scaled_bbox; + ScaleBBox(clipped_bbox, img_height, img_width, &scaled_bbox); + + // Crop the image using bbox. 
+ int w_off = static_cast(scaled_bbox.xmin()); + int h_off = static_cast(scaled_bbox.ymin()); + int width = static_cast(scaled_bbox.xmax() - scaled_bbox.xmin()); + int height = static_cast(scaled_bbox.ymax() - scaled_bbox.ymin()); + cv::Rect bbox_roi(w_off, h_off, width, height); + + img(bbox_roi).copyTo(*crop_img); +} + +template +void DataTransformer::ExpandImage(const cv::Mat& img, + const float expand_ratio, + NormalizedBBox* expand_bbox, + cv::Mat* expand_img) { + const int img_height = img.rows; + const int img_width = img.cols; + const int img_channels = img.channels(); + + // Get the bbox dimension. + int height = static_cast(img_height * expand_ratio); + int width = static_cast(img_width * expand_ratio); + float h_off, w_off; + caffe_rng_uniform(1, 0.f, static_cast(height - img_height), &h_off); + caffe_rng_uniform(1, 0.f, static_cast(width - img_width), &w_off); + h_off = floor(h_off); + w_off = floor(w_off); + expand_bbox->set_xmin(-w_off/img_width); + expand_bbox->set_ymin(-h_off/img_height); + expand_bbox->set_xmax((width - w_off)/img_width); + expand_bbox->set_ymax((height - h_off)/img_height); + + expand_img->create(height, width, img.type()); + expand_img->setTo(cv::Scalar(0)); + const bool has_mean_file = param_.has_mean_file(); + const bool has_mean_values = mean_values_.size() > 0; + + if (has_mean_file) { + CHECK_EQ(img_channels, data_mean_.channels()); + CHECK_EQ(height, data_mean_.height()); + CHECK_EQ(width, data_mean_.width()); + Dtype* mean = data_mean_.mutable_cpu_data(); + for (int h = 0; h < height; ++h) { + uchar* ptr = expand_img->ptr(h); + int img_index = 0; + for (int w = 0; w < width; ++w) { + for (int c = 0; c < img_channels; ++c) { + int blob_index = (c * height + h) * width + w; + ptr[img_index++] = static_cast(mean[blob_index]); + } + } + } + } + if (has_mean_values) { + CHECK(mean_values_.size() == 1 || mean_values_.size() == img_channels) << + "Specify either 1 mean_value or as many as channels: " << img_channels; + if 
(img_channels > 1 && mean_values_.size() == 1) { + // Replicate the mean_value for simplicity + for (int c = 1; c < img_channels; ++c) { + mean_values_.push_back(mean_values_[0]); + } + } + vector channels(img_channels); + cv::split(*expand_img, channels); + CHECK_EQ(channels.size(), mean_values_.size()); + for (int c = 0; c < img_channels; ++c) { + channels[c] = mean_values_[c]; + } + cv::merge(channels, *expand_img); + } + + cv::Rect bbox_roi(w_off, h_off, img_width, img_height); + img.copyTo((*expand_img)(bbox_roi)); +} + +static cv::Mat ResizeImagePerShorterSize(const cv::Mat& img, int shorter_size, ResizeParameter resize_param) { + int h = img.size().height; + int w = img.size().width; + resize_param.set_height(shorter_size); + resize_param.set_width(shorter_size); + if (h < w) { + resize_param.set_width(int(float(w) / h * shorter_size)); + } else { + resize_param.set_height(int(float(h) / w * shorter_size)); + } + return ApplyResize(img, resize_param); +} + +template +void DataTransformer::RandomResizeImage(const Datum& datum, Datum *resized_datum) { + shared_ptr img; + if (datum.encoded()) { + img = shared_ptr(new cv::Mat(DecodeDatumToCVMatNative(datum))); + } else { + img = shared_ptr(new cv::Mat( + cv::Size(datum.width(), datum.height()), + CV_8UC(datum.channels()), + (void*)datum.data().data())); + } + cv::Mat resized_img; + RandomResizeImage(*img, &resized_img); + CVMatToDatum(resized_img, resized_datum); +} + +template +void DataTransformer::RandomResizeImage(const cv::Mat& img, cv::Mat *resized_img) { + int h = img.size().height; + int w = img.size().width; + int min_size = param_.random_resize_param().min_size(); + int max_size = param_.random_resize_param().max_size(); + ResizeParameter resize_param = param_.random_resize_param().resize_param(); + if (min_size == 0) min_size = std::min(h,w); + if (max_size == 0) max_size = std::max(h,w); + int shorter_size = rand_num_(max_size - min_size + 1) + min_size; + *resized_img = 
ResizeImagePerShorterSize(img, shorter_size, resize_param); +} + +template +void DataTransformer::RandomAlterAspectRatio(const Datum& datum, Datum *resized_datum) { + shared_ptr img; + if (datum.encoded()) { + img = shared_ptr(new cv::Mat(DecodeDatumToCVMatNative(datum))); + } else { + img = shared_ptr(new cv::Mat( + cv::Size(datum.width(), datum.height()), + CV_8UC(datum.channels()), + (void*)datum.data().data())); + } + cv::Mat resized_img; + RandomAlterAspectRatio(*img, &resized_img); + CVMatToDatum(resized_img, resized_datum); +} + +static float RandRatio(float min, float max, RandNumbers& rand_num) { + return (rand_num(int((max - min) * 1000 + 1)) + min * 1000) / 1000; +} + +template +void DataTransformer::RandomAlterAspectRatio(const cv::Mat& img, cv::Mat *resized_img) { + const int crop_size = param_.crop_size(); + const int h = img.size().height; + const int w = img.size().width; + const float area = h * w; + const float min_area_ratio = param_.random_aspect_ratio_param().min_area_ratio(); + const float max_area_ratio = param_.random_aspect_ratio_param().max_area_ratio(); + const float min_aspect_ratio_change = + param_.random_aspect_ratio_param().aspect_ratio_change(); + CHECK(crop_size > 0); + CHECK(max_area_ratio >= min_area_ratio); + ResizeParameter resize_param = param_.random_aspect_ratio_param().resize_param(); + int attempt = 0; + while (attempt++ < 10) { + float area_ratio = RandRatio(min_area_ratio, max_area_ratio, rand_num_); + float aspect_ratio_change = + RandRatio(min_aspect_ratio_change, 1 / min_aspect_ratio_change, rand_num_); + float new_area = area_ratio * area; + int new_h = int(sqrt(new_area) * aspect_ratio_change); + int new_w = int(sqrt(new_area) / aspect_ratio_change); + if (RandRatio(0, 1, rand_num_) < 0.5) { + int tmp = new_h; new_h = new_w; new_w = tmp; + } + if (new_h <= h && new_w <= w) { + int y = rand_num_(h - new_h + 1); + int x = rand_num_(w - new_w + 1); + cv::Rect roi(x, y, new_w, new_h); + cv::Mat croppedImg = img(roi); + 
resize_param.set_height(crop_size); + resize_param.set_width(crop_size); + *resized_img = ApplyResize(croppedImg, resize_param); + return; + } + } + *resized_img = ResizeImagePerShorterSize(img, crop_size, resize_param); +} + #endif // USE_OPENCV template @@ -358,7 +1204,7 @@ void DataTransformer::Transform(Blob* input_blob, const Dtype scale = param_.scale(); - const bool do_mirror = param_.mirror() && Rand(2); + const bool do_mirror = param_.mirror() && rand_num_(2); const bool has_mean_file = param_.has_mean_file(); const bool has_mean_values = mean_values_.size() > 0; @@ -369,8 +1215,8 @@ void DataTransformer::Transform(Blob* input_blob, CHECK_EQ(crop_size, width); // We only do random crop when we do training. if (phase_ == TRAIN) { - h_off = Rand(input_height - crop_size + 1); - w_off = Rand(input_width - crop_size + 1); + h_off = rand_num_(input_height - crop_size + 1); + w_off = rand_num_(input_width - crop_size + 1); } else { h_off = (input_height - crop_size) / 2; w_off = (input_width - crop_size) / 2; @@ -388,13 +1234,14 @@ void DataTransformer::Transform(Blob* input_blob, for (int n = 0; n < input_num; ++n) { int offset = input_blob->offset(n); caffe_sub(data_mean_.count(), input_data + offset, - data_mean_.cpu_data(), input_data + offset); + data_mean_.cpu_data(), input_data + offset); } } if (has_mean_values) { - CHECK(mean_values_.size() == 1 || mean_values_.size() == input_channels) << - "Specify either 1 mean_value or as many as channels: " << input_channels; + CHECK(mean_values_.size() == 1 || mean_values_.size() == input_channels) + << "Specify either 1 mean_value or as many as channels: " + << input_channels; if (mean_values_.size() == 1) { caffe_add_scalar(input_blob->count(), -(mean_values_[0]), input_data); } else { @@ -402,7 +1249,7 @@ void DataTransformer::Transform(Blob* input_blob, for (int c = 0; c < input_channels; ++c) { int offset = input_blob->offset(n, c); caffe_add_scalar(input_height * input_width, -(mean_values_[c]), - input_data 
+ offset); + input_data + offset); } } } @@ -457,14 +1304,22 @@ vector DataTransformer::InferBlobShape(const Datum& datum) { LOG(FATAL) << "Encoded datum requires OpenCV; compile with USE_OPENCV."; #endif // USE_OPENCV } + const int crop_size = param_.crop_size(); const int datum_channels = datum.channels(); - const int datum_height = datum.height(); - const int datum_width = datum.width(); + int datum_height = datum.height(); + int datum_width = datum.width(); + // Check dimensions. CHECK_GT(datum_channels, 0); CHECK_GE(datum_height, crop_size); CHECK_GE(datum_width, crop_size); + + if (param_.has_resize_param()) { + InferNewSize(param_.resize_param(), datum_width, datum_height, + &datum_width, &datum_height); + } + // Build BlobShape. vector shape(4); shape[0] = 1; @@ -491,12 +1346,23 @@ template vector DataTransformer::InferBlobShape(const cv::Mat& cv_img) { const int crop_size = param_.crop_size(); const int img_channels = cv_img.channels(); - const int img_height = cv_img.rows; - const int img_width = cv_img.cols; + int img_height = cv_img.rows; + int img_width = cv_img.cols; // Check dimensions. CHECK_GT(img_channels, 0); - CHECK_GE(img_height, crop_size); - CHECK_GE(img_width, crop_size); + + if (param_.has_random_resize_param() || param_.has_random_aspect_ratio_param()) { + CHECK_GT(crop_size, 0); + } else { + CHECK_GE(img_height, crop_size); + CHECK_GE(img_width, crop_size); + } + + if (param_.has_resize_param()) { + InferNewSize(param_.resize_param(), img_width, img_height, + &img_width, &img_height); + } + // Build BlobShape. 
vector shape(4); shape[0] = 1; @@ -522,22 +1388,22 @@ vector DataTransformer::InferBlobShape( template void DataTransformer::InitRand() { const bool needs_rand = param_.mirror() || + param_.has_random_resize_param() || + param_.has_random_aspect_ratio_param() || (phase_ == TRAIN && param_.crop_size()); + if (needs_rand) { - const unsigned int rng_seed = caffe_rng_rand(); - rng_.reset(new Caffe::RNG(rng_seed)); + rand_num_.Init(); } else { - rng_.reset(); + rand_num_.Reset(); } } template -int DataTransformer::Rand(int n) { - CHECK(rng_); - CHECK_GT(n, 0); - caffe::rng_t* rng = - static_cast(rng_->generator()); - return ((*rng)() % n); +void DataTransformer::ReinitRand() { + if (rand_num_.IsEmpty()) { + rand_num_.Init(); + } } INSTANTIATE_CLASS(DataTransformer); diff --git a/src/caffe/internal_thread.cpp b/src/caffe/internal_thread.cpp index 104884e0295..a22a719e9a6 100644 --- a/src/caffe/internal_thread.cpp +++ b/src/caffe/internal_thread.cpp @@ -1,7 +1,45 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include #include "caffe/internal_thread.hpp" +#include "caffe/util/cpu_info.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { @@ -48,6 +86,10 @@ void InternalThread::entry(int device, Caffe::Brew mode, int rand_seed, Caffe::set_solver_count(solver_count); Caffe::set_root_solver(root_solver); +#ifdef _OPENMP + caffe::cpu::OpenMpManager::bindCurrentThreadToNonPrimaryCoreIfPossible(); +#endif + InternalThreadEntry(); } diff --git a/src/caffe/layer.cpp b/src/caffe/layer.cpp index 3b9128986ae..ad642269426 100644 --- a/src/caffe/layer.cpp +++ b/src/caffe/layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include "caffe/layer.hpp" @@ -22,6 +59,55 @@ void Layer::Unlock() { } } +#ifdef USE_MLSL +template +mn::Distribution & Layer::GetDistribution() { + const MultinodeLayerParameter &mn_layer_param = layer_param_.multinode(); + int num_nodes = mn_layer_param.num_nodes(); + int model_parts = mn_layer_param.model_parts(); + mn::GetCanonicalMnParam(num_nodes, model_parts); + return *mn::get_distrib(num_nodes/model_parts, model_parts); +} + +template +bool Layer::Bypass(const vector*>& bottom, + const vector*>& top) { + return GetDistribution().get_group_id() > 0; +} + +template +void Layer::MultinodeSetUp(const vector*>& bottom, + const vector*>& top) { + if (this->layerOp != NULL || this->phase_ != TRAIN || Bypass(bottom, top)) { + return; + } + + int num_nodes = layer_param_.multinode().num_nodes(); + int model_parts = layer_param_.multinode().model_parts(); + mn::GetCanonicalMnParam(num_nodes, model_parts); + int data_parts = num_nodes / model_parts; + + if (data_parts <= 1 || this->blobs_.size() == 0) return; + + // We only initialize data parallelism here so operation type is + // irrelevant here, hard-code to OT_CC + mn::OpRegInfo reg_info(mn::train::get_session(), MLSL::OT_CC); + reg_info.set_name(this->layer_param().name()); + for (int i = 0; i < this->blobs_.size(); i++) { + int hw = 1, ic = 1, oc = 1; + const vector &shape = this->blobs_[i]->shape(); + CHECK_GT(shape.size(), 0); + oc = shape[0]; + if (shape.size() > 1) ic = shape[1]; + if (shape.size() >= 4) hw = shape[2] * shape[3]; + // Note that MLSL expects the entire weights from a model group. + // So we should multiply by model_parts here. 
+ reg_info.add_parameter_set(ic * oc * model_parts, hw); + } + this->layerOp = mn::train::add_operation(reg_info, this->GetDistribution()); +} +#endif + INSTANTIATE_CLASS(Layer); } // namespace caffe diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp index e967bd6181c..2b52007cc3c 100644 --- a/src/caffe/layer_factory.cpp +++ b/src/caffe/layer_factory.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + // Make sure we include Python.h before any system header // to avoid _POSIX_C_SOURCE redefinition #ifdef WITH_PYTHON_LAYER @@ -5,15 +42,25 @@ #endif #include +#include "caffe/engine_parser.hpp" #include "caffe/layer.hpp" #include "caffe/layer_factory.hpp" +#include "caffe/layers/batch_norm_layer.hpp" +#include "caffe/layers/concat_layer.hpp" #include "caffe/layers/conv_layer.hpp" +#include "caffe/layers/inner_product_layer.hpp" #include "caffe/layers/lrn_layer.hpp" #include "caffe/layers/pooling_layer.hpp" #include "caffe/layers/relu_layer.hpp" #include "caffe/layers/sigmoid_layer.hpp" #include "caffe/layers/softmax_layer.hpp" #include "caffe/layers/tanh_layer.hpp" +#ifdef MKL2017_SUPPORTED +#include "caffe/layers/mkl_layers.hpp" +#endif +#ifdef MKLDNN_SUPPORTED +#include "caffe/layers/mkldnn_layers.hpp" +#endif #include "caffe/proto/caffe.pb.h" #ifdef USE_CUDNN @@ -39,7 +86,8 @@ shared_ptr > GetConvolutionLayer( const LayerParameter& param) { ConvolutionParameter conv_param = param.convolution_param(); ConvolutionParameter_Engine engine = conv_param.engine(); -#ifdef USE_CUDNN + +#if defined(USE_CUDNN) || defined(MKL2017_SUPPORTED) || defined(MKLDNN_SUPPORTED) bool use_dilation = false; for (int i = 0; i < conv_param.dilation_size(); ++i) { if (conv_param.dilation(i) > 1) { @@ -47,6 +95,31 @@ shared_ptr > GetConvolutionLayer( } } #endif + + // New, more flexible way of providing engine + if (engine == ConvolutionParameter_Engine_DEFAULT && 
param.engine() != "") { + EngineParser ep(param.engine()); + + if (ep.isEngine("CAFFE")) { + engine = ConvolutionParameter_Engine_CAFFE; + } +#ifdef USE_CUDNN + else if (!use_dilation && ep.isEngine("CUDNN")) { + engine = ConvolutionParameter_Engine_CUDNN; + } +#endif +#ifdef MKL2017_SUPPORTED + else if (!use_dilation && ep.isEngine("MKL2017")) { + engine = ConvolutionParameter_Engine_MKL2017; + } +#endif +#ifdef MKLDNN_SUPPORTED + else if (!use_dilation && ep.isEngine("MKLDNN")) { + engine = ConvolutionParameter_Engine_MKLDNN; + } +#endif + } + if (engine == ConvolutionParameter_Engine_DEFAULT) { engine = ConvolutionParameter_Engine_CAFFE; #ifdef USE_CUDNN @@ -65,17 +138,155 @@ shared_ptr > GetConvolutionLayer( } return shared_ptr >(new CuDNNConvolutionLayer(param)); #endif +#ifdef MKL2017_SUPPORTED + } else if (engine == ConvolutionParameter_Engine_MKL2017) { + if (use_dilation) { + LOG(FATAL) << "MKL2017 doesn't support the dilated convolution at Layer " + << param.name(); + } + return shared_ptr >(new MKLConvolutionLayer(param)); +#endif +#ifdef MKLDNN_SUPPORTED + } else if (engine == ConvolutionParameter_Engine_MKLDNN) { + if (use_dilation) { + LOG(FATAL) << "MKLDNN doesn't support the dilated convolution at Layer " + << param.name(); + } + return shared_ptr >(new MKLDNNConvolutionLayer(param)); +#endif } else { LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; } + return shared_ptr >(); } REGISTER_LAYER_CREATOR(Convolution, GetConvolutionLayer); +// Get deconvolution layer according to engine. 
+template +shared_ptr > GetDeconvolutionLayer( + const LayerParameter& param) { + ConvolutionParameter conv_param = param.convolution_param(); + ConvolutionParameter_Engine engine = conv_param.engine(); + +#if defined(MKL2017_SUPPORTED) + bool use_dilation = false; + for (int i = 0; i < conv_param.dilation_size(); ++i) { + if (conv_param.dilation(i) > 1) { + use_dilation = true; + } + } +#endif + + // New, more flexible way of providing engine + if (engine == ConvolutionParameter_Engine_DEFAULT && param.engine() != "") { + EngineParser ep(param.engine()); + + if (ep.isEngine("CAFFE")) { + engine = ConvolutionParameter_Engine_CAFFE; + } +#ifdef MKL2017_SUPPORTED + else if (!use_dilation && ep.isEngine("MKL2017")) { + engine = ConvolutionParameter_Engine_MKL2017; + } +#endif + + } + + if (engine == ConvolutionParameter_Engine_DEFAULT) { + engine = ConvolutionParameter_Engine_CAFFE; + } + if (engine == ConvolutionParameter_Engine_CAFFE) { + return shared_ptr >(new DeconvolutionLayer(param)); +#ifdef MKL2017_SUPPORTED + } else if (engine == ConvolutionParameter_Engine_MKL2017) { + if (use_dilation) { + LOG(FATAL) << "MKL2017 doesn't support the dilated convolution at Layer " + << param.name(); + } + return shared_ptr >(new MKLDeconvolutionLayer(param)); +#endif + } else { + LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + } + return shared_ptr >(); +} + +REGISTER_LAYER_CREATOR(Deconvolution, GetDeconvolutionLayer); + +// Get inner_product layer according to engine. 
+template +shared_ptr > GetInnerProductLayer( + const LayerParameter& param) { + InnerProductParameter ip_param = param.inner_product_param(); + InnerProductParameter_Engine engine = ip_param.engine(); + + // New, more flexible way of providing engine + if (engine == InnerProductParameter_Engine_DEFAULT && param.engine() != "") { + EngineParser ep(param.engine()); + if (ep.isEngine("CAFFE")) { + engine = InnerProductParameter_Engine_CAFFE; + } +#ifdef MKLDNN_SUPPORTED + else if (ep.isEngine("MKLDNN") && !ip_param.transpose()) { + engine = InnerProductParameter_Engine_MKLDNN; + } +#endif + } + + if (engine == InnerProductParameter_Engine_DEFAULT) { + engine = InnerProductParameter_Engine_CAFFE; + } + if (engine == InnerProductParameter_Engine_CAFFE) { + return shared_ptr >(new InnerProductLayer(param)); +#ifdef MKLDNN_SUPPORTED + } else if (engine == InnerProductParameter_Engine_MKLDNN) { + if (ip_param.transpose()) { + LOG(FATAL) << "MKL-DNN doesn't support transposed weights at Layer " + << param.name(); + } + return shared_ptr >(new MKLDNNInnerProductLayer(param)); +#endif + } else { + LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + } + + return shared_ptr >(new InnerProductLayer(param)); +} + +REGISTER_LAYER_CREATOR(InnerProduct, GetInnerProductLayer); + // Get pooling layer according to engine. 
template shared_ptr > GetPoolingLayer(const LayerParameter& param) { PoolingParameter_Engine engine = param.pooling_param().engine(); + + // New, more flexible way of providing engine + if (engine == PoolingParameter_Engine_DEFAULT && param.engine() != "") { + EngineParser ep(param.engine()); + + if (ep.isEngine("CAFFE")) { + engine = PoolingParameter_Engine_CAFFE; + } +#ifdef USE_CUDNN + else if (ep.isEngine("CUDNN")) { + engine = PoolingParameter_Engine_CUDNN; + } +#endif +#ifdef MKL2017_SUPPORTED + else if (ep.isEngine("MKL2017")) { + engine = PoolingParameter_Engine_MKL2017; + } +#endif +#ifdef MKLDNN_SUPPORTED + else if (ep.isEngine("MKLDNN")) { + PoolingParameter_PoolMethod method = param.pooling_param().pool(); + if (method != PoolingParameter_PoolMethod_STOCHASTIC) + engine = PoolingParameter_Engine_MKLDNN; + } +#endif + } + if (engine == PoolingParameter_Engine_DEFAULT) { engine = PoolingParameter_Engine_CAFFE; #ifdef USE_CUDNN @@ -102,9 +313,18 @@ shared_ptr > GetPoolingLayer(const LayerParameter& param) { return shared_ptr >(new CuDNNPoolingLayer(param)); } #endif +#ifdef MKL2017_SUPPORTED + } else if (engine == PoolingParameter_Engine_MKL2017) { + return shared_ptr >(new MKLPoolingLayer(param)); +#endif +#ifdef MKLDNN_SUPPORTED + } else if (engine == PoolingParameter_Engine_MKLDNN) { + return shared_ptr >(new MKLDNNPoolingLayer(param)); +#endif } else { LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; } + return shared_ptr >(); } REGISTER_LAYER_CREATOR(Pooling, GetPoolingLayer); @@ -114,11 +334,32 @@ template shared_ptr > GetLRNLayer(const LayerParameter& param) { LRNParameter_Engine engine = param.lrn_param().engine(); + // New, more flexible way of providing engine + if (engine == LRNParameter_Engine_DEFAULT && param.engine() != "") { + EngineParser ep(param.engine()); + + if (ep.isEngine("CAFFE")) + engine = LRNParameter_Engine_CAFFE; +#ifdef USE_CUDNN + else if (ep.isEngine("CUDNN")) + engine = LRNParameter_Engine_CUDNN; +#endif 
+#ifdef MKL2017_SUPPORTED + else if (ep.isEngine("MKL2017") && param.lrn_param().norm_region() + == LRNParameter_NormRegion_ACROSS_CHANNELS) + engine = LRNParameter_Engine_MKL2017; +#endif +#ifdef MKLDNN_SUPPORTED + else if (ep.isEngine("MKLDNN") && param.lrn_param().norm_region() + == LRNParameter_NormRegion_ACROSS_CHANNELS) + engine = LRNParameter_Engine_MKLDNN; +#endif + } + if (engine == LRNParameter_Engine_DEFAULT) { + engine = LRNParameter_Engine_CAFFE; #ifdef USE_CUDNN engine = LRNParameter_Engine_CUDNN; -#else - engine = LRNParameter_Engine_CAFFE; #endif } @@ -139,17 +380,132 @@ shared_ptr > GetLRNLayer(const LayerParameter& param) { } } #endif +#ifdef MKL2017_SUPPORTED + } else if (engine == LRNParameter_Engine_MKL2017) { + return shared_ptr >(new MKLLRNLayer(param)); +#endif +#if MKLDNN_SUPPORTED + } else if (engine == LRNParameter_Engine_MKLDNN) { + return shared_ptr >(new MKLDNNLRNLayer(param)); +#endif } else { LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; } + return shared_ptr >(); } REGISTER_LAYER_CREATOR(LRN, GetLRNLayer); -// Get relu layer according to engine. 
+// Get BatchNorm layer according to engine +template +shared_ptr > GetBatchNormLayer(const LayerParameter& param) { + BatchNormParameter_Engine engine = param.batch_norm_param().engine(); + +// New, more flexible way of providing engine + if (engine == BatchNormParameter_Engine_DEFAULT && param.engine() != "") { + EngineParser ep(param.engine()); + + if (ep.isEngine("CAFFE")) + engine = BatchNormParameter_Engine_CAFFE; +#if defined(MKL2017_SUPPORTED) + else if (ep.isEngine("MKL2017")) + engine = BatchNormParameter_Engine_MKL2017; +#endif +#if defined(MKLDNN_SUPPORTED) + else if (ep.isEngine("MKLDNN")) + engine = BatchNormParameter_Engine_MKLDNN; +#endif + } + + if (engine == BatchNormParameter_Engine_DEFAULT) { + engine = BatchNormParameter_Engine_CAFFE; + } + + if (engine == BatchNormParameter_Engine_CAFFE) { + return shared_ptr >(new BatchNormLayer(param)); +#if defined(MKL2017_SUPPORTED) + } else if (engine == BatchNormParameter_Engine_MKL2017) { + return shared_ptr >(new MKLBatchNormLayer(param)); +#endif +#ifdef MKLDNN_SUPPORTED + } else if (engine == BatchNormParameter_Engine_MKLDNN) { + return shared_ptr >(new MKLDNNBatchNormLayer(param)); +#endif + } else { + LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + } + return shared_ptr >(); +} + +REGISTER_LAYER_CREATOR(BatchNorm, GetBatchNormLayer); + +// Get Split layer according to engine +template +shared_ptr > GetSplitLayer(const LayerParameter& param) { + SplitParameter_Engine engine = param.split_param().engine(); + + // New, more flexible way of providing engine + if (engine == SplitParameter_Engine_DEFAULT && param.engine() != "") { + EngineParser ep(param.engine()); + if (ep.isEngine("CAFFE")) + engine = SplitParameter_Engine_CAFFE; +#if defined(MKL2017_SUPPORTED) + else if (ep.isEngine("MKL2017")) + engine = SplitParameter_Engine_MKL2017; +#endif +#if defined(MKLDNN_SUPPORTED) + else if (ep.isEngine("MKLDNN")) + engine = SplitParameter_Engine_MKLDNN; +#endif + } + + if (engine == 
SplitParameter_Engine_DEFAULT) { + engine = SplitParameter_Engine_CAFFE; + } + + if (engine == SplitParameter_Engine_CAFFE) { + return shared_ptr >(new SplitLayer(param)); +#if defined(MKL2017_SUPPORTED) + } else if (engine == SplitParameter_Engine_MKL2017) { + return shared_ptr >(new MKLSplitLayer(param)); +#endif +#if defined(MKLDNN_SUPPORTED) + } else if(engine == SplitParameter_Engine_MKLDNN) { + return shared_ptr >(new MKLDNNSplitLayer(param)); +#endif + } else { + LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; + } + return shared_ptr >(); +} + +REGISTER_LAYER_CREATOR(Split, GetSplitLayer); + +// Get ReLU layer according to engine. template shared_ptr > GetReLULayer(const LayerParameter& param) { ReLUParameter_Engine engine = param.relu_param().engine(); + + // New, more flexible way of providing engine + if (engine == ReLUParameter_Engine_DEFAULT && param.engine() != "") { + EngineParser ep(param.engine()); + + if (ep.isEngine("CAFFE")) + engine = ReLUParameter_Engine_CAFFE; +#ifdef USE_CUDNN + else if (ep.isEngine("CUDNN")) + engine = ReLUParameter_Engine_CUDNN; +#endif +#if defined(MKL2017_SUPPORTED) + else if (ep.isEngine("MKL2017")) + engine = ReLUParameter_Engine_MKL2017; +#endif +#if defined(MKLDNN_SUPPORTED) + else if (ep.isEngine("MKLDNN")) + engine = ReLUParameter_Engine_MKLDNN; +#endif + } + if (engine == ReLUParameter_Engine_DEFAULT) { engine = ReLUParameter_Engine_CAFFE; #ifdef USE_CUDNN @@ -162,17 +518,122 @@ shared_ptr > GetReLULayer(const LayerParameter& param) { } else if (engine == ReLUParameter_Engine_CUDNN) { return shared_ptr >(new CuDNNReLULayer(param)); #endif +#ifdef MKL2017_SUPPORTED + } else if (engine == ReLUParameter_Engine_MKL2017) { + return shared_ptr >(new MKLReLULayer(param)); +#endif +#ifdef MKLDNN_SUPPORTED + } else if (engine == ReLUParameter_Engine_MKLDNN) { + return shared_ptr >(new MKLDNNReLULayer(param)); +#endif } else { LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; } + return 
shared_ptr >(); } REGISTER_LAYER_CREATOR(ReLU, GetReLULayer); +// Get concat layer according to engine. +template +shared_ptr > GetConcatLayer(const LayerParameter& param) { + ConcatParameter_Engine engine = param.concat_param().engine(); + + // New, more flexible way of providing engine + if (engine == ConcatParameter_Engine_DEFAULT && param.engine() != "") { + EngineParser ep(param.engine()); + + if (ep.isEngine("CAFFE")) + engine = ConcatParameter_Engine_CAFFE; +#if defined(MKL2017_SUPPORTED) + else if (ep.isEngine("MKL2017") && param.concat_param().axis() == 1) + engine = ConcatParameter_Engine_MKL2017; +#endif +#if defined(MKLDNN_SUPPORTED) + else if (ep.isEngine("MKLDNN")) + engine = ConcatParameter_Engine_MKLDNN; +#endif + } + + if (engine == ConcatParameter_Engine_DEFAULT) { + engine = ConcatParameter_Engine_CAFFE; + } + if (engine == ConcatParameter_Engine_CAFFE) { + return shared_ptr >(new ConcatLayer(param)); +#if defined(MKL2017_SUPPORTED) + } else if (engine == ConcatParameter_Engine_MKL2017) { + return shared_ptr >(new MKLConcatLayer(param)); +#endif +#ifdef MKLDNN_SUPPORTED + } else if (engine == ConcatParameter_Engine_MKLDNN) { + return shared_ptr >(new MKLDNNConcatLayer(param)); +#endif + } else { + LOG(FATAL) << "Layer " << param.name() << " has unknow engine."; + } + return shared_ptr >(); +} + +REGISTER_LAYER_CREATOR(Concat, GetConcatLayer); + +// Get Eltwise layer according to engine. 
+template +shared_ptr > GetEltwiseLayer(const LayerParameter& param) { + EltwiseParameter_Engine engine = param.eltwise_param().engine(); + + // New, more flexible way of providing engine + if (engine == EltwiseParameter_Engine_DEFAULT && param.engine() != "") { + EngineParser ep(param.engine()); + if (ep.isEngine("CAFFE")) + engine = EltwiseParameter_Engine_CAFFE; +#if defined(MKL2017_SUPPORTED) + else if (ep.isEngine("MKL2017")) + engine = EltwiseParameter_Engine_MKL2017; +#endif +#if defined(MKLDNN_SUPPORTED) + else if (ep.isEngine("MKLDNN")) + engine = EltwiseParameter_Engine_MKLDNN; +#endif + } + + if (engine == EltwiseParameter_Engine_DEFAULT) { + engine = EltwiseParameter_Engine_CAFFE; + } + if (engine == EltwiseParameter_Engine_CAFFE) { + return shared_ptr >(new EltwiseLayer(param)); +#if defined(MKL2017_SUPPORTED) + } else if (engine == EltwiseParameter_Engine_MKL2017) { + return shared_ptr >(new MKLEltwiseLayer(param)); +#endif +#ifdef MKLDNN_SUPPORTED + } else if (engine == EltwiseParameter_Engine_MKLDNN) { + return shared_ptr >(new MKLDNNEltwiseLayer(param)); +#endif + } else { + LOG(FATAL) << "Layer " << param.name() << " has unknow engine."; + } + return shared_ptr >(); +} + +REGISTER_LAYER_CREATOR(Eltwise, GetEltwiseLayer); + + // Get sigmoid layer according to engine. 
template shared_ptr > GetSigmoidLayer(const LayerParameter& param) { SigmoidParameter_Engine engine = param.sigmoid_param().engine(); + + // New, more flexible way of providing engine + if (engine == SigmoidParameter_Engine_DEFAULT && param.engine() != "") { + EngineParser ep(param.engine()); + if (ep.isEngine("CAFFE")) + engine = SigmoidParameter_Engine_CAFFE; +#ifdef USE_CUDNN + else if (ep.isEngine("CUDNN")) + engine = SigmoidParameter_Engine_CUDNN; +#endif + } + if (engine == SigmoidParameter_Engine_DEFAULT) { engine = SigmoidParameter_Engine_CAFFE; #ifdef USE_CUDNN @@ -188,6 +649,7 @@ shared_ptr > GetSigmoidLayer(const LayerParameter& param) { } else { LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; } + return shared_ptr >(); } REGISTER_LAYER_CREATOR(Sigmoid, GetSigmoidLayer); @@ -196,6 +658,18 @@ REGISTER_LAYER_CREATOR(Sigmoid, GetSigmoidLayer); template shared_ptr > GetSoftmaxLayer(const LayerParameter& param) { SoftmaxParameter_Engine engine = param.softmax_param().engine(); + + // New, more flexible way of providing engine + if (engine == SoftmaxParameter_Engine_DEFAULT && param.engine() != "") { + EngineParser ep(param.engine()); + if (ep.isEngine("CAFFE")) + engine = SoftmaxParameter_Engine_CAFFE; +#ifdef USE_CUDNN + if (ep.isEngine("CUDNN")) + engine = SoftmaxParameter_Engine_CUDNN; +#endif + } + if (engine == SoftmaxParameter_Engine_DEFAULT) { engine = SoftmaxParameter_Engine_CAFFE; #ifdef USE_CUDNN @@ -211,6 +685,7 @@ shared_ptr > GetSoftmaxLayer(const LayerParameter& param) { } else { LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; } + return shared_ptr >(); } REGISTER_LAYER_CREATOR(Softmax, GetSoftmaxLayer); @@ -219,6 +694,18 @@ REGISTER_LAYER_CREATOR(Softmax, GetSoftmaxLayer); template shared_ptr > GetTanHLayer(const LayerParameter& param) { TanHParameter_Engine engine = param.tanh_param().engine(); + + // New, more flexible way of providing engine + if (engine == TanHParameter_Engine_DEFAULT && param.engine() != 
"") { + EngineParser ep(param.engine()); + if (ep.isEngine("CAFFE")) + engine = TanHParameter_Engine_CAFFE; +#ifdef USE_CUDNN + if (ep.isEngine("CUDNN")) + engine = TanHParameter_Engine_CUDNN; +#endif + } + if (engine == TanHParameter_Engine_DEFAULT) { engine = TanHParameter_Engine_CAFFE; #ifdef USE_CUDNN @@ -234,6 +721,7 @@ shared_ptr > GetTanHLayer(const LayerParameter& param) { } else { LOG(FATAL) << "Layer " << param.name() << " has unknown engine."; } + return shared_ptr >(); } REGISTER_LAYER_CREATOR(TanH, GetTanHLayer); diff --git a/src/caffe/layers/absval_layer.cpp b/src/caffe/layers/absval_layer.cpp index 855bf0bfacb..b8a4e5f8dca 100644 --- a/src/caffe/layers/absval_layer.cpp +++ b/src/caffe/layers/absval_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/absval_layer.hpp" diff --git a/src/caffe/layers/accuracy_layer.cpp b/src/caffe/layers/accuracy_layer.cpp index 4eddbb5c850..934ddfb3639 100644 --- a/src/caffe/layers/accuracy_layer.cpp +++ b/src/caffe/layers/accuracy_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include #include diff --git a/src/caffe/layers/annotated_data_layer.cpp b/src/caffe/layers/annotated_data_layer.cpp new file mode 100644 index 00000000000..c53f5126e05 --- /dev/null +++ b/src/caffe/layers/annotated_data_layer.cpp @@ -0,0 +1,588 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef USE_OPENCV +#include +#endif // USE_OPENCV +#include + +#include +#include +#include + +#include + +#include "caffe/data_transformer.hpp" +#include "caffe/layers/annotated_data_layer.hpp" +#include "caffe/util/benchmark.hpp" +#include "caffe/util/sampler.hpp" + +namespace caffe { + +template +AnnotatedDataLayer::AnnotatedDataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param), + reader_(param) { +} + +template +AnnotatedDataLayer::~AnnotatedDataLayer() { + this->StopInternalThread(); +} + +template +void AnnotatedDataLayer::DataLayerSetUp( + const vector*>& bottom, const vector*>& top) { + const int batch_size = this->layer_param_.data_param().batch_size(); + const AnnotatedDataParameter& anno_data_param = + this->layer_param_.annotated_data_param(); + for (int i = 0; i < anno_data_param.batch_sampler_size(); ++i) { + batch_samplers_.push_back(anno_data_param.batch_sampler(i)); + } + label_map_file_ = anno_data_param.label_map_file(); + // Make sure dimension is consistent within batch. + const TransformationParameter& transform_param = + this->layer_param_.transform_param(); + if (transform_param.has_resize_param()) { + if (transform_param.resize_param().resize_mode() == + ResizeParameter_Resize_mode_FIT_SMALL_SIZE) { + CHECK_EQ(batch_size, 1) + << "Only support batch size of 1 for FIT_SMALL_SIZE."; + } + } + + // Read a data point, and use it to initialize the top blob. + AnnotatedDatum anno_datum; + anno_datum.ParseFromString(*(reader_.full().peek())); + + // Use data_transformer to infer the expected blob shape from anno_datum. + vector top_shape = + this->data_transformer_->InferBlobShape(anno_datum.datum()); + this->transformed_data_.Reshape(top_shape); + // Reshape top[0] and prefetch_data according to the batch_size. 
+ top_shape[0] = batch_size; + top[0]->Reshape(top_shape); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].data_.Reshape(top_shape); + } + LOG(INFO) << "output data size: " << top[0]->num() << "," + << top[0]->channels() << "," << top[0]->height() << "," + << top[0]->width(); + // label + if (this->output_labels_) { + has_anno_type_ = anno_datum.has_type() || anno_data_param.has_anno_type(); + vector label_shape(4, 1); + if (has_anno_type_) { + anno_type_ = anno_datum.type(); + if (anno_data_param.has_anno_type()) { + // If anno_type is provided in AnnotatedDataParameter, replace + // the type stored in each individual AnnotatedDatum. + LOG(WARNING) << "type stored in AnnotatedDatum is shadowed."; + anno_type_ = anno_data_param.anno_type(); + } + // Infer the label shape from anno_datum.AnnotationGroup(). + int num_bboxes = 0; + if (anno_type_ == AnnotatedDatum_AnnotationType_BBOX) { + // Since the number of bboxes can be different for each image, + // we store the bbox information in a specific format. In specific: + // All bboxes are stored in one spatial plane (num and channels are 1) + // And each row contains one and only one box in the following format: + // [item_id, group_label, instance_id, xmin, ymin, xmax, ymax, diff] + // Note: Refer to caffe.proto for details about group_label and + // instance_id. + for (int g = 0; g < anno_datum.annotation_group_size(); ++g) { + num_bboxes += anno_datum.annotation_group(g).annotation_size(); + } + label_shape[0] = 1; + label_shape[1] = 1; + // BasePrefetchingDataLayer::LayerSetUp() requires to call + // cpu_data and gpu_data for consistent prefetch thread. Thus we make + // sure there is at least one bbox. 
+ label_shape[2] = std::max(num_bboxes, 1); + label_shape[3] = 8; + } else { + LOG(FATAL) << "Unknown annotation type."; + } + } else { + label_shape[0] = batch_size; + } + top[1]->Reshape(label_shape); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].label_.Reshape(label_shape); + } + } +} + +// This function is called on prefetch thread +#ifdef _OPENMP +template +void AnnotatedDataLayer::load_batch(Batch* batch) { + CPUTimer batch_timer; + CPUTimer trans_timer; + batch_timer.Start(); + double read_time = 0; + double trans_time = 0; + CPUTimer timer; + CHECK(batch->data_.count()); + + // Reshape according to the first anno_datum of each batch + // on single input batches allows for inputs of varying dimension. + const int batch_size = this->layer_param_.data_param().batch_size(); + const AnnotatedDataParameter& anno_data_param = + this->layer_param_.annotated_data_param(); + const TransformationParameter& transform_param = + this->layer_param_.transform_param(); + AnnotatedDatum anno_datum; + anno_datum.ParseFromString(*(reader_.full().peek())); + // Use data_transformer to infer the expected blob shape from anno_datum. + vector top_shape = + this->data_transformer_->InferBlobShape(anno_datum.datum()); + // Reshape batch according to the batch_size. + top_shape[0] = batch_size; + batch->data_.Reshape(top_shape); + + Dtype* top_data = batch->data_.mutable_cpu_data(); + Dtype* top_label = NULL; // suppress warnings about uninitialized variables + if (this->output_labels_ && !has_anno_type_) { + top_label = batch->label_.mutable_cpu_data(); + } + + // Store transformed annotation. + std::vector> all_anno(batch_size); + std::vector> expand_data(batch_size, nullptr); + std::vector> sampled_bboxes(batch_size); + boost::container::vector have_samples(batch_size, false); + + int num_bboxes = 0; + + trans_timer.Start(); + +// Single loop was split into two loops. 
SSD samples patches in the first loop, and randomly +// chooses a patch in the second loop. Sampling has to be done in the separate loop, before +// RNG precalculates random numbers in the sequential code based on number of samples. +// TODO: correct generating random numbers in the first loop, similarly how it's done in +// the second loop and other data layers. +#pragma omp parallel if (batch_size > 1) +#pragma omp single nowait + { + for (int item_id = 0; item_id < batch_size; ++item_id) { + timer.Start(); + string* data = reader_.full().pop("Waiting for data"); + timer.Stop(); + read_time += timer.MicroSeconds(); +#pragma omp task firstprivate(item_id, data) shared(all_anno, expand_data, sampled_bboxes, have_samples) + { + std::unique_ptr anno_datum(new AnnotatedDatum()); + anno_datum->ParseFromString(*data); + reader_.free().push(data); + std::unique_ptr distort_datum(new AnnotatedDatum()); + boost::shared_ptr expand_datum; + if (transform_param.has_distort_param()) { + distort_datum->CopyFrom(*anno_datum); + this->data_transformer_->DistortImage(anno_datum->datum(), + distort_datum->mutable_datum()); + if (transform_param.has_expand_param()) { + expand_datum.reset(new AnnotatedDatum()); + this->data_transformer_->ExpandImage(*distort_datum, expand_datum.get()); + } else { + expand_datum = std::move(distort_datum); + } + } else { + if (transform_param.has_expand_param()) { + expand_datum.reset(new AnnotatedDatum()); + this->data_transformer_->ExpandImage(*anno_datum, expand_datum.get()); + } else { + expand_datum = std::move(anno_datum); + } + } + bool has_sampled = false; + if (batch_samplers_.size() > 0) { + // Generate sampled bboxes from expand_datum. 
+ GenerateBatchSamples(*expand_datum, batch_samplers_, &sampled_bboxes[item_id]); + + if (sampled_bboxes[item_id].size() > 0) { + has_sampled = true; + } + } + expand_data[item_id] = expand_datum; + have_samples[item_id] = has_sampled; + } + } +#pragma omp taskwait + // RNG needs to be reinitialized because in some cases, when transform params are not set + // RNG is a NULL. + this->data_transformer_->ReinitRand(); + + for (int item_id = 0; item_id < batch_size; ++item_id) { + PreclcRandomNumbers precalculated_rand_numbers; + this->data_transformer_->GenerateRandNumbers(precalculated_rand_numbers, /* sample_bboxes */ have_samples[item_id]); + +#pragma omp task firstprivate(precalculated_rand_numbers, item_id) shared(num_bboxes, all_anno, expand_data, sampled_bboxes, have_samples) + { + boost::shared_ptr sampled_datum; + bool has_sampled = have_samples[item_id]; + if (has_sampled) { + int rand_idx = precalculated_rand_numbers(sampled_bboxes[item_id].size()); + sampled_datum.reset(new AnnotatedDatum()); + this->data_transformer_->CropImage(*expand_data[item_id], + sampled_bboxes[item_id][rand_idx], + sampled_datum.get()); + } else { + sampled_datum = expand_data[item_id]; + } + CHECK(sampled_datum.get() != NULL); + Blob data_blob; + data_blob.Reshape(top_shape); + vector shape = + this->data_transformer_->InferBlobShape(sampled_datum->datum()); + + if (transform_param.has_resize_param()) { + if (transform_param.resize_param().resize_mode() == + ResizeParameter_Resize_mode_FIT_SMALL_SIZE) { + DLOG(INFO) << "Has resize param"; + data_blob.Reshape(shape); + batch->data_.Reshape(shape); + top_data = batch->data_.mutable_cpu_data(); + } else { + CHECK(std::equal(top_shape.begin() + 1, top_shape.begin() + 4, + shape.begin() + 1)); + } + } else { + CHECK(std::equal(top_shape.begin() + 1, top_shape.begin() + 4, + shape.begin() + 1)); + } + + // Apply data transformations (mirror, scale, crop...) 
+ int offset = batch->data_.offset(item_id); + data_blob.set_cpu_data(top_data + offset); + if (this->output_labels_) { + if (has_anno_type_) { + // Make sure all data have same annotation type. + CHECK(sampled_datum->has_type()) << "Some datum misses AnnotationType."; + if (anno_data_param.has_anno_type()) { + sampled_datum->set_type(anno_type_); + } else { + CHECK_EQ(anno_type_, sampled_datum->type()) << + "Different AnnotationType."; + } + // Transform datum and annotation_group at the same time + this->data_transformer_->Transform(*sampled_datum, + &data_blob, + &all_anno[item_id], + precalculated_rand_numbers); + if (anno_type_ == AnnotatedDatum_AnnotationType_BBOX) { + for (int g = 0; g < all_anno[item_id].size(); ++g) { +#pragma omp atomic + num_bboxes += all_anno[item_id][g].annotation_size(); + } + } else { + LOG(FATAL) << "Unknown annotation type."; + } + } else { + this->data_transformer_->Transform(sampled_datum->datum(), + &data_blob, + precalculated_rand_numbers); + // Otherwise, store the label from datum. + CHECK(sampled_datum->datum().has_label()) << "Cannot find any label."; + top_label[item_id] = sampled_datum->datum().label(); + } + } else { + this->data_transformer_->Transform(sampled_datum->datum(), + &data_blob, + precalculated_rand_numbers); + } + } + } + } + + // Store "rich" annotation if needed. + if (this->output_labels_ && has_anno_type_) { + vector label_shape(4); + if (anno_type_ == AnnotatedDatum_AnnotationType_BBOX) { + label_shape[0] = 1; + label_shape[1] = 1; + label_shape[3] = 8; + if (num_bboxes == 0) { + // Store all -1 in the label. + label_shape[2] = 1; + batch->label_.Reshape(label_shape); + caffe_set(8, -1, batch->label_.mutable_cpu_data()); + } else { + // Reshape the label and store the annotation. 
+ label_shape[2] = num_bboxes; + batch->label_.Reshape(label_shape); + top_label = batch->label_.mutable_cpu_data(); + int idx = 0; + for (int item_id = 0; item_id < batch_size; ++item_id) { + const vector& anno_vec = all_anno[item_id]; + for (int g = 0; g < anno_vec.size(); ++g) { + const AnnotationGroup& anno_group = anno_vec[g]; + for (int a = 0; a < anno_group.annotation_size(); ++a) { + const Annotation& anno = anno_group.annotation(a); + const NormalizedBBox& bbox = anno.bbox(); + top_label[idx++] = item_id; + top_label[idx++] = anno_group.group_label(); + top_label[idx++] = anno.instance_id(); + top_label[idx++] = bbox.xmin(); + top_label[idx++] = bbox.ymin(); + top_label[idx++] = bbox.xmax(); + top_label[idx++] = bbox.ymax(); + top_label[idx++] = bbox.difficult(); + } + } + } + } + } else { + LOG(FATAL) << "Unknown annotation type."; + } + } + trans_timer.Stop(); + batch_timer.Stop(); + + trans_time = trans_timer.MicroSeconds() - read_time; + DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; + DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; + DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; +} +#else +template +void AnnotatedDataLayer::load_batch(Batch* batch) { + CPUTimer batch_timer; + batch_timer.Start(); + double read_time = 0; + double trans_time = 0; + CPUTimer timer; + CHECK(batch->data_.count()); + CHECK(this->transformed_data_.count()); + + // Reshape according to the first anno_datum of each batch + // on single input batches allows for inputs of varying dimension. + const int batch_size = this->layer_param_.data_param().batch_size(); + const AnnotatedDataParameter& anno_data_param = + this->layer_param_.annotated_data_param(); + const TransformationParameter& transform_param = + this->layer_param_.transform_param(); + AnnotatedDatum anno_datum; + anno_datum.ParseFromString(*(reader_.full().peek())); + // Use data_transformer to infer the expected blob shape from anno_datum. 
+ vector top_shape = + this->data_transformer_->InferBlobShape(anno_datum.datum()); + this->transformed_data_.Reshape(top_shape); + // Reshape batch according to the batch_size. + top_shape[0] = batch_size; + batch->data_.Reshape(top_shape); + + Dtype* top_data = batch->data_.mutable_cpu_data(); + Dtype* top_label = NULL; // suppress warnings about uninitialized variables + if (this->output_labels_ && !has_anno_type_) { + top_label = batch->label_.mutable_cpu_data(); + } + + // Store transformed annotation. + map > all_anno; + int num_bboxes = 0; + + for (int item_id = 0; item_id < batch_size; ++item_id) { + timer.Start(); + // get a anno_datum + string* data = reader_.full().pop("Waiting for data"); + AnnotatedDatum anno_datum; + anno_datum.ParseFromString(*data); + reader_.free().push(data); + read_time += timer.MicroSeconds(); + timer.Start(); + AnnotatedDatum distort_datum; + AnnotatedDatum* expand_datum = NULL; + if (transform_param.has_distort_param()) { + distort_datum.CopyFrom(anno_datum); + this->data_transformer_->DistortImage(anno_datum.datum(), + distort_datum.mutable_datum()); + if (transform_param.has_expand_param()) { + expand_datum = new AnnotatedDatum(); + this->data_transformer_->ExpandImage(distort_datum, expand_datum); + } else { + expand_datum = &distort_datum; + } + } else { + if (transform_param.has_expand_param()) { + expand_datum = new AnnotatedDatum(); + this->data_transformer_->ExpandImage(anno_datum, expand_datum); + } else { + expand_datum = &anno_datum; + } + } + AnnotatedDatum* sampled_datum = NULL; + bool has_sampled = false; + if (batch_samplers_.size() > 0) { + // Generate sampled bboxes from expand_datum. + vector sampled_bboxes; + GenerateBatchSamples(*expand_datum, batch_samplers_, &sampled_bboxes); + if (sampled_bboxes.size() > 0) { + // Randomly pick a sampled bbox and crop the expand_datum. 
+ int rand_idx = caffe_rng_rand() % sampled_bboxes.size(); + sampled_datum = new AnnotatedDatum(); + this->data_transformer_->CropImage(*expand_datum, + sampled_bboxes[rand_idx], + sampled_datum); + has_sampled = true; + } else { + sampled_datum = expand_datum; + } + } else { + sampled_datum = expand_datum; + } + CHECK(sampled_datum != NULL); + timer.Start(); + vector shape = + this->data_transformer_->InferBlobShape(sampled_datum->datum()); + if (transform_param.has_resize_param()) { + if (transform_param.resize_param().resize_mode() == + ResizeParameter_Resize_mode_FIT_SMALL_SIZE) { + DLOG(INFO) << "Has resize param"; + this->transformed_data_.Reshape(shape); + batch->data_.Reshape(shape); + top_data = batch->data_.mutable_cpu_data(); + } else { + CHECK(std::equal(top_shape.begin() + 1, top_shape.begin() + 4, + shape.begin() + 1)); + } + } else { + CHECK(std::equal(top_shape.begin() + 1, top_shape.begin() + 4, + shape.begin() + 1)); + } + // Apply data transformations (mirror, scale, crop...) + int offset = batch->data_.offset(item_id); + this->transformed_data_.set_cpu_data(top_data + offset); + vector transformed_anno_vec; + if (this->output_labels_) { + if (has_anno_type_) { + // Make sure all data have same annotation type. + CHECK(sampled_datum->has_type()) << "Some datum misses AnnotationType."; + if (anno_data_param.has_anno_type()) { + sampled_datum->set_type(anno_type_); + } else { + CHECK_EQ(anno_type_, sampled_datum->type()) << + "Different AnnotationType."; + } + // Transform datum and annotation_group at the same time + transformed_anno_vec.clear(); + this->data_transformer_->Transform(*sampled_datum, + &(this->transformed_data_), + &transformed_anno_vec); + if (anno_type_ == AnnotatedDatum_AnnotationType_BBOX) { + // Count the number of bboxes. 
+ for (int g = 0; g < transformed_anno_vec.size(); ++g) { + num_bboxes += transformed_anno_vec[g].annotation_size(); + } + } else { + LOG(FATAL) << "Unknown annotation type."; + } + all_anno[item_id] = transformed_anno_vec; + } else { + this->data_transformer_->Transform(sampled_datum->datum(), + &(this->transformed_data_)); + // Otherwise, store the label from datum. + CHECK(sampled_datum->datum().has_label()) << "Cannot find any label."; + top_label[item_id] = sampled_datum->datum().label(); + } + } else { + this->data_transformer_->Transform(sampled_datum->datum(), + &(this->transformed_data_)); + } + // clear memory + if (has_sampled) { + delete sampled_datum; + } + if (transform_param.has_expand_param()) { + delete expand_datum; + } + trans_time += timer.MicroSeconds(); + } + + // Store "rich" annotation if needed. + if (this->output_labels_ && has_anno_type_) { + vector label_shape(4); + if (anno_type_ == AnnotatedDatum_AnnotationType_BBOX) { + label_shape[0] = 1; + label_shape[1] = 1; + label_shape[3] = 8; + if (num_bboxes == 0) { + // Store all -1 in the label. + label_shape[2] = 1; + batch->label_.Reshape(label_shape); + caffe_set(8, -1, batch->label_.mutable_cpu_data()); + } else { + // Reshape the label and store the annotation. 
+ label_shape[2] = num_bboxes; + batch->label_.Reshape(label_shape); + top_label = batch->label_.mutable_cpu_data(); + int idx = 0; + for (int item_id = 0; item_id < batch_size; ++item_id) { + const vector& anno_vec = all_anno[item_id]; + for (int g = 0; g < anno_vec.size(); ++g) { + const AnnotationGroup& anno_group = anno_vec[g]; + for (int a = 0; a < anno_group.annotation_size(); ++a) { + const Annotation& anno = anno_group.annotation(a); + const NormalizedBBox& bbox = anno.bbox(); + top_label[idx++] = item_id; + top_label[idx++] = anno_group.group_label(); + top_label[idx++] = anno.instance_id(); + top_label[idx++] = bbox.xmin(); + top_label[idx++] = bbox.ymin(); + top_label[idx++] = bbox.xmax(); + top_label[idx++] = bbox.ymax(); + top_label[idx++] = bbox.difficult(); + } + } + } + } + } else { + LOG(FATAL) << "Unknown annotation type."; + } + } + timer.Stop(); + batch_timer.Stop(); + DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; + DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; + DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; +} +#endif //ifdef _OPENMP + +INSTANTIATE_CLASS(AnnotatedDataLayer); +REGISTER_LAYER_CLASS(AnnotatedData); + +} // namespace caffe diff --git a/src/caffe/layers/argmax_layer.cpp b/src/caffe/layers/argmax_layer.cpp index 2d3d6f2d3ff..8c1142a5634 100644 --- a/src/caffe/layers/argmax_layer.cpp +++ b/src/caffe/layers/argmax_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include #include diff --git a/src/caffe/layers/base_conv_layer.cpp b/src/caffe/layers/base_conv_layer.cpp index 4a4c68e009a..205be870cec 100644 --- a/src/caffe/layers/base_conv_layer.cpp +++ b/src/caffe/layers/base_conv_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include @@ -6,6 +43,10 @@ #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" +#ifdef _OPENMP +#include +#endif + namespace caffe { template @@ -183,7 +224,7 @@ void BaseConvolutionLayer::LayerSetUp(const vector*>& bottom, } template -void BaseConvolutionLayer::Reshape(const vector*>& bottom, +void BaseConvolutionLayer::DoReshape(const vector*>& bottom, const vector*>& top) { const int first_spatial_axis = channel_axis_ + 1; CHECK_EQ(bottom[0]->num_axes(), first_spatial_axis + num_spatial_axes_) @@ -254,15 +295,58 @@ void BaseConvolutionLayer::Reshape(const vector*>& bottom, } template +void BaseConvolutionLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + DoReshape(bottom, top); + + // ---- openmp ------------------------------------------ + num_of_threads_ = 1; +#ifdef _OPENMP + num_of_threads_ = omp_get_max_threads() < bottom[0]->shape(0) ? 
+ omp_get_max_threads() : bottom[0]->shape(0); + if (num_of_threads_ < 1) { + LOG(WARNING) << "Base Conv layer: omp_get_max_threads() =" + << num_of_threads_; + num_of_threads_ = 1; + } +#endif + + col_buffer_mt_size = num_of_threads_ * static_cast(col_buffer_.count()); + weight_diff_mt_size = num_of_threads_ * static_cast(this->blobs_[0]->count()); + + col_buffer_mt_.resize(col_buffer_mt_size); + weight_diff_mt_.resize(weight_diff_mt_size); +} + +template +void BaseConvolutionLayer::ReshapeForMKL(const vector*>& bottom, + const vector*>& top) { + DoReshape(bottom, top); +} + +template void BaseConvolutionLayer::forward_cpu_gemm(const Dtype* input, const Dtype* weights, Dtype* output, bool skip_im2col) { - const Dtype* col_buff = input; + + int tid = 0; +#ifdef _OPENMP + tid = omp_get_thread_num(); + if (tid >= num_of_threads_) { + LOG(FATAL) << "ConvLayer::Forward_cpu: omp_thread_num() =" << tid + << " > OMP_num_THREADS = " << num_of_threads_; + } + tid = tid % num_of_threads_; // just to be sure +#endif + size_t col_data_buffer_size = col_buffer_mt_.size()/num_of_threads_; + + Dtype* col_buff = const_cast(input); if (!is_1x1_) { + col_buff = & col_buffer_mt_[ tid* col_data_buffer_size]; if (!skip_im2col) { - conv_im2col_cpu(input, col_buffer_.mutable_cpu_data()); + conv_im2col_cpu(input, col_buff); } - col_buff = col_buffer_.cpu_data(); } + for (int g = 0; g < group_; ++g) { caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, conv_out_channels_ / group_, conv_out_spatial_dim_, kernel_dim_, @@ -282,7 +366,19 @@ void BaseConvolutionLayer::forward_cpu_bias(Dtype* output, template void BaseConvolutionLayer::backward_cpu_gemm(const Dtype* output, const Dtype* weights, Dtype* input) { - Dtype* col_buff = col_buffer_.mutable_cpu_data(); + + int tid = 0; +#ifdef _OPENMP + tid = omp_get_thread_num(); + if (tid >= num_of_threads_) { + LOG(FATAL) << "ConvLayer::backward_cpu_gemm: omp_thread_num() =" << tid + << " > OMP_num_THREADS = " << num_of_threads_; + } + tid = tid % 
num_of_threads_; // just to be sure +#endif + size_t col_data_buffer_size = col_buffer_mt_.size()/num_of_threads_; + Dtype* col_buff = & col_buffer_mt_[ tid* col_data_buffer_size]; + if (is_1x1_) { col_buff = input; } @@ -297,19 +393,73 @@ void BaseConvolutionLayer::backward_cpu_gemm(const Dtype* output, } } + +template +void BaseConvolutionLayer::clear_weight_mt(void) { + size_t weight_diff_size = weight_diff_mt_.size() / num_of_threads_; + caffe_memset(num_of_threads_*weight_diff_size*sizeof(Dtype), + 0., + &weight_diff_mt_[0]); +} + + +template +void BaseConvolutionLayer::sum_weight_mt(Dtype* weight_diff) { + size_t weight_diff_size = weight_diff_mt_.size() / num_of_threads_; + size_t col_per_thread = weight_diff_size/num_of_threads_; + int tid = 0; +#ifdef _OPENMP + if (omp_in_parallel()) { + tid = omp_get_thread_num(); + } +#endif + for (size_t j = 0; j < col_per_thread; ++j) { + for (size_t t = 0; t < num_of_threads_ ; ++t) { + weight_diff[tid*col_per_thread + j] += + weight_diff_mt_[t*weight_diff_size + tid*col_per_thread + j]; + } + } + + size_t j = col_per_thread*num_of_threads_ + tid; + if (j < weight_diff_size) { + for (size_t t = 0; t < num_of_threads_ ; ++t) { + weight_diff[j] += weight_diff_mt_[t * weight_diff_size + j]; + } + } +} + template void BaseConvolutionLayer::weight_cpu_gemm(const Dtype* input, const Dtype* output, Dtype* weights) { - const Dtype* col_buff = input; + int tid = 0; +#ifdef _OPENMP + Dtype* weight_diff_data = NULL; + if (num_of_threads_ > 1) { + tid = omp_get_thread_num(); + if (tid >= num_of_threads_) { + LOG(FATAL) << "ConvLayer::weights_cpu_gemm: omp_thread_num() =" << tid + << " > OMP_num_THREADS = " << num_of_threads_; + } + tid = tid % num_of_threads_; // just to be sure + weight_diff_data = &weight_diff_mt_[tid * (weight_diff_mt_.size() / num_of_threads_)]; + } else { + weight_diff_data = weights; + } +#else + Dtype* weight_diff_data = weights; +#endif + Dtype* col_buff = const_cast(input); + if (!is_1x1_) { - 
conv_im2col_cpu(input, col_buffer_.mutable_cpu_data()); - col_buff = col_buffer_.cpu_data(); + size_t col_data_buffer_size = col_buffer_mt_.size() / num_of_threads_; + col_buff = &col_buffer_mt_[tid * col_data_buffer_size]; + conv_im2col_cpu(input, col_buff); } for (int g = 0; g < group_; ++g) { caffe_cpu_gemm(CblasNoTrans, CblasTrans, conv_out_channels_ / group_, kernel_dim_, conv_out_spatial_dim_, (Dtype)1., output + output_offset_ * g, col_buff + col_offset_ * g, - (Dtype)1., weights + weight_offset_ * g); + (Dtype)1., weight_diff_data + weight_offset_ * g); } } diff --git a/src/caffe/layers/base_data_layer.cpp b/src/caffe/layers/base_data_layer.cpp index 989319f1a07..982fe96d8c0 100644 --- a/src/caffe/layers/base_data_layer.cpp +++ b/src/caffe/layers/base_data_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include @@ -66,9 +103,14 @@ void BasePrefetchingDataLayer::LayerSetUp( } } #endif + DLOG(INFO) << "Initializing prefetch"; this->data_transformer_->InitRand(); - StartInternalThread(); + + // Use background prefetch threads only in GPU mode + if (Caffe::mode() == Caffe::GPU) { + StartInternalThread(); + } DLOG(INFO) << "Prefetch initialized."; } @@ -103,9 +145,25 @@ void BasePrefetchingDataLayer::InternalThreadEntry() { #endif } +// TODO: Make it properly implemented/integrated with above solution +template +void BasePrefetchingDataLayer::GetBatch() { + try { + Batch* batch = prefetch_free_.pop(); + load_batch(batch); + prefetch_full_.push(batch); + } catch (boost::thread_interrupted&) { + // Interrupted exception is expected on shutdown + } +} + template +void BasePrefetchingDataLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + // In CPU mode, fetch and transform the batch synchronously here + if (Caffe::mode() == Caffe::CPU) { + this->GetBatch(); + } Batch* batch = prefetch_full_.pop("Data layer prefetch queue empty"); + // Reshape to loaded data. 
top[0]->ReshapeLike(batch->data_); @@ -121,6 +179,8 @@ void BasePrefetchingDataLayer::Forward_cpu( top[1]->mutable_cpu_data()); } + // TODO: Consider prefetch_data_array and prefetch_label_array + prefetch_free_.push(batch); } diff --git a/src/caffe/layers/batch_norm_layer.cpp b/src/caffe/layers/batch_norm_layer.cpp index a69d8f99316..b7746d98855 100644 --- a/src/caffe/layers/batch_norm_layer.cpp +++ b/src/caffe/layers/batch_norm_layer.cpp @@ -1,4 +1,42 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include +#include #include #include "caffe/layers/batch_norm_layer.hpp" @@ -52,7 +90,7 @@ void BatchNormLayer::Reshape(const vector*>& bottom, sz[0]=bottom[0]->shape(0); batch_sum_multiplier_.Reshape(sz); - int spatial_dim = bottom[0]->count()/(channels_*bottom[0]->shape(0)); + int spatial_dim = bottom[0]->count(2); if (spatial_sum_multiplier_.num_axes() == 0 || spatial_sum_multiplier_.shape(0) != spatial_dim) { sz[0] = spatial_dim; @@ -72,6 +110,48 @@ void BatchNormLayer::Reshape(const vector*>& bottom, } template +void BatchNormLayer::replicate(Dtype* buffer_to_write, + int num_batches, + unsigned int batch_offset_incr, + unsigned int channel_offset_incr, + const Dtype* data_to_be_replicated) { +#ifdef _OPENMP + #pragma omp parallel for collapse(2) +#endif + for (unsigned int j = 0; j< channels_; ++j) { + for (unsigned int n = 0; n < num_batches; ++n) { + caffe_set(channel_offset_incr, data_to_be_replicated[j], + buffer_to_write + j * channel_offset_incr + n * batch_offset_incr); + } + } +} + +template +template +void BatchNormLayer::replicate_to_op(Dtype* buffer_to_write, + int num_batches, + unsigned int batch_offset_incr, + unsigned int channel_offset_incr, + const Dtype* data_to_be_replicated, + FuncTy op_func) { +#ifdef _OPENMP + #pragma omp parallel for collapse(2) +#endif + for (unsigned int j = 0; j< channels_; ++j) { + for (unsigned int n = 0; n < num_batches; ++n) { + Dtype value = data_to_be_replicated[j]; + Dtype* buffer_offsetted = + 
buffer_to_write + j * channel_offset_incr + n * batch_offset_incr; + for (unsigned int k = 0; k < channel_offset_incr; ++k) { + buffer_offsetted[k] = op_func(buffer_offsetted[k], value); + } + } + } +} + + + +template void BatchNormLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); @@ -103,12 +183,12 @@ void BatchNormLayer::Forward_cpu(const vector*>& bottom, } // subtract mean - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1, - batch_sum_multiplier_.cpu_data(), mean_.cpu_data(), 0., - num_by_chans_.mutable_cpu_data()); - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels_ * num, - spatial_dim, 1, -1, num_by_chans_.cpu_data(), - spatial_sum_multiplier_.cpu_data(), 1., top_data); + replicate_to_op(top_data, + num, + spatial_dim*channels_, + spatial_dim, + mean_.cpu_data(), + std::minus()); if (!use_global_stats_) { // compute variance using var(X) = E((X-EX)^2) @@ -140,12 +220,12 @@ void BatchNormLayer::Forward_cpu(const vector*>& bottom, variance_.mutable_cpu_data()); // replicate variance to input size - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1, - batch_sum_multiplier_.cpu_data(), variance_.cpu_data(), 0., - num_by_chans_.mutable_cpu_data()); - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels_ * num, - spatial_dim, 1, 1., num_by_chans_.cpu_data(), - spatial_sum_multiplier_.cpu_data(), 0., temp_.mutable_cpu_data()); + this->replicate(temp_.mutable_cpu_data(), + num, + spatial_dim*channels_, + spatial_dim, + variance_.cpu_data()); + caffe_div(temp_.count(), top_data, temp_.cpu_data(), top_data); // TODO(cdoersch): The caching is only needed because later in-place layers // might clobber the data. Can we skip this if they won't? 
@@ -193,13 +273,11 @@ void BatchNormLayer::Backward_cpu(const vector*>& top, num_by_chans_.cpu_data(), batch_sum_multiplier_.cpu_data(), 0., mean_.mutable_cpu_data()); - // reshape (broadcast) the above - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1, - batch_sum_multiplier_.cpu_data(), mean_.cpu_data(), 0., - num_by_chans_.mutable_cpu_data()); - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels_ * num, - spatial_dim, 1, 1., num_by_chans_.cpu_data(), - spatial_sum_multiplier_.cpu_data(), 0., bottom_diff); + this->replicate(bottom_diff, + num, + spatial_dim*channels_, + spatial_dim, + mean_.cpu_data()); // sum(dE/dY \cdot Y) \cdot Y caffe_mul(temp_.count(), top_data, bottom_diff, bottom_diff); @@ -213,12 +291,13 @@ void BatchNormLayer::Backward_cpu(const vector*>& top, mean_.mutable_cpu_data()); // reshape (broadcast) the above to make // sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1, - batch_sum_multiplier_.cpu_data(), mean_.cpu_data(), 0., - num_by_chans_.mutable_cpu_data()); - caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, num * channels_, - spatial_dim, 1, 1., num_by_chans_.cpu_data(), - spatial_sum_multiplier_.cpu_data(), 1., bottom_diff); + + replicate_to_op(bottom_diff, + num, + spatial_dim*channels_, + spatial_dim, + mean_.cpu_data(), + std::plus()); // dE/dY - mean(dE/dY)-mean(dE/dY \cdot Y) \cdot Y caffe_cpu_axpby(temp_.count(), Dtype(1), top_diff, @@ -235,5 +314,4 @@ STUB_GPU(BatchNormLayer); #endif INSTANTIATE_CLASS(BatchNormLayer); -REGISTER_LAYER_CLASS(BatchNorm); } // namespace caffe diff --git a/src/caffe/layers/batch_reindex_layer.cpp b/src/caffe/layers/batch_reindex_layer.cpp index b14e56f7c6b..32e387da381 100644 --- a/src/caffe/layers/batch_reindex_layer.cpp +++ b/src/caffe/layers/batch_reindex_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, 
The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include "caffe/layers/batch_reindex_layer.hpp" diff --git a/src/caffe/layers/bias_layer.cpp b/src/caffe/layers/bias_layer.cpp index 4726a729834..40c17a51139 100644 --- a/src/caffe/layers/bias_layer.cpp +++ b/src/caffe/layers/bias_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/filler.hpp" diff --git a/src/caffe/layers/bnll_layer.cpp b/src/caffe/layers/bnll_layer.cpp index 448d86d752d..02bf8dafb59 100644 --- a/src/caffe/layers/bnll_layer.cpp +++ b/src/caffe/layers/bnll_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/layers/concat_layer.cpp b/src/caffe/layers/concat_layer.cpp index 580bd47977d..a252849dde5 100644 --- a/src/caffe/layers/concat_layer.cpp +++ b/src/caffe/layers/concat_layer.cpp @@ -1,5 +1,46 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include +#ifdef _OPENMP +#include +#endif + #include "caffe/layers/concat_layer.hpp" #include "caffe/util/math_functions.hpp" @@ -63,13 +104,17 @@ void ConcatLayer::Forward_cpu(const vector*>& bottom, for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->cpu_data(); const int bottom_concat_axis = bottom[i]->shape(concat_axis_); + const int offset_value = offset_concat_axis; + offset_concat_axis += bottom_concat_axis; +#ifdef _OPENMP + #pragma omp parallel for if(num_concats_ > 1) +#endif for (int n = 0; n < num_concats_; ++n) { caffe_copy(bottom_concat_axis * concat_input_size_, bottom_data + n * bottom_concat_axis * concat_input_size_, - top_data + (n * top_concat_axis + offset_concat_axis) + top_data + (n * top_concat_axis + offset_value) * concat_input_size_); } - offset_concat_axis += bottom_concat_axis; } } @@ -82,15 +127,19 @@ void ConcatLayer::Backward_cpu(const vector*>& top, const int top_concat_axis = top[0]->shape(concat_axis_); for (int i = 0; i < bottom.size(); ++i) { const int 
bottom_concat_axis = bottom[i]->shape(concat_axis_); + const int offset_value = offset_concat_axis; + offset_concat_axis += bottom_concat_axis; if (propagate_down[i]) { Dtype* bottom_diff = bottom[i]->mutable_cpu_diff(); +#ifdef _OPENMP + #pragma omp parallel for if(num_concats_ > 1) +#endif for (int n = 0; n < num_concats_; ++n) { caffe_copy(bottom_concat_axis * concat_input_size_, top_diff + - (n * top_concat_axis + offset_concat_axis) * concat_input_size_, + (n * top_concat_axis + offset_value) * concat_input_size_, bottom_diff + n * bottom_concat_axis * concat_input_size_); } } - offset_concat_axis += bottom_concat_axis; } } @@ -99,6 +148,5 @@ STUB_GPU(ConcatLayer); #endif INSTANTIATE_CLASS(ConcatLayer); -REGISTER_LAYER_CLASS(Concat); } // namespace caffe diff --git a/src/caffe/layers/contrastive_loss_layer.cpp b/src/caffe/layers/contrastive_loss_layer.cpp index 599e178e9c4..240583874fd 100644 --- a/src/caffe/layers/contrastive_loss_layer.cpp +++ b/src/caffe/layers/contrastive_loss_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/layers/conv_layer.cpp b/src/caffe/layers/conv_layer.cpp index 5d522ab31f2..82efc241040 100644 --- a/src/caffe/layers/conv_layer.cpp +++ b/src/caffe/layers/conv_layer.cpp @@ -1,4 +1,45 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include +#ifdef _OPENMP +#include +#endif + #include "caffe/layers/conv_layer.hpp" @@ -25,17 +66,29 @@ template void ConvolutionLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* weight = this->blobs_[0]->cpu_data(); + // If we have more threads available than batches to be processed then + // we are wasting resources (lower batches than 36 on XeonE5) + // So we instruct MKL for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->cpu_data(); Dtype* top_data = top[i]->mutable_cpu_data(); - for (int n = 0; n < this->num_; ++n) { - this->forward_cpu_gemm(bottom_data + n * this->bottom_dim_, weight, - top_data + n * this->top_dim_); - if (this->bias_term_) { - const Dtype* bias = this->blobs_[1]->cpu_data(); - this->forward_cpu_bias(top_data + n * this->top_dim_, bias); +#ifdef _OPENMP + #pragma omp parallel if(this->num_of_threads_ > 1) num_threads(this->num_of_threads_) + { + #pragma omp for +#endif + for (int n = 0; n < this->num_; ++n) { + this->forward_cpu_gemm(bottom_data + n*this->bottom_dim_, + weight, + top_data + n*this->top_dim_); + if (this->bias_term_) { + const Dtype* bias = this->blobs_[1]->cpu_data(); + this->forward_cpu_bias(top_data + n * this->top_dim_, bias); + } } +#ifdef _OPENMP } +#endif } } } @@ -55,19 +108,51 @@ void ConvolutionLayer::Backward_cpu(const vector*>& top, this->backward_cpu_bias(bias_diff, top_diff + n * this->top_dim_); } } - if (this->param_propagate_down_[0] || propagate_down[i]) { - for (int n = 0; n < this->num_; ++n) { - // gradient w.r.t. weight. Note that we will accumulate diffs. - if (this->param_propagate_down_[0]) { + + // OpenMP path is using bigger separate buffer to accumulate + // weight diffs, which are later on added to weight_diff + // so bigger buffer (weight_diff_mt) has to be cleared out + // before GEMM ops and results have to be summed up after GEMM ops.
+ + if (this->param_propagate_down_[0]) { +#ifdef _OPENMP + if (this->num_of_threads_ > 1) { + this->clear_weight_mt(); + } + #pragma omp parallel if(this->num_of_threads_ > 1) num_threads(this->num_of_threads_) +#endif + { +#ifdef _OPENMP + #pragma omp for +#endif + for (int n = 0; n < this->num_; ++n) { + // gradient w.r.t. weight. Note that we will accumulate diffs. this->weight_cpu_gemm(bottom_data + n * this->bottom_dim_, - top_diff + n * this->top_dim_, weight_diff); + top_diff + n * this->top_dim_, weight_diff); + } + +#ifdef _OPENMP + if (this->num_of_threads_ > 1) { + this->sum_weight_mt(weight_diff); } - // gradient w.r.t. bottom data, if necessary. - if (propagate_down[i]) { +#endif + } + } + + if (propagate_down[i]) { +#ifdef _OPENMP + #pragma omp parallel if(this->num_of_threads_ > 1) num_threads(this->num_of_threads_) + { + #pragma omp for +#endif + for (int n = 0; n < this->num_; ++n) { + // gradient w.r.t. bottom data, if necessary. this->backward_cpu_gemm(top_diff + n * this->top_dim_, weight, bottom_diff + n * this->bottom_dim_); } +#ifdef _OPENMP } +#endif } } } diff --git a/src/caffe/layers/crop_layer.cpp b/src/caffe/layers/crop_layer.cpp index aecdcd63194..3183b60d3c9 100644 --- a/src/caffe/layers/crop_layer.cpp +++ b/src/caffe/layers/crop_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include #include @@ -69,44 +106,42 @@ void CropLayer::Reshape(const vector*>& bottom, } template +template void CropLayer::crop_copy(const vector*>& bottom, const vector*>& top, const vector& offsets, - vector indices, - int cur_dim, const Dtype* src_data, - Dtype* dest_data, - bool is_forward) { - if (cur_dim + 1 < top[0]->num_axes()) { - // We are not yet at the final dimension, call copy recursively - for (int i = 0; i < top[0]->shape(cur_dim); ++i) { - indices[cur_dim] = i; - crop_copy(bottom, top, offsets, indices, cur_dim+1, - src_data, dest_data, is_forward); + Dtype* dest_data) { + int last_dim = top[0]->num_axes() - 1; + int copy_count = top[0]->count() / top[0]->shape(last_dim); + +#ifdef _OPENMP + #pragma omp parallel for +#endif + for (int i = 0; i < copy_count; ++i) { + // prepare index vector reduced(red) and with offsets(off) + std::vector ind_red(last_dim, 0); + std::vector ind_off(last_dim+1, 0); + int cur_iteration = i; + for (int j = last_dim - 1; j >=0; --j) { + int index = cur_iteration % top[0]->shape(j); + cur_iteration /= top[0]->shape(j); + ind_red[j] = index; + ind_off[j] = index + offsets[j]; } - } else { - // We are at the last dimensions, which is stored continously in memory - for (int i = 0; i < top[0]->shape(cur_dim); ++i) { - // prepare index vector reduced(red) and with offsets(off) - std::vector ind_red(cur_dim, 0); - std::vector ind_off(cur_dim+1, 0); - for (int j = 0; j < cur_dim; ++j) { - ind_red[j] = indices[j]; - ind_off[j] = indices[j] + offsets[j]; - } - ind_off[cur_dim] = offsets[cur_dim]; - // do the copy - if (is_forward) { - caffe_copy(top[0]->shape(cur_dim), - src_data + bottom[0]->offset(ind_off), - dest_data + top[0]->offset(ind_red)); - } else { - // in the backwards pass the src_data is top_diff - // and the dest_data is bottom_diff - caffe_copy(top[0]->shape(cur_dim), - src_data + top[0]->offset(ind_red), - dest_data + bottom[0]->offset(ind_off)); - } + ind_off[last_dim] = offsets[last_dim]; + // 
Last dimensions stored continuously in memory + // do the copy + if (is_forward) { + caffe_copy(top[0]->shape(last_dim), + src_data + bottom[0]->offset(ind_off), + dest_data + top[0]->offset(ind_red)); + } else { + // in the backwards pass the src_data is top_diff + // and the dest_data is bottom_diff + caffe_copy(top[0]->shape(last_dim), + src_data + top[0]->offset(ind_red), + dest_data + bottom[0]->offset(ind_off)); } } } @@ -114,10 +149,9 @@ template void CropLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { - std::vector indices(top[0]->num_axes(), 0); const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); - crop_copy(bottom, top, offsets, indices, 0, bottom_data, top_data, true); + crop_copy(bottom, top, offsets, bottom_data, top_data); } template @@ -128,8 +162,7 @@ void CropLayer::Backward_cpu(const vector*>& top, if (propagate_down[0]) { caffe_set(bottom[0]->count(), static_cast(0), bottom_diff); - std::vector indices(top[0]->num_axes(), 0); - crop_copy(bottom, top, offsets, indices, 0, top_diff, bottom_diff, false); + crop_copy(bottom, top, offsets, top_diff, bottom_diff); } } diff --git a/src/caffe/layers/cudnn_conv_layer.cpp b/src/caffe/layers/cudnn_conv_layer.cpp index 1987fb096b0..876c0424260 100644 --- a/src/caffe/layers/cudnn_conv_layer.cpp +++ b/src/caffe/layers/cudnn_conv_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved.
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifdef USE_CUDNN #include #include diff --git a/src/caffe/layers/cudnn_lcn_layer.cpp b/src/caffe/layers/cudnn_lcn_layer.cpp index 9c09bf26b4d..7b0011ca624 100644 --- a/src/caffe/layers/cudnn_lcn_layer.cpp +++ b/src/caffe/layers/cudnn_lcn_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifdef USE_CUDNN #include diff --git a/src/caffe/layers/cudnn_lrn_layer.cpp b/src/caffe/layers/cudnn_lrn_layer.cpp index 0495b802baf..4a2e169f835 100644 --- a/src/caffe/layers/cudnn_lrn_layer.cpp +++ b/src/caffe/layers/cudnn_lrn_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifdef USE_CUDNN #include diff --git a/src/caffe/layers/cudnn_pooling_layer.cpp b/src/caffe/layers/cudnn_pooling_layer.cpp index 24f14780b4f..39d366c39ba 100644 --- a/src/caffe/layers/cudnn_pooling_layer.cpp +++ b/src/caffe/layers/cudnn_pooling_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifdef USE_CUDNN #include diff --git a/src/caffe/layers/cudnn_relu_layer.cpp b/src/caffe/layers/cudnn_relu_layer.cpp index 795e0a9efb0..43873ab093c 100644 --- a/src/caffe/layers/cudnn_relu_layer.cpp +++ b/src/caffe/layers/cudnn_relu_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifdef USE_CUDNN #include diff --git a/src/caffe/layers/cudnn_sigmoid_layer.cpp b/src/caffe/layers/cudnn_sigmoid_layer.cpp index 3ce6aef1764..0bda5941d08 100644 --- a/src/caffe/layers/cudnn_sigmoid_layer.cpp +++ b/src/caffe/layers/cudnn_sigmoid_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifdef USE_CUDNN #include diff --git a/src/caffe/layers/cudnn_softmax_layer.cpp b/src/caffe/layers/cudnn_softmax_layer.cpp index 6440df9805b..5c62f2a22fa 100644 --- a/src/caffe/layers/cudnn_softmax_layer.cpp +++ b/src/caffe/layers/cudnn_softmax_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifdef USE_CUDNN #include diff --git a/src/caffe/layers/cudnn_tanh_layer.cpp b/src/caffe/layers/cudnn_tanh_layer.cpp index e87dd9de0ab..62798c35eb5 100644 --- a/src/caffe/layers/cudnn_tanh_layer.cpp +++ b/src/caffe/layers/cudnn_tanh_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifdef USE_CUDNN #include diff --git a/src/caffe/layers/data_layer.cpp b/src/caffe/layers/data_layer.cpp index 66e6301fd45..e889ca51f68 100644 --- a/src/caffe/layers/data_layer.cpp +++ b/src/caffe/layers/data_layer.cpp @@ -1,8 +1,45 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifdef USE_OPENCV #include #endif // USE_OPENCV #include - +#include #include #include "caffe/data_transformer.hpp" @@ -27,13 +64,15 @@ void DataLayer::DataLayerSetUp(const vector*>& bottom, const vector*>& top) { const int batch_size = this->layer_param_.data_param().batch_size(); // Read a data point, and use it to initialize the top blob. - Datum& datum = *(reader_.full().peek()); + Datum datum; + datum.ParseFromString(*(reader_.full().peek())); // Use data_transformer to infer the expected blob shape from datum. vector top_shape = this->data_transformer_->InferBlobShape(datum); this->transformed_data_.Reshape(top_shape); // Reshape top[0] and prefetch_data according to the batch_size. 
top_shape[0] = batch_size; + top[0]->Reshape(top_shape); for (int i = 0; i < this->PREFETCH_COUNT; ++i) { this->prefetch_[i].data_.Reshape(top_shape); @@ -59,16 +98,23 @@ void DataLayer::load_batch(Batch* batch) { double read_time = 0; double trans_time = 0; CPUTimer timer; + CPUTimer trans_timer; CHECK(batch->data_.count()); + +#ifndef _OPENMP CHECK(this->transformed_data_.count()); +#endif // Reshape according to the first datum of each batch // on single input batches allows for inputs of varying dimension. const int batch_size = this->layer_param_.data_param().batch_size(); - Datum& datum = *(reader_.full().peek()); + Datum datum; + datum.ParseFromString(*(reader_.full().peek())); // Use data_transformer to infer the expected blob shape from datum. vector top_shape = this->data_transformer_->InferBlobShape(datum); +#ifndef _OPENMP this->transformed_data_.Reshape(top_shape); +#endif // Reshape batch according to the batch_size. top_shape[0] = batch_size; batch->data_.Reshape(top_shape); @@ -79,26 +125,52 @@ void DataLayer::load_batch(Batch* batch) { if (this->output_labels_) { top_label = batch->label_.mutable_cpu_data(); } + + trans_timer.Start(); +#ifdef _OPENMP + #pragma omp parallel if (batch_size > 1) + #pragma omp single nowait +#endif for (int item_id = 0; item_id < batch_size; ++item_id) { timer.Start(); // get a datum - Datum& datum = *(reader_.full().pop("Waiting for data")); + string* data = (reader_.full().pop("Waiting for data")); + timer.Stop(); read_time += timer.MicroSeconds(); - timer.Start(); // Apply data transformations (mirror, scale, crop...) int offset = batch->data_.offset(item_id); - this->transformed_data_.set_cpu_data(top_data + offset); - this->data_transformer_->Transform(datum, &(this->transformed_data_)); - // Copy label. 
- if (this->output_labels_) { - top_label[item_id] = datum.label(); - } - trans_time += timer.MicroSeconds(); - reader_.free().push(const_cast(&datum)); +#ifdef _OPENMP + PreclcRandomNumbers precalculated_rand_numbers; + this->data_transformer_->GenerateRandNumbers(precalculated_rand_numbers); + #pragma omp task firstprivate(offset, precalculated_rand_numbers, data, item_id) +#endif + { + Datum datum; + datum.ParseFromString(*data); + (reader_.free()).push(data); + // Copy label. We need to copy it before we release datum + if (this->output_labels_) { + top_label[item_id] = datum.label(); + } +#ifdef _OPENMP + Blob tmp_data; + tmp_data.Reshape(top_shape); + tmp_data.set_cpu_data(top_data + offset); + this->data_transformer_->Transform(datum, &tmp_data, + precalculated_rand_numbers); +#else + this->transformed_data_.set_cpu_data(top_data + offset); + this->data_transformer_->Transform(datum, &(this->transformed_data_)); +#endif + } } - timer.Stop(); + trans_timer.Stop(); batch_timer.Stop(); + // Due to multithreaded nature of transformation, + // time it takes to execute them we get from subtracting + // read batch of images time from total batch read&transform time + trans_time = trans_timer.MicroSeconds() - read_time; DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; diff --git a/src/caffe/layers/deconv_layer.cpp b/src/caffe/layers/deconv_layer.cpp index 20a460fbdea..bc6dca4a41c 100644 --- a/src/caffe/layers/deconv_layer.cpp +++ b/src/caffe/layers/deconv_layer.cpp @@ -1,4 +1,44 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include +#ifdef _OPENMP +#include +#endif #include "caffe/layers/deconv_layer.hpp" @@ -28,12 +68,20 @@ void DeconvolutionLayer::Forward_cpu(const vector*>& bottom, for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->cpu_data(); Dtype* top_data = top[i]->mutable_cpu_data(); - for (int n = 0; n < this->num_; ++n) { - this->backward_cpu_gemm(bottom_data + n * this->bottom_dim_, weight, - top_data + n * this->top_dim_); - if (this->bias_term_) { - const Dtype* bias = this->blobs_[1]->cpu_data(); - this->forward_cpu_bias(top_data + n * this->top_dim_, bias); +#ifdef _OPENMP + #pragma omp parallel if(this->num_of_threads_ > 1) num_threads(this->num_of_threads_) +#endif + { +#ifdef _OPENMP + #pragma omp for +#endif + for (int n = 0; n < this->num_; ++n) { + this->backward_cpu_gemm(bottom_data + n * this->bottom_dim_, weight, + top_data + n * this->top_dim_); + if (this->bias_term_) { + const Dtype* bias = this->blobs_[1]->cpu_data(); + this->forward_cpu_bias(top_data + n * this->top_dim_, bias); + } } } } @@ -55,20 +103,40 @@ void DeconvolutionLayer::Backward_cpu(const vector*>& top, this->backward_cpu_bias(bias_diff, top_diff + n * this->top_dim_); } } + + if (this->param_propagate_down_[0] || propagate_down[i]) { - for (int n = 0; n < this->num_; ++n) { - // Gradient w.r.t. weight. Note that we will accumulate diffs. - if (this->param_propagate_down_[0]) { - this->weight_cpu_gemm(top_diff + n * this->top_dim_, - bottom_data + n * this->bottom_dim_, weight_diff); +#ifdef _OPENMP + if (this->num_of_threads_ > 1) { + this->clear_weight_mt(); + } + #pragma omp parallel if(this->num_of_threads_ > 1) num_threads(this->num_of_threads_) +#endif + { +#ifdef _OPENMP + #pragma omp for +#endif + for (int n = 0; n < this->num_; ++n) { + if (this->param_propagate_down_[0]) { + // Gradient w.r.t. weight. Note that we will accumulate diffs. 
+ this->weight_cpu_gemm(top_diff + n * this->top_dim_, + bottom_data + n * this->bottom_dim_, weight_diff); + } + + if (propagate_down[i]) { + // Gradient w.r.t. bottom data, if necessary, reusing + // the column buffer. we might have just computed above. + this->forward_cpu_gemm(top_diff + n*this->top_dim_, + weight, + bottom_diff + n*this->bottom_dim_, + this->param_propagate_down_[0]); + } } - // Gradient w.r.t. bottom data, if necessary, reusing the column buffer - // we might have just computed above. - if (propagate_down[i]) { - this->forward_cpu_gemm(top_diff + n * this->top_dim_, weight, - bottom_diff + n * this->bottom_dim_, - this->param_propagate_down_[0]); +#ifdef _OPENMP + if (this->num_of_threads_ > 1) { + this->sum_weight_mt(weight_diff); } +#endif } } } @@ -79,6 +147,5 @@ STUB_GPU(DeconvolutionLayer); #endif INSTANTIATE_CLASS(DeconvolutionLayer); -REGISTER_LAYER_CLASS(Deconvolution); } // namespace caffe diff --git a/src/caffe/layers/detection_evaluate_layer.cpp b/src/caffe/layers/detection_evaluate_layer.cpp new file mode 100644 index 00000000000..e6588e99631 --- /dev/null +++ b/src/caffe/layers/detection_evaluate_layer.cpp @@ -0,0 +1,287 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include +#include + +#include "caffe/layers/detection_evaluate_layer.hpp" +#include "caffe/util/bbox_util.hpp" + +namespace caffe { + +template +void DetectionEvaluateLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + const DetectionEvaluateParameter& detection_evaluate_param = + this->layer_param_.detection_evaluate_param(); + CHECK(detection_evaluate_param.has_num_classes()) + << "Must provide num_classes."; + num_classes_ = detection_evaluate_param.num_classes(); + background_label_id_ = detection_evaluate_param.background_label_id(); + overlap_threshold_ = detection_evaluate_param.overlap_threshold(); + CHECK_GT(overlap_threshold_, 0.) 
<< "overlap_threshold must be non negative."; + evaluate_difficult_gt_ = detection_evaluate_param.evaluate_difficult_gt(); + if (detection_evaluate_param.has_name_size_file()) { + string name_size_file = detection_evaluate_param.name_size_file(); + std::ifstream infile(name_size_file.c_str()); + CHECK(infile.good()) + << "Failed to open name size file: " << name_size_file; + // The file is in the following format: + // name height width + // ... + string name; + int height, width; + while (infile >> name >> height >> width) { + sizes_.push_back(std::make_pair(height, width)); + } + infile.close(); + } + count_ = 0; + // If there is no name_size_file provided, use normalized bbox to evaluate. + use_normalized_bbox_ = sizes_.size() == 0; + + // Retrieve resize parameter if there is any provided. + has_resize_ = detection_evaluate_param.has_resize_param(); + if (has_resize_) { + resize_param_ = detection_evaluate_param.resize_param(); + } +} + +template +void DetectionEvaluateLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + CHECK_LE(count_, sizes_.size()); + CHECK_EQ(bottom[0]->num(), 1); + CHECK_EQ(bottom[0]->channels(), 1); + CHECK_EQ(bottom[0]->width(), 7); + CHECK_EQ(bottom[1]->num(), 1); + CHECK_EQ(bottom[1]->channels(), 1); + CHECK_EQ(bottom[1]->width(), 8); + + // num() and channels() are 1. + vector top_shape(2, 1); + int num_pos_classes = background_label_id_ == -1 ? 
+ num_classes_ : num_classes_ - 1; + int num_valid_det = 0; + const Dtype* det_data = bottom[0]->cpu_data(); + for (int i = 0; i < bottom[0]->height(); ++i) { + if (det_data[1] != -1) { + ++num_valid_det; + } + det_data += 7; + } + top_shape.push_back(num_pos_classes + num_valid_det); + // Each row is a 5 dimension vector, which stores + // [image_id, label, confidence, true_pos, false_pos] + top_shape.push_back(5); + top[0]->Reshape(top_shape); +} + +template +void DetectionEvaluateLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + const Dtype* det_data = bottom[0]->cpu_data(); + const Dtype* gt_data = bottom[1]->cpu_data(); + + // Retrieve all detection results. + map all_detections; + GetDetectionResults(det_data, bottom[0]->height(), background_label_id_, + &all_detections); + + // Retrieve all ground truth (including difficult ones). + map all_gt_bboxes; + GetGroundTruth(gt_data, bottom[1]->height(), background_label_id_, + true, &all_gt_bboxes); + + Dtype* top_data = top[0]->mutable_cpu_data(); + caffe_set(top[0]->count(), Dtype(0.), top_data); + int num_det = 0; + + // Insert number of ground truth for each label. + map num_pos; + for (map::iterator it = all_gt_bboxes.begin(); + it != all_gt_bboxes.end(); ++it) { + for (LabelBBox::iterator iit = it->second.begin(); iit != it->second.end(); + ++iit) { + int count = 0; + if (evaluate_difficult_gt_) { + count = iit->second.size(); + } else { + // Get number of non difficult ground truth. 
+ for (int i = 0; i < iit->second.size(); ++i) { + if (!iit->second[i].difficult()) { + ++count; + } + } + } + if (num_pos.find(iit->first) == num_pos.end()) { + num_pos[iit->first] = count; + } else { + num_pos[iit->first] += count; + } + } + } + for (int c = 0; c < num_classes_; ++c) { + if (c == background_label_id_) { + continue; + } + top_data[num_det * 5] = -1; + top_data[num_det * 5 + 1] = c; + if (num_pos.find(c) == num_pos.end()) { + top_data[num_det * 5 + 2] = 0; + } else { + top_data[num_det * 5 + 2] = num_pos.find(c)->second; + } + top_data[num_det * 5 + 3] = -1; + top_data[num_det * 5 + 4] = -1; + ++num_det; + } + + // Insert detection evaluate status. + for (map::iterator it = all_detections.begin(); + it != all_detections.end(); ++it) { + int image_id = it->first; + LabelBBox& detections = it->second; + if (all_gt_bboxes.find(image_id) == all_gt_bboxes.end()) { + // No ground truth for current image. All detections become false_pos. + for (LabelBBox::iterator iit = detections.begin(); + iit != detections.end(); ++iit) { + int label = iit->first; + if (label == -1) { + continue; + } + const vector& bboxes = iit->second; + for (int i = 0; i < bboxes.size(); ++i) { + top_data[num_det * 5] = image_id; + top_data[num_det * 5 + 1] = label; + top_data[num_det * 5 + 2] = bboxes[i].score(); + top_data[num_det * 5 + 3] = 0; + top_data[num_det * 5 + 4] = 1; + ++num_det; + } + } + } else { + LabelBBox& label_bboxes = all_gt_bboxes.find(image_id)->second; + for (LabelBBox::iterator iit = detections.begin(); + iit != detections.end(); ++iit) { + int label = iit->first; + if (label == -1) { + continue; + } + vector& bboxes = iit->second; + if (label_bboxes.find(label) == label_bboxes.end()) { + // No ground truth for current label. All detections become false_pos. 
+ for (int i = 0; i < bboxes.size(); ++i) { + top_data[num_det * 5] = image_id; + top_data[num_det * 5 + 1] = label; + top_data[num_det * 5 + 2] = bboxes[i].score(); + top_data[num_det * 5 + 3] = 0; + top_data[num_det * 5 + 4] = 1; + ++num_det; + } + } else { + vector& gt_bboxes = label_bboxes.find(label)->second; + // Scale ground truth if needed. + if (!use_normalized_bbox_) { + CHECK_LT(count_, sizes_.size()); + for (int i = 0; i < gt_bboxes.size(); ++i) { + OutputBBox(gt_bboxes[i], sizes_[count_], has_resize_, + resize_param_, &(gt_bboxes[i])); + } + } + vector visited(gt_bboxes.size(), false); + // Sort detections in descend order based on scores. + std::sort(bboxes.begin(), bboxes.end(), SortBBoxDescend); + for (int i = 0; i < bboxes.size(); ++i) { + top_data[num_det * 5] = image_id; + top_data[num_det * 5 + 1] = label; + top_data[num_det * 5 + 2] = bboxes[i].score(); + if (!use_normalized_bbox_) { + OutputBBox(bboxes[i], sizes_[count_], has_resize_, + resize_param_, &(bboxes[i])); + } + // Compare with each ground truth bbox. + float overlap_max = -1; + int jmax = -1; + for (int j = 0; j < gt_bboxes.size(); ++j) { + float overlap = JaccardOverlap(bboxes[i], gt_bboxes[j], + use_normalized_bbox_); + if (overlap > overlap_max) { + overlap_max = overlap; + jmax = j; + } + } + if (overlap_max >= overlap_threshold_) { + if (evaluate_difficult_gt_ || + (!evaluate_difficult_gt_ && !gt_bboxes[jmax].difficult())) { + if (!visited[jmax]) { + // true positive. + top_data[num_det * 5 + 3] = 1; + top_data[num_det * 5 + 4] = 0; + visited[jmax] = true; + } else { + // false positive (multiple detection). + top_data[num_det * 5 + 3] = 0; + top_data[num_det * 5 + 4] = 1; + } + } + } else { + // false positive. + top_data[num_det * 5 + 3] = 0; + top_data[num_det * 5 + 4] = 1; + } + ++num_det; + } + } + } + } + if (sizes_.size() > 0) { + ++count_; + if (count_ == sizes_.size()) { + // reset count after a full iterations through the DB. 
+ count_ = 0; + } + } + } +} + +INSTANTIATE_CLASS(DetectionEvaluateLayer); +REGISTER_LAYER_CLASS(DetectionEvaluate); + +} // namespace caffe diff --git a/src/caffe/layers/detection_output_layer.cpp b/src/caffe/layers/detection_output_layer.cpp new file mode 100644 index 00000000000..1237731a6cb --- /dev/null +++ b/src/caffe/layers/detection_output_layer.cpp @@ -0,0 +1,525 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include // NOLINT(readability/streams) +#include +#include +#include +#include + +#include "boost/filesystem.hpp" +#include "boost/foreach.hpp" + +#include "caffe/layers/detection_output_layer.hpp" + +namespace caffe { + +template +void DetectionOutputLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + const DetectionOutputParameter& detection_output_param = + this->layer_param_.detection_output_param(); + CHECK(detection_output_param.has_num_classes()) << "Must specify num_classes"; + num_classes_ = detection_output_param.num_classes(); + share_location_ = detection_output_param.share_location(); + num_loc_classes_ = share_location_ ? 1 : num_classes_; + background_label_id_ = detection_output_param.background_label_id(); + code_type_ = detection_output_param.code_type(); + variance_encoded_in_target_ = + detection_output_param.variance_encoded_in_target(); + keep_top_k_ = detection_output_param.keep_top_k(); + confidence_threshold_ = detection_output_param.has_confidence_threshold() ? + detection_output_param.confidence_threshold() : -FLT_MAX; + // Parameters used in nms. + nms_threshold_ = detection_output_param.nms_param().nms_threshold(); + CHECK_GE(nms_threshold_, 0.) 
<< "nms_threshold must be non negative."; + eta_ = detection_output_param.nms_param().eta(); + CHECK_GT(eta_, 0.); + CHECK_LE(eta_, 1.); + top_k_ = -1; + if (detection_output_param.nms_param().has_top_k()) { + top_k_ = detection_output_param.nms_param().top_k(); + } + const SaveOutputParameter& save_output_param = + detection_output_param.save_output_param(); + output_directory_ = save_output_param.output_directory(); + if (!output_directory_.empty()) { + if (boost::filesystem::is_directory(output_directory_)) { + boost::filesystem::remove_all(output_directory_); + } + // bug on ret val of create_dir https://svn.boost.org/trac/boost/ticket/7258 + boost::filesystem::create_directories(output_directory_); + if(!boost::filesystem::is_directory(output_directory_)) { + LOG(FATAL) << "Failed to create directory: " << output_directory_; + } + } + output_name_prefix_ = save_output_param.output_name_prefix(); + need_save_ = output_directory_ == "" ? false : true; + output_format_ = save_output_param.output_format(); + if (save_output_param.has_label_map_file()) { + string label_map_file = save_output_param.label_map_file(); + if (label_map_file.empty()) { + // Ignore saving if there is no label_map_file provided. + LOG(WARNING) << "Provide label_map_file if output results to files."; + need_save_ = false; + } else { + LabelMap label_map; + CHECK(ReadProtoFromTextFile(label_map_file, &label_map)) + << "Failed to read label map file: " << label_map_file; + CHECK(MapLabelToName(label_map, true, &label_to_name_)) + << "Failed to convert label to name."; + CHECK(MapLabelToDisplayName(label_map, true, &label_to_display_name_)) + << "Failed to convert label to display name."; + } + } else { + need_save_ = false; + } + if (save_output_param.has_name_size_file()) { + string name_size_file = save_output_param.name_size_file(); + if (name_size_file.empty()) { + // Ignore saving if there is no name_size_file provided. 
+ LOG(WARNING) << "Provide name_size_file if output results to files."; + need_save_ = false; + } else { + std::ifstream infile(name_size_file.c_str()); + CHECK(infile.good()) + << "Failed to open name size file: " << name_size_file; + // The file is in the following format: + // name height width + // ... + string name; + int height, width; + while (infile >> name >> height >> width) { + names_.push_back(name); + sizes_.push_back(std::make_pair(height, width)); + } + infile.close(); + if (save_output_param.has_num_test_image()) { + num_test_image_ = save_output_param.num_test_image(); + } else { + num_test_image_ = names_.size(); + } + CHECK_LE(num_test_image_, names_.size()); + } + } else { + need_save_ = false; + } + has_resize_ = save_output_param.has_resize_param(); + if (has_resize_) { + resize_param_ = save_output_param.resize_param(); + } + name_count_ = 0; + visualize_ = detection_output_param.visualize(); + if (visualize_) { + visualize_threshold_ = 0.6; + if (detection_output_param.has_visualize_threshold()) { + visualize_threshold_ = detection_output_param.visualize_threshold(); + } + data_transformer_.reset( + new DataTransformer(this->layer_param_.transform_param(), + this->phase_)); + data_transformer_->InitRand(); + save_file_ = detection_output_param.save_file(); + } + bbox_preds_.ReshapeLike(*(bottom[0])); + if (!share_location_) { + bbox_permute_.ReshapeLike(*(bottom[0])); + } + conf_permute_.ReshapeLike(*(bottom[1])); +} + +template +void DetectionOutputLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + if (need_save_) { + CHECK_LE(name_count_, names_.size()); + if (name_count_ % num_test_image_ == 0) { + // Clean all outputs. 
+ if (output_format_ == "VOC") { + boost::filesystem::path output_directory(output_directory_); + for (map::iterator it = label_to_name_.begin(); + it != label_to_name_.end(); ++it) { + if (it->first == background_label_id_) { + continue; + } + std::ofstream outfile; + boost::filesystem::path file( + output_name_prefix_ + it->second + ".txt"); + boost::filesystem::path out_file = output_directory / file; + outfile.open(out_file.string().c_str(), std::ofstream::out); + } + } + } + } + CHECK_EQ(bottom[0]->num(), bottom[1]->num()); + if (bbox_preds_.num() != bottom[0]->num() || + bbox_preds_.count(1) != bottom[0]->count(1)) { + bbox_preds_.ReshapeLike(*(bottom[0])); + } + if (!share_location_ && (bbox_permute_.num() != bottom[0]->num() || + bbox_permute_.count(1) != bottom[0]->count(1))) { + bbox_permute_.ReshapeLike(*(bottom[0])); + } + if (conf_permute_.num() != bottom[1]->num() || + conf_permute_.count(1) != bottom[1]->count(1)) { + conf_permute_.ReshapeLike(*(bottom[1])); + } + num_priors_ = bottom[2]->height() / 4; + CHECK_EQ(num_priors_ * num_loc_classes_ * 4, bottom[0]->channels()) + << "Number of priors must match number of location predictions."; + CHECK_EQ(num_priors_ * num_classes_, bottom[1]->channels()) + << "Number of priors must match number of confidence predictions."; + // num() and channels() are 1. + vector top_shape(2, 1); + // Since the number of bboxes to be kept is unknown before nms, we manually + // set it to (fake) 1. + top_shape.push_back(1); + // Each row is a 7 dimension vector, which stores + // [image_id, label, confidence, xmin, ymin, xmax, ymax] + top_shape.push_back(7); + top[0]->Reshape(top_shape); +} + +template +void DetectionOutputLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + const Dtype* loc_data = bottom[0]->cpu_data(); + const Dtype* conf_data = bottom[1]->cpu_data(); + const Dtype* prior_data = bottom[2]->cpu_data(); + const int num = bottom[0]->num(); + + // Retrieve all location predictions. 
+ vector all_loc_preds; + GetLocPredictions(loc_data, num, num_priors_, num_loc_classes_, + share_location_, &all_loc_preds); + + // Retrieve all confidences. + vector > > all_conf_scores; + GetConfidenceScores(conf_data, num, num_priors_, num_classes_, + &all_conf_scores); + + // Retrieve all prior bboxes. It is same within a batch since we assume all + // images in a batch are of same dimension. + vector prior_bboxes; + vector > prior_variances; + GetPriorBBoxes(prior_data, num_priors_, &prior_bboxes, &prior_variances); + + // Decode all loc predictions to bboxes. + vector all_decode_bboxes; + const bool clip_bbox = false; + DecodeBBoxesAll(all_loc_preds, prior_bboxes, prior_variances, num, + share_location_, num_loc_classes_, background_label_id_, + code_type_, variance_encoded_in_target_, clip_bbox, + &all_decode_bboxes); + + int num_kept = 0; + vector > > all_indices(num); +#ifdef _OPENMP + #pragma omp parallel for +#endif + for (int i = 0; i < num; ++i) { + const LabelBBox& decode_bboxes = all_decode_bboxes[i]; + const map >& conf_scores = all_conf_scores[i]; + map > indices; + int num_det = 0; + for (int c = 0; c < num_classes_; ++c) { + if (c == background_label_id_) { + // Ignore background class. + continue; + } + if (conf_scores.find(c) == conf_scores.end()) { + // Something bad happened if there are no predictions for current label. + LOG(FATAL) << "Could not find confidence predictions for label " << c; + } + const vector& scores = conf_scores.find(c)->second; + int label = share_location_ ? -1 : c; + if (decode_bboxes.find(label) == decode_bboxes.end()) { + // Something bad happened if there are no predictions for current label. 
+ LOG(FATAL) << "Could not find location predictions for label " << label; + continue; + } + const vector& bboxes = decode_bboxes.find(label)->second; + ApplyNMSFast(bboxes, scores, confidence_threshold_, nms_threshold_, eta_, + top_k_, &(indices[c])); + num_det += indices[c].size(); + } + // Temporary variable for critical section + int num_to_add = 0; + if (keep_top_k_ > -1 && num_det > keep_top_k_) { + vector > > score_index_pairs; + for (map >::iterator it = indices.begin(); + it != indices.end(); ++it) { + int label = it->first; + const vector& label_indices = it->second; + if (conf_scores.find(label) == conf_scores.end()) { + // Something bad happened for current label. + LOG(FATAL) << "Could not find location predictions for " << label; + continue; + } + const vector& scores = conf_scores.find(label)->second; + for (int j = 0; j < label_indices.size(); ++j) { + int idx = label_indices[j]; + CHECK_LT(idx, scores.size()); + score_index_pairs.push_back(std::make_pair( + scores[idx], std::make_pair(label, idx))); + } + } + // Keep top k results per image. + std::sort(score_index_pairs.begin(), score_index_pairs.end(), + SortScorePairDescend >); + score_index_pairs.resize(keep_top_k_); + // Store the new indices. 
+ map > new_indices; + for (int j = 0; j < score_index_pairs.size(); ++j) { + int label = score_index_pairs[j].second.first; + int idx = score_index_pairs[j].second.second; + new_indices[label].push_back(idx); + } + all_indices[i] = new_indices; + num_to_add = keep_top_k_; + } else { + all_indices[i] = indices; + num_to_add = num_det; + } +#ifdef _OPENMP + #pragma omp atomic +#endif + num_kept += num_to_add; + } + + vector top_shape(2, 1); + top_shape.push_back(num_kept); + top_shape.push_back(7); + Dtype* top_data; + if (num_kept == 0) { + LOG(INFO) << "Couldn't find any detections"; + top_shape[2] = num; + top[0]->Reshape(top_shape); + top_data = top[0]->mutable_cpu_data(); + caffe_set(top[0]->count(), -1, top_data); + // Generate fake results per image. + for (int i = 0; i < num; ++i) { + top_data[0] = i; + top_data += 7; + } + } else { + top[0]->Reshape(top_shape); + top_data = top[0]->mutable_cpu_data(); + } + + int count = 0; + boost::filesystem::path output_directory(output_directory_); + for (int i = 0; i < num; ++i) { + const map >& conf_scores = all_conf_scores[i]; + const LabelBBox& decode_bboxes = all_decode_bboxes[i]; + for (map >::iterator it = all_indices[i].begin(); + it != all_indices[i].end(); ++it) { + int label = it->first; + if (conf_scores.find(label) == conf_scores.end()) { + // Something bad happened if there are no predictions for current label. + LOG(FATAL) << "Could not find confidence predictions for " << label; + continue; + } + const vector& scores = conf_scores.find(label)->second; + int loc_label = share_location_ ? -1 : label; + if (decode_bboxes.find(loc_label) == decode_bboxes.end()) { + // Something bad happened if there are no predictions for current label. 
+ LOG(FATAL) << "Could not find location predictions for " << loc_label; + continue; + } + const vector& bboxes = + decode_bboxes.find(loc_label)->second; + vector& indices = it->second; + if (need_save_) { + CHECK(label_to_name_.find(label) != label_to_name_.end()) + << "Cannot find label: " << label << " in the label map."; + CHECK_LT(name_count_, names_.size()); + } + for (int j = 0; j < indices.size(); ++j) { + int idx = indices[j]; + top_data[count * 7] = i; + top_data[count * 7 + 1] = label; + top_data[count * 7 + 2] = scores[idx]; + const NormalizedBBox& bbox = bboxes[idx]; + top_data[count * 7 + 3] = bbox.xmin(); + top_data[count * 7 + 4] = bbox.ymin(); + top_data[count * 7 + 5] = bbox.xmax(); + top_data[count * 7 + 6] = bbox.ymax(); + if (need_save_) { + NormalizedBBox out_bbox; + OutputBBox(bbox, sizes_[name_count_], has_resize_, resize_param_, + &out_bbox); + float score = top_data[count * 7 + 2]; + float xmin = out_bbox.xmin(); + float ymin = out_bbox.ymin(); + float xmax = out_bbox.xmax(); + float ymax = out_bbox.ymax(); + ptree pt_xmin, pt_ymin, pt_width, pt_height; + pt_xmin.put("", round(xmin * 100) / 100.); + pt_ymin.put("", round(ymin * 100) / 100.); + pt_width.put("", round((xmax - xmin) * 100) / 100.); + pt_height.put("", round((ymax - ymin) * 100) / 100.); + + ptree cur_bbox; + cur_bbox.push_back(std::make_pair("", pt_xmin)); + cur_bbox.push_back(std::make_pair("", pt_ymin)); + cur_bbox.push_back(std::make_pair("", pt_width)); + cur_bbox.push_back(std::make_pair("", pt_height)); + + ptree cur_det; + cur_det.put("image_id", names_[name_count_]); + if (output_format_ == "ILSVRC") { + cur_det.put("category_id", label); + } else { + cur_det.put("category_id", label_to_name_[label].c_str()); + } + cur_det.add_child("bbox", cur_bbox); + cur_det.put("score", score); + + detections_.push_back(std::make_pair("", cur_det)); + } + ++count; + } + } + if (need_save_) { + ++name_count_; + if (name_count_ % num_test_image_ == 0) { + if (output_format_ == 
"VOC") { + map outfiles; + for (int c = 0; c < num_classes_; ++c) { + if (c == background_label_id_) { + continue; + } + string label_name = label_to_name_[c]; + boost::filesystem::path file( + output_name_prefix_ + label_name + ".txt"); + boost::filesystem::path out_file = output_directory / file; + outfiles[label_name] = new std::ofstream(out_file.string().c_str(), + std::ofstream::out); + } + BOOST_FOREACH(ptree::value_type &det, detections_.get_child("")) { + ptree pt = det.second; + string label_name = pt.get("category_id"); + if (outfiles.find(label_name) == outfiles.end()) { + std::cout << "Cannot find " << label_name << std::endl; + continue; + } + string image_name = pt.get("image_id"); + float score = pt.get("score"); + vector bbox; + BOOST_FOREACH(ptree::value_type &elem, pt.get_child("bbox")) { + bbox.push_back(static_cast(elem.second.get_value())); + } + *(outfiles[label_name]) << image_name; + *(outfiles[label_name]) << " " << score; + *(outfiles[label_name]) << " " << bbox[0] << " " << bbox[1]; + *(outfiles[label_name]) << " " << bbox[0] + bbox[2]; + *(outfiles[label_name]) << " " << bbox[1] + bbox[3]; + *(outfiles[label_name]) << std::endl; + } + for (int c = 0; c < num_classes_; ++c) { + if (c == background_label_id_) { + continue; + } + string label_name = label_to_name_[c]; + outfiles[label_name]->flush(); + outfiles[label_name]->close(); + delete outfiles[label_name]; + } + } else if (output_format_ == "COCO") { + boost::filesystem::path output_directory(output_directory_); + boost::filesystem::path file(output_name_prefix_ + ".json"); + boost::filesystem::path out_file = output_directory / file; + std::ofstream outfile; + outfile.open(out_file.string().c_str(), std::ofstream::out); + + boost::regex exp("\"(null|true|false|-?[0-9]+(\\.[0-9]+)?)\""); + ptree output; + output.add_child("detections", detections_); + std::stringstream ss; + write_json(ss, output); + std::string rv = boost::regex_replace(ss.str(), exp, "$1"); + outfile << 
rv.substr(rv.find("["), rv.rfind("]") - rv.find("[")) + << std::endl << "]" << std::endl; + } else if (output_format_ == "ILSVRC") { + boost::filesystem::path output_directory(output_directory_); + boost::filesystem::path file(output_name_prefix_ + ".txt"); + boost::filesystem::path out_file = output_directory / file; + std::ofstream outfile; + outfile.open(out_file.string().c_str(), std::ofstream::out); + + BOOST_FOREACH(ptree::value_type &det, detections_.get_child("")) { + ptree pt = det.second; + int label = pt.get("category_id"); + string image_name = pt.get("image_id"); + float score = pt.get("score"); + vector bbox; + BOOST_FOREACH(ptree::value_type &elem, pt.get_child("bbox")) { + bbox.push_back(static_cast(elem.second.get_value())); + } + outfile << image_name << " " << label << " " << score; + outfile << " " << bbox[0] << " " << bbox[1]; + outfile << " " << bbox[0] + bbox[2]; + outfile << " " << bbox[1] + bbox[3]; + outfile << std::endl; + } + } + name_count_ = 0; + detections_.clear(); + } + } + } + if (visualize_) { +#ifdef USE_OPENCV + vector cv_imgs; + this->data_transformer_->TransformInv(bottom[3], &cv_imgs); + vector colors = GetColors(label_to_display_name_.size()); + VisualizeBBox(cv_imgs, top[0], visualize_threshold_, colors, + label_to_display_name_, save_file_); +#endif // USE_OPENCV + } +} + +#ifdef CPU_ONLY +STUB_GPU_FORWARD(DetectionOutputLayer, Forward); +#endif + +INSTANTIATE_CLASS(DetectionOutputLayer); +REGISTER_LAYER_CLASS(DetectionOutput); + +} // namespace caffe diff --git a/src/caffe/layers/detection_output_layer.cu b/src/caffe/layers/detection_output_layer.cu new file mode 100644 index 00000000000..a7230791ac7 --- /dev/null +++ b/src/caffe/layers/detection_output_layer.cu @@ -0,0 +1,304 @@ +#ifdef USE_OPENCV +#include +#include +#endif // USE_OPENCV +#include +#include // NOLINT(readability/streams) +#include +#include +#include +#include + +#include "boost/filesystem.hpp" +#include "boost/foreach.hpp" + +#include 
"caffe/layers/detection_output_layer.hpp" + +namespace caffe { + +template +void DetectionOutputLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) { + const Dtype* loc_data = bottom[0]->gpu_data(); + const Dtype* prior_data = bottom[2]->gpu_data(); + const int num = bottom[0]->num(); + + // Decode predictions. + Dtype* bbox_data = bbox_preds_.mutable_gpu_data(); + const int loc_count = bbox_preds_.count(); + const bool clip_bbox = false; + DecodeBBoxesGPU(loc_count, loc_data, prior_data, code_type_, + variance_encoded_in_target_, num_priors_, share_location_, + num_loc_classes_, background_label_id_, clip_bbox, bbox_data); + // Retrieve all decoded location predictions. + const Dtype* bbox_cpu_data; + if (!share_location_) { + Dtype* bbox_permute_data = bbox_permute_.mutable_gpu_data(); + PermuteDataGPU(loc_count, bbox_data, num_loc_classes_, num_priors_, + 4, bbox_permute_data); + bbox_cpu_data = bbox_permute_.cpu_data(); + } else { + bbox_cpu_data = bbox_preds_.cpu_data(); + } + + // Retrieve all confidences. + Dtype* conf_permute_data = conf_permute_.mutable_gpu_data(); + PermuteDataGPU(bottom[1]->count(), bottom[1]->gpu_data(), + num_classes_, num_priors_, 1, conf_permute_data); + const Dtype* conf_cpu_data = conf_permute_.cpu_data(); + + int num_kept = 0; + vector > > all_indices; + for (int i = 0; i < num; ++i) { + map > indices; + int num_det = 0; + const int conf_idx = i * num_classes_ * num_priors_; + int bbox_idx; + if (share_location_) { + bbox_idx = i * num_priors_ * 4; + } else { + bbox_idx = conf_idx * 4; + } + for (int c = 0; c < num_classes_; ++c) { + if (c == background_label_id_) { + // Ignore background class. 
+ continue; + } + const Dtype* cur_conf_data = conf_cpu_data + conf_idx + c * num_priors_; + const Dtype* cur_bbox_data = bbox_cpu_data + bbox_idx; + if (!share_location_) { + cur_bbox_data += c * num_priors_ * 4; + } + ApplyNMSFast(cur_bbox_data, cur_conf_data, num_priors_, + confidence_threshold_, nms_threshold_, eta_, top_k_, &(indices[c])); + num_det += indices[c].size(); + } + if (keep_top_k_ > -1 && num_det > keep_top_k_) { + vector > > score_index_pairs; + for (map >::iterator it = indices.begin(); + it != indices.end(); ++it) { + int label = it->first; + const vector& label_indices = it->second; + for (int j = 0; j < label_indices.size(); ++j) { + int idx = label_indices[j]; + float score = conf_cpu_data[conf_idx + label * num_priors_ + idx]; + score_index_pairs.push_back(std::make_pair( + score, std::make_pair(label, idx))); + } + } + // Keep top k results per image. + std::sort(score_index_pairs.begin(), score_index_pairs.end(), + SortScorePairDescend >); + score_index_pairs.resize(keep_top_k_); + // Store the new indices. + map > new_indices; + for (int j = 0; j < score_index_pairs.size(); ++j) { + int label = score_index_pairs[j].second.first; + int idx = score_index_pairs[j].second.second; + new_indices[label].push_back(idx); + } + all_indices.push_back(new_indices); + num_kept += keep_top_k_; + } else { + all_indices.push_back(indices); + num_kept += num_det; + } + } + + vector top_shape(2, 1); + top_shape.push_back(num_kept); + top_shape.push_back(7); + Dtype* top_data; + if (num_kept == 0) { + LOG(INFO) << "Couldn't find any detections"; + top_shape[2] = num; + top[0]->Reshape(top_shape); + top_data = top[0]->mutable_cpu_data(); + caffe_set(top[0]->count(), -1, top_data); + // Generate fake results per image. 
+ for (int i = 0; i < num; ++i) { + top_data[0] = i; + top_data += 7; + } + } else { + top[0]->Reshape(top_shape); + top_data = top[0]->mutable_cpu_data(); + } + + int count = 0; + boost::filesystem::path output_directory(output_directory_); + for (int i = 0; i < num; ++i) { + const int conf_idx = i * num_classes_ * num_priors_; + int bbox_idx; + if (share_location_) { + bbox_idx = i * num_priors_ * 4; + } else { + bbox_idx = conf_idx * 4; + } + for (map >::iterator it = all_indices[i].begin(); + it != all_indices[i].end(); ++it) { + int label = it->first; + vector& indices = it->second; + if (need_save_) { + CHECK(label_to_name_.find(label) != label_to_name_.end()) + << "Cannot find label: " << label << " in the label map."; + CHECK_LT(name_count_, names_.size()); + } + const Dtype* cur_conf_data = + conf_cpu_data + conf_idx + label * num_priors_; + const Dtype* cur_bbox_data = bbox_cpu_data + bbox_idx; + if (!share_location_) { + cur_bbox_data += label * num_priors_ * 4; + } + for (int j = 0; j < indices.size(); ++j) { + int idx = indices[j]; + top_data[count * 7] = i; + top_data[count * 7 + 1] = label; + top_data[count * 7 + 2] = cur_conf_data[idx]; + for (int k = 0; k < 4; ++k) { + top_data[count * 7 + 3 + k] = cur_bbox_data[idx * 4 + k]; + } + if (need_save_) { + // Generate output bbox. 
+ NormalizedBBox bbox; + bbox.set_xmin(top_data[count * 7 + 3]); + bbox.set_ymin(top_data[count * 7 + 4]); + bbox.set_xmax(top_data[count * 7 + 5]); + bbox.set_ymax(top_data[count * 7 + 6]); + NormalizedBBox out_bbox; + OutputBBox(bbox, sizes_[name_count_], has_resize_, resize_param_, + &out_bbox); + float score = top_data[count * 7 + 2]; + float xmin = out_bbox.xmin(); + float ymin = out_bbox.ymin(); + float xmax = out_bbox.xmax(); + float ymax = out_bbox.ymax(); + ptree pt_xmin, pt_ymin, pt_width, pt_height; + pt_xmin.put("", round(xmin * 100) / 100.); + pt_ymin.put("", round(ymin * 100) / 100.); + pt_width.put("", round((xmax - xmin) * 100) / 100.); + pt_height.put("", round((ymax - ymin) * 100) / 100.); + + ptree cur_bbox; + cur_bbox.push_back(std::make_pair("", pt_xmin)); + cur_bbox.push_back(std::make_pair("", pt_ymin)); + cur_bbox.push_back(std::make_pair("", pt_width)); + cur_bbox.push_back(std::make_pair("", pt_height)); + + ptree cur_det; + cur_det.put("image_id", names_[name_count_]); + if (output_format_ == "ILSVRC") { + cur_det.put("category_id", label); + } else { + cur_det.put("category_id", label_to_name_[label].c_str()); + } + cur_det.add_child("bbox", cur_bbox); + cur_det.put("score", score); + + detections_.push_back(std::make_pair("", cur_det)); + } + ++count; + } + } + if (need_save_) { + ++name_count_; + if (name_count_ % num_test_image_ == 0) { + if (output_format_ == "VOC") { + map outfiles; + for (int c = 0; c < num_classes_; ++c) { + if (c == background_label_id_) { + continue; + } + string label_name = label_to_name_[c]; + boost::filesystem::path file( + output_name_prefix_ + label_name + ".txt"); + boost::filesystem::path out_file = output_directory / file; + outfiles[label_name] = new std::ofstream(out_file.string().c_str(), + std::ofstream::out); + } + BOOST_FOREACH(ptree::value_type &det, detections_.get_child("")) { + ptree pt = det.second; + string label_name = pt.get("category_id"); + if (outfiles.find(label_name) == 
outfiles.end()) { + std::cout << "Cannot find " << label_name << std::endl; + continue; + } + string image_name = pt.get("image_id"); + float score = pt.get("score"); + vector bbox; + BOOST_FOREACH(ptree::value_type &elem, pt.get_child("bbox")) { + bbox.push_back(static_cast(elem.second.get_value())); + } + *(outfiles[label_name]) << image_name; + *(outfiles[label_name]) << " " << score; + *(outfiles[label_name]) << " " << bbox[0] << " " << bbox[1]; + *(outfiles[label_name]) << " " << bbox[0] + bbox[2]; + *(outfiles[label_name]) << " " << bbox[1] + bbox[3]; + *(outfiles[label_name]) << std::endl; + } + for (int c = 0; c < num_classes_; ++c) { + if (c == background_label_id_) { + continue; + } + string label_name = label_to_name_[c]; + outfiles[label_name]->flush(); + outfiles[label_name]->close(); + delete outfiles[label_name]; + } + } else if (output_format_ == "COCO") { + boost::filesystem::path output_directory(output_directory_); + boost::filesystem::path file(output_name_prefix_ + ".json"); + boost::filesystem::path out_file = output_directory / file; + std::ofstream outfile; + outfile.open(out_file.string().c_str(), std::ofstream::out); + + boost::regex exp("\"(null|true|false|-?[0-9]+(\\.[0-9]+)?)\""); + ptree output; + output.add_child("detections", detections_); + std::stringstream ss; + write_json(ss, output); + std::string rv = boost::regex_replace(ss.str(), exp, "$1"); + outfile << rv.substr(rv.find("["), rv.rfind("]") - rv.find("[")) + << std::endl << "]" << std::endl; + } else if (output_format_ == "ILSVRC") { + boost::filesystem::path output_directory(output_directory_); + boost::filesystem::path file(output_name_prefix_ + ".txt"); + boost::filesystem::path out_file = output_directory / file; + std::ofstream outfile; + outfile.open(out_file.string().c_str(), std::ofstream::out); + + BOOST_FOREACH(ptree::value_type &det, detections_.get_child("")) { + ptree pt = det.second; + int label = pt.get("category_id"); + string image_name = pt.get("image_id"); 
+ float score = pt.get("score"); + vector bbox; + BOOST_FOREACH(ptree::value_type &elem, pt.get_child("bbox")) { + bbox.push_back(static_cast(elem.second.get_value())); + } + outfile << image_name << " " << label << " " << score; + outfile << " " << bbox[0] << " " << bbox[1]; + outfile << " " << bbox[0] + bbox[2]; + outfile << " " << bbox[1] + bbox[3]; + outfile << std::endl; + } + } + name_count_ = 0; + detections_.clear(); + } + } + } + if (visualize_) { +#ifdef USE_OPENCV + vector cv_imgs; + this->data_transformer_->TransformInv(bottom[3], &cv_imgs); + vector colors = GetColors(label_to_display_name_.size()); + VisualizeBBox(cv_imgs, top[0], visualize_threshold_, colors, + label_to_display_name_, save_file_); +#endif // USE_OPENCV + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(DetectionOutputLayer); + +} // namespace caffe diff --git a/src/caffe/layers/dropout_layer.cpp b/src/caffe/layers/dropout_layer.cpp index 533ab26c04d..c23c583de06 100644 --- a/src/caffe/layers/dropout_layer.cpp +++ b/src/caffe/layers/dropout_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + // TODO (sergeyk): effect should not be dependent on phase. wasted memcpy. #include @@ -37,6 +74,9 @@ void DropoutLayer::Forward_cpu(const vector*>& bottom, if (this->phase_ == TRAIN) { // Create random numbers caffe_rng_bernoulli(count, 1. 
- threshold_, mask); +#ifdef _OPENMP + #pragma omp parallel for +#endif for (int i = 0; i < count; ++i) { top_data[i] = bottom_data[i] * mask[i] * scale_; } @@ -55,6 +95,9 @@ void DropoutLayer::Backward_cpu(const vector*>& top, if (this->phase_ == TRAIN) { const unsigned int* mask = rand_vec_.cpu_data(); const int count = bottom[0]->count(); +#ifdef _OPENMP + #pragma omp parallel for +#endif for (int i = 0; i < count; ++i) { bottom_diff[i] = top_diff[i] * mask[i] * scale_; } diff --git a/src/caffe/layers/dummy_data_layer.cpp b/src/caffe/layers/dummy_data_layer.cpp index e382bfea802..383174a1453 100644 --- a/src/caffe/layers/dummy_data_layer.cpp +++ b/src/caffe/layers/dummy_data_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/filler.hpp" diff --git a/src/caffe/layers/eltwise_layer.cpp b/src/caffe/layers/eltwise_layer.cpp index 21256166bfa..ff9039d09cd 100644 --- a/src/caffe/layers/eltwise_layer.cpp +++ b/src/caffe/layers/eltwise_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include @@ -155,6 +192,5 @@ STUB_GPU(EltwiseLayer); #endif INSTANTIATE_CLASS(EltwiseLayer); -REGISTER_LAYER_CLASS(Eltwise); } // namespace caffe diff --git a/src/caffe/layers/elu_layer.cpp b/src/caffe/layers/elu_layer.cpp index a0f87635a5a..613263e07d8 100644 --- a/src/caffe/layers/elu_layer.cpp +++ b/src/caffe/layers/elu_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include @@ -12,6 +49,9 @@ void ELULayer::Forward_cpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_cpu_data(); const int count = bottom[0]->count(); Dtype alpha = this->layer_param_.elu_param().alpha(); +#ifdef _OPENMP +#pragma omp parallel for +#endif for (int i = 0; i < count; ++i) { top_data[i] = std::max(bottom_data[i], Dtype(0)) + alpha * (exp(std::min(bottom_data[i], Dtype(0))) - Dtype(1)); @@ -29,6 +69,9 @@ void ELULayer::Backward_cpu(const vector*>& top, Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); const int count = bottom[0]->count(); Dtype alpha = this->layer_param_.elu_param().alpha(); +#ifdef _OPENMP +#pragma omp parallel for +#endif for (int i = 0; i < count; ++i) { bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0) + (alpha + top_data[i]) * (bottom_data[i] <= 0)); diff --git a/src/caffe/layers/embed_layer.cpp b/src/caffe/layers/embed_layer.cpp index 36b40d700fd..50c247757ce 100644 --- a/src/caffe/layers/embed_layer.cpp +++ b/src/caffe/layers/embed_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/filler.hpp" diff --git a/src/caffe/layers/euclidean_loss_layer.cpp b/src/caffe/layers/euclidean_loss_layer.cpp index 300d991e765..a26746b8b50 100644 --- a/src/caffe/layers/euclidean_loss_layer.cpp +++ b/src/caffe/layers/euclidean_loss_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/euclidean_loss_layer.hpp" diff --git a/src/caffe/layers/exp_layer.cpp b/src/caffe/layers/exp_layer.cpp index 0c1b463ae12..1a6e270c127 100644 --- a/src/caffe/layers/exp_layer.cpp +++ b/src/caffe/layers/exp_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include "caffe/layers/exp_layer.hpp" diff --git a/src/caffe/layers/filter_layer.cpp b/src/caffe/layers/filter_layer.cpp index e226c0b6c9b..46e8dba1d17 100644 --- a/src/caffe/layers/filter_layer.cpp +++ b/src/caffe/layers/filter_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/filter_layer.hpp" diff --git a/src/caffe/layers/flatten_layer.cpp b/src/caffe/layers/flatten_layer.cpp index d4ab3935760..99943911c69 100644 --- a/src/caffe/layers/flatten_layer.cpp +++ b/src/caffe/layers/flatten_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/flatten_layer.hpp" diff --git a/src/caffe/layers/hdf5_data_layer.cpp b/src/caffe/layers/hdf5_data_layer.cpp index 2f13dc641df..6124bb494d3 100644 --- a/src/caffe/layers/hdf5_data_layer.cpp +++ b/src/caffe/layers/hdf5_data_layer.cpp @@ -1,4 +1,41 @@ /* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +/* TODO: - load file in a separate thread ("prefetch") - can be smarter about the memcpy call instead of doing it row-by-row @@ -61,10 +98,10 @@ void HDF5DataLayer::LoadHDF5FileData(const char* filename) { // Shuffle if needed. 
if (this->layer_param_.hdf5_data_param().shuffle()) { std::random_shuffle(data_permutation_.begin(), data_permutation_.end()); - DLOG(INFO) << "Successully loaded " << hdf_blobs_[0]->shape(0) + DLOG(INFO) << "Successfully loaded " << hdf_blobs_[0]->shape(0) << " rows (shuffled)"; } else { - DLOG(INFO) << "Successully loaded " << hdf_blobs_[0]->shape(0) << " rows"; + DLOG(INFO) << "Successfully loaded " << hdf_blobs_[0]->shape(0) << " rows"; } } diff --git a/src/caffe/layers/hdf5_output_layer.cpp b/src/caffe/layers/hdf5_output_layer.cpp index f8f1edcd18e..173d687fc2e 100644 --- a/src/caffe/layers/hdf5_output_layer.cpp +++ b/src/caffe/layers/hdf5_output_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "hdf5.h" diff --git a/src/caffe/layers/hinge_loss_layer.cpp b/src/caffe/layers/hinge_loss_layer.cpp index 374aed3c98f..ec20c3a16e1 100644 --- a/src/caffe/layers/hinge_loss_layer.cpp +++ b/src/caffe/layers/hinge_loss_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/layers/im2col_layer.cpp b/src/caffe/layers/im2col_layer.cpp index 2fb9b3c1099..0cdb2385136 100644 --- a/src/caffe/layers/im2col_layer.cpp +++ b/src/caffe/layers/im2col_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/im2col_layer.hpp" diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp index 7ee7dc40714..420db6998dd 100644 --- a/src/caffe/layers/image_data_layer.cpp +++ b/src/caffe/layers/image_data_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifdef USE_OPENCV #include @@ -100,7 +137,6 @@ void ImageDataLayer::ShuffleImages() { shuffle(lines_.begin(), lines_.end(), prefetch_rng); } -// This function is called on prefetch thread template void ImageDataLayer::load_batch(Batch* batch) { CPUTimer batch_timer; @@ -134,20 +170,49 @@ void ImageDataLayer::load_batch(Batch* batch) { // datum scales const int lines_size = lines_.size(); - for (int item_id = 0; item_id < batch_size; ++item_id) { + +#ifdef _OPENMP + #pragma omp parallel if (batch_size > 1) + #pragma omp single nowait +#endif + for (int item_id = 0; item_id < batch_size; ++item_id) { // get a blob timer.Start(); CHECK_GT(lines_size, lines_id_); +#ifndef _OPENMP cv::Mat cv_img = ReadImageToCVMat(root_folder + lines_[lines_id_].first, new_height, new_width, is_color); CHECK(cv_img.data) << "Could not load " << lines_[lines_id_].first; read_time += timer.MicroSeconds(); timer.Start(); - // Apply transformations (mirror, crop...) to the image +// Apply transformations (mirror, crop...) 
to the image + int offset = batch->data_.offset(item_id); this->transformed_data_.set_cpu_data(prefetch_data + offset); this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); trans_time += timer.MicroSeconds(); +#else + read_time = 0; + trans_time = 0; + + int offset = batch->data_.offset(item_id); + std::string img_file_name = lines_[lines_id_].first; + PreclcRandomNumbers precalculated_rand_numbers; + this->data_transformer_->GenerateRandNumbers(precalculated_rand_numbers); + #pragma omp task firstprivate(offset, img_file_name, \ + precalculated_rand_numbers) + { + cv::Mat cv_img = ReadImageToCVMat(root_folder + img_file_name, + new_height, new_width, is_color); + CHECK(cv_img.data) << "Could not load " << img_file_name; + + Blob tmp_data; + tmp_data.Reshape(top_shape); + tmp_data.set_cpu_data(prefetch_data + offset); + this->data_transformer_->Transform(cv_img, &tmp_data, + precalculated_rand_numbers); + } +#endif prefetch_label[item_id] = lines_[lines_id_].second; // go to the next iter @@ -161,12 +226,16 @@ void ImageDataLayer::load_batch(Batch* batch) { } } } + batch_timer.Stop(); DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; } + + + INSTANTIATE_CLASS(ImageDataLayer); REGISTER_LAYER_CLASS(ImageData); diff --git a/src/caffe/layers/infogain_loss_layer.cpp b/src/caffe/layers/infogain_loss_layer.cpp index 624d3118124..9d2958227fe 100644 --- a/src/caffe/layers/infogain_loss_layer.cpp +++ b/src/caffe/layers/infogain_loss_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include #include diff --git a/src/caffe/layers/inner_product_layer.cpp b/src/caffe/layers/inner_product_layer.cpp index e65349f0055..ca9534a847e 100644 --- a/src/caffe/layers/inner_product_layer.cpp +++ b/src/caffe/layers/inner_product_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/filler.hpp" @@ -52,6 +89,7 @@ void InnerProductLayer::LayerSetUp(const vector*>& bottom, } } // parameter initialization this->param_propagate_down_.resize(this->blobs_.size(), true); + } template @@ -65,7 +103,9 @@ void InnerProductLayer::Reshape(const vector*>& bottom, << "Input size incompatible with inner product parameters."; // The first "axis" dimensions are independent inner products; the total // number of these is M_, the product over these dimensions. +// M_ = bottom[0]->count(0, axis); M_ = bottom[0]->count(0, axis); + // The top shape will be the bottom shape with the flattened axes dropped, // and replaced by a single axis with dimension num_output (N_). vector top_shape = bottom[0]->shape(); @@ -145,6 +185,7 @@ STUB_GPU(InnerProductLayer); #endif INSTANTIATE_CLASS(InnerProductLayer); -REGISTER_LAYER_CLASS(InnerProduct); + +// REGISTER_LAYER_CLASS(InnerProduct); } // namespace caffe diff --git a/src/caffe/layers/input_layer.cpp b/src/caffe/layers/input_layer.cpp index 667d8ad67ef..d61becf44ff 100644 --- a/src/caffe/layers/input_layer.cpp +++ b/src/caffe/layers/input_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. 
+ +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include "caffe/layers/input_layer.hpp" diff --git a/src/caffe/layers/log_layer.cpp b/src/caffe/layers/log_layer.cpp index c70a795cf53..06bfa80fe24 100644 --- a/src/caffe/layers/log_layer.cpp +++ b/src/caffe/layers/log_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/log_layer.hpp" diff --git a/src/caffe/layers/loss_layer.cpp b/src/caffe/layers/loss_layer.cpp index c0b7a862181..dfed5b7d097 100644 --- a/src/caffe/layers/loss_layer.cpp +++ b/src/caffe/layers/loss_layer.cpp @@ -1,3 +1,41 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include #include #include "caffe/layers/loss_layer.hpp" @@ -22,6 +60,37 @@ void LossLayer::Reshape( top[0]->Reshape(loss_shape); } +template +Dtype LossLayer::GetNormalizer( + const LossParameter_NormalizationMode normalization_mode, + const int outer_num, const int inner_num, const int valid_count) { + Dtype normalizer; + switch (normalization_mode) { + case LossParameter_NormalizationMode_FULL: + normalizer = Dtype(outer_num * inner_num); + break; + case LossParameter_NormalizationMode_VALID: + if (valid_count == -1) { + normalizer = Dtype(outer_num * inner_num); + } else { + normalizer = Dtype(valid_count); + } + break; + case LossParameter_NormalizationMode_BATCH_SIZE: + normalizer = Dtype(outer_num); + break; + case LossParameter_NormalizationMode_NONE: + normalizer = Dtype(1); + break; + default: + LOG(FATAL) << "Unknown normalization mode: " + << LossParameter_NormalizationMode_Name(normalization_mode); + } + // Some users will have no labels for some examples in order to 'turn off' a + // particular loss in a multi-task setup. The max prevents NaNs in that case. 
+ return std::max(Dtype(1.0), normalizer); +} + INSTANTIATE_CLASS(LossLayer); } // namespace caffe diff --git a/src/caffe/layers/lrn_layer.cpp b/src/caffe/layers/lrn_layer.cpp index 210525e20f3..75ba4c3476b 100644 --- a/src/caffe/layers/lrn_layer.cpp +++ b/src/caffe/layers/lrn_layer.cpp @@ -1,7 +1,47 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/lrn_layer.hpp" #include "caffe/util/math_functions.hpp" +#ifdef _OPENMP +#include +#endif namespace caffe { @@ -74,10 +114,22 @@ void LRNLayer::Reshape(const vector*>& bottom, channels_ = bottom[0]->channels(); height_ = bottom[0]->height(); width_ = bottom[0]->width(); + // ---- openmp ---- + num_of_threads_ = 1; +#ifdef _OPENMP + num_of_threads_ = omp_get_max_threads() < num_ ? omp_get_max_threads() : num_; + if (num_of_threads_ < 1) { + LOG(WARNING) << "LRN layer: omp_get_max_threads() =" << num_of_threads_; + num_of_threads_ = 1; + } +#endif switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: top[0]->Reshape(num_, channels_, height_, width_); scale_.Reshape(num_, channels_, height_, width_); + padded_ratio_.Reshape(num_of_threads_, channels_ + size_ - 1, + height_, width_); + accum_ratio_.Reshape(num_of_threads_, 1, height_, width_); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: split_layer_->Reshape(bottom, split_top_vec_); @@ -110,43 +162,26 @@ void LRNLayer::CrossChannelForward_cpu( const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); Dtype* scale_data = scale_.mutable_cpu_data(); - // start with the constant value - for (int i = 0; i < scale_.count(); ++i) { - scale_data[i] = k_; - } - Blob padded_square(1, channels_ + size_ - 1, height_, width_); - Dtype* padded_square_data = padded_square.mutable_cpu_data(); - 
caffe_set(padded_square.count(), Dtype(0), padded_square_data); Dtype alpha_over_size = alpha_ / size_; - // go through the images - for (int n = 0; n < num_; ++n) { - // compute the padded square - caffe_sqr(channels_ * height_ * width_, - bottom_data + bottom[0]->offset(n), - padded_square_data + padded_square.offset(0, pre_pad_)); - // Create the first channel scale - for (int c = 0; c < size_; ++c) { - caffe_axpy(height_ * width_, alpha_over_size, - padded_square_data + padded_square.offset(0, c), - scale_data + scale_.offset(n, 0)); - } - for (int c = 1; c < channels_; ++c) { - // copy previous scale - caffe_copy(height_ * width_, - scale_data + scale_.offset(n, c - 1), - scale_data + scale_.offset(n, c)); - // add head - caffe_axpy(height_ * width_, alpha_over_size, - padded_square_data + padded_square.offset(0, c + size_ - 1), - scale_data + scale_.offset(n, c)); - // subtract tail - caffe_axpy(height_ * width_, -alpha_over_size, - padded_square_data + padded_square.offset(0, c - 1), - scale_data + scale_.offset(n, c)); + + caffe_sqr(num_ * channels_ * height_ * width_, bottom_data, top_data); + caffe_set(num_ * channels_ * height_ * width_, Dtype(k_), scale_data); + +#ifdef _OPENMP +#pragma omp parallel for collapse(2) +#endif + for (int n = 0; n < num_; n++) { + for (int c = 0; c < channels_; c++) { + for (int i = c - pre_pad_; i <= c + pre_pad_; i++) { + if ((i >= 0) && (i < channels_)) { + caffe_axpy(height_ * width_, alpha_over_size, + top_data + scale_.offset(n, i), + scale_data + scale_.offset(n, c)); + } + } } } - // In the end, compute output caffe_powx(scale_.count(), scale_data, -beta_, top_data); caffe_mul(scale_.count(), top_data, bottom_data, top_data); } @@ -185,13 +220,11 @@ void LRNLayer::CrossChannelBackward_cpu( const Dtype* bottom_data = bottom[0]->cpu_data(); const Dtype* scale_data = scale_.cpu_data(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - Blob padded_ratio(1, channels_ + size_ - 1, height_, width_); - Blob 
accum_ratio(1, 1, height_, width_); - Dtype* padded_ratio_data = padded_ratio.mutable_cpu_data(); - Dtype* accum_ratio_data = accum_ratio.mutable_cpu_data(); + Dtype* padded_ratio_data = padded_ratio_.mutable_cpu_data(); + Dtype* accum_ratio_data = accum_ratio_.mutable_cpu_data(); // We hack a little bit by using the diff() to store an additional result - Dtype* accum_ratio_times_bottom = accum_ratio.mutable_cpu_diff(); - caffe_set(padded_ratio.count(), Dtype(0), padded_ratio_data); + Dtype* accum_ratio_times_bottom = accum_ratio_.mutable_cpu_diff(); + caffe_set(padded_ratio_.count(), Dtype(0), padded_ratio_data); Dtype cache_ratio_value = 2. * alpha_ * beta_ / size_; caffe_powx(scale_.count(), scale_data, -beta_, bottom_diff); @@ -199,36 +232,54 @@ void LRNLayer::CrossChannelBackward_cpu( // go through individual data int inverse_pre_pad = size_ - (size_ + 1) / 2; - for (int n = 0; n < num_; ++n) { - int block_offset = scale_.offset(n); - // first, compute diff_i * y_i / s_i - caffe_mul(channels_ * height_ * width_, - top_diff + block_offset, top_data + block_offset, - padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad)); - caffe_div(channels_ * height_ * width_, - padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad), - scale_data + block_offset, - padded_ratio_data + padded_ratio.offset(0, inverse_pre_pad)); - // Now, compute the accumulated ratios and the bottom diff - caffe_set(accum_ratio.count(), Dtype(0), accum_ratio_data); - for (int c = 0; c < size_ - 1; ++c) { - caffe_axpy(height_ * width_, 1., - padded_ratio_data + padded_ratio.offset(0, c), accum_ratio_data); - } - for (int c = 0; c < channels_; ++c) { - caffe_axpy(height_ * width_, 1., - padded_ratio_data + padded_ratio.offset(0, c + size_ - 1), - accum_ratio_data); - // compute bottom diff - caffe_mul(height_ * width_, - bottom_data + top[0]->offset(n, c), - accum_ratio_data, accum_ratio_times_bottom); - caffe_axpy(height_ * width_, -cache_ratio_value, - accum_ratio_times_bottom, 
bottom_diff + top[0]->offset(n, c)); - caffe_axpy(height_ * width_, -1., - padded_ratio_data + padded_ratio.offset(0, c), accum_ratio_data); +#ifdef _OPENMP + #pragma omp parallel for num_threads(this->num_of_threads_) +#endif + for (int n = 0; n < num_; ++n) { + int tid = 0; +#ifdef _OPENMP + tid = omp_get_thread_num(); +#endif + int block_offset = scale_.offset(n); + // first, compute diff_i * y_i / s_i + caffe_mul(channels_ * height_ * width_, + top_diff + block_offset, + top_data + block_offset, + padded_ratio_data + padded_ratio_.offset(tid, inverse_pre_pad)); + caffe_div(channels_ * height_ * width_, + padded_ratio_data + padded_ratio_.offset(tid, inverse_pre_pad), + scale_data + block_offset, + padded_ratio_data + padded_ratio_.offset(tid, inverse_pre_pad)); + // Now, compute the accumulated ratios and the bottom diff + caffe_set(height_*width_, + Dtype(0), + accum_ratio_data + accum_ratio_.offset(tid, 0)); + for (int c = 0; c < size_ - 1; ++c) { + caffe_add(height_ * width_, + accum_ratio_data + accum_ratio_.offset(tid, 0), + padded_ratio_data + padded_ratio_.offset(tid, c), + accum_ratio_data + accum_ratio_.offset(tid, 0)); + } + for (int c = 0; c < channels_; ++c) { + caffe_add(height_ * width_, + accum_ratio_data + accum_ratio_.offset(tid, 0), + padded_ratio_data + padded_ratio_.offset(tid, c + size_ - 1), + accum_ratio_data + accum_ratio_.offset(tid, 0)); + // compute bottom diff + caffe_mul(height_ * width_, + bottom_data + top[0]->offset(n, c), + accum_ratio_data + accum_ratio_.offset(tid, 0), + accum_ratio_times_bottom + + accum_ratio_.offset(tid, 0)); + caffe_axpy(height_ * width_, -cache_ratio_value, + accum_ratio_times_bottom + accum_ratio_.offset(tid, 0), + bottom_diff + top[0]->offset(n, c)); + caffe_sub(height_ * width_, + accum_ratio_data + accum_ratio_.offset(tid, 0), + padded_ratio_data + padded_ratio_.offset(tid, c), + accum_ratio_data + accum_ratio_.offset(tid, 0)); + } } - } } template diff --git a/src/caffe/layers/lstm_layer.cpp 
b/src/caffe/layers/lstm_layer.cpp index da48dba4c05..38f80ec12f5 100644 --- a/src/caffe/layers/lstm_layer.cpp +++ b/src/caffe/layers/lstm_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/layers/lstm_unit_layer.cpp b/src/caffe/layers/lstm_unit_layer.cpp index 277c031ad15..73d5290fada 100644 --- a/src/caffe/layers/lstm_unit_layer.cpp +++ b/src/caffe/layers/lstm_unit_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include #include diff --git a/src/caffe/layers/memory_data_layer.cpp b/src/caffe/layers/memory_data_layer.cpp index 975f4841723..85c619c3e58 100644 --- a/src/caffe/layers/memory_data_layer.cpp +++ b/src/caffe/layers/memory_data_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifdef USE_OPENCV #include #endif // USE_OPENCV diff --git a/src/caffe/layers/mkl_batch_norm_layer.cpp b/src/caffe/layers/mkl_batch_norm_layer.cpp new file mode 100755 index 00000000000..6dce5024323 --- /dev/null +++ b/src/caffe/layers/mkl_batch_norm_layer.cpp @@ -0,0 +1,498 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#if defined(MKL2017_SUPPORTED) +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/mkl_layers.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/performance.hpp" + +namespace caffe { + +template +MKLBatchNormLayer::~MKLBatchNormLayer() { + dnnDelete(batchNormFwd); + dnnDelete(batchNormFwdInference); + dnnDelete(batchNormBwd); + dnnLayoutDelete(layout_usr_); + dnnReleaseBuffer(mean_buffer_); + dnnReleaseBuffer(variance_buffer_); + dnnReleaseBuffer(scaleShift_buffer_); + dnnReleaseBuffer(diffScaleShift_buffer_); +} + +template +void MKLBatchNormLayer::Init(const vector*>& bottom, + const vector*>& top) { + moving_average_fraction_ = + this->layer_param_.batch_norm_param().moving_average_fraction(); + eps_ = this->layer_param_.batch_norm_param().eps(); + use_weight_bias_ = this->layer_param_.batch_norm_param().use_weight_bias(); + bias_term_ = this->layer_param_.batch_norm_param().bias_term(); + + use_global_stats_ = this->phase_ == TEST; + if (this->layer_param_.batch_norm_param().has_use_global_stats()) + use_global_stats_ = this->layer_param_.batch_norm_param().use_global_stats(); + + CHECK(use_weight_bias_) << "BatchNorm without scaling have not supported yet"; + + size_t dim = 4, sizes[4], strides[4]; + + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + num_ = bottom[0]->num(); + + sizes[0] = width_; + sizes[1] = height_; + sizes[2] = channels_; + sizes[3] = num_; + + strides[0] = 1; + strides[1] = sizes[0]; + strides[2] = sizes[0]*sizes[1]; + strides[3] = sizes[0]*sizes[1]*sizes[2]; + + // Names are for debugging only + fwd_bottom_data->name = "fwd_bottom_data @ " + this->layer_param_.name(); + fwd_top_data->name = "fwd_top_data @ " + this->layer_param_.name(); + bwd_bottom_diff->name = "bwd_bottom_diff @ " + this->layer_param_.name(); + bwd_top_diff->name = "bwd_top_diff @ " + this->layer_param_.name(); + + // TODO: Make a cleanup routine to 
avoid + // copy of following code in the Destructor + + dnnError_t e; + dnnLayoutDelete(layout_usr_); + e = dnnLayoutCreate(&layout_usr_, dim, sizes, strides); + CHECK_EQ(e, E_SUCCESS); + + fwd_bottom_data->create_user_layout(dim, sizes, strides, false); + fwd_top_data ->create_user_layout(dim, sizes, strides, false); + bwd_bottom_diff->create_user_layout(dim, sizes, strides, false); + bwd_top_diff ->create_user_layout(dim, sizes, strides, false); + + dnnReleaseBuffer(mean_buffer_); + dnnReleaseBuffer(variance_buffer_); + dnnReleaseBuffer(scaleShift_buffer_); + dnnReleaseBuffer(diffScaleShift_buffer_); + + // "Lazy" allocation because here we don't know + // what layout is used by neighbours. + + // Primitives will be allocated during the first fwd pass + dnnDelete(batchNormFwd); + dnnDelete(batchNormFwdInference); + dnnDelete(batchNormBwd); + + this->blobs_.resize(3); + + if (use_weight_bias_) { + if ( bias_term_ ) { + this->blobs_.resize(5); + } else { + this->blobs_.resize(4); + } + // Initialize scale and shift + vector scaleshift_shape(1); + scaleshift_shape[0] = channels_; + + this->blobs_[3].reset(new Blob(scaleshift_shape)); + FillerParameter filler_param( + this->layer_param_.batch_norm_param().filler()); + if (!this->layer_param_.batch_norm_param().has_filler()) { + filler_param.set_type("constant"); + filler_param.set_value(1); + } + shared_ptr > filler(GetFiller(filler_param)); + filler->Fill(this->blobs_[3].get()); + + if ( bias_term_ ) { + this->blobs_[4].reset(new Blob(scaleshift_shape)); + FillerParameter bias_filler_param( + this->layer_param_.batch_norm_param().bias_filler()); + if (!this->layer_param_.batch_norm_param().has_bias_filler()) { + bias_filler_param.set_type("constant"); + bias_filler_param.set_value(0); + } + shared_ptr > bias_filler( + GetFiller(bias_filler_param)); + bias_filler->Fill(this->blobs_[4].get()); + } + } + + vector sz; + sz.push_back(channels_); + this->blobs_[0].reset(new Blob(sz)); + this->blobs_[1].reset(new 
Blob(sz)); + sz[0]=1; + this->blobs_[2].reset(new Blob(sz)); + for (int i = 0; i < 3; ++i) { + caffe_set(this->blobs_[i]->count(), Dtype(0), + this->blobs_[i]->mutable_cpu_data()); + } + + // Mask statistics from optimization by setting local learning rates + // for mean, variance, and the bias correction to zero. + for (int i = 0; i < 3; ++i) { + if (this->layer_param_.param_size() == i) { + ParamSpec* fixed_param_spec = this->layer_param_.add_param(); + fixed_param_spec->set_lr_mult(0.f); + } else { + CHECK_EQ(this->layer_param_.param(i).lr_mult(), 0.f) + << "Cannot configure batch normalization statistics as layer " + << "parameters."; + } + } +} + +template +void MKLBatchNormLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + Init(bottom, top); +} + +template +void MKLBatchNormLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + bool re_init = true; + if (channels_ == bottom[0]->channels() && + height_ == bottom[0]->height() && + width_ == bottom[0]->width()) { + re_init = false; + } + + if (bottom[0] == top[0]) { // in-place computation + temp_.ReshapeLike(*bottom[0]); + } else { + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + num_ = bottom[0]->num(); + top[0]->Reshape(num_, channels_, height_, width_); + } + + if (re_init == true) { + Init(bottom, top); + } else if (num_ != bottom[0]->num()) { //recreate layout only when batch size changes + size_t dim = 4, sizes[4], strides[4]; + sizes[0] = width_; + sizes[1] = height_; + sizes[2] = channels_; + sizes[3] = num_; + + strides[0] = 1; + strides[1] = sizes[0]; + strides[2] = sizes[0]*sizes[1]; + strides[3] = sizes[0]*sizes[1]*sizes[2]; + + dnnError_t e; + dnnLayoutDelete(layout_usr_); + e = dnnLayoutCreate(&layout_usr_, dim, sizes, strides); + CHECK_EQ(e, E_SUCCESS); + fwd_bottom_data->create_user_layout(dim, sizes, strides, false); + fwd_top_data ->create_user_layout(dim, sizes, strides, false); + 
bwd_bottom_diff->create_user_layout(dim, sizes, strides, false); + bwd_top_diff ->create_user_layout(dim, sizes, strides, false); + } +} + +template +void MKLBatchNormLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + void* bottom_data = + reinterpret_cast(const_cast(bottom[0]->prv_data())); + int is_first_pass = 0; + unsigned int amount_to_copy =0; + + if (NULL != bottom_data) { + amount_to_copy = bottom[0]->prv_data_count(); + // Is it the first pass? Create a primitive. + if (batchNormFwd == NULL) { + is_first_pass = 1; + + CHECK((bottom[0]->get_prv_data_descriptor())->get_descr_type() == + PrvMemDescr::PRV_DESCR_MKL2017); + shared_ptr > mem_descr + = boost::static_pointer_cast >( + bottom[0]->get_prv_data_descriptor()); + CHECK(mem_descr != NULL); + + DLOG(INFO) << "Using layout of " << mem_descr->name + << " as input layout for " << this->layer_param_.name(); + + fwd_bottom_data = mem_descr; + + dnnError_t e; + e = dnnBatchNormalizationCreateForward( + &batchNormFwd, NULL, mem_descr->layout_int, eps_, dnnUseScaleShift); + CHECK_EQ(e, E_SUCCESS); + + e = dnnBatchNormalizationCreateForward( + &batchNormFwdInference, NULL, mem_descr->layout_int, eps_, + dnnUseScaleShift | dnnUseInputMeanVariance); + CHECK_EQ(e, E_SUCCESS); + + fwd_top_data ->create_internal_layout(batchNormFwd, dnnResourceDst); + bwd_top_diff ->create_internal_layout(batchNormFwd, dnnResourceDst); + bwd_bottom_diff->create_internal_layout(batchNormFwd, dnnResourceSrc); + + if (!use_global_stats_) { + e = dnnBatchNormalizationCreateBackward( + &batchNormBwd, NULL, mem_descr->layout_int, eps_, dnnUseScaleShift); + CHECK_EQ(e, E_SUCCESS); + } else { + e = dnnBatchNormalizationCreateBackward( + &batchNormBwd, NULL, mem_descr->layout_int, eps_, dnnUseScaleShift | dnnUseInputMeanVariance); + CHECK_EQ(e, E_SUCCESS); + } + } + } else { + DLOG(INFO) << "Using cpu_data in MKLBatchNormLayer."; + if (batchNormFwd == NULL) { + // First pass + is_first_pass = 1; + + dnnError_t e; + e = 
dnnBatchNormalizationCreateForward( + &batchNormFwd, NULL, layout_usr_, eps_, dnnUseScaleShift); + CHECK_EQ(e, E_SUCCESS); + e = dnnBatchNormalizationCreateForward( + &batchNormFwdInference, NULL, layout_usr_, eps_, + dnnUseScaleShift | dnnUseInputMeanVariance); + CHECK_EQ(e, E_SUCCESS); + + if (!use_global_stats_) { + e = dnnBatchNormalizationCreateBackward( + &batchNormBwd, NULL, layout_usr_, eps_, dnnUseScaleShift); + CHECK_EQ(e, E_SUCCESS); + } else { + e = dnnBatchNormalizationCreateBackward( + &batchNormBwd, NULL, layout_usr_, eps_, dnnUseScaleShift | dnnUseInputMeanVariance); + CHECK_EQ(e, E_SUCCESS); + } + } + bottom_data = + reinterpret_cast(const_cast(bottom[0]->cpu_data())); + amount_to_copy = bottom[0]->count(); + } + if (is_first_pass == 1) { + dnnError_t e; + dnnLayout_t mean_buffer_l = NULL; + e = dnnLayoutCreateFromPrimitive( + &mean_buffer_l, batchNormFwd, dnnResourceMean); + CHECK_EQ(e, E_SUCCESS); + e = dnnAllocateBuffer( + reinterpret_cast(&mean_buffer_), mean_buffer_l); + CHECK_EQ(e, E_SUCCESS); + dnnLayoutDelete(mean_buffer_l); + + dnnLayout_t variance_buffer_l = NULL; + e = dnnLayoutCreateFromPrimitive( + &variance_buffer_l, batchNormFwd, dnnResourceVariance); + CHECK_EQ(e, E_SUCCESS); + e = dnnAllocateBuffer( + reinterpret_cast(&variance_buffer_), variance_buffer_l); + CHECK_EQ(e, E_SUCCESS); + dnnLayoutDelete(variance_buffer_l); + + dnnLayout_t diffScaleShift_buffer_l = NULL; + e = dnnLayoutCreateFromPrimitive( + &diffScaleShift_buffer_l, batchNormBwd, dnnResourceDiffScaleShift); + CHECK_EQ(e, E_SUCCESS); + e = dnnAllocateBuffer( + reinterpret_cast(&diffScaleShift_buffer_), diffScaleShift_buffer_l); + CHECK_EQ(e, E_SUCCESS); + dnnLayoutDelete(diffScaleShift_buffer_l); + + dnnLayout_t scaleShift_buffer_l = NULL; + e = dnnLayoutCreateFromPrimitive( + &scaleShift_buffer_l, batchNormFwd, dnnResourceScaleShift); + CHECK_EQ(e, E_SUCCESS); + e = dnnAllocateBuffer( + reinterpret_cast(&scaleShift_buffer_), scaleShift_buffer_l); + CHECK_EQ(e, 
E_SUCCESS); + dnnLayoutDelete(scaleShift_buffer_l); + if (!use_weight_bias_) { + for (int i = 0; i < channels_; i++) { + scaleShift_buffer_[i] = 1.0; + scaleShift_buffer_[channels_ + i] = 0; + } + } + } + + if (use_weight_bias_) { + // Fill ScaleShift buffer + for (int i = 0; i < channels_; i++) { + scaleShift_buffer_[i] = this->blobs_[3]->cpu_data()[i]; + scaleShift_buffer_[channels_ + i] = 0; + if (bias_term_) { + scaleShift_buffer_[channels_ + i] = this->blobs_[4]->cpu_data()[i]; + } + } + } + + if (bottom[0] == top[0] && this->phase_ == TRAIN) { + // In-place computation; need to store bottom data before overwriting it. + // Note that this is only necessary for Backward; we skip this if not + // doing Backward + // TODO: make a caffe_coppy working on blobs + caffe_copy(amount_to_copy, static_cast(bottom_data), + temp_.mutable_cpu_data()); + } + + if (use_global_stats_) { + // use the stored mean/variance estimates. + const Dtype scale_factor = this->blobs_[2]->cpu_data()[0] == 0 ? + 0 : 1 / this->blobs_[2]->cpu_data()[0]; + caffe_cpu_scale(this->blobs_[0]->count(), scale_factor, + this->blobs_[0]->cpu_data(), mean_buffer_); + caffe_cpu_scale(this->blobs_[1]->count(), scale_factor, + this->blobs_[1]->cpu_data(), variance_buffer_); + } + + dnnError_t e; + void* BatchNorm_res[dnnResourceNumber]; + BatchNorm_res[dnnResourceMean] = mean_buffer_; + BatchNorm_res[dnnResourceVariance] = variance_buffer_; + BatchNorm_res[dnnResourceSrc] = bottom_data; + BatchNorm_res[dnnResourceScaleShift] = scaleShift_buffer_; + if (fwd_top_data->conversion_needed()) { + top[0]->set_prv_data_descriptor(fwd_top_data); + BatchNorm_res[dnnResourceDst] = + reinterpret_cast(top[0]->mutable_prv_data()); + } else { + BatchNorm_res[dnnResourceDst] = + reinterpret_cast(top[0]->mutable_cpu_data()); + DLOG(INFO) << "Using cpu_data for top in DnnBatchNorm."; + } + + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKL_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + e = 
dnnExecute(use_global_stats_? batchNormFwdInference : batchNormFwd, + BatchNorm_res); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); + CHECK_EQ(e, E_SUCCESS); + + if (!use_global_stats_) { + // compute and save moving average + this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_; + this->blobs_[2]->mutable_cpu_data()[0] += 1; + caffe_cpu_axpby(this->blobs_[0]->count(), Dtype(1), mean_buffer_, + moving_average_fraction_, this->blobs_[0]->mutable_cpu_data()); + int m = bottom[0]->count()/channels_; + Dtype bias_correction_factor = m > 1 ? Dtype(m)/(m-1) : 1; + caffe_cpu_axpby(this->blobs_[1]->count(), bias_correction_factor, + variance_buffer_, moving_average_fraction_, + this->blobs_[1]->mutable_cpu_data()); + } +} + +template +void MKLBatchNormLayer::Backward_cpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { + void *bottom_data = NULL; + if (bottom[0] == top[0]) { + bottom_data = reinterpret_cast( + const_cast(temp_.cpu_data())); + } else { + bottom_data = + reinterpret_cast( + const_cast(bottom[0]->prv_data())); + if (NULL == bottom_data) + bottom_data = + reinterpret_cast( + const_cast(bottom[0]->cpu_data())); + } + + dnnError_t e; + void* BatchNorm_res[dnnResourceNumber]; + BatchNorm_res[dnnResourceMean] = mean_buffer_; + BatchNorm_res[dnnResourceVariance] = variance_buffer_; + BatchNorm_res[dnnResourceSrc] = bottom_data; + BatchNorm_res[dnnResourceScaleShift] = scaleShift_buffer_; + BatchNorm_res[dnnResourceDiffScaleShift] = diffScaleShift_buffer_; + + BatchNorm_res[dnnResourceDiffDst] = bwd_top_diff->get_converted_prv(top[0], + true); + if (bwd_bottom_diff->conversion_needed()) { + bottom[0]->set_prv_diff_descriptor(bwd_bottom_diff); + BatchNorm_res[dnnResourceDiffSrc] = bottom[0]->mutable_prv_diff(); + } else { + BatchNorm_res[dnnResourceDiffSrc] = bottom[0]->mutable_cpu_diff(); + } + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_, PERFORMANCE_MKL_NAME("BW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + e = 
dnnExecute(batchNormBwd, BatchNorm_res); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_); + CHECK_EQ(e, E_SUCCESS); + + if (use_weight_bias_) { + caffe_cpu_axpby(this->blobs_[3]->count(), (Dtype)1., + diffScaleShift_buffer_, (Dtype)1., this->blobs_[3]->mutable_cpu_diff()); + if (bias_term_) + caffe_cpu_axpby(this->blobs_[4]->count(), (Dtype)1., + diffScaleShift_buffer_ + channels_, + (Dtype)1., this->blobs_[4]->mutable_cpu_diff()); + else + caffe_set(this->blobs_[4]->count(), + static_cast(0), this->blobs_[4]->mutable_cpu_diff()); + } +} + + +#ifdef CPU_ONLY +STUB_GPU(MKLBatchNormLayer); +#else +template +void MKLBatchNormLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) {NOT_IMPLEMENTED;} +template +void MKLBatchNormLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) + {NOT_IMPLEMENTED;} +#endif + +INSTANTIATE_CLASS(MKLBatchNormLayer); +// REGISTER_LAYER_CLASS(MKLBatchNorm); +} // namespace caffe +#endif // #if defined(MKL2017_SUPPORTED) diff --git a/src/caffe/layers/mkl_concat_layer.cpp b/src/caffe/layers/mkl_concat_layer.cpp new file mode 100644 index 00000000000..7471da02b3a --- /dev/null +++ b/src/caffe/layers/mkl_concat_layer.cpp @@ -0,0 +1,264 @@ + /* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#if defined(MKL2017_SUPPORTED) +#include + +#include "caffe/layer.hpp" +#include "caffe/layers/mkl_layers.hpp" +#include "caffe/util/performance.hpp" + +namespace caffe { + +template MKLConcatLayer::~MKLConcatLayer() { + dnnDelete(concatFwd_); + dnnDelete(concatBwd_); + delete[] split_channels_; +} + +template +void MKLConcatLayer::Init(const vector*>& bottom, + const vector*>& top) { + size_t dim_src = bottom[0]->shape().size(); + size_t dim_dst = dim_src; + + num_concats_ = bottom.size(); + channels_ = 0; + + for (size_t i = 1; i < num_concats_; ++i) { + CHECK_EQ(bottom[0]->num(), bottom[i]->num()); + CHECK_EQ(bottom[0]->height(), bottom[i]->height()); + CHECK_EQ(bottom[0]->width(), bottom[i]->width()); + } + + + delete[] split_channels_; + split_channels_ = new size_t[num_concats_]; + for (size_t i = 0; i < num_concats_; ++i) { + CHECK_EQ(dim_src, bottom[i]->shape().size()); + + fwd_bottom_data_.push_back(shared_ptr >(new MKLData)); + bwd_bottom_diff_.push_back(shared_ptr >(new MKLDiff)); + fwd_bottom_data_[i]->name = "fwd_bottom_data_[i]"; + bwd_bottom_diff_[i]->name = "bwd_bottom_data[i]"; + + // TODO: should be a helper function + size_t sizes_src[dim_src], strides_src[dim_src]; + for (size_t d = 0; d < dim_src; ++d) { + sizes_src[d] = bottom[i]->shape()[dim_src - d - 1]; + strides_src[d] = (d == 0) ? 1 : strides_src[d - 1] * sizes_src[d - 1]; + } + + split_channels_[i] = bottom[i]->channels(); + channels_ += split_channels_[i]; + fwd_bottom_data_[i]->create_user_layout(dim_src, + sizes_src, + strides_src, + false); + bwd_bottom_diff_[i]->create_user_layout(dim_src, + sizes_src, + strides_src, + false); + } + + // XXX: almost the same computations as above for src + size_t sizes_dst[dim_dst], strides_dst[dim_dst]; + for (size_t d = 0; d < dim_dst; ++d) { + if (d == 2) + sizes_dst[d] = channels_; + else + sizes_dst[d] = bottom[0]->shape()[dim_dst - 1 - d]; + strides_dst[d] = (d == 0) ? 
1 : strides_dst[d - 1] * sizes_dst[d - 1]; + } + bwd_top_diff_->create_user_layout(dim_dst, sizes_dst, strides_dst, false); + fwd_top_data_->create_user_layout(dim_dst, sizes_dst, strides_dst, false); + + dnnDelete(concatFwd_); + dnnDelete(concatBwd_); +} + +template +void MKLConcatLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + num_ = 0; + height_ = 0; + width_ = 0; + Init(bottom, top); +} + +template +void MKLConcatLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + if ((num_ == bottom[0]->num()) && + height_ == bottom[0]->height() && + width_ == bottom[0]->width()) { + top[0]->Reshape(num_, channels_, height_, width_); + return; + } + + num_ = bottom[0]->num(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + top[0]->Reshape(num_, channels_, height_, width_); + Init(bottom, top); +} + +template +void MKLConcatLayer::Forward_cpu(const vector *>& bottom, + const vector *>& top) { + dnnError_t e; + vector bottom_data; + bool isFirstPass = (concatFwd_ == NULL); + dnnLayout_t *layouts = NULL; + if (isFirstPass) { + layouts = new dnnLayout_t[num_concats_]; + } + + for (size_t n = 0; n < num_concats_; n++) { + bottom_data.push_back(reinterpret_cast( + const_cast(bottom[n]->prv_data()))); + + if (bottom_data[n] == NULL) { + bottom_data[n] = + reinterpret_cast(const_cast(bottom[n]->cpu_data())); + if (isFirstPass) { + layouts[n] = fwd_bottom_data_[n]->layout_usr; + } + } else if (isFirstPass) { + CHECK((bottom[n]->get_prv_data_descriptor())->get_descr_type() == + PrvMemDescr::PRV_DESCR_MKL2017); + shared_ptr > mem_descr = + boost::static_pointer_cast >( + bottom[n]->get_prv_data_descriptor()); + CHECK(mem_descr != NULL); + + fwd_bottom_data_[n] = mem_descr; + layouts[n] = mem_descr->layout_int; + } + } + + if (isFirstPass) { + e = dnnConcatCreate(&concatFwd_, NULL, num_concats_, layouts); + CHECK_EQ(e, E_SUCCESS); + + fwd_top_data_->create_internal_layout(concatFwd_, dnnResourceDst); + 
bwd_top_diff_->create_internal_layout(concatFwd_, dnnResourceDst); + + e = dnnSplitCreate(&concatBwd_, NULL, num_concats_, + bwd_top_diff_->layout_int, split_channels_); + CHECK_EQ(e, E_SUCCESS); + + for (size_t n = 0; n < num_concats_; ++n) { + bwd_bottom_diff_[n]->create_internal_layout(concatBwd_, + (dnnResourceType_t)(dnnResourceMultipleDst + n)); + } + } + + delete[] layouts; + + void *concat_res[dnnResourceNumber]; + for (int n = 0; n < num_concats_; ++n) { + concat_res[dnnResourceMultipleSrc + n] + = reinterpret_cast(bottom_data[n]); + } + + if (fwd_top_data_->conversion_needed()) { + top[0]->set_prv_data_descriptor(fwd_top_data_); + concat_res[dnnResourceDst] = + reinterpret_cast(top[0]->mutable_prv_data()); + } else { + concat_res[dnnResourceDst] = + reinterpret_cast(top[0]->mutable_cpu_data()); + } + + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKL_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + e = dnnExecute(concatFwd_, concat_res); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); + + CHECK_EQ(e, E_SUCCESS); +} + +template +void MKLConcatLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector *>& bottom) { + int need_bwd = 0; + for (size_t n = 0; n < num_concats_; n++) { + need_bwd += propagate_down[n]; + } + if (!need_bwd) { + return; + } + + dnnError_t e; + void *concat_res[dnnResourceNumber]; + + concat_res[dnnResourceSrc] = bwd_top_diff_->get_converted_prv(top[0], true); + + for (size_t i = 0; i < num_concats_; ++i) { + if (bwd_bottom_diff_[i]->conversion_needed()) { + bottom[i]->set_prv_diff_descriptor(bwd_bottom_diff_[i]); + concat_res[dnnResourceMultipleDst + i] = bottom[i]->mutable_prv_diff(); + } else { + concat_res[dnnResourceMultipleDst + i] = bottom[i]->mutable_cpu_diff(); + } + } + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_, PERFORMANCE_MKL_NAME("BW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + e = dnnExecute(concatBwd_, concat_res); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_); + + CHECK_EQ(e, E_SUCCESS); 
+} + +#ifdef CPU_ONLY +STUB_GPU(MKLConcatLayer); +#else +template +void MKLConcatLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) {NOT_IMPLEMENTED;} +template +void MKLConcatLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) + {NOT_IMPLEMENTED;} +#endif + +INSTANTIATE_CLASS(MKLConcatLayer); +} // namespace caffe +#endif // #if defined(MKL2017_SUPPORTED) diff --git a/src/caffe/layers/mkl_convolution_layer.cpp b/src/caffe/layers/mkl_convolution_layer.cpp new file mode 100644 index 00000000000..59c74102cd5 --- /dev/null +++ b/src/caffe/layers/mkl_convolution_layer.cpp @@ -0,0 +1,616 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef MKL2017_SUPPORTED +#include +#include +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/mkl_layers.hpp" +#include "caffe/util/performance.hpp" +#include "mkl_service.h" + +static int getMKLBuildDate() { + static int build = 0; + if (build == 0) { + MKLVersion v; + mkl_get_version(&v); + build = atoi(v.Build); + } + return build; +} + +namespace caffe { +template +MKLConvolutionLayer::MKLConvolutionLayer( + const LayerParameter& param) + : ConvolutionLayer(param), + fwd_bottom_data(new MKLData()), + fwd_top_data(new MKLData()), + fwd_filter_data(new MKLData()), + fwd_bias_data(new MKLData()), + convolutionFwd(NULL), + bwdd_top_diff(new MKLDiff()), + bwdd_bottom_diff(new MKLDiff()), + bwdd_filter_data(new MKLData()), + convolutionBwdData(static_cast(NULL)), + bwdf_top_diff(new MKLDiff()), + bwdf_filter_diff(new MKLDiff()), + bwdf2fwd_filter_diff(new MKLDiff()), + bwdf_bottom_data(new MKLData()), + convolutionBwdFilter(static_cast(NULL)), + bwdb_top_diff(new MKLDiff()), + bwdb_bias_diff(new MKLDiff()), + convolutionBwdBias(static_cast(NULL)), + bwdf_filter_diff_iter(new MKLDiff()), + bwdb_bias_diff_iter(new MKLDiff()) { + 
PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_prop_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_diff_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_bias_); + } + +template +void MKLConvolutionLayer::compute_output_shape() { + ConvolutionLayer::compute_output_shape(); + this->height_out_ = (this->height_ + 2 * this->pad_h_ - this->kernel_h_) + / this->stride_h_ + 1; + this->width_out_ = (this->width_ + 2 * this->pad_w_ - this->kernel_w_) + / this->stride_w_ + 1; +} + +template +MKLConvolutionLayer::~MKLConvolutionLayer() { + dnnDelete(convolutionFwd); + dnnDelete(convolutionBwdData); + dnnDelete(convolutionBwdFilter); + if (this->bias_term_) + dnnDelete(convolutionBwdBias); +} + +template +void MKLConvolutionLayer::Init( + const vector*>& bottom, + const vector*>& top) { + this->width_ = bottom[0]->width(); + this->height_ = bottom[0]->height(); + this->num_ = bottom[0]->num(); + + // TODO: clean up this + kernel_w_ = this->kernel_shape_.cpu_data()[1]; + kernel_h_ = this->kernel_shape_.cpu_data()[0]; + stride_w_ = this->stride_.cpu_data()[1]; + stride_h_ = this->stride_.cpu_data()[0]; + pad_w_ = this->pad_.cpu_data()[1]; + pad_h_ = this->pad_.cpu_data()[0]; + + this->bottom_shape_ = &bottom[0]->shape(); + compute_output_shape(); + int status; + size_t n, g; + size_t iw, ih, ic; + size_t ow, oh, oc; + size_t kw, kh; /* filter */ + size_t dimension = 4; + + g = std::max(this->group_, 1); + n = this->num_; + iw = this->width_; + ih = this->height_; + ic = this->channels_; + + ow = this->width_out_; + oh = this->height_out_; + oc = this->num_output_; + + kw = this->kernel_w_; + kh = this->kernel_h_; + + size_t bdata_sizes[4] = {iw, ih, ic, n}; + size_t bdata_strides[4] = {1, iw, iw*ih, iw*ih*ic}; + + /* starting with MKL 2017 Gold in case of groups filter layout + * becomes 5D, i.e. 
groups become a separate dimension */ + size_t g_mkl2017 = g; + size_t f_dimension = dimension + (g != 1); + if (getMKLBuildDate() < 20160701) { + g_mkl2017 = 1; + f_dimension = dimension; + } + + size_t fdata_sizes[5] = {kw, kh, ic/g, oc/g_mkl2017, g_mkl2017}; + size_t fdata_strides[5] = {1, kw, kw*kh, kw*kh*ic/g, kw*kh*ic/g*oc/g}; + + size_t bias_sizes[1] = {oc}; + size_t bias_strides[1] = {1}; + + size_t tdata_sizes[4] = {ow, oh, oc, n}; + size_t tdata_strides[4] = {1, ow, ow*oh, ow*oh*oc}; + + size_t convolutionStrides[2] = {this->stride_w_, this->stride_h_}; + int inputOffset[2] = {-this->pad_w_, -this->pad_h_}; + + // Names are for debugging purposes only. + fwd_bottom_data ->name = "fwd_bottom_data @ " + this->layer_param_.name(); + fwd_top_data ->name = "fwd_top_data @ " + this->layer_param_.name(); + fwd_filter_data ->name = "fwd_filter_data @ " + this->layer_param_.name(); + fwd_bias_data ->name = "fwd_bias_data @ " + this->layer_param_.name(); + bwdd_top_diff ->name = "bwdd_top_diff @ " + this->layer_param_.name(); + bwdd_bottom_diff->name = "bwdd_bottom_diff @ " + this->layer_param_.name(); + bwdd_filter_data->name = "bwdd_filter_data @ " + this->layer_param_.name(); + bwdf_top_diff ->name = "bwdf_top_diff @ " + this->layer_param_.name(); + bwdf_bottom_data->name = "bwdf_bottom_data @ " + this->layer_param_.name(); + bwdf_filter_diff->name = "bwdf_filter_diff @ " + this->layer_param_.name(); + bwdf2fwd_filter_diff->name = + "bwdf2fwd_filter_diff @ " + this->layer_param_.name(); + bwdb_top_diff ->name = "bwdb_top_diff @ " + this->layer_param_.name(); + bwdb_bias_diff ->name = "bwdb_bias_diff @ " + this->layer_param_.name(); + + // Free MKL primitives + dnnDelete(convolutionFwd); + if (this->bias_term_) { + status = dnnGroupsConvolutionCreateForwardBias( + &convolutionFwd, + NULL, + dnnAlgorithmConvolutionDirect, + g, + dimension, + bdata_sizes, + tdata_sizes, + fdata_sizes, + convolutionStrides, + inputOffset, + dnnBorderZeros); + } else { + status = 
dnnGroupsConvolutionCreateForward( + &convolutionFwd, + NULL, + dnnAlgorithmConvolutionDirect, + g, + dimension, + bdata_sizes, + tdata_sizes, + fdata_sizes, + convolutionStrides, + inputOffset, + dnnBorderZeros); + } + + CHECK_EQ(status, 0) + << "Failed dnnCreateConvolution(dnnForward) with status " + << status << "\n"; + + fwd_bottom_data->create_layouts(convolutionFwd, dnnResourceSrc, dimension, + bdata_sizes, bdata_strides); + fwd_top_data ->create_layouts(convolutionFwd, dnnResourceDst, dimension, + tdata_sizes, tdata_strides); + fwd_filter_data->create_layouts(convolutionFwd, dnnResourceFilter, + f_dimension, fdata_sizes, fdata_strides); + + if (this->bias_term_) + fwd_bias_data->create_layouts(convolutionFwd, dnnResourceBias, 1, + bias_sizes, bias_strides); +/* + * Backward by data layer setup + */ + dnnDelete(convolutionBwdData); + status = dnnGroupsConvolutionCreateBackwardData( + &convolutionBwdData, + NULL, + dnnAlgorithmConvolutionDirect, + g, + dimension, + bdata_sizes, + tdata_sizes, + fdata_sizes, + convolutionStrides, + inputOffset, + dnnBorderZeros); + CHECK_EQ(status, 0) + << "Failed dnnConvolutionCreateBackwardData with status " + << status << "\n"; + + bwdd_bottom_diff->create_layouts(convolutionBwdData, dnnResourceDiffSrc, + dimension, bdata_sizes, bdata_strides); + bwdd_top_diff ->create_layouts(convolutionBwdData, dnnResourceDiffDst, + dimension, tdata_sizes, tdata_strides); + bwdd_filter_data->create_layouts(convolutionBwdData, dnnResourceFilter, + f_dimension, fdata_sizes, fdata_strides); + +/* + * Backward by filter layer setup + */ + dnnDelete(convolutionBwdFilter); + status = dnnGroupsConvolutionCreateBackwardFilter( + &convolutionBwdFilter, + NULL, + dnnAlgorithmConvolutionDirect, + g, + dimension, + bdata_sizes, + tdata_sizes, + fdata_sizes, + convolutionStrides, + inputOffset, + dnnBorderZeros); + CHECK_EQ(status, 0) + << "Failed dnnConvolutionCreateBackwardFilter with status " + << status << "\n"; + + 
bwdf_bottom_data->create_layouts(convolutionBwdFilter, dnnResourceSrc, + dimension, bdata_sizes, bdata_strides); + bwdf_top_diff ->create_layouts(convolutionBwdFilter, dnnResourceDiffDst, + dimension, tdata_sizes, tdata_strides); + bwdf_filter_diff->create_layouts(convolutionFwd, dnnResourceFilter, + f_dimension, fdata_sizes, fdata_strides); + // support for (iter_size > 1) requires additional buffer + bwdf_filter_diff_iter->create_layouts(convolutionFwd, dnnResourceFilter, + f_dimension, fdata_sizes, fdata_strides); + + // Note: this caused some trouble for older MKL + if (getMKLBuildDate() > 20160701) { + // bwdf2fwd_filter_diff: + // layout_int = internal layout of weight diff + // layout_usr = internal layout of weight data on forward convolution + bwdf2fwd_filter_diff->create_internal_layout(convolutionBwdFilter, + dnnResourceDiffFilter); + bwdf2fwd_filter_diff->remove_user_layout(); + status = dnnLayoutCreateFromPrimitive( + &bwdf2fwd_filter_diff->layout_usr, convolutionFwd, dnnResourceFilter); + CHECK_EQ(status, 0) << "Failed dnnLayoutCreateFromPrimitive with status " + << status << "\n"; + + bwdf2fwd_filter_diff->create_conversions(); + } + +/* + * Backward by bias layer setup + */ + if (this->bias_term_) { + dnnDelete(convolutionBwdBias); + status = dnnGroupsConvolutionCreateBackwardBias( + &convolutionBwdBias, + NULL, + dnnAlgorithmConvolutionDirect, + g, + dimension, + tdata_sizes); + CHECK_EQ(status, 0) + << "Failed dnnConvolutionCreateBackwardBias with status " + << status << "\n"; + + bwdb_top_diff->create_layouts(convolutionBwdBias, dnnResourceDiffDst, + dimension, tdata_sizes, tdata_strides); + bwdb_bias_diff->create_layouts(convolutionBwdBias, dnnResourceDiffBias, + 1, bias_sizes, bias_strides); + // support for (iter_size > 1) requires additional buffer + bwdb_bias_diff_iter->create_layouts(convolutionBwdBias, dnnResourceDiffBias, + 1, bias_sizes, bias_strides); + } +} + +template +void MKLConvolutionLayer::LayerSetUp( + const vector*>& bottom, + 
const vector*>& top) { + ConvolutionLayer::LayerSetUp(bottom, top); + + Init(bottom, top); +} + +template +void MKLConvolutionLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + bool reinitialize = (this->width_ == bottom[0]->width() && + this->height_ == bottom[0]->height() && + this->channels_ == bottom[0]->channels() && + this->num_ == bottom[0]->num()) ? false : true; + + BaseConvolutionLayer::ReshapeForMKL(bottom, top); + + if (reinitialize == true) { + Init(bottom, top); + } +} + +template +void MKLConvolutionLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + int status; + size_t n, g; + size_t iw, ih, ic; + size_t ow, oh, oc; + + g = this->group_; + n = this->num_; + iw = this->width_; + ih = this->height_; + ic = this->channels_/g; + + CHECK(bottom[0]->width() == iw && + bottom[0]->height() == ih && + bottom[0]->channels() == ic*g && + bottom[0]->num() == n) + << "Inclompatible shape of bottom with layer"; + + ow = this->width_out_; + oh = this->height_out_; + oc = this->num_output_/g; + CHECK(top[0]->width() == ow && + top[0]->height() == oh && + top[0]->channels() == oc*g && + top[0]->num() == n) << "Inclompatible shape of bottom with layer"; + + + void *res_convolutionFwd[dnnResourceNumber]; + res_convolutionFwd[dnnResourceSrc] = + fwd_bottom_data->get_converted_prv(bottom[0], false); + res_convolutionFwd[dnnResourceFilter] = + fwd_filter_data->get_converted_prv(this->blobs_[0].get(), true); + if (this->bias_term_) { + res_convolutionFwd[dnnResourceBias] = + fwd_bias_data ->get_converted_prv(this->blobs_[1].get(), true); + } + + if (fwd_top_data->conversion_needed()) { + top[0]->set_prv_data_descriptor(fwd_top_data); + res_convolutionFwd[dnnResourceDst] = + reinterpret_cast(top[0]->mutable_prv_data()); + } else { + res_convolutionFwd[dnnResourceDst] = top[0]->mutable_cpu_data(); + } + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKL_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + status = 
dnnExecute(convolutionFwd, res_convolutionFwd); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); + + CHECK_EQ(status, 0) << "Forward convolution failed with status " << status; +} + +template +void MKLConvolutionLayer::Backward_cpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { + int status; + size_t n, g; + size_t iw, ih, ic; + size_t ow, oh, oc; + + g = this->group_; + n = this->num_; + iw = this->width_; + ih = this->height_; + ic = this->channels_/g; + + CHECK(bottom[0]->width() == iw && + bottom[0]->height() == ih && + bottom[0]->channels() == ic*g && + bottom[0]->num() == n) + << "Incompatible shape of bottom with layer"; + + ow = this->width_out_; + oh = this->height_out_; + oc = this->num_output_/g; + CHECK(top[0]->width() == ow && + top[0]->height() == oh && + top[0]->channels() == oc*g && + top[0]->num() == n) << "Incompatible shape of bottom with layer"; + + if (propagate_down[0]) { + void *res_convolutionBwdData[dnnResourceNumber]; + + res_convolutionBwdData[dnnResourceDiffDst] = + bwdd_top_diff->get_converted_prv(top[0], true); + // Currently this conversion adds padding to weights. 
+ // We don't want that to be stored in the weights prv_ptr_ + res_convolutionBwdData[dnnResourceFilter] = + bwdd_filter_data->get_converted_prv(this->blobs_[0].get(), false); + + if (bwdd_bottom_diff->conversion_needed()) { + bottom[0]->set_prv_diff_descriptor(bwdd_bottom_diff); + res_convolutionBwdData[dnnResourceDiffSrc] = + bottom[0]->mutable_prv_diff(); + } else { + res_convolutionBwdData[dnnResourceDiffSrc] = + bottom[0]->mutable_cpu_diff(); + } + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_prop_, + PERFORMANCE_MKL_NAME_DETAILED("BW", "_prop")); + PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(convolutionBwdData, res_convolutionBwdData); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_prop_); + + CHECK_EQ(status, 0) << "Backward Data conv failed with status " << status; + } + + if (this->param_propagate_down(0)) { + void *res_convolutionBwdFilter[dnnResourceNumber]; + + res_convolutionBwdFilter[dnnResourceDiffDst] = + bwdf_top_diff->get_converted_prv(top[0], true); + // The last get_converted_prv() argument is a hack for reusing conversion + // done already in the forward direction. 
+ res_convolutionBwdFilter[dnnResourceSrc] = + bwdf_bottom_data->get_converted_prv(bottom[0], false, + fwd_bottom_data.get()); + + if (bwdf_filter_diff->conversion_needed()) { + this->blobs_[0]->set_prv_diff_descriptor(bwdf_filter_diff); + } + if (bwdf2fwd_filter_diff->conversion_needed()) { + // Different layouts in fwd filters vs bwd diffs + res_convolutionBwdFilter[dnnResourceDiffFilter] = + reinterpret_cast(bwdf2fwd_filter_diff->prv_ptr()); + } else { + if (Caffe::iter_size() > 1) { + // if (iter_size > 1) then diffs are accumulated across iterations + res_convolutionBwdFilter[dnnResourceDiffFilter] = + bwdf_filter_diff_iter->prv_ptr(); + } else { + if (bwdf_filter_diff->conversion_needed()) { + res_convolutionBwdFilter[dnnResourceDiffFilter] = + this->blobs_[0]->mutable_prv_diff(); + } else { + res_convolutionBwdFilter[dnnResourceDiffFilter] = + this->blobs_[0]->mutable_cpu_diff(); + } + } + } + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_, PERFORMANCE_MKL_NAME("BW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(convolutionBwdFilter, res_convolutionBwdFilter); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_); + + CHECK_EQ(status, 0) << "Backward Filter conv failed with status " << status; + + if (bwdf2fwd_filter_diff->conversion_needed()) { + // Different layouts in fwd filters vs bwd diffs + void *convert_resources[dnnResourceNumber]; + convert_resources[dnnResourceFrom] = bwdf2fwd_filter_diff->prv_ptr(); + + if (Caffe::iter_size() > 1) { + // if (iter_size > 1) then diffs are accumulated across iterations + convert_resources[dnnResourceTo] = + bwdf_filter_diff_iter->prv_ptr(); + if (bwdf_filter_diff->conversion_needed()) + DLOG(INFO) << "convert priv => priv " << bwdf2fwd_filter_diff->name + << " => " << bwdf_filter_diff->name; + else + DLOG(INFO) << "convert priv => " << bwdf2fwd_filter_diff->name + << " =>"; + } else { + if (bwdf_filter_diff->conversion_needed()) { + convert_resources[dnnResourceTo] = + this->blobs_[0]->mutable_prv_diff(); + DLOG(INFO) 
<< "convert priv => priv " << bwdf2fwd_filter_diff->name + << " => " << bwdf_filter_diff->name; + } else { + convert_resources[dnnResourceTo] = + this->blobs_[0]->mutable_cpu_diff(); + DLOG(INFO) << "convert priv => " << bwdf2fwd_filter_diff->name + << " =>"; + } + } + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_diff_, + PERFORMANCE_MKL_NAME_DETAILED("BW", "_diff")); + PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(bwdf2fwd_filter_diff->convert_from_int, + convert_resources); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_diff_); + + CHECK_EQ(status, 0) << "Conversion failed with status " << status; + } + + if (Caffe::iter_size() > 1) { + // if (iter_size > 1) then diffs are accumulated across iterations + if (bwdf_filter_diff->conversion_needed()) { + caffe_axpy((const int)this->blobs_[0]->prv_diff_count(), 1, + reinterpret_cast(bwdf_filter_diff_iter->prv_ptr()), + this->blobs_[0]->mutable_prv_diff()); + } else { + caffe_axpy((const int)this->blobs_[0]->count(), 1, + reinterpret_cast(bwdf_filter_diff_iter->prv_ptr()), + this->blobs_[0]->mutable_cpu_diff()); + } + } + } + + if (this->param_propagate_down(1)) { + void *res_convolutionBwdBias[dnnResourceNumber]; + + res_convolutionBwdBias[dnnResourceDiffDst] = + bwdb_top_diff->get_converted_prv(top[0], true); + if (Caffe::iter_size() > 1) { + // if (iter_size > 1) then diffs are accumulated across iterations + res_convolutionBwdBias[dnnResourceDiffBias] = + bwdb_bias_diff_iter->prv_ptr(); + } else { + if (bwdb_bias_diff->conversion_needed()) { + this->blobs_[1]->set_prv_diff_descriptor(bwdb_bias_diff); + res_convolutionBwdBias[dnnResourceDiffBias] = + reinterpret_cast(this->blobs_[1]->mutable_prv_diff()); + + } else { + res_convolutionBwdBias[dnnResourceDiffBias] = + reinterpret_cast(this->blobs_[1]->mutable_cpu_diff()); + } + } + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_bias_, + PERFORMANCE_MKL_NAME_DETAILED("BW", "_bias")); + PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(convolutionBwdBias, 
res_convolutionBwdBias); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_bias_); + + CHECK_EQ(status, 0) << "Backward Bias failed with status " << status; + + if (Caffe::iter_size() > 1) { + // if (iter_size > 1) then diffs are accumulated across iterations + if (bwdb_bias_diff->conversion_needed()) { + caffe_axpy((const int)this->blobs_[1]->prv_diff_count(), 1, + reinterpret_cast(bwdb_bias_diff_iter->prv_ptr()), + this->blobs_[1]->mutable_prv_diff()); + } else { + caffe_axpy((const int)this->blobs_[1]->count(), 1, + reinterpret_cast(bwdb_bias_diff_iter->prv_ptr()), + this->blobs_[1]->mutable_cpu_diff()); + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(MKLConvolutionLayer); +#else +template +void MKLConvolutionLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) + {NOT_IMPLEMENTED;} +template +void MKLConvolutionLayer::Backward_gpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) + {NOT_IMPLEMENTED;} +#endif + +INSTANTIATE_CLASS(MKLConvolutionLayer); +} // namespace caffe +#endif // #ifdef MKL2017_SUPPORTED diff --git a/src/caffe/layers/mkl_deconvolution_layer.cpp b/src/caffe/layers/mkl_deconvolution_layer.cpp new file mode 100644 index 00000000000..779eee71c90 --- /dev/null +++ b/src/caffe/layers/mkl_deconvolution_layer.cpp @@ -0,0 +1,630 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef MKL2017_SUPPORTED +#include +#include +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/mkl_layers.hpp" +#include "caffe/util/performance.hpp" +#include "mkl_service.h" +#ifdef _OPENMP +#include +#endif + + +static int getMKLBuildDate() { + static int build = 0; + if (build == 0) { + MKLVersion v; + mkl_get_version(&v); + build = atoi(v.Build); + } + return build; +} + +namespace caffe { +template +MKLDeconvolutionLayer::MKLDeconvolutionLayer( + const LayerParameter& param) + : DeconvolutionLayer(param), + fwd_bottom_data(new MKLData()), + fwd_top_data(new MKLData()), + fwd_filter_data(new MKLData()), + fwd_bias_data(new MKLData()), + convolutionFwd(NULL), + bwdd_top_diff(new MKLDiff()), + bwdd_bottom_diff(new MKLDiff()), + bwdd_filter_data(new MKLData()), + convolutionBwdData(static_cast(NULL)), + bwdf_top_diff(new MKLDiff()), + bwdf_filter_diff(new MKLDiff()), + bwdf2fwd_filter_diff(new MKLDiff()), + bwdf_bottom_data(new MKLData()), + convolutionBwdFilter(static_cast(NULL)), + bwdb_top_diff(new MKLDiff()), + bwdb_bias_diff(new MKLDiff()), + convolutionBwdBias(static_cast(NULL)), + bwdf_filter_diff_iter(new MKLDiff()), + bwdb_bias_diff_iter(new MKLDiff()) { + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_prop_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_diff_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_bias_); + } + +template +void MKLDeconvolutionLayer::compute_output_shape() { + DeconvolutionLayer::compute_output_shape(); + this->height_out_ = this->stride_h_ * (this->height_ - 1) + + this->kernel_h_ - 2 * this->pad_h_ ; + this->width_out_ = this->stride_w_ * (this->width_ - 1) + + this->kernel_w_ - 2 * this->pad_w_ ; +} + +template +MKLDeconvolutionLayer::~MKLDeconvolutionLayer() { + dnnDelete(convolutionFwd); + dnnDelete(convolutionBwdData); + dnnDelete(convolutionBwdFilter); + if (this->bias_term_) + dnnDelete(convolutionBwdBias); 
+} + +template +void MKLDeconvolutionLayer::Init( + const vector*>& bottom, + const vector*>& top) { + +#ifdef _OPENMP + this->num_of_threads_ = omp_get_max_threads() < bottom[0]->shape(0) ? + omp_get_max_threads() : bottom[0]->shape(0); + if (this->num_of_threads_ < 1) { + LOG(WARNING) << "DeConv layer: omp_get_max_threads() =" + << this->num_of_threads_; + this->num_of_threads_ = 1; + } +#endif + + + this->width_ = bottom[0]->width(); + this->height_ = bottom[0]->height(); + this->num_ = bottom[0]->num(); + + // TODO: clean up this + kernel_w_ = this->kernel_shape_.cpu_data()[1]; + kernel_h_ = this->kernel_shape_.cpu_data()[0]; + stride_w_ = this->stride_.cpu_data()[1]; + stride_h_ = this->stride_.cpu_data()[0]; + pad_w_ = this->pad_.cpu_data()[1]; + pad_h_ = this->pad_.cpu_data()[0]; + + this->bottom_shape_ = &bottom[0]->shape(); + compute_output_shape(); + int status; + size_t n, g; + size_t iw, ih, ic; + size_t ow, oh, oc; + size_t kw, kh; /* filter */ + size_t dimension = 4; + + g = std::max(this->group_, 1); + n = this->num_; + iw = this->width_; + ih = this->height_; + ic = this->channels_; + + ow = this->width_out_; + oh = this->height_out_; + oc = this->num_output_; + + kw = this->kernel_w_; + kh = this->kernel_h_; + + size_t bdata_sizes[4] = {iw, ih, ic, n}; + size_t bdata_strides[4] = {1, iw, iw*ih, iw*ih*ic}; + + /* starting with MKL 2017 Gold in case of groups filter layout + * becomes 5D, i.e. 
groups become a separate dimension */ + size_t g_mkl2017 = g; + size_t f_dimension = dimension + (g != 1); + if (getMKLBuildDate() < 20160701) { + g_mkl2017 = 1; + f_dimension = dimension; + } + + size_t fdata_sizes[5] = {kw, kh, oc/g, ic/g_mkl2017, g_mkl2017}; + size_t fdata_strides[5] = {1, kw, kw*kh, kw*kh*oc/g, kw*kh*ic/g*oc/g}; + + size_t bias_sizes[1] = {oc}; + size_t bias_strides[1] = {1}; + + size_t tdata_sizes[4] = {ow, oh, oc, n}; + size_t tdata_strides[4] = {1, ow, ow*oh, ow*oh*oc}; + + size_t convolutionStrides[2] = {this->stride_w_, this->stride_h_}; + int inputOffset[2] = {-this->pad_w_, -this->pad_h_}; + + // Names are for debugging purposes only. + fwd_bottom_data ->name = "fwd_bottom_data @ " + this->layer_param_.name(); + fwd_top_data ->name = "fwd_top_data @ " + this->layer_param_.name(); + fwd_filter_data ->name = "fwd_filter_data @ " + this->layer_param_.name(); + fwd_bias_data ->name = "fwd_bias_data @ " + this->layer_param_.name(); + bwdd_top_diff ->name = "bwdd_top_diff @ " + this->layer_param_.name(); + bwdd_bottom_diff->name = "bwdd_bottom_diff @ " + this->layer_param_.name(); + bwdd_filter_data->name = "bwdd_filter_data @ " + this->layer_param_.name(); + bwdf_top_diff ->name = "bwdf_top_diff @ " + this->layer_param_.name(); + bwdf_bottom_data->name = "bwdf_bottom_data @ " + this->layer_param_.name(); + bwdf_filter_diff->name = "bwdf_filter_diff @ " + this->layer_param_.name(); + bwdf2fwd_filter_diff->name = + "bwdf2fwd_filter_diff @ " + this->layer_param_.name(); + bwdb_top_diff ->name = "bwdb_top_diff @ " + this->layer_param_.name(); + bwdb_bias_diff ->name = "bwdb_bias_diff @ " + this->layer_param_.name(); + + +/* + * Forward setup, implemented by convolutionBwdData + */ + dnnDelete(convolutionBwdData); + status = dnnGroupsConvolutionCreateBackwardData( + &convolutionBwdData, + NULL, + dnnAlgorithmConvolutionDirect, + g, + dimension, + tdata_sizes, + bdata_sizes, + fdata_sizes, + convolutionStrides, + inputOffset, + dnnBorderZeros); + 
CHECK_EQ(status, 0) + << "Failed dnnConvolutionCreateBackwardData with status " + << status << "\n"; + fwd_bottom_data->create_layouts(convolutionBwdData, dnnResourceDiffDst, dimension, + bdata_sizes, bdata_strides); + fwd_top_data ->create_layouts(convolutionBwdData, dnnResourceDiffSrc, dimension, + tdata_sizes, tdata_strides); + fwd_filter_data->create_layouts(convolutionBwdData, dnnResourceFilter, + f_dimension, fdata_sizes, fdata_strides); + +/* + * Backward by Data setup, implemented by convolutionFwd + */ + + dnnDelete(convolutionFwd); + + status = dnnGroupsConvolutionCreateForward( + &convolutionFwd, + NULL, + dnnAlgorithmConvolutionDirect, + g, + dimension, + tdata_sizes, + bdata_sizes, + fdata_sizes, + convolutionStrides, + inputOffset, + dnnBorderZeros); + + CHECK_EQ(status, 0) + << "Failed dnnCreateConvolution(dnnForward) with status " + << status << "\n"; + + bwdd_bottom_diff->create_layouts(convolutionFwd, dnnResourceDst, + dimension, bdata_sizes, bdata_strides); + bwdd_top_diff ->create_layouts(convolutionFwd, dnnResourceSrc, + dimension, tdata_sizes, tdata_strides); + bwdd_filter_data->create_layouts(convolutionFwd, dnnResourceFilter, + f_dimension, fdata_sizes, fdata_strides); + +/* + * Backward by filter layer setup + */ + dnnDelete(convolutionBwdFilter); + status = dnnGroupsConvolutionCreateBackwardFilter( + &convolutionBwdFilter, + NULL, + dnnAlgorithmConvolutionDirect, + g, + dimension, + tdata_sizes, + bdata_sizes, + fdata_sizes, + convolutionStrides, + inputOffset, + dnnBorderZeros); + CHECK_EQ(status, 0) + << "Failed dnnConvolutionCreateBackwardFilter with status " + << status << "\n"; + + bwdf_bottom_data->create_layouts(convolutionBwdFilter, dnnResourceDiffDst, + dimension, bdata_sizes, bdata_strides); + bwdf_top_diff ->create_layouts(convolutionBwdFilter, dnnResourceSrc, + dimension, tdata_sizes, tdata_strides); + bwdf_filter_diff->create_layouts(convolutionBwdData, dnnResourceFilter, + f_dimension, fdata_sizes, fdata_strides); + // 
support for (iter_size > 1) requires additional buffer + bwdf_filter_diff_iter->create_layouts(convolutionFwd, dnnResourceFilter, + f_dimension, fdata_sizes, fdata_strides); + + // Note: this caused some trouble for older MKL + if (getMKLBuildDate() > 20160701) { + // bwdf2fwd_filter_diff: + // layout_int = internal layout of weight diff + // layout_usr = internal layout of weight data on forward convolution + bwdf2fwd_filter_diff->create_internal_layout(convolutionBwdFilter, + dnnResourceDiffFilter); + bwdf2fwd_filter_diff->remove_user_layout(); + status = dnnLayoutCreateFromPrimitive( + &bwdf2fwd_filter_diff->layout_usr, convolutionBwdData, dnnResourceFilter); + CHECK_EQ(status, 0) << "Failed dnnLayoutCreateFromPrimitive with status " + << status << "\n"; + + bwdf2fwd_filter_diff->create_conversions(); + } + +/* + * Backward by bias layer setup + */ + if (this->bias_term_) { + dnnDelete(convolutionBwdBias); + status = dnnGroupsConvolutionCreateBackwardBias( + &convolutionBwdBias, + NULL, + dnnAlgorithmConvolutionDirect, + g, + dimension, + tdata_sizes); + CHECK_EQ(status, 0) + << "Failed dnnConvolutionCreateBackwardBias with status " + << status << "\n"; + + bwdb_top_diff->create_layouts(convolutionBwdBias, dnnResourceDiffDst, + dimension, tdata_sizes, tdata_strides); + bwdb_bias_diff->create_layouts(convolutionBwdBias, dnnResourceDiffBias, + 1, bias_sizes, bias_strides); + // support for (iter_size > 1) requires additional buffer + bwdb_bias_diff_iter->create_layouts(convolutionBwdBias, dnnResourceDiffBias, + 1, bias_sizes, bias_strides); + } + +} + +template +void MKLDeconvolutionLayer::LayerSetUp( + const vector*>& bottom, + const vector*>& top) { + DeconvolutionLayer::LayerSetUp(bottom, top); + + Init(bottom, top); +} + +template +void MKLDeconvolutionLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + bool reinitialize = (this->width_ == bottom[0]->width() && + this->height_ == bottom[0]->height() && + this->channels_ == bottom[0]->channels() 
&& + this->num_ == bottom[0]->num()) ? false : true; + + BaseConvolutionLayer::ReshapeForMKL(bottom, top); + + if (reinitialize == true) { + Init(bottom, top); + } +} + +template +void MKLDeconvolutionLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + int status; + size_t n, g; + size_t iw, ih, ic; + size_t ow, oh, oc; + + g = this->group_; + n = this->num_; + iw = this->width_; + ih = this->height_; + ic = this->channels_/g; + + CHECK(bottom[0]->width() == iw && + bottom[0]->height() == ih && + bottom[0]->channels() == ic*g && + bottom[0]->num() == n) + << "Incompatible shape of bottom with layer"; + + ow = this->width_out_; + oh = this->height_out_; + oc = this->num_output_/g; + CHECK(top[0]->width() == ow && + top[0]->height() == oh && + top[0]->channels() == oc*g && + top[0]->num() == n) << "Incompatible shape of top with layer"; + + + void *res_convolutionBwdData[dnnResourceNumber]; + + res_convolutionBwdData[dnnResourceDiffDst] = + fwd_bottom_data->get_converted_prv(bottom[0], false); + // Currently this conversion adds padding to weights. 
+ // We don't want that to be stored in the weights prv_ptr_ + res_convolutionBwdData[dnnResourceFilter] = + fwd_filter_data->get_converted_prv(this->blobs_[0].get(), true); + + if (fwd_top_data->conversion_needed()) { + top[0]->set_prv_data_descriptor(fwd_top_data); + res_convolutionBwdData[dnnResourceDiffSrc] = + reinterpret_cast(top[0]->mutable_prv_data()); + } else { + res_convolutionBwdData[dnnResourceDiffSrc] = + top[0]->mutable_cpu_data(); + } + + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKL_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(convolutionBwdData, res_convolutionBwdData); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); + + CHECK_EQ(status, 0) << "Forward deconvolution failed with status " << status; + + if (this->bias_term_) { + const Dtype* bias = this->blobs_[1]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + +#ifdef _OPENMP +# pragma omp parallel for num_threads(this->num_of_threads_) +#endif + for (int n = 0; n < this->num_; ++n) { + this->forward_cpu_bias(top_data + n * this->top_dim_, bias); + } + } +} + +template +void MKLDeconvolutionLayer::Backward_cpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { + int status; + size_t n, g; + size_t iw, ih, ic; + size_t ow, oh, oc; + + g = this->group_; + n = this->num_; + iw = this->width_; + ih = this->height_; + ic = this->channels_/g; + + CHECK(bottom[0]->width() == iw && + bottom[0]->height() == ih && + bottom[0]->channels() == ic*g && + bottom[0]->num() == n) + << "Incompatible shape of bottom with layer"; + + ow = this->width_out_; + oh = this->height_out_; + oc = this->num_output_/g; + CHECK(top[0]->width() == ow && + top[0]->height() == oh && + top[0]->channels() == oc*g && + top[0]->num() == n) << "Incompatible shape of top with layer"; + + if (propagate_down[0]) { + + void *res_convolutionFwd[dnnResourceNumber]; + res_convolutionFwd[dnnResourceSrc] = + bwdd_top_diff->get_converted_prv(top[0], true); + // 
Currently this conversion adds padding to weights. + // We don't want that to be stored in the weights prv_ptr_ + res_convolutionFwd[dnnResourceFilter] = + bwdd_filter_data->get_converted_prv(this->blobs_[0].get(), false); + + if (bwdd_bottom_diff->conversion_needed()) { + bottom[0]->set_prv_diff_descriptor(bwdd_bottom_diff); + res_convolutionFwd[dnnResourceDst] = + bottom[0]->mutable_prv_diff(); + } else { + res_convolutionFwd[dnnResourceDst] = + bottom[0]->mutable_cpu_diff(); + } + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_prop_, + PERFORMANCE_MKL_NAME_DETAILED("BW", "_prop")); + PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(convolutionFwd, res_convolutionFwd); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_prop_); + + CHECK_EQ(status, 0) << "Backward Data deconv failed with status " << status; + } + + if (this->param_propagate_down(0)) { + void *res_convolutionBwdFilter[dnnResourceNumber]; + res_convolutionBwdFilter[dnnResourceDiffDst] = + bwdf_bottom_data->get_converted_prv(bottom[0], false); + + res_convolutionBwdFilter[dnnResourceSrc] = + bwdf_top_diff->get_converted_prv(top[0], false); + + + if (bwdf_filter_diff->conversion_needed()) { + this->blobs_[0]->set_prv_diff_descriptor(bwdf_filter_diff); + } + if (bwdf2fwd_filter_diff->conversion_needed()) { + // Different layouts in fwd filters vs bwd diffs + res_convolutionBwdFilter[dnnResourceDiffFilter] = + reinterpret_cast(bwdf2fwd_filter_diff->prv_ptr()); + } else { + if (Caffe::iter_size() > 1) { + // if (iter_size > 1) then diffs are accumulated across iterations + res_convolutionBwdFilter[dnnResourceDiffFilter] = + bwdf_filter_diff_iter->prv_ptr(); + } else { + if (bwdf_filter_diff->conversion_needed()) { + res_convolutionBwdFilter[dnnResourceDiffFilter] = + this->blobs_[0]->mutable_prv_diff(); + } else { + res_convolutionBwdFilter[dnnResourceDiffFilter] = + this->blobs_[0]->mutable_cpu_diff(); + } + } + } + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_, PERFORMANCE_MKL_NAME("BW")); + 
PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(convolutionBwdFilter, res_convolutionBwdFilter); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_); + + CHECK_EQ(status, 0) << "Backward Filter conv failed with status " << status; + + if (bwdf2fwd_filter_diff->conversion_needed()) { + // Different layouts in fwd filters vs bwd diffs + void *convert_resources[dnnResourceNumber]; + convert_resources[dnnResourceFrom] = bwdf2fwd_filter_diff->prv_ptr(); + + if (Caffe::iter_size() > 1) { + // if (iter_size > 1) then diffs are accumulated across iterations + convert_resources[dnnResourceTo] = + bwdf_filter_diff_iter->prv_ptr(); + if (bwdf_filter_diff->conversion_needed()) + DLOG(INFO) << "convert priv => priv " << bwdf2fwd_filter_diff->name + << " => " << bwdf_filter_diff->name; + else + DLOG(INFO) << "convert priv => " << bwdf2fwd_filter_diff->name + << " =>"; + } else { + if (bwdf_filter_diff->conversion_needed()) { + convert_resources[dnnResourceTo] = + this->blobs_[0]->mutable_prv_diff(); + DLOG(INFO) << "convert priv => priv " << bwdf2fwd_filter_diff->name + << " => " << bwdf_filter_diff->name; + } else { + convert_resources[dnnResourceTo] = + this->blobs_[0]->mutable_cpu_diff(); + DLOG(INFO) << "convert priv => " << bwdf2fwd_filter_diff->name + << " =>"; + } + } + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_diff_, + PERFORMANCE_MKL_NAME_DETAILED("BW", "_diff")); + PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(bwdf2fwd_filter_diff->convert_from_int, + convert_resources); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_diff_); + + CHECK_EQ(status, 0) << "Conversion failed with status " << status; + } + + if (Caffe::iter_size() > 1) { + // if (iter_size > 1) then diffs are accumulated across iterations + if (bwdf_filter_diff->conversion_needed()) { + caffe_axpy((const int)this->blobs_[0]->prv_diff_count(), 1, + reinterpret_cast(bwdf_filter_diff_iter->prv_ptr()), + this->blobs_[0]->mutable_prv_diff()); + } else { + caffe_axpy((const int)this->blobs_[0]->count(), 1, + 
reinterpret_cast(bwdf_filter_diff_iter->prv_ptr()), + this->blobs_[0]->mutable_cpu_diff()); + } + } + } + + if (this->param_propagate_down(1)) { + void *res_convolutionBwdBias[dnnResourceNumber]; + + res_convolutionBwdBias[dnnResourceDiffDst] = + bwdb_top_diff->get_converted_prv(top[0], true); + if (Caffe::iter_size() > 1) { + // if (iter_size > 1) then diffs are accumulated across iterations + res_convolutionBwdBias[dnnResourceDiffBias] = + bwdb_bias_diff_iter->prv_ptr(); + } else { + if (bwdb_bias_diff->conversion_needed()) { + this->blobs_[1]->set_prv_diff_descriptor(bwdb_bias_diff); + res_convolutionBwdBias[dnnResourceDiffBias] = + reinterpret_cast(this->blobs_[1]->mutable_prv_diff()); + + } else { + res_convolutionBwdBias[dnnResourceDiffBias] = + reinterpret_cast(this->blobs_[1]->mutable_cpu_diff()); + } + } + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_bias_, + PERFORMANCE_MKL_NAME_DETAILED("BW", "_bias")); + PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(convolutionBwdBias, res_convolutionBwdBias); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_bias_); + + CHECK_EQ(status, 0) << "Backward Bias failed with status " << status; + + if (Caffe::iter_size() > 1) { + // if (iter_size > 1) then diffs are accumulated across iterations + if (bwdb_bias_diff->conversion_needed()) { + caffe_axpy((const int)this->blobs_[1]->prv_diff_count(), 1, + reinterpret_cast(bwdb_bias_diff_iter->prv_ptr()), + this->blobs_[1]->mutable_prv_diff()); + } else { + caffe_axpy((const int)this->blobs_[1]->count(), 1, + reinterpret_cast(bwdb_bias_diff_iter->prv_ptr()), + this->blobs_[1]->mutable_cpu_diff()); + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(MKLDeconvolutionLayer); +#else +template +void MKLDeconvolutionLayer::Forward_gpu( + const vector*>& bottom, const vector*>& top) + {NOT_IMPLEMENTED;} +template +void MKLDeconvolutionLayer::Backward_gpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) + {NOT_IMPLEMENTED;} +#endif + 
+INSTANTIATE_CLASS(MKLDeconvolutionLayer); +} // namespace caffe +#endif // #ifdef MKL2017_SUPPORTED diff --git a/src/caffe/layers/mkl_eltwise_layer.cpp b/src/caffe/layers/mkl_eltwise_layer.cpp new file mode 100644 index 00000000000..8b57c4042e3 --- /dev/null +++ b/src/caffe/layers/mkl_eltwise_layer.cpp @@ -0,0 +1,285 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#if defined(MKL2017_SUPPORTED) +#include +#include + +#include "caffe/layers/mkl_layers.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/performance.hpp" + +namespace caffe { + +template +MKLEltwiseLayer::~MKLEltwiseLayer() { + dnnDelete(sumPrimitive); +} + +template +void MKLEltwiseLayer::Init(const vector*>& bottom, + const vector*>& top) { + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + num_ = bottom[0]->num(); + + op_ = this->layer_param_.eltwise_param().operation(); + // Blob-wise coefficients for the elementwise operation. + coeffs_ = vector(bottom.size(), 1); + if (this->layer_param().eltwise_param().coeff_size()) { + for (int i = 0; i < bottom.size(); ++i) { + coeffs_[i] = this->layer_param().eltwise_param().coeff(i); + } + } + stable_prod_grad_ = this->layer_param_.eltwise_param().stable_prod_grad(); + + num_bottoms = bottom.size(); + size_t dim_src = bottom[0]->shape().size(); + size_t sizes_src[dim_src], strides_src[dim_src]; + for (size_t d = 0; d < dim_src; ++d) { + sizes_src[d] = bottom[0]->shape()[dim_src - d - 1]; + strides_src[d] = (d == 0) ? 
1 : strides_src[d-1]*sizes_src[d-1]; + } + + for (size_t i = 0; i < num_bottoms; ++i) { + fwd_bottom_data.push_back( + shared_ptr >(new MKLData)); + bwd_bottom_diff.push_back( + shared_ptr >(new MKLDiff)); + CHECK_EQ(dim_src, bottom[i]->shape().size()); + fwd_bottom_data[i]->create_user_layout(dim_src, + sizes_src, + strides_src, + false); + bwd_bottom_diff[i]->create_user_layout(dim_src, + sizes_src, + strides_src, + false); + } + + fwd_top_data->create_user_layout(dim_src, sizes_src, strides_src, false); + + dnnDelete(sumPrimitive); +} + + +template +void MKLEltwiseLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + CHECK(this->layer_param().eltwise_param().coeff_size() == 0 + || this->layer_param().eltwise_param().coeff_size() == bottom.size()) << + "MKLEltwise Layer takes one coefficient per bottom blob."; + CHECK(!(this->layer_param().eltwise_param().operation() + == EltwiseParameter_EltwiseOp_PROD + && this->layer_param().eltwise_param().coeff_size())) << + "MKLEltwise layer only takes coefficients for summation."; + + CHECK(this->layer_param().eltwise_param().operation() == + EltwiseParameter_EltwiseOp_SUM) + << "MKLEltwise Layer only process summation."; + + Init(bottom, top); +} + +template +void MKLEltwiseLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + for (int i = 1; i < bottom.size(); ++i) { + CHECK(bottom[i]->shape() == bottom[0]->shape()); + } + top[0]->ReshapeLike(*bottom[0]); + // If max operation, we will initialize the vector index part. 
+ if (this->layer_param_.eltwise_param().operation() == + EltwiseParameter_EltwiseOp_MAX && top.size() == 1) { + max_idx_.Reshape(bottom[0]->shape()); + } + + if (channels_ == bottom[0]->channels() && + height_ == bottom[0]->height() && + width_ == bottom[0]->width() && + num_ == bottom[0]->num() && + num_bottoms == bottom.size()) { + return; + } + + Init(bottom, top); +} + +template +void MKLEltwiseLayer::Forward_cpu( + const vector*>& bottom, const vector*>& top) { + dnnError_t e; + vector bottom_data; + bool num_prv = 0; + for (size_t i = 0; i < num_bottoms; i++) { + bottom_data.push_back( + reinterpret_cast(const_cast(bottom[i]->prv_data()))); + if (bottom_data[i] != NULL) { + num_prv += 1; + } else { + bottom_data[i] = + reinterpret_cast(const_cast(bottom[i]->cpu_data())); + } + } + + if (num_prv > 0) { + if (sumPrimitive == NULL) { + dnnLayout_t int_layout = NULL; + for (size_t i = 0; i < num_bottoms; ++i) { + if (bottom[i]->prv_data() != NULL) { + CHECK((bottom[i]->get_prv_data_descriptor())->get_descr_type() + == PrvMemDescr::PRV_DESCR_MKL2017); + shared_ptr > mem_descr = + boost::static_pointer_cast >( + bottom[i]->get_prv_data_descriptor()); + CHECK(mem_descr != NULL); + fwd_bottom_data[i] = mem_descr; + if (int_layout == NULL) { + int_layout = mem_descr->layout_int; + } + } + } + e = dnnSumCreate(&sumPrimitive, NULL, + num_bottoms, int_layout, &coeffs_[0]); + CHECK_EQ(e, E_SUCCESS); + + fwd_top_data->create_internal_layout(sumPrimitive, dnnResourceDst); + + for (int i = 0; i < num_bottoms; ++i) { + if (bottom[i]->prv_data() == NULL) { + fwd_bottom_data[i]->create_internal_layout(sumPrimitive, + (dnnResourceType_t)(dnnResourceMultipleSrc + i)); + } + } + } + } else { + if (sumPrimitive == NULL) { + e = dnnSumCreate(&sumPrimitive, NULL, num_bottoms, + fwd_top_data->layout_usr, &coeffs_[0]); + CHECK_EQ(e, E_SUCCESS); + } + } + + switch (op_) { + case EltwiseParameter_EltwiseOp_SUM: + void *eltwise_res[dnnResourceNumber]; + for (int i = 0; i < num_bottoms; 
++i) { + if (fwd_bottom_data[i]->convert_to_int) { + eltwise_res[dnnResourceMultipleSrc + i] = + fwd_bottom_data[i]->get_converted_prv(bottom[i], false); + } else { + eltwise_res[dnnResourceMultipleSrc + i] = + reinterpret_cast(bottom_data[i]); + } + } + + if (fwd_top_data->conversion_needed()) { + top[0]->set_prv_data_descriptor(fwd_top_data); + eltwise_res[dnnResourceDst] = + reinterpret_cast(const_cast(top[0]->mutable_prv_data())); + } else { + eltwise_res[dnnResourceDst] = + reinterpret_cast(const_cast(top[0]->mutable_cpu_data())); + } + + { // local scope needed since the macro below contains variable declaration + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKL_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + e = dnnExecute(sumPrimitive, eltwise_res); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); + } + CHECK_EQ(e, E_SUCCESS); + + break; + case EltwiseParameter_EltwiseOp_PROD: + case EltwiseParameter_EltwiseOp_MAX: + LOG(FATAL) << "Unsupported elementwise operation."; + default: + LOG(FATAL) << "Unknown elementwise operation."; + } +} + +template +void MKLEltwiseLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + + bool is_top_diff_prv = top[0]->prv_diff() == NULL ? 
false : true; + + for (int i = 0; i < bottom.size(); ++i) { + if (propagate_down[i]) { + switch (op_) { + case EltwiseParameter_EltwiseOp_SUM: + CHECK_EQ(coeffs_[i], Dtype(1)) << "Not supported yet"; + if (is_top_diff_prv == false) { + bottom[i]->set_cpu_diff(top[0]->mutable_cpu_diff()); + } else { + if (!bwd_bottom_diff[i]->layout_int) { + bwd_bottom_diff[i]->create_internal_layout(sumPrimitive, + (dnnResourceType_t)(dnnResourceMultipleSrc + i)); + } + CHECK_EQ(true, bwd_bottom_diff[i]->layout_compare( + top[0]->get_prv_diff_descriptor())); + bottom[i]->set_prv_diff_descriptor(top[0]->get_prv_diff_descriptor(), + false); + } + break; + case EltwiseParameter_EltwiseOp_MAX: + case EltwiseParameter_EltwiseOp_PROD: + LOG(FATAL) << "Unsupported elementwise operation."; + default: + LOG(FATAL) << "Unknown elementwise operation."; + } + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(MKLEltwiseLayer); +#else +template +void MKLEltwiseLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) {NOT_IMPLEMENTED;} +template +void MKLEltwiseLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) + {NOT_IMPLEMENTED;} +#endif + +INSTANTIATE_CLASS(MKLEltwiseLayer); +} // namespace caffe +#endif // #if defined(MKL2017_SUPPORTED) diff --git a/src/caffe/layers/mkl_lrn_layer.cpp b/src/caffe/layers/mkl_lrn_layer.cpp new file mode 100644 index 00000000000..15d31ba84c9 --- /dev/null +++ b/src/caffe/layers/mkl_lrn_layer.cpp @@ -0,0 +1,317 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef MKL2017_SUPPORTED +#include + +#include "caffe/layer.hpp" +#include "caffe/layers/mkl_layers.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/performance.hpp" + +namespace caffe { + +template +MKLLRNLayer::~MKLLRNLayer() { + dnnDelete(lrnFwd); + dnnDelete(lrnBwd); + dnnReleaseBuffer(lrn_buffer_); +} + +template +void MKLLRNLayer::Init(const vector*>& bottom, + const vector*>& top) { + size_ = this->layer_param_.lrn_param().local_size(); + CHECK_EQ(size_ % 2, 1) << "LRN only supports odd values for local_size"; + + alpha_ = this->layer_param_.lrn_param().alpha(); + beta_ = this->layer_param_.lrn_param().beta(); + k_ = this->layer_param_.lrn_param().k(); + + size_t dim = 4, sizes[4], strides[4]; + + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + num_ = bottom[0]->num(); + + sizes[0] = width_; + sizes[1] = height_; + sizes[2] = channels_; + sizes[3] = num_; + + strides[0] = 1; + strides[1] = sizes[0]; + strides[2] = sizes[0]*sizes[1]; + strides[3] = sizes[0]*sizes[1]*sizes[2]; + + fwd_bottom_data->name = "fwd_bottom_data @ " + this->layer_param_.name(); + fwd_top_data->name = "fwd_top_data @ " + this->layer_param_.name(); + bwd_top_diff->name = "bwd_top_diff @ " + this->layer_param_.name(); + bwd_bottom_diff->name = "bwd_bottom_diff @ " + this->layer_param_.name(); + + fwd_bottom_data->create_user_layout(dim, sizes, strides, false); + fwd_top_data ->create_user_layout(dim, sizes, strides, false); + bwd_bottom_diff->create_user_layout(dim, sizes, strides, false); + bwd_top_diff ->create_user_layout(dim, sizes, strides, false); + + // Fwd, Bwd primitives and lrn_buffer_ are allocated in "Lazy" + // mode, because here we don't know + // what layout is used by neighbours. 
+ dnnDelete(lrnFwd); + dnnDelete(lrnBwd); + dnnReleaseBuffer(lrn_buffer_); + lrn_buffer_ = NULL; +} + +template +void MKLLRNLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + Init(bottom, top); +} + +template +void MKLLRNLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; + + bool reshaping = true; + if ((num_ == bottom[0]->num()) && + channels_ == bottom[0]->channels() && + height_ == bottom[0]->height() && + width_ == bottom[0]->width()) { + reshaping = false; + } + + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + num_ = bottom[0]->num(); + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + top[0]->Reshape(num_, channels_, height_, width_); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } + + if (reshaping == true) { + Init(bottom, top); + } +} + +template +void MKLLRNLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + CrossChannelForward_cpu(bottom, top); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } +} + +template +void MKLLRNLayer::CrossChannelForward_cpu( + const vector*>& bottom, const vector*>& top) { + const void* bottom_data = + reinterpret_cast(bottom[0]->prv_data()); + + if (NULL != bottom_data) { + // Is it the first pass? Create a primitive. 
+ if (lrnFwd == NULL) { + CHECK_EQ((bottom[0]->get_prv_data_descriptor())->get_descr_type(), + PrvMemDescr::PRV_DESCR_MKL2017); + shared_ptr > mem_descr + = boost::static_pointer_cast > + (bottom[0]->get_prv_data_descriptor()); + CHECK(mem_descr != NULL); + + fwd_bottom_data = mem_descr; + + dnnError_t e; + dnnLayout_t lrn_buffer_l = NULL; + + e = dnnLRNCreateForward(&lrnFwd, NULL, fwd_bottom_data->layout_int, + size_, alpha_, beta_, k_); + CHECK_EQ(e, E_SUCCESS); + + fwd_top_data->create_internal_layout(lrnFwd, dnnResourceDst); + + e = dnnLRNCreateBackward(&lrnBwd, NULL, + fwd_bottom_data->layout_int, fwd_bottom_data->layout_int, + size_, alpha_, beta_, k_); + CHECK_EQ(e, E_SUCCESS); + + e = dnnLayoutCreateFromPrimitive( + &lrn_buffer_l, lrnFwd, dnnResourceWorkspace); + CHECK_EQ(e, E_SUCCESS); + e = dnnAllocateBuffer( + reinterpret_cast(&lrn_buffer_), lrn_buffer_l); + CHECK_EQ(e, E_SUCCESS); + dnnLayoutDelete(lrn_buffer_l); + + bwd_top_diff->create_internal_layout(lrnBwd, dnnResourceDiffDst); + bwd_bottom_diff->create_internal_layout(lrnBwd, dnnResourceDiffSrc); + } + } else { + DLOG(INFO) << "Using cpu_data in MKLLRNLayer."; + if (lrnFwd == NULL) { + // First pass + dnnError_t e; + dnnLayout_t lrn_buffer_l = NULL; + e = dnnLRNCreateForward(&lrnFwd, NULL, fwd_bottom_data->layout_usr, + size_, alpha_, beta_, k_); + CHECK_EQ(e, E_SUCCESS); + + + e = dnnLayoutCreateFromPrimitive( + &lrn_buffer_l, lrnFwd, dnnResourceWorkspace); + CHECK_EQ(e, E_SUCCESS); + e = dnnAllocateBuffer( + reinterpret_cast(&lrn_buffer_), lrn_buffer_l); + CHECK_EQ(e, E_SUCCESS); + dnnLayoutDelete(lrn_buffer_l); + + e = dnnLRNCreateBackward(&lrnBwd, NULL, + fwd_bottom_data->layout_usr, fwd_bottom_data->layout_usr, + size_, alpha_, beta_, k_); + CHECK_EQ(e, E_SUCCESS); + } + bottom_data = reinterpret_cast(bottom[0]->cpu_data()); + } + + dnnError_t e; + void* lrn_res[dnnResourceNumber]; + lrn_res[dnnResourceSrc] = const_cast(bottom_data); + if (fwd_top_data->conversion_needed()) { + 
top[0]->set_prv_data_descriptor(fwd_top_data); + lrn_res[dnnResourceDst] = + reinterpret_cast(top[0]->mutable_prv_data()); + } else { + lrn_res[dnnResourceDst] = + reinterpret_cast(top[0]->mutable_cpu_data()); + DLOG(INFO) << "Using cpu_data for top in DnnLRN."; + } + lrn_res[dnnResourceWorkspace] = lrn_buffer_; + + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKL_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + e = dnnExecute(lrnFwd, lrn_res); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); + + CHECK_EQ(e, E_SUCCESS); +} + +template +void MKLLRNLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + CrossChannelBackward_cpu(top, propagate_down, bottom); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } +} + +template +void MKLLRNLayer::CrossChannelBackward_cpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) { + + dnnError_t e; + void* lrn_res[dnnResourceNumber]; + lrn_res[dnnResourceDiffDst] = + bwd_top_diff->get_converted_prv(top[0], true); + lrn_res[dnnResourceWorkspace] = lrn_buffer_; + lrn_res[dnnResourceSrc] = + fwd_bottom_data->get_converted_prv(bottom[0], false); + + if (bwd_bottom_diff->conversion_needed()) { + bottom[0]->set_prv_diff_descriptor(bwd_bottom_diff); + lrn_res[dnnResourceDiffSrc] = bottom[0]->mutable_prv_diff(); + } else { + lrn_res[dnnResourceDiffSrc] = bottom[0]->mutable_cpu_diff(); + } + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_, PERFORMANCE_MKL_NAME("BW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + e = dnnExecute(lrnBwd, lrn_res); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_); + + CHECK_EQ(e, E_SUCCESS); +} + + +#ifdef CPU_ONLY +STUB_GPU(MKLLRNLayer); +STUB_GPU_FORWARD(MKLLRNLayer, CrossChannelForward); +STUB_GPU_BACKWARD(MKLLRNLayer, CrossChannelBackward); +#else 
+template +void MKLLRNLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) {NOT_IMPLEMENTED;} +template +void MKLLRNLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) + {NOT_IMPLEMENTED;} +template +void MKLLRNLayer::CrossChannelForward_gpu( + const vector*>& bottom, const vector*>& top) + {NOT_IMPLEMENTED;} +template +void MKLLRNLayer::CrossChannelBackward_gpu( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) {NOT_IMPLEMENTED;} + +#endif + +INSTANTIATE_CLASS(MKLLRNLayer); +} // namespace caffe +#endif // #ifdef MKL2017_SUPPORTED diff --git a/src/caffe/layers/mkl_pooling_layer.cpp b/src/caffe/layers/mkl_pooling_layer.cpp new file mode 100644 index 00000000000..8fd2a191d47 --- /dev/null +++ b/src/caffe/layers/mkl_pooling_layer.cpp @@ -0,0 +1,408 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef MKL2017_SUPPORTED +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/mkl_layers.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/performance.hpp" + +namespace caffe { + +template +MKLPoolingLayer::~MKLPoolingLayer() { + dnnDelete(poolingFwd); + dnnDelete(poolingBwd); +} + +template +void MKLPoolingLayer::Init( + const vector*>& bottom, + const vector*>& top) { + PoolingParameter pool_param = this->layer_param_.pooling_param(); + + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + num_ = bottom[0]->num(); + + if (pool_param.global_pooling()) { + CHECK(!(pool_param.has_kernel_size() || + pool_param.has_kernel_h() || pool_param.has_kernel_w())) + << "With Global_pooling: true Filter size cannot specified"; + } else { + CHECK(!pool_param.has_kernel_size() != + !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; + CHECK(pool_param.has_kernel_size() || + (pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "For non-square filters both kernel_h and kernel_w 
are required."; + } + CHECK((!pool_param.has_pad() && pool_param.has_pad_h() + && pool_param.has_pad_w()) + || (!pool_param.has_pad_h() && !pool_param.has_pad_w())) + << "pad is pad OR pad_h and pad_w are required."; + CHECK((!pool_param.has_stride() && pool_param.has_stride_h() + && pool_param.has_stride_w()) + || (!pool_param.has_stride_h() && !pool_param.has_stride_w())) + << "Stride is stride OR stride_h and stride_w are required."; + global_pooling_ = pool_param.global_pooling(); + if (global_pooling_) { + kernel_h_ = bottom[0]->height(); + kernel_w_ = bottom[0]->width(); + } else { + if (pool_param.has_kernel_size()) { + kernel_h_ = kernel_w_ = pool_param.kernel_size(); + } else { + kernel_h_ = pool_param.kernel_h(); + kernel_w_ = pool_param.kernel_w(); + } + } + CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero."; + CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; + if (!pool_param.has_pad_h()) { + pad_h_ = pad_w_ = pool_param.pad(); + } else { + pad_h_ = pool_param.pad_h(); + pad_w_ = pool_param.pad_w(); + } + if (!pool_param.has_stride_h()) { + stride_h_ = stride_w_ = pool_param.stride(); + } else { + stride_h_ = pool_param.stride_h(); + stride_w_ = pool_param.stride_w(); + } + if (global_pooling_) { + CHECK(pad_h_ == 0 && pad_w_ == 0 && stride_h_ == 1 && stride_w_ == 1) + << "With Global_pooling: true; only pad = 0 and stride = 1"; + } + if (pad_h_ != 0 || pad_w_ != 0) { + CHECK(this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_AVE + || this->layer_param_.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) + << "Padding implemented only for average and max pooling."; + CHECK_LT(pad_h_, kernel_h_); + CHECK_LT(pad_w_, kernel_w_); + } + + pooled_height_ = static_cast(ceil(static_cast( + bottom[0]->height() + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1; + pooled_width_ = static_cast(ceil(static_cast( + bottom[0]->width() + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1; + bool force_exclude_padding_flag_ = 
false; + if (pad_h_ || pad_w_) { + // If we have padding, ensure that the last pooling starts strictly + // inside the image (instead of at the padding); otherwise clip the last. + if ((pooled_height_ - 1) * stride_h_ >= bottom[0]->height() + pad_h_) { + --pooled_height_; + } + if ((pooled_width_ - 1) * stride_w_ >= bottom[0]->width() + pad_w_) { + --pooled_width_; + } + CHECK_LT((pooled_height_ - 1) * stride_h_, bottom[0]->height() + pad_h_); + CHECK_LT((pooled_width_ - 1) * stride_w_, bottom[0]->width() + pad_w_); + } + else + { + force_exclude_padding_flag_ = true; + } + + top[0]->Reshape(bottom[0]->num(), channels_, pooled_height_, + pooled_width_); + if (top.size() > 1) { + (reinterpret_cast* > (top[1]) )->Reshape(bottom[0]->num(), + channels_, pooled_height_, pooled_width_); + } + // If max/min/avg pooling, we will initialize the vector index part. + if (top.size() == 1) { + max_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, + pooled_width_); + } + // If stochastic pooling, we will initialize the random index part. 
+ if (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_STOCHASTIC) { + rand_idx_.Reshape(bottom[0]->num(), channels_, pooled_height_, + pooled_width_); + } + + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + this->algorithm = dnnAlgorithmPoolingMax; + break; + case PoolingParameter_PoolMethod_AVE: + if (this->layer_param_.pooling_param().avg_include_pad()) { + this->algorithm = dnnAlgorithmPoolingAvgIncludePadding; + } + else { + this->algorithm = dnnAlgorithmPoolingAvgExcludePadding; + } + // If user did not define padding + // bottom[0]->height/width() + kernel_h/w_ cannot be exact division by stride_h/w_ + // use the exclude padding to align with the result of Caffe + // for exact division situation, exclude padding and include padding will have the same results + if (force_exclude_padding_flag_ == true) + { + this->algorithm = dnnAlgorithmPoolingAvgExcludePadding; + } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } + + + size_t dim = 4; + size_t src_sizes[4], src_strides[4]; + size_t dst_sizes[4], dst_strides[4]; + + src_sizes[0] = bottom[0]->width(); + src_sizes[1] = bottom[0]->height(); + src_sizes[2] = bottom[0]->channels(); + src_sizes[3] = bottom[0]->num(); + + src_strides[0] = 1; + src_strides[1] = src_sizes[0]; + src_strides[2] = src_sizes[0]*src_sizes[1]; + src_strides[3] = src_sizes[0]*src_sizes[1]*src_sizes[2]; + + dst_sizes[0] = pooled_width_; + dst_sizes[1] = pooled_height_; + dst_sizes[2] = src_sizes[2]; + dst_sizes[3] = src_sizes[3]; + + dst_strides[0] = 1; + dst_strides[1] = dst_sizes[0]; + dst_strides[2] = dst_sizes[0]*dst_sizes[1]; + dst_strides[3] = dst_sizes[0]*dst_sizes[1]*dst_sizes[2]; + + src_offset[0] = -pad_w_; + src_offset[1] = -pad_h_; + + kernel_stride[0] = stride_w_; + kernel_stride[1] = stride_h_; + + kernel_size[0] = kernel_w_; + kernel_size[1] = kernel_h_; + + // 
Names are for debugging only + fwd_bottom_data->name = "fwd_bottom_data @ " + this->layer_param_.name(); + fwd_top_data->name = "fwd_top_data @ " + this->layer_param_.name(); + bwd_top_diff->name = "bwd_top_diff @ " + this->layer_param_.name(); + bwd_bottom_diff->name = "bwd_bottom_diff @ " + this->layer_param_.name(); + + fwd_bottom_data->create_user_layout(dim, src_sizes, src_strides, false); + fwd_top_data ->create_user_layout(dim, dst_sizes, dst_strides, false); + bwd_bottom_diff->create_user_layout(dim, src_sizes, src_strides, false); + bwd_top_diff ->create_user_layout(dim, dst_sizes, dst_strides, false); + // Primitives will be allocated during the first fwd pass + dnnDelete(poolingFwd); + dnnDelete(poolingBwd); +} + +template +void MKLPoolingLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + Init(bottom, top); +} + +template +void MKLPoolingLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; + + if (channels_ == bottom[0]->channels() && + height_ == bottom[0]->height() && + width_ == bottom[0]->width() && + num_ == bottom[0]->num()) { + return; + } + + Init(bottom, top); +} + +template +void MKLPoolingLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + // We'll output the mask to top[1] if it's of size >1. + size_t* mask = NULL; // suppress warnings about uninitalized variables + + // We'll output the mask to top[1] if it's of size >1. + const bool use_top_mask = top.size() > 1; + dnnError_t status; + void* pooling_res[dnnResourceNumber]; + + mask = (use_top_mask) ? 
+ reinterpret_cast(top[1]->mutable_cpu_data()) : + (max_idx_.mutable_cpu_data()); + pooling_res[dnnResourceWorkspace] = reinterpret_cast(mask); + + void* bottom_data = + reinterpret_cast(const_cast(bottom[0]->prv_data())); + if (NULL == bottom_data) { + bottom_data = + reinterpret_cast(const_cast(bottom[0]->cpu_data())); + if (NULL == poolingFwd) { + // Now create poolingFwd + status = dnnPoolingCreateForward(&poolingFwd, NULL, + this->algorithm, fwd_bottom_data->layout_usr, + kernel_size, kernel_stride, src_offset, dnnBorderZeros); + CHECK_EQ(status, E_SUCCESS); + + // Now create poolingBwd + status = dnnPoolingCreateBackward(&poolingBwd, NULL, + this->algorithm, fwd_bottom_data->layout_usr, + kernel_size, kernel_stride, src_offset, dnnBorderZeros); + CHECK_EQ(status, E_SUCCESS); + } + } else if (NULL == poolingFwd) { + // Is it the first pass? Create a primitive. + CHECK_EQ((bottom[0]->get_prv_data_descriptor())->get_descr_type(), + PrvMemDescr::PRV_DESCR_MKL2017); + shared_ptr > mem_descr + = boost::static_pointer_cast > + (bottom[0]->get_prv_data_descriptor()); + CHECK(mem_descr != NULL); + + DLOG(INFO) << "Using layout of " << mem_descr->name + << " as input layout for " << this->layer_param_.name(); + + // copy shared_ptr + fwd_bottom_data = mem_descr; + + // Now create poolingFwd + status = dnnPoolingCreateForward(&poolingFwd, NULL, + this->algorithm, fwd_bottom_data->layout_int, kernel_size, + kernel_stride, src_offset, dnnBorderZeros); + CHECK_EQ(status, E_SUCCESS); + + fwd_top_data->create_internal_layout(poolingFwd, dnnResourceDst); + + // Now create poolingBwd + status = dnnPoolingCreateBackward(&poolingBwd, NULL, + this->algorithm, fwd_bottom_data->layout_int, kernel_size, + kernel_stride, src_offset, dnnBorderZeros); + CHECK_EQ(status, E_SUCCESS); + + bwd_top_diff ->create_internal_layout(poolingFwd, dnnResourceDst); + bwd_bottom_diff->create_internal_layout(poolingFwd, dnnResourceSrc); + } + + pooling_res[dnnResourceSrc] = bottom_data; + if 
(fwd_top_data->conversion_needed()) { + top[0]->set_prv_data_descriptor(fwd_top_data); + pooling_res[dnnResourceDst] = + reinterpret_cast(top[0]->mutable_prv_data()); + } else { + pooling_res[dnnResourceDst] = + reinterpret_cast(top[0]->mutable_cpu_data()); + DLOG(INFO) << "Using cpu_data for top in DnnPooling."; + } + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKL_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(poolingFwd, pooling_res); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); + + CHECK_EQ(status, E_SUCCESS); +} + +template +void MKLPoolingLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (!propagate_down[0]) { + return; + } + // Different pooling methods. We explicitly do the switch outside the for + // loop to save time, although this results in more codes. + + const size_t* mask = NULL; // suppress warnings about uninitialized variables + + // The main loop + dnnError_t e; + void* pooling_res[dnnResourceNumber]; + + mask = (top.size() > 1) ? 
+ reinterpret_cast(top[1]->cpu_data()) : + (max_idx_.cpu_data()); + pooling_res[dnnResourceWorkspace] = + reinterpret_cast(const_cast(mask)); + + pooling_res[dnnResourceDiffDst] = bwd_top_diff->get_converted_prv(top[0], + true); + + if (bwd_bottom_diff->conversion_needed()) { + bottom[0]->set_prv_diff_descriptor(bwd_bottom_diff); + pooling_res[dnnResourceDiffSrc] = bottom[0]->mutable_prv_diff(); + } else { + pooling_res[dnnResourceDiffSrc] = bottom[0]->mutable_cpu_diff(); + } + caffe_set(bottom[0]->count(), Dtype(0), + reinterpret_cast(pooling_res[dnnResourceDiffSrc])); + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_, PERFORMANCE_MKL_NAME("BW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + e = dnnExecute(poolingBwd, pooling_res); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_); + + CHECK_EQ(e, E_SUCCESS); +} + + +#ifdef CPU_ONLY +STUB_GPU(MKLPoolingLayer); +#else +template +void MKLPoolingLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) {NOT_IMPLEMENTED;} +template +void MKLPoolingLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) + {NOT_IMPLEMENTED;} +#endif + +INSTANTIATE_CLASS(MKLPoolingLayer); +} // namespace caffe +#endif // #ifdef MKL2017_SUPPORTED diff --git a/src/caffe/layers/mkl_relu_layer.cpp b/src/caffe/layers/mkl_relu_layer.cpp new file mode 100644 index 00000000000..996f1012a1a --- /dev/null +++ b/src/caffe/layers/mkl_relu_layer.cpp @@ -0,0 +1,266 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef MKL2017_SUPPORTED +#include +#include + +#include "caffe/layers/mkl_layers.hpp" +#include "caffe/util/performance.hpp" + +namespace caffe { + +template +MKLReLULayer::~MKLReLULayer() { + dnnDelete(reluFwd_); + dnnDelete(reluBwd_); +} + +template +void MKLReLULayer::Init( + const vector*>& bottom, + const vector*>& top) { + size_t dim = bottom[0]->shape().size(); + this->sizes_.resize(dim); + this->strides_.resize(dim); + for (size_t d = 0; d < dim; ++d) { + this->sizes_[d] = bottom[0]->shape()[dim - 1 - d]; + this->strides_[d] = (d == 0) ? 1 : this->strides_[d-1]*this->sizes_[d-1]; + } + + // Names are for debugging only + this->fwd_bottom_data_->name = "fwd_bottom_data @ " + + this->layer_param_.name(); + this->fwd_top_data_->name = "fwd_top_data @ " + + this->layer_param_.name(); + this->bwd_bottom_diff_->name = "bwd_bottom_diff @ " + + this->layer_param_.name(); + this->bwd_top_diff_->name = "bwd_top_diff @ " + + this->layer_param_.name(); + + this->fwd_bottom_data_->create_user_layout(dim, &(this->sizes_[0]), + &(this->strides_[0]), false); + this->fwd_top_data_ ->create_user_layout(dim, &(this->sizes_[0]), + &(this->strides_[0]), false); + this->bwd_bottom_diff_->create_user_layout(dim, &(this->sizes_[0]), + &(this->strides_[0]), false); + this->bwd_top_diff_ ->create_user_layout(dim, &(this->sizes_[0]), + &(this->strides_[0]), false); + + // "Lazy" allocation because here we don't know + // what layout is used by neighbours. 
+ dnnDelete(reluFwd_); + dnnDelete(reluBwd_); +} + +template +void MKLReLULayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { +// CHECK_EQ(top[0]->shape(), bottom[0]->shape()); + Init(bottom, top); +} + +template +void MKLReLULayer::Reshape(const vector*>& bottom, + const vector*>& top) { + NeuronLayer::Reshape(bottom, top); + + // Here I check for sizes whther to destroy primitives + size_t dim = bottom[0]->shape().size(); + + // If dimensions of blobs are the same as they were then + // do not really destroy primitives + if (dim == this->sizes_.size()) { + // .. check for strides and size dims if they corresspond each other + + // TODO: speedup comparison? + bool is_match = true; + for (size_t d = 0; d < dim; ++d) { + is_match = is_match && (this->sizes_[d] == + bottom[0]->shape()[dim - 1 - d]); + is_match = is_match && (this->strides_[d] == ((d == 0) ? 1 : + this->strides_[d-1]*this->sizes_[d-1])); + } + + // If no new modification was done to layout sizes, + // strides realtivly to previous iteration then + // no primitives recreation is needed + if (is_match) { + return; + } + } + + Init(bottom, top); +} + + +template +void MKLReLULayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + void* bottom_data = + reinterpret_cast(const_cast(bottom[0]->prv_data())); + + if (bottom_data) { + if (reluFwd_ == NULL) { + // first pass + CHECK_EQ((bottom[0]->get_prv_data_descriptor())->get_descr_type(), + PrvMemDescr::PRV_DESCR_MKL2017); + shared_ptr > mem_descr + = boost::static_pointer_cast > + (bottom[0]->get_prv_data_descriptor()); + CHECK(mem_descr != NULL); + + Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); + dnnError_t e; + e = dnnReLUCreateForward(&reluFwd_, NULL, mem_descr->layout_int, + negative_slope); + CHECK_EQ(e, E_SUCCESS); + e = dnnReLUCreateBackward(&reluBwd_, NULL, mem_descr->layout_int, + mem_descr->layout_int, negative_slope); + CHECK_EQ(e, E_SUCCESS); + + DLOG(INFO) << "Using layout of " << 
mem_descr->name + << " as input layout for " << this->layer_param_.name(); + // copy shared_ptr + fwd_bottom_data_ = mem_descr; + + fwd_top_data_ ->create_internal_layout(reluFwd_, dnnResourceDst); + bwd_top_diff_ ->create_internal_layout(reluFwd_, dnnResourceDst); + bwd_bottom_diff_->create_internal_layout(reluFwd_, dnnResourceSrc); + } + } else { + DLOG(INFO) << "Using cpu_data in MKLReLULayer."; + bottom_data = + reinterpret_cast(const_cast(bottom[0]->cpu_data())); + if (reluFwd_ == NULL) { + // first pass + dnnError_t e; + Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); + e = dnnReLUCreateForward(&reluFwd_, NULL, + fwd_bottom_data_->layout_usr, negative_slope); + CHECK_EQ(e, E_SUCCESS); + e = dnnReLUCreateBackward(&reluBwd_, NULL, + fwd_bottom_data_->layout_usr, fwd_bottom_data_->layout_usr, + negative_slope); + CHECK_EQ(e, E_SUCCESS); + } + } + + dnnError_t e; + void* relu_res[dnnResourceNumber]; + relu_res[dnnResourceSrc] = bottom_data; + + if (fwd_top_data_->conversion_needed()) { + if (bottom[0] == top[0]) { +// top[0]->set_prv_data_descriptor(fwd_bottom_data_); + DLOG(INFO) << "Using bottom as top (in-place) in mklReLU."; + } else { + top[0]->set_prv_data_descriptor(fwd_top_data_); + DLOG(INFO) << "Using mutable_prv (out-of-place) in mklReLU."; + } + relu_res[dnnResourceDst] = + reinterpret_cast(top[0]->mutable_prv_data()); + } else { + relu_res[dnnResourceDst] = + reinterpret_cast(top[0]->mutable_cpu_data()); + DLOG(INFO) << "Using cpu_data for top in mklReLU."; + } + + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKL_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + e = dnnExecute(reluFwd_, relu_res); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); + + CHECK_EQ(e, E_SUCCESS); +} + +template +void MKLReLULayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + if (propagate_down[0]) { + void* bottom_data = + reinterpret_cast(const_cast(bottom[0]->prv_data())); + if (NULL == 
bottom_data) { + bottom_data = + reinterpret_cast(const_cast(bottom[0]->cpu_data())); + } + + dnnError_t e; + void* relu_res[dnnResourceNumber]; + relu_res[dnnResourceSrc] = bottom_data; + + relu_res[dnnResourceDiffDst] = bwd_top_diff_->get_converted_prv(top[0], + true); + if (bwd_bottom_diff_->conversion_needed()) { + if (NULL != bottom[0]->get_prv_data_descriptor()) { + bottom[0]->set_prv_diff_descriptor(fwd_bottom_data_); + DLOG(INFO) << "Using top as bottom (in-place) in mklReLU-backward."; + } else { + bottom[0]->set_prv_diff_descriptor(bwd_bottom_diff_); + DLOG(INFO) << "Using top as bottom (in-place) in mklReLU-backward."; + } + relu_res[dnnResourceDiffSrc] = bottom[0]->mutable_prv_diff(); + } else { + relu_res[dnnResourceDiffSrc] = bottom[0]->mutable_cpu_diff(); + DLOG(INFO) << "Using mutable_prv (out-of-place) in mklReLU-backward."; + } + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_, PERFORMANCE_MKL_NAME("BW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + e = dnnExecute(reluBwd_, relu_res); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_); + + CHECK_EQ(e, E_SUCCESS); + } +} + +#ifdef CPU_ONLY +STUB_GPU(MKLReLULayer); +#else +template +void MKLReLULayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) {NOT_IMPLEMENTED;} +template +void MKLReLULayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) + {NOT_IMPLEMENTED;} +#endif + +INSTANTIATE_CLASS(MKLReLULayer); +} // namespace caffe +#endif // #ifdef MKL2017_SUPPORTED diff --git a/src/caffe/layers/mkl_split_layer.cpp b/src/caffe/layers/mkl_split_layer.cpp new file mode 100644 index 00000000000..4842a518bd1 --- /dev/null +++ b/src/caffe/layers/mkl_split_layer.cpp @@ -0,0 +1,243 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. 
+ +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#if defined(MKL2017_SUPPORTED) +#include + +#include "caffe/layers/mkl_layers.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/performance.hpp" + +namespace caffe { + +template +MKLSplitLayer::~MKLSplitLayer() { + dnnDelete(sumPrimitive); +} + +template +void MKLSplitLayer::Init( + const vector*>& bottom, + const vector*>& top) { + num_tops = top.size(); + size_t dim_src = bottom[0]->shape().size(); + this->sizes_src_.resize(dim_src); + this->strides_src_.resize(dim_src); + for (size_t d = 0; d < dim_src; ++d) { + this->sizes_src_[d] = bottom[0]->shape()[dim_src - d - 1]; + this->strides_src_[d] = (d == 0) ? + 1 : this->strides_src_[d-1]*this->sizes_src_[d-1]; + } + + for (size_t i = 0; i < num_tops; ++i) { + bwd_top_diff.push_back(shared_ptr >(new MKLDiff)); + bwd_top_diff[i]->create_user_layout(dim_src, + &(this->sizes_src_[0]), + &(this->strides_src_[0]), + false); + } + + // Blob-wise coefficients for the elementwise operation. + coeffs_ = vector(top.size(), 1); + + bwd_bottom_diff->create_user_layout(dim_src, + &(this->sizes_src_[0]), + &(this->strides_src_[0]), + false); + + // Primitive will be created at first time it is to be used + dnnDelete(sumPrimitive); +} + +template +void MKLSplitLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + Init(bottom, top); +} + +template +void MKLSplitLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + int count_ = bottom[0]->count(); + for (int i = 0; i < top.size(); ++i) { + // Do not allow in-place computation in the SplitLayer. Instead, share data + // by reference in the forward pass, and keep separate diff allocations in + // the backward pass. (Technically, it should be possible to share the diff + // blob of the first split output with the input, but this seems to cause + // some strange effects in practice...) 
+ CHECK_NE(top[i], bottom[0]) << this->type() << " Layer does not " + "allow in-place computation."; + top[i]->ReshapeLike(*bottom[0]); + CHECK_EQ(count_, top[i]->count()); + } + + // Here we check + // Here I check for sizes whther to destroy primitives + size_t dim_src = bottom[0]->shape().size(); + + // If dimensions of blobs are the same as they were then + // do not really destroy primitives + if (dim_src == this->sizes_src_.size()) { + // .. check for strides and size dims if they corresspond each other + + // TODO: speedup comparison? + bool is_match = true; + for (size_t d = 0; d < dim_src; ++d) { + is_match = is_match && (this->sizes_src_[d] == + bottom[0]->shape()[dim_src - 1 - d]); + is_match = is_match && (this->strides_src_[d] == ((d == 0) ? 1 : + this->strides_src_[d-1]*this->sizes_src_[d-1])); + } + + // If no new modification was done to layout sizes, + // strides realtivly to previous iteration then + // no primitives recreation is needed + if (is_match) { + return; + } + } + + Init(bottom, top); +} + +template +void MKLSplitLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + for (int i = 0; i < top.size(); ++i) { + top[i]->ShareData(*bottom[0]); + } +} + +template +void MKLSplitLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (!propagate_down[0]) { return; } + dnnError_t e; + vector top_diff; + bool num_prv = 0; + for (size_t i = 0; i < num_tops; i++) { + top_diff.push_back(reinterpret_cast( + const_cast(top[i]->prv_diff()))); + if (top_diff[i] != NULL) { + num_prv += 1; + } else { + top_diff[i] = reinterpret_cast( + reinterpret_cast(const_cast(top[i]->cpu_diff()))); + } + } + + if (num_prv > 0) { + if (sumPrimitive == NULL) { + dnnLayout_t int_layout = NULL; + for (size_t i = 0; i < num_tops; ++i) { + if (top[i]->prv_diff() != NULL) { + CHECK((top[i]->get_prv_diff_descriptor())->get_descr_type() == + PrvMemDescr::PRV_DESCR_MKL2017); + shared_ptr > mem_descr = + 
boost::static_pointer_cast >( + top[i]->get_prv_diff_descriptor()); + CHECK(mem_descr != NULL); + bwd_top_diff[i] = mem_descr; + if (int_layout == NULL) { + int_layout = mem_descr->layout_int; + } + } + } + e = dnnSumCreate(&sumPrimitive, NULL, num_tops, + int_layout, &coeffs_[0]); + CHECK_EQ(e, E_SUCCESS); + + bwd_bottom_diff->create_internal_layout(sumPrimitive, dnnResourceDst); + + for (size_t i = 0; i < num_tops; ++i) { + if (top[i]->prv_diff() == NULL) { + bwd_top_diff[i]->create_internal_layout(sumPrimitive, + (dnnResourceType_t)(dnnResourceMultipleSrc + i)); + } + } + } + } else { + if (sumPrimitive == NULL) { + e = dnnSumCreate(&sumPrimitive, NULL, num_tops, + bwd_bottom_diff->layout_usr, &coeffs_[0]); + CHECK_EQ(e, E_SUCCESS); + } + } + + void *sum_res[dnnResourceNumber]; + for (int i = 0; i < num_tops; ++i) { + if (bwd_top_diff[i]->convert_to_int) { + sum_res[dnnResourceMultipleSrc + i] = + bwd_top_diff[i]->get_converted_prv(top[i], false); + } else { + sum_res[dnnResourceMultipleSrc + i] = + reinterpret_cast(top_diff[i]); + } + } + + if (bwd_bottom_diff->conversion_needed()) { + bottom[0]->set_prv_diff_descriptor(bwd_bottom_diff); + sum_res[dnnResourceDst] = + reinterpret_cast(bottom[0]->mutable_prv_diff()); + } else { + sum_res[dnnResourceDst] = + reinterpret_cast(bottom[0]->mutable_cpu_diff()); + } + + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKL_NAME("BW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + e = dnnExecute(sumPrimitive, sum_res); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); + + CHECK_EQ(e, E_SUCCESS); +} + +#ifdef CPU_ONLY +STUB_GPU(MKLSplitLayer); +#else +template +void MKLSplitLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) {NOT_IMPLEMENTED;} +template +void MKLSplitLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) + {NOT_IMPLEMENTED;} +#endif + +INSTANTIATE_CLASS(MKLSplitLayer); +} // namespace caffe +#endif // #if defined(MKL2017_SUPPORTED) diff --git 
a/src/caffe/layers/mkldnn_batch_norm_layer.cpp b/src/caffe/layers/mkldnn_batch_norm_layer.cpp new file mode 100644 index 00000000000..4db92b9432f --- /dev/null +++ b/src/caffe/layers/mkldnn_batch_norm_layer.cpp @@ -0,0 +1,513 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef MKLDNN_SUPPORTED +#include +#include +#include "caffe/filler.hpp" + +#include "caffe/layers/mkldnn_layers.hpp" + +namespace caffe { + +template +void MKLDNNBatchNormLayer::LayerSetUp(const vector*>& bottom + ,const vector*>& top) +{ + VLOG(1) << "MKLDNNBatchNormLayer::LayerSetUp: " << this->layer_param_.name(); + + Layer::LayerSetUp(bottom, top); + + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + num_ = bottom[0]->num(); + + eps_ = this->layer_param_.batch_norm_param().eps(); + use_weight_bias_ = this->layer_param_.batch_norm_param().use_weight_bias(); + bias_term_ = this->layer_param_.batch_norm_param().bias_term(); + moving_average_fraction_ = this->layer_param_.batch_norm_param().moving_average_fraction(); + use_global_stats_ = this->phase_ == TEST; + + this->blobs_.resize(3 + (use_weight_bias_ ? 1:0) + (use_weight_bias_ && bias_term_ ? 
1:0)); + + vector sz; + sz.push_back(channels_); + this->blobs_[0].reset(new Blob(sz)); + this->blobs_[1].reset(new Blob(sz)); + sz[0]=1; + this->blobs_[2].reset(new Blob(sz)); + for (int i = 0; i < 3; ++i) { + caffe_set(this->blobs_[i]->count(), Dtype(0), + this->blobs_[i]->mutable_cpu_data()); + } + + if (use_weight_bias_) { + // Initialize scale and shift + vector scaleshift_shape(1); + scaleshift_shape[0] = channels_; + VLOG(1) << "MKLDNNBatchNormLayer::LayerSetUp: channels_ = " << channels_; + + this->blobs_[3].reset(new Blob(scaleshift_shape)); + FillerParameter filler_param(this->layer_param_.batch_norm_param().filler()); + if (!this->layer_param_.batch_norm_param().has_filler()) { + filler_param.set_type("constant"); + filler_param.set_value(1); + } + shared_ptr > filler(GetFiller(filler_param)); + VLOG(1) << "MKLDNNBatchNormLayer::LayerSetUp: scaleshift " << __LINE__ << ":" << this->layer_param_.name(); + filler->Fill(this->blobs_[3].get()); + + if ( bias_term_ ) { + this->blobs_[4].reset(new Blob(scaleshift_shape)); + FillerParameter bias_filler_param(this->layer_param_.batch_norm_param().bias_filler()); + if (!this->layer_param_.batch_norm_param().has_bias_filler()) { + bias_filler_param.set_type("constant"); + bias_filler_param.set_value(0); + } + shared_ptr > bias_filler(GetFiller(bias_filler_param)); + VLOG(1) << "MKLDNNBatchNormLayer::LayerSetUp: bias " << __LINE__ << ":" << this->layer_param_.name(); + bias_filler->Fill(this->blobs_[4].get()); + } + } + + // Mask statistics from optimization by setting local learning rates + // for mean, variance, and the bias correction to zero. 
+ for (int i = 0; i < 3; ++i) { + if (this->layer_param_.param_size() == i) { + ParamSpec* fixed_param_spec = this->layer_param_.add_param(); + fixed_param_spec->set_lr_mult(0.f); + } else { + CHECK_EQ(this->layer_param_.param(i).lr_mult(), 0.f) + << "Cannot configure batch normalization statistics as layer " + << "parameters."; + } + } +} + +template +void MKLDNNBatchNormLayer::Reshape(const vector*>& bottom + ,const vector*>& top) +{ + VLOG(1) << "MKLDNNBatchNormLayer::Reshape: " << this->layer_param_.name(); + + this->width_ = bottom[0]->width(); + this->height_ = bottom[0]->height(); + this->num_ = bottom[0]->num(); + this->channels_ = bottom[0]->channels(); + + //Fix: should reshape the top blob with the real size of bottom blob + //top[0]->Reshape(this->num_, this->channels_, this->height_, this->width_); +#ifdef DEBUG + LOG(INFO) << "size of bottom blob: " << bottom[0]->shape().size(); +#endif + top[0]->ReshapeLike(*bottom[0]); +} + +template +void MKLDNNBatchNormLayer::InitBatchNorm(const vector*>& bottom, const vector*>& top) +{ + if (std::is_same::value) NOT_IMPLEMENTED; + auto propagation = this->phase_ == TEST ? 
prop_kind::forward_scoring : prop_kind::forward_training; + + unsigned flags = 0; + if (use_weight_bias_) flags |= use_scale_shift; + if (use_global_stats_) flags |= use_global_stats; + + int32_t n = this->num_; + int32_t iw = this->width_; + int32_t ih = this->height_; + int32_t ic = this->channels_; + + bool bottom_data_is_prv = (const_cast(bottom[0]->prv_data()) != NULL); + + engine cpu_engine = CpuEngine::Instance().get_engine(); + memory::data_type mpcsn = memory::data_type::f32; + + // ---- Initialize memory descriptors ------------- + shared_ptr input_md, output_md, scaleshift_md; + shared_ptr usr_mpd, prv_mpd, scaleshift_mpd; + if (bottom_data_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(bottom[0]); + input_md.reset(new memory::desc(mem_descr->prv_memory_pd()->desc())); + usr_mpd = mem_descr->usr_memory_pd(); + prv_mpd = mem_descr->prv_memory_pd(); + } else { + input_md.reset(new memory::desc({{n, ic, ih, iw}}, mpcsn, memory::format::nchw)); //MKLDNN batch norm only support 4D memory descriptor! + usr_mpd.reset(new memory::primitive_desc(*input_md, cpu_engine)); + } + output_md = input_md; + + // ---- Initialize BatchNorm primitive descriptor ------------- + batch_normalization_forward::desc BatchNormFwd_desc(propagation, *input_md, eps_, flags); + // ---- Determining engine to use ----------------------- + std::string subengines = this->layer_param_.engine(); + if (subengines == "" || subengines == "MKLDNN") + subengines = "MKLDNN:CPU"; + EngineParser ep(subengines); + unsigned subEngineIndex = 0; + for(; subEngineIndex < ep.getNumberOfSubEngines(); subEngineIndex++) { + try { + BatchNormFwd_pd.reset(new batch_normalization_forward::primitive_desc(BatchNormFwd_desc, + ep.getMKLDNNSubEngine(subEngineIndex))); + } + catch(...) 
{ + continue; + } + break; + } + + CHECK(BatchNormFwd_pd); + + // ---- Create memory --------------------- + if (use_weight_bias_) { + scaleshift_memory.reset(new memory(BatchNormFwd_pd->weights_primitive_desc())); + } + + // --- init primitive and prv_memory descriptors ---------------------- + fwd_bottom_data.reset(new MKLDNNData(usr_mpd, prv_mpd, bottom[0], this)); + input_primitive = fwd_bottom_data->create_input(false); + + fwd_top_data.reset(new MKLDNNData(usr_mpd, prv_mpd, top[0], this)); + output_memory = fwd_top_data->create_output_memory(); + + // ---- Create BatchNorm -------------------- + if (this->phase_ == TEST && !use_global_stats_) { + if (use_weight_bias_) { + BatchNormFwd.reset(new batch_normalization_forward(*BatchNormFwd_pd, + *input_primitive, *scaleshift_memory, *output_memory)); + } else { + BatchNormFwd.reset(new batch_normalization_forward(*BatchNormFwd_pd, + *input_primitive, *output_memory)); + } + } else { + mean_memory.reset(new memory(BatchNormFwd_pd->mean_primitive_desc())); + variance_memory.reset(new memory(BatchNormFwd_pd->variance_primitive_desc())); + + if (use_global_stats_) { + caffe_copy(this->channels_, this->blobs_[0]->cpu_data(), + static_cast(mean_memory->get_data_handle())); + caffe_copy(this->channels_, this->blobs_[1]->cpu_data(), + static_cast(variance_memory->get_data_handle())); + if (use_weight_bias_) { + BatchNormFwd.reset(new batch_normalization_forward(*BatchNormFwd_pd, + *input_primitive, (const primitive::at)*mean_memory, + (const primitive::at)*variance_memory, *scaleshift_memory, + *output_memory)); + } else { + BatchNormFwd.reset(new batch_normalization_forward(*BatchNormFwd_pd, + *input_primitive, (const primitive::at)*mean_memory, + (const primitive::at)*variance_memory, *output_memory)); + } + } else { + if (use_weight_bias_) { + BatchNormFwd.reset(new batch_normalization_forward(*BatchNormFwd_pd, + *input_primitive, *scaleshift_memory, *output_memory, + *mean_memory, *variance_memory)); + } else { + 
BatchNormFwd.reset(new batch_normalization_forward(*BatchNormFwd_pd, + *input_primitive, *output_memory, *mean_memory, *variance_memory)); + } + } + } + + //fwd_bottom_data->set_mkldnn_primitive(BatchNormFwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive fwd_bottom_data_primitive_transfer(input_primitive); + fwd_bottom_data->set_mkldnn_primitive(fwd_bottom_data_primitive_transfer); + + //fwd_top_data->set_mkldnn_primitive(BatchNormFwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive fwd_top_data_memory_transfer(output_memory); + fwd_top_data->set_mkldnn_primitive(fwd_top_data_memory_transfer); + + //Fix: MKLDNN batch norm only support 4D memory descriptor! Use 4D for calculation and reshape to 2D for output! + bool has_spatial = (bottom[0]->shape().size() != 2); +#ifdef DEBUG + LOG(INFO) << "has_spatial flag value: " << has_spatial; +#endif + if (has_spatial == false) + { +#ifdef DEBUG + LOG(INFO) << "size of bottom blob: " << bottom[0]->shape().size(); + LOG(INFO) << "MKLDNN batch norm only support 4D memory descriptor! Use 4D for calculation and reshape to 2D for output!"; +#endif + vector top_shape; + top_shape.push_back(bottom[0]->num()); + top_shape.push_back(bottom[0]->channels()); + top[0]->Reshape(top_shape); + } +} + + +template +void MKLDNNBatchNormLayer::Forward_cpu(const vector*>& bottom + ,const vector*>& top) +{ + VLOG(1) << "MKLDNNBatchNormLayer::Forward_cpu: " << this->layer_param_.name(); +#ifdef DEBUG + LOG(INFO) << "MKLDNNBatchNormLayer::Forward_cpu: " << this->layer_param_.name(); +#endif + + if(BatchNormFwd_pd == NULL) + InitBatchNorm(bottom, top); + // making reorders if needed. + fwd_bottom_data->sync_before_read(); + // update top that head at prv + fwd_top_data->sync_before_write(); + + if (use_global_stats_) { + // use the stored mean/variance estimates. + const Dtype scale_factor = this->blobs_[2]->cpu_data()[0] == 0 ? 
+ 0 : 1 / this->blobs_[2]->cpu_data()[0]; + Dtype *mean_buffer_ = (Dtype *)(mean_memory->get_data_handle()); + Dtype *variance_buffer_ = (Dtype *)(variance_memory->get_data_handle()); + + //TODO: optimize, do this operation in the InitBatchNorm, so no need to calculate each time + caffe_cpu_scale(this->blobs_[0]->count(), scale_factor, + this->blobs_[0]->cpu_data(), mean_buffer_); + caffe_cpu_scale(this->blobs_[1]->count(), scale_factor, + this->blobs_[1]->cpu_data(), variance_buffer_); + } + if (use_weight_bias_) { + Dtype* scaleShift_buffer_ = (Dtype *)(scaleshift_memory->get_data_handle()); + // Fill ScaleShift buffer + for (int i = 0; i < this->channels_; i++) { + scaleShift_buffer_[i] = this->blobs_[3]->cpu_data()[i]; + scaleShift_buffer_[channels_ + i] = 0; + if (bias_term_) { + scaleShift_buffer_[channels_ + i] = this->blobs_[4]->cpu_data()[i]; + } + } + } + + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKLDNN_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + BatchNormFwd.submit(); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); + + if (this->phase_ == TRAIN && !use_global_stats_) { + // compute and save moving average + Dtype *mean_buffer_ = (Dtype *)(mean_memory->get_data_handle()); + Dtype *variance_buffer_ = (Dtype *)(variance_memory->get_data_handle()); + this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_; + this->blobs_[2]->mutable_cpu_data()[0] += 1; + caffe_cpu_axpby(this->channels_, Dtype(1), mean_buffer_, + moving_average_fraction_, this->blobs_[0]->mutable_cpu_data()); + int m = bottom[0]->count()/channels_; + Dtype bias_correction_factor = m > 1 ? 
Dtype(m)/(m-1) : 1; + caffe_cpu_axpby(this->channels_, bias_correction_factor, + variance_buffer_, moving_average_fraction_, + this->blobs_[1]->mutable_cpu_data()); + } + +} + +template +void MKLDNNBatchNormLayer::InitBatchNormBwd( + const vector*>& top, const vector& propagate_down, + const vector*>& bottom) +{ + if (std::is_same::value) NOT_IMPLEMENTED; + + int32_t n = this->num_; + int32_t w = this->width_; + int32_t h = this->height_; + int32_t c = this->channels_; + + unsigned flags = 0; + if (use_weight_bias_) flags |= use_scale_shift; + if (use_global_stats_) flags |= use_global_stats; + + bool top_diff_is_prv = (const_cast(top[0]->prv_diff()) != NULL); + bool inplace = (bottom[0] == top[0]); + + engine cpu_engine = CpuEngine::Instance().get_engine(); + memory::data_type mpcsn = memory::data_type::f32; + + // ---- Initialize memory descriptors ------------- + shared_ptr top_diff_md, top_data_md; + shared_ptr usr_diff_mpd(NULL), prv_diff_mpd(NULL); + if (top_diff_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(top[0]); + top_diff_md.reset(new memory::desc(mem_descr->prv_memory_pd()->desc())); + usr_diff_mpd = mem_descr->usr_memory_pd(); + prv_diff_mpd = mem_descr->prv_memory_pd(); + } else { + top_diff_md.reset(new memory::desc({{n, c, h, w}}, mpcsn, memory::format::nchw)); //MKLDNN batch norm only support 4D memory descriptor! 
+ usr_diff_mpd.reset(new memory::primitive_desc(*top_diff_md, cpu_engine)); + } + + // ---- Initialize bnrm primitive descriptor ------------- + batch_normalization_backward::desc BatchNormBwd_desc(prop_kind::backward, + *top_diff_md, output_memory->get_primitive_desc().desc(), eps_, + flags); + // ---- Determining engine to use ----------------------- + std::string subengines = this->layer_param_.engine(); + if (subengines == "" || subengines == "MKLDNN") + subengines = "MKLDNN:CPU"; + EngineParser ep(subengines); + unsigned subEngineIndex = 0; + for(; subEngineIndex < ep.getNumberOfSubEngines(); subEngineIndex++) { + try { + BatchNormBwd_pd.reset(new batch_normalization_backward::primitive_desc( + BatchNormBwd_desc, ep.getMKLDNNSubEngine(subEngineIndex), + *BatchNormFwd_pd)); + } + catch(...) { + continue; + } + break; + } + + CHECK(BatchNormBwd_pd); + + // --- init primitive and prv_memory descriptors ---------------------- + bwd_top_diff.reset(new MKLDNNDiff(usr_diff_mpd, prv_diff_mpd, top[0], this)); + bwd_top_diff->name = "bwd_top_diff_data @ " + this->layer_param_.name(); + bwd_top_diff_primitive = bwd_top_diff->create_input(false); + + bwd_bottom_diff.reset(new MKLDNNDiff(usr_diff_mpd, prv_diff_mpd, bottom[0], this)); + bwd_bottom_diff->name = "bwd_bottom_diff_data @ " + this->layer_param_.name(); + bwd_bottom_diff_memory = bwd_bottom_diff->create_output_memory(inplace); + + if (use_weight_bias_) { + bwd_scaleshift_diff_memory.reset(new memory( + BatchNormFwd_pd->weights_primitive_desc())); + BatchNormBwd.reset(new batch_normalization_backward(*BatchNormBwd_pd, + *input_primitive, *mean_memory, *variance_memory, + *bwd_top_diff_primitive, *scaleshift_memory, + *bwd_bottom_diff_memory, *bwd_scaleshift_diff_memory)); + } else { + BatchNormBwd.reset(new batch_normalization_backward(*BatchNormBwd_pd, + *input_primitive, *mean_memory, *variance_memory, + *bwd_top_diff_primitive, *bwd_bottom_diff_memory)); + } + + 
//bwd_top_diff->set_mkldnn_primitive(BatchNormBwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwd_top_diff_primitive_transfer(bwd_top_diff_primitive); + bwd_top_diff->set_mkldnn_primitive(bwd_top_diff_primitive_transfer); + + //bwd_bottom_diff->set_mkldnn_primitive(BatchNormBwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwd_bottom_diff_memory_transfer(bwd_bottom_diff_memory); + bwd_bottom_diff->set_mkldnn_primitive(bwd_bottom_diff_memory_transfer); +} + +template +void MKLDNNBatchNormLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) +{ + VLOG(1) << "MKLDNNBatchNormLayer::Backward_cpu: " << this->layer_param_.name(); +#ifdef DEBUG + LOG(INFO) << "MKLDNNBatchNormLayer::Backward_cpu: " << this->layer_param_.name(); +#endif + + if (BatchNormBwd_pd == NULL) + InitBatchNormBwd(top, propagate_down, bottom); + // making reorders if needed. + bwd_top_diff->sync_before_read(); + // update bottom that head at prv + bwd_bottom_diff->sync_before_write(); + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_, PERFORMANCE_MKLDNN_NAME("BW")); + PERFORMANCE_MEASUREMENT_BEGIN(); +#ifdef DEBUG + if (bottom[0]->prv_data() != NULL) + { + LOG(INFO) << "Debug: Bottom prv data: " << *bottom[0]->prv_data(); + } + else + { + LOG(INFO) << "Debug: Bottom prv data is NULL!"; + } + + if (top[0]->prv_diff() != NULL) + { + LOG(INFO) << "Debug: Top prv diff: " << *top[0]->prv_diff(); + } + else + { + LOG(INFO) << "Debug: Top prv diff is NULL!"; + LOG(INFO) << "Debug: Top cpu diff: " << *top[0]->cpu_diff(); + } +#endif + BatchNormBwd.submit(); +#ifdef DEBUG + if (bottom[0]->prv_diff() != NULL) + { + LOG(INFO) << "Debug: Bottom prv diff: " << *bottom[0]->prv_diff(); + } + else + { + LOG(INFO) << "Debug: Bottom prv diff is NULL!"; + LOG(INFO) << "Debug: Bottom cpu diff: " << *bottom[0]->cpu_diff(); + } +#endif + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_); + + /* FIXME: this wouldn't work with lazy stream */ + if 
(use_weight_bias_) { + Dtype* dw = (Dtype *)(bwd_scaleshift_diff_memory->get_data_handle()); + for (int i = 0; i < this->channels_; i++) + this->blobs_[3]->mutable_cpu_diff()[i] = dw[i]; + + if (bias_term_) { + dw += channels_; + for (int i = 0; i < this->channels_; i++) + this->blobs_[4]->mutable_cpu_diff()[i] = dw[i]; + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(MKLDNNBatchNormLayer); +#else +template +void MKLDNNBatchNormLayer::Forward_gpu(const vector*>& bottom + ,const vector*>& top) +{ NOT_IMPLEMENTED; } + +template +void MKLDNNBatchNormLayer::Backward_gpu(const vector*>& top + ,const vector& propagate_down + ,const vector*>& bottom) +{ NOT_IMPLEMENTED; } +#endif + +INSTANTIATE_CLASS(MKLDNNBatchNormLayer); +} // namespace caffe +#endif // #ifdef MKLDNN_SUPPORTED diff --git a/src/caffe/layers/mkldnn_concat_layer.cpp b/src/caffe/layers/mkldnn_concat_layer.cpp new file mode 100644 index 00000000000..a0a1cd487cc --- /dev/null +++ b/src/caffe/layers/mkldnn_concat_layer.cpp @@ -0,0 +1,319 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifdef MKLDNN_SUPPORTED +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/mkldnn_layers.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void MKLDNNConcatLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + // VLOG(1) << "MKLDNNConcatLayer::LayerSetUp: " << this->layer_param_.name(); + + int dim_src = bottom[0]->shape().size(); + // int dim_dst = dim_src; + + num_concats_ = bottom.size(); + channels_ = 0; + + for (auto i = 1; i < num_concats_; ++i) { + CHECK_EQ(bottom[0]->num(), bottom[i]->num()); + CHECK_EQ(bottom[0]->height(), bottom[i]->height()); + CHECK_EQ(bottom[0]->width(), bottom[i]->width()); + } + + split_channels.reserve(num_concats_); + for (auto i = 0; i < num_concats_; ++i) { + CHECK_EQ(dim_src, bottom[i]->shape().size()); + + split_channels[i] = bottom[i]->channels(); + channels_ += split_channels[i]; + } +} + +template +void MKLDNNConcatLayer::Reshape(const 
vector*>& bottom, + const vector*>& top) { + // VLOG(1) << "MKLDNNConcatLayer::Reshape: " << this->layer_param_.name(); + + num_ = bottom[0]->num(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + + top[0]->Reshape(num_, channels_, height_, width_); +} + +template +void MKLDNNConcatLayer::InitConcatFwd(const vector*>& bottom, + const vector*>& top) { + if (std::is_same::value) NOT_IMPLEMENTED; + + //Fix: MKLDNN concat layer should use 4D blob as input! Reshape the 2D input blob into 4D for calculation! + bool has_spatial = (bottom[0]->shape().size() != 2); +#ifdef DEBUG + LOG(INFO) << "has_spatial flag value: " << has_spatial; +#endif + if (has_spatial == false) + { +#ifdef DEBUG + LOG(INFO) << "size of bottom blob: " << bottom[0]->shape().size(); + LOG(INFO) << "size of top blob: " << top[0]->shape().size(); + LOG(INFO) << "MKLDNN concat layer only support 4D blob as input! Reshape the 2D input blob into 4D for calculation!"; +#endif + vector bottom_4D_shape; + int bottom_4D_height = 1; + int bottom_4D_width = 1; + bottom_4D_shape.push_back(bottom[0]->num()); + bottom_4D_shape.push_back(bottom[0]->channels()); + bottom_4D_shape.push_back(bottom_4D_height); + bottom_4D_shape.push_back(bottom_4D_width); + for (auto i = 0; i < num_concats_; i++) + { + bottom[i]->Reshape(bottom_4D_shape); + } + } + + engine cpu_engine = CpuEngine::Instance().get_engine(); + memory::data_type data_type = memory::data_type::f32; + // memory::format mfmt_any = memory::format::any; + memory::format mfmt_nchw = memory::format::nchw; + + memory::dims output_tz = {num_, channels_, height_, width_}; + + std::vector srcs_mpd; + std::vector srcs; + for (auto i = 0; i < num_concats_; i++) { + fwd_bottom_data.push_back(boost::shared_ptr >()); + memory::dims input_tz = {num_, split_channels[i], height_, width_}; + memory::format src_mfmt = mfmt_nchw; + shared_ptr prv_src_mpd; + shared_ptr usr_src_mpd( + new memory::primitive_desc({input_tz, data_type, mfmt_nchw}, cpu_engine)); + 
+ if (const_cast(bottom[i]->prv_data()) != NULL) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(bottom[i]); + src_mfmt = static_cast( + mem_descr->prv_memory_pd()->desc().data.format); + prv_src_mpd.reset(new memory::primitive_desc( + {input_tz, data_type, src_mfmt}, cpu_engine)); + } + + srcs_mpd.push_back(memory::primitive_desc( + {input_tz, data_type, src_mfmt}, cpu_engine)); + + fwd_bottom_data[i].reset(new MKLDNNData( + usr_src_mpd, prv_src_mpd, bottom[i], this)); + + fwd_input_primitives_.push_back(fwd_bottom_data[i]->create_input(false)); + fwd_input_primitives_at_.push_back(*fwd_input_primitives_[i]); + } + + shared_ptr usr_dst_mpd(new memory::primitive_desc( + {output_tz, data_type, mfmt_nchw}, cpu_engine)); + + // FIXME: concat dimension + concat_dimension = 1; + concatFwd_pd.reset(new concat::primitive_desc(concat_dimension, srcs_mpd)); + + shared_ptr prv_dst_mpd(new memory::primitive_desc( + concatFwd_pd->dst_primitive_desc())); + + fwd_top_data.reset(new MKLDNNData(usr_dst_mpd, prv_dst_mpd, top[0], + this)); + fwd_output_memory = fwd_top_data->create_output_memory(); + + concatFwd.reset(new concat(*concatFwd_pd, fwd_input_primitives_at_, *fwd_output_memory)); + + for (auto i = 0; i < num_concats_; i++) { + //fwd_bottom_data[i]->set_mkldnn_primitive(concatFwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive fwd_bottom_data_primitive_transfer(fwd_input_primitives_[i]); + fwd_bottom_data[i]->set_mkldnn_primitive(fwd_bottom_data_primitive_transfer); + } + //fwd_top_data->set_mkldnn_primitive(concatFwd); //Wrong passed primitive! (TODO: Checking!) 
+ MKLDNNPrimitive fwd_top_data_memory_transfer(fwd_output_memory); + fwd_top_data->set_mkldnn_primitive(fwd_top_data_memory_transfer); +} + +template +void MKLDNNConcatLayer::InitConcatBwd(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (std::is_same::value) NOT_IMPLEMENTED; + + engine cpu_engine = CpuEngine::Instance().get_engine(); + memory::data_type data_type = memory::data_type::f32; + // memory::format mfmt_any = memory::format::any; + memory::format mfmt_nchw = memory::format::nchw; + memory::format diff_dst_mfmt = mfmt_nchw; + + memory::dims input_tz = {num_, channels_, height_, width_}; + memory::dims offsets = {0, 0, 0, 0}; + + // FIXME: concat dimension + concat_dimension = 1; + + shared_ptr prv_diff_dst_mpd; + shared_ptr usr_diff_dst_mpd( + new memory::primitive_desc({input_tz, data_type, mfmt_nchw}, + cpu_engine)); + + bool top_diff_is_prv = (const_cast(top[0]->prv_diff()) != NULL); + + if (top_diff_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(top[0]); + diff_dst_mfmt = static_cast( + mem_descr->prv_memory_pd()->desc().data.format); + prv_diff_dst_mpd.reset(new memory::primitive_desc( + {input_tz, data_type, diff_dst_mfmt}, cpu_engine)); + } + + bwd_top_diff.reset(new MKLDNNDiff( + usr_diff_dst_mpd, prv_diff_dst_mpd, top[0], this)); + + bwd_reorder_input_memory = bwd_top_diff->create_input(false); + + for (auto i = 0; i < num_concats_; i++) { + bwd_bottom_diff.push_back(boost::shared_ptr >()); + reorders.push_back(MKLDNNPrimitive()); + memory::dims dims = {num_, split_channels[i], height_, width_}; + shared_ptr usr_diff_src_mpd( + new memory::primitive_desc({dims, data_type, mfmt_nchw}, + cpu_engine)); + shared_ptr prv_diff_src_mpd( + new memory::primitive_desc({dims, data_type, diff_dst_mfmt}, + cpu_engine)); + bwd_bottom_diff[i].reset(new MKLDNNDiff( + usr_diff_src_mpd, prv_diff_src_mpd, bottom[i], this)); + + auto view_pd = top_diff_is_prv ? 
+ view::primitive_desc(*prv_diff_dst_mpd, dims, offsets) : + view::primitive_desc(*usr_diff_dst_mpd, dims, offsets); + auto view_dst_pd = view_pd.dst_primitive_desc(); + auto reorder_pd = reorder::primitive_desc(view_dst_pd, *prv_diff_src_mpd); + + bwd_reorder_output_memory.push_back(bwd_bottom_diff[i]->create_output_memory()); + + reorders[i].reset(new reorder(reorder_pd, *bwd_reorder_input_memory, + *bwd_reorder_output_memory[i])); + + offsets[concat_dimension] += dims[concat_dimension]; + + //bwd_bottom_diff[i]->set_mkldnn_primitive(reorders[i]); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwd_bottom_diff_memory_transfer(bwd_reorder_output_memory[i]); + bwd_bottom_diff[i]->set_mkldnn_primitive(bwd_bottom_diff_memory_transfer); + } + + //bwd_top_diff->set_mkldnn_primitive(reorders[0]); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwd_top_diff_memory_transfer(bwd_reorder_input_memory); + bwd_top_diff->set_mkldnn_primitive(bwd_top_diff_memory_transfer); +} + +template +void MKLDNNConcatLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + VLOG(1) << "MKLDNNConcatLayer::Forward_cpu: " << this->layer_param_.name(); +#ifdef DEBUG + LOG(INFO) << "MKLDNNConcatLayer::Forward_cpu: " << this->layer_param_.name(); +#endif + + if (NULL == concatFwd_pd) + InitConcatFwd(bottom, top); + for (auto i = 0; i < num_concats_; i++) { + // making reorders if needed. 
+ fwd_bottom_data[i]->sync_before_read(); + } + // update top that head at prv + fwd_top_data->sync_before_write(); + + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKLDNN_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + concatFwd.submit(); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); +} + +template +void MKLDNNConcatLayer::Backward_cpu(const vector*>& top + ,const vector& propagate_down + ,const vector*>& bottom) +{ + VLOG(1) << "MKLDNNConcatLayer::Backward_cpu: " << this->layer_param_.name(); +#ifdef DEBUG + LOG(INFO) << "MKLDNNConcatLayer::Backward_cpu: " << this->layer_param_.name(); +#endif + + if (reorders.size() == 0) + InitConcatBwd(top, propagate_down, bottom); + bwd_top_diff->sync_before_read(); + for (auto i = 0; i < num_concats_; ++i) { + bwd_bottom_diff[i]->sync_before_write(); + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_, PERFORMANCE_MKLDNN_NAME("BW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + reorders[i].submit(); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_); + } +} + +#ifdef CPU_ONLY +STUB_GPU(MKLDNNConcatLayer); +#else +template +void MKLDNNConcatLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + NOT_IMPLEMENTED; +} + +template +void MKLDNNConcatLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + NOT_IMPLEMENTED; +} +#endif + +INSTANTIATE_CLASS(MKLDNNConcatLayer); + +} // namespace caffe + +#endif diff --git a/src/caffe/layers/mkldnn_convolution_layer.cpp b/src/caffe/layers/mkldnn_convolution_layer.cpp new file mode 100644 index 00000000000..d65dbf3bfac --- /dev/null +++ b/src/caffe/layers/mkldnn_convolution_layer.cpp @@ -0,0 +1,600 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef MKLDNN_SUPPORTED +#include +#include +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/mkldnn_layers.hpp" +//#include "mkl_service.h" + +// TODO: Correct process case if there are no bias +// TODO: Exception handling - mkl-dnn produces exceptions on errors + +namespace caffe { + +template +MKLDNNConvolutionLayer::MKLDNNConvolutionLayer(const LayerParameter& param) + : MKLDNNLayer(), ConvolutionLayer(param) + , fwd_bottom_data(NULL), fwd_top_data(NULL), fwd_weights_data(NULL), fwd_bias_data(NULL) + , bwdd_weights_data(NULL), bwdw_bottom_data(NULL) + , bwdd_bottom_diff(NULL), bwdd_top_diff(NULL) + , bwdw_top_diff(NULL), bwdw_weights_diff(NULL), bwdw_bias_diff(NULL) + , convFwd_pd(NULL), convBwdData_pd(NULL), convBwdWeights_pd(NULL) + , fwd_top_data_memory(NULL), bwdd_bottom_diff_memory(NULL) + , bwdw_weights_diff_memory(NULL), bwdw_bias_diff_memory(NULL) + , fwd_bottom_data_primitive(NULL), fwd_weights_data_primitive(NULL), fwd_bias_data_primitive(NULL) + , bwdd_top_diff_primitive(NULL), bwdd_weights_data_primitive(NULL) + , bwdw_top_diff_primitive(NULL), bwdw_bottom_data_primitive(NULL) + , width_(0), height_(0), width_out_(0), height_out_(0), kernel_w_(0), kernel_h_(0) + , stride_w_(0), stride_h_(0), pad_w_(0), pad_h_(0) +{ + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_weights_); +} + +template +void MKLDNNConvolutionLayer::compute_output_shape() +{ + ConvolutionLayer::compute_output_shape(); + this->height_out_ = (this->height_ + 2 * this->pad_h_ - this->kernel_h_) + / this->stride_h_ + 1; + this->width_out_ = (this->width_ + 2 * this->pad_w_ - this->kernel_w_) + / this->stride_w_ + 1; +} + +template +void MKLDNNConvolutionLayer::init_properties(const vector*>& bottom + , const vector*>& top) +{ + this->stride_w_ = this->stride_.cpu_data()[1]; + this->stride_h_ = this->stride_.cpu_data()[0]; + this->width_ = bottom[0]->width(); + 
this->height_ = bottom[0]->height(); + this->pad_w_ = this->pad_.cpu_data()[1]; + this->pad_h_ = this->pad_.cpu_data()[0]; + this->kernel_w_ = this->kernel_shape_.cpu_data()[1]; + this->kernel_h_ = this->kernel_shape_.cpu_data()[0]; + string _conv_algorithm = this->layer_param_.convolution_param().conv_algorithm(); + if(_conv_algorithm == "direct") + { + conv_algorithm = algorithm::convolution_direct; + } + else if(_conv_algorithm == "winograd") + { + conv_algorithm = algorithm::convolution_winograd; + } + else + { + LOG(ERROR) << "Unsupported convolution algorithm."; + CHECK(false); + } +} + +template +void MKLDNNConvolutionLayer::LayerSetUp(const vector*>& bottom + , const vector*>& top) +{ + VLOG(1) << "<< MKLDNNConvolutionLayer::LayerSetUp: " << this->layer_param_.name(); + ConvolutionLayer::LayerSetUp(bottom, top); + init_properties(bottom, top); + this->bottom_shape_ = &bottom[0]->shape(); +} + +template +void MKLDNNConvolutionLayer::Reshape(const vector*>& bottom + , const vector*>& top) +{ + VLOG(1) << " MKLDNNConvolutionLayer::Reshape: " << this->layer_param_.name(); + BaseConvolutionLayer::ReshapeForMKL(bottom, top); + init_properties(bottom, top); +} + +template +void MKLDNNConvolutionLayer::InitConvolutionFwd(const vector*>& bottom + , const vector*>& top) +{ + if (std::is_same::value) NOT_IMPLEMENTED; + auto propagation = this->phase_ == TEST ? 
prop_kind::forward_scoring : prop_kind::forward_training; + bool relu = this->layer_param_.convolution_param().relu(); + Dtype negative_slope = 0; + if(relu) + { + propagation = prop_kind::forward_inference; + negative_slope = this->layer_param_.relu_param().negative_slope(); + } + + int32_t g = std::max(this->group_, 1); + int32_t n = this->num_; + int32_t iw = this->width_; + int32_t ih = this->height_; + int32_t ic = this->channels_; + + int32_t ow = this->width_out_; + int32_t oh = this->height_out_; + int32_t oc = this->num_output_; + + int32_t kw = this->kernel_w_; + int32_t kh = this->kernel_h_; + + memory::dims convolutionStrides {this->stride_h_, this->stride_w_}; + memory::dims padding {this->pad_h_, this->pad_w_}; + + // ---- Initialize memory descriptors (format = any) to create convolution descriptor ------------- + memory::data_type mpcsn = memory::data_type::f32; + memory::format mfmt_any = memory::format::any; + + memory::dims bottom_tz = {n, ic, ih, iw}; + memory::dims bias_tz = {oc}; + memory::dims top_tz = {n, oc, oh, ow}; + memory::dims weights_tz = (g!= 1) ? 
memory::dims{g, oc/g, ic/g, kh, kw} : memory::dims{oc, ic, kh, kw}; + + // ---- Memory descriptors for initializing of convolution primitive descriptor ------------- + memory::desc init_bottom_md({bottom_tz}, mpcsn, mfmt_any); + memory::desc init_bias_md({bias_tz}, mpcsn, mfmt_any); + memory::desc init_top_md({top_tz}, mpcsn, mfmt_any); + memory::desc init_weights_md({weights_tz}, mpcsn, mfmt_any); + + // ---- Determining engine to use ----------------------- + std::string subengines = this->layer_param_.engine(); + if (subengines == "" || subengines == "MKLDNN") + subengines = "MKLDNN:CPU"; + EngineParser ep(subengines); + unsigned subEngineIndex = 0; + shared_ptr convReluFwd_pd; + mkldnn::algorithm eligibleAlgorithms[2] = {conv_algorithm, algorithm::convolution_direct}; + for (auto &convAlgorithm : eligibleAlgorithms) { + // ---- Initialize convolution primitive descriptor ------------- + shared_ptr convFwd_desc; + if (this->bias_term_) { + convFwd_desc.reset(new convolution_forward::desc(propagation, convAlgorithm + , init_bottom_md, init_weights_md, init_bias_md, init_top_md + , convolutionStrides, padding, padding, padding_kind::zero)); + } else { + convFwd_desc.reset(new convolution_forward::desc(propagation, convAlgorithm + , init_bottom_md, init_weights_md, init_top_md + , convolutionStrides, padding, padding, padding_kind::zero)); + } + shared_ptr convReluFwd_desc; + if(relu) convReluFwd_desc.reset(new convolution_relu_forward::desc(*convFwd_desc, negative_slope)); + + for(subEngineIndex=0; subEngineIndex < ep.getNumberOfSubEngines(); subEngineIndex++) { + try { + convFwd_pd.reset(new convolution_forward::primitive_desc(*convFwd_desc, + ep.getMKLDNNSubEngine(subEngineIndex))); + if(relu) convReluFwd_pd.reset(new convolution_relu_forward::primitive_desc(*convReluFwd_desc, + ep.getMKLDNNSubEngine(subEngineIndex))); + } + catch(...) 
{ + continue; + } + break; + } + if ((convFwd_pd) && (!relu || convReluFwd_pd)) + break; + } + + CHECK(convFwd_pd); + if (relu) CHECK(convReluFwd_pd); + engine cpu_engine = CpuEngine::Instance().get_engine(); + + // ---- Create priv memory primitive descriptors stored as class members ------------- + typedef typename memory::primitive_desc MemPD; // short name for memory::primitive_desc + + shared_ptr prv_fwd_bottom_data_memory_pd(new MemPD(convFwd_pd->src_primitive_desc())); + shared_ptr prv_fwd_top_data_memory_pd(new MemPD(convFwd_pd->dst_primitive_desc())); + shared_ptr prv_fwd_weights_data_memory_pd(new MemPD(convFwd_pd->weights_primitive_desc())); + + // ---- Create usr memory primitive descriptors ------------- + memory::format mfmt_nchw = memory::format::nchw; + memory::format weights_mfmt = (g!= 1) ? memory::format::goihw : memory::format::oihw; + + // TODO: There should not be a problem to use this for Backward as well + shared_ptr usr_bottom_data_memory_pd(new MemPD({{bottom_tz}, mpcsn, mfmt_nchw}, cpu_engine)); + shared_ptr usr_bias_data_memory_pd(new MemPD({{bias_tz}, mpcsn, memory::format::x}, cpu_engine)); + shared_ptr usr_top_data_memory_pd(new MemPD({{top_tz}, mpcsn, mfmt_nchw}, cpu_engine)); + shared_ptr usr_weights_data_memory_pd(new MemPD({{weights_tz}, mpcsn, weights_mfmt}, cpu_engine)); + + + // --- init primitive and prv_memory descriptors ---------------------- + fwd_bottom_data.reset(new MKLDNNData(usr_bottom_data_memory_pd, prv_fwd_bottom_data_memory_pd, bottom[0], this)); + fwd_bottom_data ->name = "fwd_bottom_data @ " + this->layer_param_.name(); + fwd_bottom_data_primitive = fwd_bottom_data->create_input(false); + + fwd_top_data.reset(new MKLDNNData(usr_top_data_memory_pd, prv_fwd_top_data_memory_pd, top[0], this)); + fwd_top_data ->name = "fwd_top_data @ " + this->layer_param_.name(); + fwd_top_data_memory = fwd_top_data->create_output_memory(); + + fwd_weights_data.reset(new MKLDNNData(usr_weights_data_memory_pd, 
prv_fwd_weights_data_memory_pd, this->blobs_[0].get(), this)); + fwd_weights_data->name = "fwd_weights_data @ " + this->layer_param_.name(); + fwd_weights_data_primitive = fwd_weights_data->create_input(true); + + if (this->bias_term_) { + shared_ptr prv_fwd_bias_data_memory_pd(new MemPD(convFwd_pd->bias_primitive_desc())); + fwd_bias_data.reset(new MKLDNNData(usr_bias_data_memory_pd, prv_fwd_bias_data_memory_pd, this->blobs_[1].get(), this)); + fwd_bias_data->name = "fwd_bias_data @ " + this->layer_param_.name(); + fwd_bias_data_primitive = fwd_bias_data->create_input(true); + if(relu) { + convFwd.reset(new convolution_relu_forward(*convReluFwd_pd + , *fwd_bottom_data_primitive, *fwd_weights_data_primitive + , *fwd_bias_data_primitive, *fwd_top_data_memory)); + } else { + convFwd.reset(new convolution_forward(*convFwd_pd + , *fwd_bottom_data_primitive, *fwd_weights_data_primitive + , *fwd_bias_data_primitive, *fwd_top_data_memory)); + } + //fwd_bias_data->set_mkldnn_primitive(convFwd); //Wrong passed primitive! (For sure!) + MKLDNNPrimitive fwd_bias_data_primitive_transfer(fwd_bias_data_primitive); + fwd_bias_data->set_mkldnn_primitive(fwd_bias_data_primitive_transfer); + } else { + if(relu) { + convFwd.reset(new convolution_relu_forward(*convReluFwd_pd + , *fwd_bottom_data_primitive, *fwd_weights_data_primitive + , *fwd_top_data_memory)); + } else { + convFwd.reset(new convolution_forward(*convFwd_pd + , *fwd_bottom_data_primitive, *fwd_weights_data_primitive + , *fwd_top_data_memory)); + } + } + //fwd_bottom_data->set_mkldnn_primitive(convFwd); //Wrong passed primitive! (For sure!) + MKLDNNPrimitive fwd_bottom_data_primitive_transfer(fwd_bottom_data_primitive); + fwd_bottom_data->set_mkldnn_primitive(fwd_bottom_data_primitive_transfer); + + //fwd_top_data->set_mkldnn_primitive(convFwd); //Wrong passed primitive! (TODO: Checking!) 
+ MKLDNNPrimitive fwd_top_data_memory_transfer(fwd_top_data_memory); + fwd_top_data->set_mkldnn_primitive(fwd_top_data_memory_transfer); + + //fwd_weights_data->set_mkldnn_primitive(convFwd); //Wrong passed primitive! (For sure!) + MKLDNNPrimitive fwd_weights_data_primitive_transfer(fwd_weights_data_primitive); + fwd_weights_data->set_mkldnn_primitive(fwd_weights_data_primitive_transfer); + + // Names are for debugging purposes only. +} + +template +void MKLDNNConvolutionLayer::Forward_cpu(const vector*>& bottom + , const vector*>& top) +{ + VLOG(1) << "MKLDNNConvolutionLayer::Forward_cpu: " << this->layer_param_.name(); + if( convFwd_pd == NULL) + InitConvolutionFwd(bottom, top); + // making reorders if needed. + fwd_bottom_data->sync_before_read(); + fwd_weights_data->sync_before_read(); + if (this->bias_term_) + fwd_bias_data->sync_before_read(); + // update top that head at prv + fwd_top_data->sync_before_write(); + + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKLDNN_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + convFwd.submit(); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); +} + + +template +void MKLDNNConvolutionLayer::InitConvolutionBwd(const vector*>& top + , const vector& propagate_down + , const vector*>& bottom) +{ + if (std::is_same::value) NOT_IMPLEMENTED; + + int32_t g = std::max(this->group_, 1); + int32_t n = this->num_; + int32_t iw = this->width_; + int32_t ih = this->height_; + int32_t ic = this->channels_; + + int32_t ow = this->width_out_; + int32_t oh = this->height_out_; + int32_t oc = this->num_output_; + + int32_t kw = this->kernel_w_; + int32_t kh = this->kernel_h_; + + memory::dims convolutionStrides {this->stride_h_, this->stride_w_}; + memory::dims padding {this->pad_h_, this->pad_w_}; + + // ---- Initialize memory descriptors (format = any) to create convolution descriptor ------------- + memory::data_type mpcsn = memory::data_type::f32; + memory::format mfmt_any = memory::format::any; + + memory::dims bottom_tz = {n, ic, 
ih, iw}; + memory::dims bias_tz = {oc}; + memory::dims top_tz = {n, oc, oh, ow}; + memory::dims weights_tz = ( g!= 1) ? memory::dims{g, oc/g, ic/g, kh, kw} : memory::dims{oc, ic, kh, kw}; + + // ---- Memory descriptors for initializing of convolution primitive descriptor ------------- + memory::desc init_bottom_md({bottom_tz}, mpcsn, mfmt_any); + memory::desc init_bias_md({bias_tz}, mpcsn, mfmt_any); + memory::desc init_top_md({top_tz}, mpcsn, mfmt_any); + memory::desc init_weights_md({weights_tz}, mpcsn, mfmt_any); + + // ---- Determining engine to use ----------------------- + std::string subengines = this->layer_param_.engine(); + if (subengines == "" || subengines == "MKLDNN") + subengines = "MKLDNN:CPU"; + EngineParser ep(subengines); + unsigned subEngineIndex = 0; + + auto eligibleAlgorithms = {conv_algorithm, algorithm::convolution_direct}; + for (auto &convAlgorithm : eligibleAlgorithms) { + // ---- Initialize convolution primitive descriptor ------------- + shared_ptr convBwdData_desc; + shared_ptr convBwdWeights_desc; + if (this->bias_term_) { + convBwdWeights_desc.reset(new convolution_backward_weights::desc(convAlgorithm + , init_bottom_md, init_weights_md, init_bias_md, init_top_md + , convolutionStrides, padding, padding, padding_kind::zero)); + } else { + convBwdWeights_desc.reset(new convolution_backward_weights::desc(convAlgorithm + , init_bottom_md, init_weights_md, init_top_md + , convolutionStrides, padding, padding, padding_kind::zero)); + } + + convBwdData_desc.reset(new convolution_backward_data::desc(convAlgorithm + , init_bottom_md, init_weights_md, init_top_md + , convolutionStrides, padding, padding, padding_kind::zero)); + + for(subEngineIndex=0; subEngineIndex < ep.getNumberOfSubEngines(); subEngineIndex++) { + try { + convBwdData_pd.reset(new convolution_backward_data::primitive_desc(*convBwdData_desc, + ep.getMKLDNNSubEngine(subEngineIndex), *convFwd_pd)); + + convBwdWeights_pd.reset(new 
convolution_backward_weights::primitive_desc(*convBwdWeights_desc, + ep.getMKLDNNSubEngine(subEngineIndex), *convFwd_pd)); + } + catch(...) { + continue; + } + break; + } + if (convBwdData_pd && convBwdWeights_pd) + break; + } + + CHECK(convBwdData_pd); + CHECK(convBwdWeights_pd); + engine cpu_engine = CpuEngine::Instance().get_engine(); + + // ---- Create priv memory primitive descriptors stored as class members ------------- + typedef typename memory::primitive_desc MemPD; // short name for memory::primitive_desc + + shared_ptr prv_bwdd_bottom_diff_memory_pd(new MemPD(convBwdData_pd->diff_src_primitive_desc())); + shared_ptr prv_bwdd_top_diff_memory_pd(new MemPD(convBwdData_pd->diff_dst_primitive_desc())); + shared_ptr prv_bwdd_weights_data_memory_pd(new MemPD(convBwdData_pd->weights_primitive_desc())); + + shared_ptr prv_bwdw_bottom_data_memory_pd(new MemPD(convBwdWeights_pd->src_primitive_desc())); + shared_ptr prv_bwdw_top_diff_memory_pd(new MemPD(convBwdWeights_pd->diff_dst_primitive_desc())); + shared_ptr prv_bwdw_weights_diff_memory_pd(new MemPD(convBwdWeights_pd->diff_weights_primitive_desc())); + + // ---- Create usr memory primitive descriptors ------------- + memory::format mfmt_nchw = memory::format::nchw; + memory::format weights_mfmt = ( g!= 1) ? memory::format::goihw : memory::format::oihw; + + // ???!!! can we use usr memory primitive descriptors for backward?? 
+ shared_ptr usr_bottom_data_memory_pd(new MemPD({{bottom_tz}, mpcsn, mfmt_nchw}, cpu_engine)); + shared_ptr usr_bias_data_memory_pd(new MemPD({{bias_tz}, mpcsn, memory::format::x}, cpu_engine)); + shared_ptr usr_top_data_memory_pd(new MemPD({{top_tz}, mpcsn, mfmt_nchw}, cpu_engine)); + shared_ptr usr_weights_data_memory_pd(new MemPD({{weights_tz}, mpcsn, weights_mfmt}, cpu_engine)); + + + // --- init primitive and prv_memory descriptors ---------------------- + bwdd_bottom_diff.reset(new MKLDNNDiff(usr_bottom_data_memory_pd, prv_bwdd_bottom_diff_memory_pd, bottom[0], this)); + bwdd_bottom_diff ->name = "bwdd_bottom_diff @ " + this->layer_param_.name(); + bwdd_bottom_diff_memory = bwdd_bottom_diff->create_output_memory(); + bwdw_bottom_data.reset(new MKLDNNData(usr_bottom_data_memory_pd, prv_bwdw_bottom_data_memory_pd, bottom[0], this)); + bwdw_bottom_data ->name = "bwdw_bottom_data @ " + this->layer_param_.name(); + bwdw_bottom_data_primitive = bwdw_bottom_data->create_input(false); + + bwdd_top_diff.reset(new MKLDNNDiff(usr_top_data_memory_pd, prv_bwdd_top_diff_memory_pd, top[0], this)); + bwdd_top_diff ->name = "bwdd_top_diff @ " + this->layer_param_.name(); + bwdd_top_diff_primitive = bwdd_top_diff->create_input(false); + bwdw_top_diff.reset(new MKLDNNDiff(usr_top_data_memory_pd, prv_bwdw_top_diff_memory_pd, top[0], this)); + bwdw_top_diff ->name = "bwdw_top_diff @ " + this->layer_param_.name(); + bwdw_top_diff_primitive = bwdw_top_diff->create_input(false); + + bwdd_weights_data.reset(new MKLDNNData(usr_weights_data_memory_pd, prv_bwdd_weights_data_memory_pd, this->blobs_[0].get(), this)); + bwdd_weights_data->name = "bwdd_weights_data @ " + this->layer_param_.name(); + bwdd_weights_data_primitive = bwdd_weights_data->create_input(false); + bwdw_weights_diff.reset(new MKLDNNDiff(usr_weights_data_memory_pd, prv_bwdw_weights_diff_memory_pd, this->blobs_[0].get(), this)); + bwdw_weights_diff->name = "bwdw_weights_diff @ " + this->layer_param_.name(); + 
bwdw_weights_diff_memory = bwdw_weights_diff->create_output_memory(); + + if (this->bias_term_) { + shared_ptr prv_bwdw_bias_diff_memory_pd(new MemPD(convBwdWeights_pd->diff_bias_primitive_desc())); + bwdw_bias_diff.reset(new MKLDNNDiff(usr_bias_data_memory_pd, prv_bwdw_bias_diff_memory_pd, this->blobs_[1].get(), this)); + bwdw_bias_diff->name = "bwdw_bias_diff @ " + this->layer_param_.name(); + bwdw_bias_diff_memory = bwdw_bias_diff->create_output_memory(); + + convBwdWeights.reset(new convolution_backward_weights(*convBwdWeights_pd + , *bwdw_bottom_data_primitive, *bwdw_top_diff_primitive + , *bwdw_weights_diff_memory, *bwdw_bias_diff_memory)); + + //bwdw_bias_diff->set_mkldnn_primitive(convBwdWeights); //Wrong passed primitive! (For sure!) + MKLDNNPrimitive bwdw_bias_diff_memory_transfer(bwdw_bias_diff_memory); + bwdw_bias_diff->set_mkldnn_primitive(bwdw_bias_diff_memory_transfer); + } else { + convBwdWeights.reset(new convolution_backward_weights(*convBwdWeights_pd + , *bwdw_bottom_data_primitive, *bwdw_top_diff_primitive + , *bwdw_weights_diff_memory)); + } + + convBwdData.reset(new convolution_backward_data(*convBwdData_pd + , *bwdd_top_diff_primitive, *bwdd_weights_data_primitive + , *bwdd_bottom_diff_memory)); + + //bwdd_bottom_diff->set_mkldnn_primitive(convBwdData); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwdd_bottom_diff_memory_transfer(bwdd_bottom_diff_memory); + bwdd_bottom_diff->set_mkldnn_primitive(bwdd_bottom_diff_memory_transfer); + + //bwdd_top_diff->set_mkldnn_primitive(convBwdData); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwdd_top_diff_primitive_transfer(bwdd_top_diff_primitive); + bwdd_top_diff->set_mkldnn_primitive(bwdd_top_diff_primitive_transfer); + + //bwdd_weights_data->set_mkldnn_primitive(convBwdData); //Wrong passed primitive! (For sure!) 
+ MKLDNNPrimitive bwdd_weights_data_primitive_transfer(bwdd_weights_data_primitive); + bwdd_weights_data->set_mkldnn_primitive(bwdd_weights_data_primitive_transfer); + + + //bwdw_bottom_data->set_mkldnn_primitive(convBwdWeights); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwdw_bottom_data_primitive_transfer(bwdw_bottom_data_primitive); + bwdw_bottom_data->set_mkldnn_primitive(bwdw_bottom_data_primitive_transfer); + + //bwdw_top_diff->set_mkldnn_primitive(convBwdWeights); //Wrong passed primitive! (For sure!) + MKLDNNPrimitive bwdw_top_diff_primitive_transfer(bwdw_top_diff_primitive); + bwdw_top_diff->set_mkldnn_primitive(bwdw_top_diff_primitive_transfer); + + //bwdw_weights_diff->set_mkldnn_primitive(convBwdWeights); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwdw_weights_diff_memory_transfer(bwdw_weights_diff_memory); + bwdw_weights_diff->set_mkldnn_primitive(bwdw_weights_diff_memory_transfer); + + // Names are for debugging purposes only. +} + + +template +void MKLDNNConvolutionLayer::Backward_cpu(const vector*>& top + , const vector& propagate_down + , const vector*>& bottom) +{ + VLOG(1) << "MKLDNNConvolutionLayer::Backward_cpu: " << this->layer_param_.name(); + if( convBwdData_pd == NULL) + InitConvolutionBwd(top, propagate_down, bottom); + if (propagate_down[0]) { + // making reorders if needed. 
+ bwdd_top_diff->sync_before_read(); + bwdd_weights_data->sync_before_read(); + bwdd_bottom_diff->sync_before_write(); + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_, PERFORMANCE_MKLDNN_NAME("BW")); + PERFORMANCE_MEASUREMENT_BEGIN(); +#ifdef DEBUG + if (bottom[0]->prv_data() != NULL) + { + LOG(INFO) << "Debug: Bottom prv data: " << *bottom[0]->prv_data(); + } + else + { + LOG(INFO) << "Debug: Bottom prv data is NULL!"; + //LOG(INFO) << "Debug: Bottom cpu data: " << *bottom[0]->cpu_data(); + } + + if (top[0]->prv_diff() != NULL) + { + LOG(INFO) << "Debug: Top prv diff: " << *top[0]->prv_diff(); + } + else + { + LOG(INFO) << "Debug: Top prv diff is NULL!"; + LOG(INFO) << "Debug: Top cpu diff: " << *top[0]->cpu_diff(); + } + + if (this->blobs_[0]->prv_data() != NULL) + { + LOG(INFO) << "Debug: Weights prv data from blobs_[0]: " << *this->blobs_[0]->prv_data(); + } + else + { + LOG(INFO) << "Debug: Weights prv data is NULL!"; + LOG(INFO) << "Debug: Weights cpu data: " << *this->blobs_[0]->cpu_data(); + } + //Before submit, so get_prv_ptr() always has the value + LOG(INFO) << "Debug: Weights prv data from get_prv_ptr: " << *bwdd_weights_data->get_prv_ptr(); +#endif + convBwdData.submit(); +#ifdef DEBUG + if (bottom[0]->prv_diff() != NULL) + { + LOG(INFO) << "Debug: Bottom prv diff: " << *bottom[0]->prv_diff(); + } + else + { + LOG(INFO) << "Debug: Bottom prv diff is NULL!"; + LOG(INFO) << "Debug: Bottom cpu diff: " << *bottom[0]->cpu_diff(); + } +#endif + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_); + } + if (this->param_propagate_down(0)) { + // making reorders if needed. 
+ bwdw_top_diff->sync_before_read(); + bwdw_bottom_data->sync_before_read(); + // update top that head at prv + bwdw_weights_diff->sync_before_write(); + if (this->param_propagate_down(1)) { + CHECK(bwdw_bias_diff); + bwdw_bias_diff->sync_before_write(); + } + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_weights_, + PERFORMANCE_MKLDNN_NAME_DETAILED("BW", "_weights")); + PERFORMANCE_MEASUREMENT_BEGIN(); + convBwdWeights.submit(); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_weights_); + } +} + +#ifdef CPU_ONLY +STUB_GPU(MKLDNNConvolutionLayer); +#else + +template +void MKLDNNConvolutionLayer::Forward_gpu(const vector*>& bottom + , const vector*>& top) +{ + NOT_IMPLEMENTED; +} + +template +void MKLDNNConvolutionLayer::Backward_gpu(const vector*>& top + , const vector& propagate_down + , const vector*>& bottom) +{ + NOT_IMPLEMENTED; +} +#endif + +INSTANTIATE_CLASS(MKLDNNConvolutionLayer); + +} // namespace caffe +#endif // #ifdef MKLDNN_SUPPORTED diff --git a/src/caffe/layers/mkldnn_eltwise_layer.cpp b/src/caffe/layers/mkldnn_eltwise_layer.cpp new file mode 100644 index 00000000000..060467e8244 --- /dev/null +++ b/src/caffe/layers/mkldnn_eltwise_layer.cpp @@ -0,0 +1,272 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef MKLDNN_SUPPORTED +#include +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/mkldnn_layers.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void MKLDNNEltwiseLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) +{ + VLOG(1) << "MKLDNNEltwiseLayer::LayerSetUp: " << this->layer_param_.name(); + + Layer::LayerSetUp(bottom, top); + + CHECK(this->layer_param().eltwise_param().coeff_size() == 0 + || this->layer_param().eltwise_param().coeff_size() == bottom.size()) << + "Eltwise Layer takes one coefficient per bottom blob."; + CHECK(!(this->layer_param().eltwise_param().operation() == EltwiseParameter_EltwiseOp_PROD + && this->layer_param().eltwise_param().coeff_size())) << + "Eltwise layer only takes coefficients for summation."; + op_ = this->layer_param_.eltwise_param().operation(); + // Blob-wise coefficients for the elementwise operation. + coeffs_ = vector(bottom.size(), 1); + if (this->layer_param().eltwise_param().coeff_size()) + { + for (int i = 0; i < bottom.size(); ++i) + { + coeffs_[i] = this->layer_param().eltwise_param().coeff(i); + } + } + num_bottoms_ = bottom.size(); + stable_prod_grad_ = this->layer_param_.eltwise_param().stable_prod_grad(); +} + +template +void MKLDNNEltwiseLayer::Reshape(const vector*>& bottom, const vector*>& top) +{ + VLOG(1) << "MKLDNNEltwiseLayer::Reshape: " << this->layer_param_.name(); + + this->width_ = bottom[0]->width(); + this->height_ = bottom[0]->height(); + this->num_ = bottom[0]->num(); + this->channels_ = bottom[0]->channels(); + + switch (op_) + { + case EltwiseParameter_EltwiseOp_PROD: + NOT_IMPLEMENTED; + break; + case EltwiseParameter_EltwiseOp_SUM: + { + for (int i = 1; i < num_bottoms_; ++i) + { + CHECK(bottom[i]->shape() == bottom[0]->shape()); + } + top[0]->ReshapeLike(*bottom[0]); + } + break; + case EltwiseParameter_EltwiseOp_MAX: + NOT_IMPLEMENTED; + /* + { + for (int i = 1; i < 
num_bottoms_; ++i) + { + CHECK(bottom[i]->shape() == bottom[0]->shape()); + } + top[0]->ReshapeLike(*bottom[0]); + // If max operation, we will initialize the vector index part. + if (this->layer_param_.eltwise_param().operation() == EltwiseParameter_EltwiseOp_MAX && top.size() == 1) + { + max_idx_.Reshape(bottom[0]->shape()); + } + } + */ + break; + default: + LOG(FATAL) << "Unknown elementwise operation."; + } +} + +template +void MKLDNNEltwiseLayer::InitEltwiseFwd(const vector*>& bottom, const vector*>& top) +{ + if (std::is_same::value) NOT_IMPLEMENTED; + + int32_t n = this->num_; + int32_t iw = this->width_; + int32_t ih = this->height_; + int32_t ic = this->channels_; + + // If we just do simple adding, scale is 1.0 for all inputs we have + std::vector scale(num_bottoms_, 1.0); + //Eltwise layer is supporting multiplication coefficient and this scale value can be used for that. + for (int i = 0; i < num_bottoms_; ++i) + { + scale[i] = coeffs_[i]; + } + + engine cpu_engine = CpuEngine::Instance().get_engine(); + memory::data_type mpcsn = memory::data_type::f32; + memory::format mfmt_nchw = memory::format::nchw; + + // ---- Initialize memory descriptors ------------- + shared_ptr bottom_data_md, top_data_md; + + std::vector bottom_data_mpd; + for (auto i = 0; i < num_bottoms_; i++) + { + fwd_bottom_data.push_back(boost::shared_ptr >()); + memory::format bottom_data_mfmt = mfmt_nchw; + shared_ptr prv_bottom_data_mpd; + shared_ptr usr_bottom_data_mpd( + new memory::primitive_desc({{n, ic, ih, iw}, mpcsn, bottom_data_mfmt}, cpu_engine)); + + bool bottom_data_is_prv = (const_cast(bottom[i]->prv_data()) != NULL); + if (bottom_data_is_prv) + { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(bottom[i]); + bottom_data_mfmt = static_cast( + mem_descr->prv_memory_pd()->desc().data.format); + bottom_data_md.reset(new memory::desc(mem_descr->prv_memory_pd()->desc())); + prv_bottom_data_mpd.reset(new memory::primitive_desc( + {{n, ic, ih, iw}, mpcsn, 
bottom_data_mfmt}, cpu_engine)); + } + else + { + bottom_data_md.reset(new memory::desc({{n, ic, ih, iw}}, mpcsn, bottom_data_mfmt)); + } + top_data_md = bottom_data_md; + + bottom_data_mpd.push_back(memory::primitive_desc( + {{n, ic, ih, iw}, mpcsn, bottom_data_mfmt}, cpu_engine)); + + fwd_bottom_data[i].reset(new MKLDNNData( + usr_bottom_data_mpd, prv_bottom_data_mpd, bottom[i], this)); + fwd_bottom_data[i]->name = "fwd_bottom_data[i] @ " + this->layer_param_.name(); + fwd_bottom_data_primitives_.push_back(fwd_bottom_data[i]->create_input(false)); + fwd_bottom_data_primitives_at_.push_back(*fwd_bottom_data_primitives_[i]); + } + + shared_ptr usr_top_data_mpd(new memory::primitive_desc( + {{n, ic, ih, iw}, mpcsn, mfmt_nchw}, cpu_engine)); + + // ---- Determining engine to use ----------------------- + std::string subengines = this->layer_param_.engine(); + if (subengines == "" || subengines == "MKLDNN") + subengines = "MKLDNN:CPU"; + eltwiseFwd_pd.reset(new sum::primitive_desc({{n, ic, ih, iw}, mpcsn, memory::format::any}, scale, bottom_data_mpd)); + CHECK(eltwiseFwd_pd); + + shared_ptr prv_top_data_mpd(new memory::primitive_desc(eltwiseFwd_pd->dst_primitive_desc())); + + fwd_top_data.reset(new MKLDNNData(usr_top_data_mpd, prv_top_data_mpd, top[0], this)); + fwd_top_data->name = "fwd_top_data @ " + this->layer_param_.name(); + fwd_top_data_memory = fwd_top_data->create_output_memory(); + + eltwiseFwd.reset(new sum(*eltwiseFwd_pd, fwd_bottom_data_primitives_at_, *fwd_top_data_memory)); + + for (auto i = 0; i < num_bottoms_; i++) + { + //fwd_bottom_data[i]->set_mkldnn_primitive(eltwiseFwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive fwd_bottom_data_primitive_transfer(fwd_bottom_data_primitives_[i]); + fwd_bottom_data[i]->set_mkldnn_primitive(fwd_bottom_data_primitive_transfer); + } + //fwd_top_data->set_mkldnn_primitive(eltwiseFwd); //Wrong passed primitive! (TODO: Checking!) 
+ MKLDNNPrimitive fwd_top_data_memory_transfer(fwd_top_data_memory); + fwd_top_data->set_mkldnn_primitive(fwd_top_data_memory_transfer); +} + + +template +void MKLDNNEltwiseLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) +{ + VLOG(1) << "MKLDNNEltwiseLayer::Forward_cpu: " << this->layer_param_.name(); + + if(eltwiseFwd_pd == NULL) + InitEltwiseFwd(bottom, top); + for (auto i = 0; i < num_bottoms_; i++) + { + // making reorders if needed. + fwd_bottom_data[i]->sync_before_read(); + } + // update top that head at prv + fwd_top_data->sync_before_write(); + + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKLDNN_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + eltwiseFwd.submit(); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); +} + +template +void MKLDNNEltwiseLayer::Backward_cpu(const vector*>& top + , const vector& propagate_down + , const vector*>& bottom) +{ + VLOG(1) << "MKLDNNEltwiseLayer::Backward_cpu: " << this->layer_param_.name(); + + for (int i = 0; i < num_bottoms_; ++i) + { + //Eltwise layer is not supporting multiplication coefficient in Backward due to lack of supporting scale and copy primitives in MKL-DNN + CHECK_EQ(coeffs_[i], Dtype(1)) << "Not supported yet"; + + bottom[i]->ShareDiff(*top[0]); + } +} + +#ifdef CPU_ONLY +STUB_GPU(MKLDNNEltwiseLayer); +#else + +template +void MKLDNNEltwiseLayer::Forward_gpu(const vector*>& bottom, const vector*>& top) +{ + NOT_IMPLEMENTED; +} + +template +void MKLDNNEltwiseLayer::Backward_gpu(const vector*>& top + , const vector& propagate_down + , const vector*>& bottom) +{ + NOT_IMPLEMENTED; +} +#endif + +INSTANTIATE_CLASS(MKLDNNEltwiseLayer); +} // namespace caffe +#endif // #ifdef MKLDNN_SUPPORTED diff --git a/src/caffe/layers/mkldnn_inner_product_layer.cpp b/src/caffe/layers/mkldnn_inner_product_layer.cpp new file mode 100644 index 00000000000..1c92669c16a --- /dev/null +++ b/src/caffe/layers/mkldnn_inner_product_layer.cpp @@ -0,0 +1,568 @@ +/* +All modification made by Intel Corporation: © 
2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef MKLDNN_SUPPORTED +#include +#include +#include + +#include "caffe/filler.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/mkldnn_layers.hpp" + +#if 0 +#include "mkldnn_types.h" + +using namespace mkldnn; +#endif + +// TODO: Add transposed weights support + +namespace caffe { +template +MKLDNNInnerProductLayer::MKLDNNInnerProductLayer( + const LayerParameter& param) : + MKLDNNLayer(), + InnerProductLayer(param), + fwd_bottom_data(NULL), + fwd_top_data(NULL), + fwd_weights_data(NULL), + fwd_bias_data(NULL), + bwdd_weights_data(NULL), + bwdw_bottom_data(NULL), + bwdd_bottom_diff(NULL), + bwdd_top_diff(NULL), + bwdw_top_diff(NULL), + bwdw_weights_diff(NULL), + bwdw_bias_diff(NULL), + ipFwd_pd(NULL), + ipBwdData_pd(NULL), + ipBwdWeights_pd(NULL), + fwd_top_data_memory(NULL), + bwdd_bottom_diff_memory(NULL), + bwdw_weights_diff_memory(NULL), + bwdw_bias_diff_memory(NULL), + fwd_bottom_data_primitive(NULL), + fwd_weights_data_primitive(NULL), + fwd_bias_data_primitive(NULL), + bwdd_top_diff_primitive(NULL), + bwdd_weights_data_primitive(NULL), + bwdw_top_diff_primitive(NULL), + bwdw_bottom_data_primitive(NULL), + w_(0), + h_(0) +{ + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_weights_); +} + +template +MKLDNNInnerProductLayer::~MKLDNNInnerProductLayer() +{ +} + +template +void MKLDNNInnerProductLayer::LayerSetUp(const vector*>& bottom + , const vector*>& top) +{ + VLOG(1) << "MKLDNNInnerProductLayer::LayerSetUp: " << this->layer_param_.name(); + InnerProductLayer::LayerSetUp(bottom, top); +} + +template +void MKLDNNInnerProductLayer::Reshape(const vector*>& bottom + , const vector*>& top) +{ + VLOG(1) << "MKLDNNInnerProductLayer::Reshape: " << this->layer_param_.name(); + InnerProductLayer::Reshape(bottom, top); + + this->w_ = bottom[0]->width(); + this->h_ = bottom[0]->height(); +} + +template +void MKLDNNInnerProductLayer::InitInnerProductFwd(const vector*>& bottom + , 
const vector<Blob<Dtype>*>& top)
+{
+    if (std::is_same<Dtype, double>::value) NOT_IMPLEMENTED;
+    auto propagation = this->phase_ == TEST ? prop_kind::forward_scoring : prop_kind::forward_training;
+
+    int32_t n = this->M_;
+    int32_t w = this->w_;
+    int32_t h = this->h_;
+    int32_t oc = this->N_;
+    int32_t ic = this->K_/h_/w_;
+    bool has_spatial = (bottom[0]->shape().size() != 2);
+
+    // Initialize memory descriptors (format = any) to create inner_product descriptor
+    memory::data_type mpcsn = memory::data_type::f32;
+    memory::format mfmt = memory::format::any;
+
+    memory::dims bottom_tz = (has_spatial) ? memory::dims{n, ic, h, w} : memory::dims{n, ic};
+    memory::dims top_tz = {n, oc};
+    memory::dims weights_tz = (has_spatial) ? memory::dims {oc, ic, h, w} : memory::dims{oc, ic};
+    memory::dims bias_tz = {oc};
+
+#ifdef DEBUG
+    LOG(INFO) << "has_spatial flag value: " << has_spatial;
+    if (has_spatial)
+    {
+        LOG(INFO) << "Dimension of bottom for MKLDNN: " << n << " " << ic << " " << h << " " << w;
+        LOG(INFO) << "Dimension of weights for MKLDNN: " << oc << " " << ic << " " << h << " " << w;
+    }
+    else
+    {
+        LOG(INFO) << "Dimension of bottom for MKLDNN: " << n << " " << ic;
+        LOG(INFO) << "Dimension of weights for MKLDNN: " << oc << " " << ic;
+    }
+#endif
+
+    memory::desc init_bottom_md({bottom_tz}, mpcsn, mfmt);
+    memory::desc init_top_md({top_tz}, mpcsn, mfmt);
+    memory::desc init_weights_md({weights_tz}, mpcsn, mfmt);
+    memory::desc init_bias_md({bias_tz}, mpcsn, mfmt);
+
+    // Initialize inner_product primitive descriptor
+    shared_ptr<inner_product_forward::desc> ipFwd_desc;
+
+    if (this->bias_term_) {
+        ipFwd_desc.reset(new inner_product_forward::desc(propagation, init_bottom_md, init_weights_md
+                                ,init_bias_md, init_top_md));
+    } else {
+        ipFwd_desc.reset(new inner_product_forward::desc(propagation, init_bottom_md, init_weights_md
+                                , init_top_md));
+    }
+
+    // ---- Determining engine to use -----------------------
+    std::string subengines = this->layer_param_.engine();
+    if (subengines == "" || subengines == "MKLDNN")
+ subengines = "MKLDNN:CPU"; + EngineParser ep(subengines); + unsigned subEngineIndex = 0; + for(; subEngineIndex < ep.getNumberOfSubEngines(); subEngineIndex++) { + try { + ipFwd_pd.reset(new inner_product_forward::primitive_desc(*ipFwd_desc, + ep.getMKLDNNSubEngine(subEngineIndex))); + } + catch(...) { + continue; + } + break; + } + + CHECK(ipFwd_pd); + + // Create priv memory primitive descriptors stored as class members + typedef typename memory::primitive_desc MemPD; // short name for memory::primitive_desc + + shared_ptr prv_fwd_bottom_data_memory_pd(new MemPD(ipFwd_pd->src_primitive_desc())); + shared_ptr prv_fwd_top_data_memory_pd(new MemPD(ipFwd_pd->dst_primitive_desc())); + shared_ptr prv_fwd_weights_data_memory_pd(new MemPD(ipFwd_pd->weights_primitive_desc())); + + // Create usr memory primitive descriptors stored as class members + engine cpu_engine = CpuEngine::Instance().get_engine(); + memory::format input_mfmt = has_spatial ? memory::format::nchw : memory::format::nc; + shared_ptr usr_bottom_data_memory_pd(new MemPD({{bottom_tz}, mpcsn, input_mfmt}, cpu_engine)); + shared_ptr usr_bias_data_memory_pd(new MemPD({{bias_tz}, mpcsn, memory::format::x}, cpu_engine)); + shared_ptr usr_top_data_memory_pd(new MemPD({{top_tz}, mpcsn, memory::format::nc}, cpu_engine)); + memory::format weights_mfmt = has_spatial ? 
memory::format::oihw : memory::format::oi; + shared_ptr usr_weights_data_memory_pd(new MemPD({{weights_tz}, mpcsn, weights_mfmt}, cpu_engine)); +#ifdef DEBUG + LOG(INFO) << "Memory format of usr_bottom_data_memory_pd: " << input_mfmt; + LOG(INFO) << "Memory format of usr_weights_data_memory_pd: " << weights_mfmt; +#endif + + // --- init primitive and prv_memory descriptors ---------------------- + fwd_bottom_data.reset(new MKLDNNData(usr_bottom_data_memory_pd, prv_fwd_bottom_data_memory_pd, bottom[0], this)); + fwd_bottom_data ->name = "fwd_bottom_data @ " + this->layer_param_.name(); + fwd_bottom_data_primitive = fwd_bottom_data->create_input(false); + + fwd_top_data.reset(new MKLDNNData(usr_top_data_memory_pd, prv_fwd_top_data_memory_pd, top[0], this)); + fwd_top_data ->name = "fwd_top_data @ " + this->layer_param_.name(); + fwd_top_data_memory = fwd_top_data->create_output_memory(); + + fwd_weights_data.reset(new MKLDNNData(usr_weights_data_memory_pd, prv_fwd_weights_data_memory_pd, this->blobs_[0].get(), this)); + fwd_weights_data->name = "fwd_weights_data @ " + this->layer_param_.name(); + fwd_weights_data_primitive = fwd_weights_data->create_input(true); + + if (this->bias_term_) { + shared_ptr prv_fwd_bias_data_memory_pd(new MemPD(ipFwd_pd->bias_primitive_desc())); + fwd_bias_data.reset(new MKLDNNData(usr_bias_data_memory_pd, prv_fwd_bias_data_memory_pd, this->blobs_[1].get(), this)); + fwd_bias_data ->name = "fwd_bias_data @ " + this->layer_param_.name(); + fwd_bias_data_primitive = fwd_bias_data->create_input(true); + ipFwd.reset(new inner_product_forward(*ipFwd_pd + , *fwd_bottom_data_primitive, *fwd_weights_data_primitive + , *fwd_bias_data_primitive, *fwd_top_data_memory)); + } else { + ipFwd.reset(new inner_product_forward(*ipFwd_pd + , *fwd_bottom_data_primitive, *fwd_weights_data_primitive + , *fwd_top_data_memory)); + } + + //Because the inputs of inner product layer always come from user memory, so will not trigger the wrong reorder from extprv to 
prv + //fwd_bottom_data->set_mkldnn_primitive(ipFwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive fwd_bottom_data_primitive_transfer(fwd_bottom_data_primitive); + fwd_bottom_data->set_mkldnn_primitive(fwd_bottom_data_primitive_transfer); + + //fwd_top_data->set_mkldnn_primitive(ipFwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive fwd_top_data_memory_transfer(fwd_top_data_memory); + fwd_top_data->set_mkldnn_primitive(fwd_top_data_memory_transfer); + + //fwd_weights_data->set_mkldnn_primitive(ipFwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive fwd_weights_data_primitive_transfer(fwd_weights_data_primitive); + fwd_weights_data->set_mkldnn_primitive(fwd_weights_data_primitive_transfer); + + if (this->bias_term_) + { + //fwd_bias_data->set_mkldnn_primitive(ipFwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive fwd_bias_data_primitive_transfer(fwd_bias_data_primitive); + fwd_bias_data->set_mkldnn_primitive(fwd_bias_data_primitive_transfer); + } +} + +template +void MKLDNNInnerProductLayer::Forward_cpu(const vector*>& bottom + , const vector*>& top) +{ + VLOG(1) << "MKLDNNInnerProductLayer::Forward_cpu: " << this->layer_param_.name(); +#ifdef DEBUG + LOG(INFO) << "MKLDNNInnerProductLayer::Forward_cpu: " << this->layer_param_.name(); +#endif + + if( ipFwd_pd == NULL) + InitInnerProductFwd(bottom, top); + // making reorders if needed. 
+ fwd_bottom_data->sync_before_read(); + fwd_weights_data->sync_before_read(); + if (this->bias_term_) + fwd_bias_data->sync_before_read(); + // update top that head at prv + fwd_top_data->sync_before_write(); + + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKLDNN_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + ipFwd.submit(); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); +} + +template +void MKLDNNInnerProductLayer::InitInnerProductBwd(const vector*>& top + , const vector& propagate_down + , const vector*>& bottom) +{ + if (std::is_same::value) NOT_IMPLEMENTED; + + int32_t n = this->M_; + int32_t w = this->w_; + int32_t h = this->h_; + int32_t oc = this->N_; + int32_t ic = this->K_/h_/w_; + bool has_spatial = (bottom[0]->shape().size() != 2); + + // Initialize memory descriptors (format = any) to create inner_product descriptor + memory::data_type mpcsn = memory::data_type::f32; + memory::format mfmt = memory::format::any; + + memory::dims bottom_tz = (has_spatial) ? memory::dims{n, ic, h, w} : memory::dims{n, ic}; + memory::dims top_tz = {n, oc}; + memory::dims weights_tz = (has_spatial) ? 
memory::dims {oc, ic, h, w} : memory::dims{oc, ic}; + memory::dims bias_tz = {oc}; + +#ifdef DEBUG + LOG(INFO) << "has_spatial flag value: " << has_spatial; + if (has_spatial) + { + LOG(INFO) << "Dimension of bottom for MKLDNN: " << n << " " << ic << " " << h << " " << w; + LOG(INFO) << "Dimension of weights for MKLDNN: " << oc << " " << ic << " " << h << " " << w; + } + else + { + LOG(INFO) << "Dimension of bottom for MKLDNN: " << n << " " << ic; + LOG(INFO) << "Dimension of weights for MKLDNN: " << oc << " " << ic; + } +#endif + + memory::desc init_bottom_md({bottom_tz}, mpcsn, mfmt); + memory::desc init_top_md({top_tz}, mpcsn, mfmt); + memory::desc init_weights_md({weights_tz}, mpcsn, mfmt); + memory::desc init_bias_md({bias_tz}, mpcsn, mfmt); + + // Initialize inner_product primitive descriptor + shared_ptr ipBwdData_desc; + shared_ptr ipBwdWeights_desc; + if (this->bias_term_) + ipBwdWeights_desc.reset(new inner_product_backward_weights::desc(init_bottom_md, init_weights_md + , init_bias_md, init_top_md)); + else + ipBwdWeights_desc.reset(new inner_product_backward_weights::desc(init_bottom_md, init_weights_md + , init_top_md)); + + ipBwdData_desc.reset(new inner_product_backward_data::desc(init_bottom_md, init_weights_md, init_top_md)); + + // ---- Determining engine to use ----------------------- + std::string subengines = this->layer_param_.engine(); + if (subengines == "" || subengines == "MKLDNN") + subengines = "MKLDNN:CPU"; + EngineParser ep(subengines); + unsigned subEngineIndex = 0; + for(; subEngineIndex < ep.getNumberOfSubEngines(); subEngineIndex++) { + try { + ipBwdData_pd.reset(new inner_product_backward_data::primitive_desc(*ipBwdData_desc, + ep.getMKLDNNSubEngine(subEngineIndex), *ipFwd_pd)); + + ipBwdWeights_pd.reset(new inner_product_backward_weights::primitive_desc(*ipBwdWeights_desc, + ep.getMKLDNNSubEngine(subEngineIndex), *ipFwd_pd)); + } + catch(...) 
{ + continue; + } + break; + } + + CHECK(ipBwdData_pd); + CHECK(ipBwdWeights_pd); + + // Create priv memory primitive descriptors stored as class members + typedef typename memory::primitive_desc MemPD; // short name for memory::primitive_desc + + shared_ptr prv_bwdd_bottom_diff_memory_pd(new MemPD(ipBwdData_pd->diff_src_primitive_desc())); + shared_ptr prv_bwdd_top_diff_memory_pd(new MemPD(ipBwdData_pd->diff_dst_primitive_desc())); + shared_ptr prv_bwdd_weights_data_memory_pd(new MemPD(ipBwdData_pd->weights_primitive_desc())); + + shared_ptr prv_bwdw_bottom_data_memory_pd(new MemPD(ipBwdWeights_pd->src_primitive_desc())); + shared_ptr prv_bwdw_top_diff_memory_pd(new MemPD(ipBwdWeights_pd->diff_dst_primitive_desc())); + shared_ptr prv_bwdw_weights_diff_memory_pd(new MemPD(ipBwdWeights_pd->diff_weights_primitive_desc())); + + // Create usr memory primitive descriptors stored as class members + engine cpu_engine = CpuEngine::Instance().get_engine(); + memory::format input_mfmt = has_spatial ? memory::format::nchw : memory::format::nc; + shared_ptr usr_bottom_data_memory_pd(new MemPD({{bottom_tz}, mpcsn, input_mfmt}, cpu_engine)); + shared_ptr usr_bias_data_memory_pd(new MemPD({{bias_tz}, mpcsn, memory::format::x}, cpu_engine)); + shared_ptr usr_top_data_memory_pd(new MemPD({{top_tz}, mpcsn, memory::format::nc}, cpu_engine)); + memory::format weights_mfmt = has_spatial ? 
memory::format::oihw : memory::format::oi; + shared_ptr usr_weights_data_memory_pd(new MemPD({{weights_tz}, mpcsn, weights_mfmt}, cpu_engine)); +#ifdef DEBUG + LOG(INFO) << "Memory format of usr_bottom_data_memory_pd: " << input_mfmt; + LOG(INFO) << "Memory format of usr_weights_data_memory_pd: " << weights_mfmt; +#endif + + // --- init primitive and prv_memory descriptors ---------------------- + bwdd_bottom_diff.reset(new MKLDNNDiff(usr_bottom_data_memory_pd, prv_bwdd_bottom_diff_memory_pd, bottom[0], this)); + bwdd_bottom_diff ->name = "bwdd_bottom_diff @ " + this->layer_param_.name(); + bwdd_bottom_diff_memory = bwdd_bottom_diff->create_output_memory(); + bwdw_bottom_data.reset(new MKLDNNData(usr_bottom_data_memory_pd, prv_bwdw_bottom_data_memory_pd, bottom[0], this)); + bwdw_bottom_data ->name = "bwdw_bottom_data @ " + this->layer_param_.name(); + bwdw_bottom_data_primitive = bwdw_bottom_data->create_input(false); + + bwdd_top_diff.reset(new MKLDNNDiff(usr_top_data_memory_pd, prv_bwdd_top_diff_memory_pd, top[0], this)); + bwdd_top_diff ->name = "bwdd_top_diff @ " + this->layer_param_.name(); + bwdd_top_diff_primitive = bwdd_top_diff->create_input(false); + bwdw_top_diff.reset(new MKLDNNDiff(usr_top_data_memory_pd, prv_bwdw_top_diff_memory_pd, top[0], this)); + bwdw_top_diff ->name = "bwdw_top_diff @ " + this->layer_param_.name(); + bwdw_top_diff_primitive = bwdw_top_diff->create_input(false); + + bwdd_weights_data.reset(new MKLDNNData(usr_weights_data_memory_pd, prv_bwdd_weights_data_memory_pd, this->blobs_[0].get(), this)); + bwdd_weights_data->name = "bwdd_weights_data @ " + this->layer_param_.name(); + bwdd_weights_data_primitive = bwdd_weights_data->create_input(false); + bwdw_weights_diff.reset(new MKLDNNDiff(usr_weights_data_memory_pd, prv_bwdw_weights_diff_memory_pd, this->blobs_[0].get(), this)); + bwdw_weights_diff->name = "bwdw_weights_diff @ " + this->layer_param_.name(); + bwdw_weights_diff_memory = bwdw_weights_diff->create_output_memory(); + + if 
(this->bias_term_) { + shared_ptr prv_bwdw_bias_diff_memory_pd(new MemPD(ipBwdWeights_pd->diff_bias_primitive_desc())); + bwdw_bias_diff.reset(new MKLDNNDiff(usr_bias_data_memory_pd, prv_bwdw_bias_diff_memory_pd, this->blobs_[1].get(), this)); + bwdw_bias_diff ->name = "bwdw_bias_diff @ " + this->layer_param_.name(); + bwdw_bias_diff_memory = bwdw_bias_diff->create_output_memory(); + + ipBwdWeights.reset(new inner_product_backward_weights(*ipBwdWeights_pd + , *bwdw_bottom_data_primitive, *bwdw_top_diff_primitive + , *bwdw_weights_diff_memory, *bwdw_bias_diff_memory)); + } else { + ipBwdWeights.reset(new inner_product_backward_weights(*ipBwdWeights_pd + , *bwdw_bottom_data_primitive, *bwdw_top_diff_primitive + , *bwdw_weights_diff_memory)); + } + + ipBwdData.reset(new inner_product_backward_data(*ipBwdData_pd + , *bwdd_top_diff_primitive, *bwdd_weights_data_primitive + , *bwdd_bottom_diff_memory)); + + //bwdd_bottom_diff->set_mkldnn_primitive(ipBwdData); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwdd_bottom_diff_memory_transfer(bwdd_bottom_diff_memory); + bwdd_bottom_diff->set_mkldnn_primitive(bwdd_bottom_diff_memory_transfer); + + //bwdd_top_diff->set_mkldnn_primitive(ipBwdData); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwdd_top_diff_primitive_transfer(bwdd_top_diff_primitive); + bwdd_top_diff->set_mkldnn_primitive(bwdd_top_diff_primitive_transfer); + + //bwdd_weights_data->set_mkldnn_primitive(ipBwdData); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwdd_weights_data_primitive_transfer(bwdd_weights_data_primitive); + bwdd_weights_data->set_mkldnn_primitive(bwdd_weights_data_primitive_transfer); + + + //bwdw_bottom_data->set_mkldnn_primitive(ipBwdWeights); //Wrong passed primitive! (TODO: Checking!) 
+ MKLDNNPrimitive bwdw_bottom_data_primitive_transfer(bwdw_bottom_data_primitive); + bwdw_bottom_data->set_mkldnn_primitive(bwdw_bottom_data_primitive_transfer); + + //bwdw_top_diff->set_mkldnn_primitive(ipBwdWeights); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwdw_top_diff_primitive_transfer(bwdw_top_diff_primitive); + bwdw_top_diff->set_mkldnn_primitive(bwdw_top_diff_primitive_transfer); + + //bwdw_weights_diff->set_mkldnn_primitive(ipBwdWeights); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwdw_weights_diff_memory_transfer(bwdw_weights_diff_memory); + bwdw_weights_diff->set_mkldnn_primitive(bwdw_weights_diff_memory_transfer); + + if (this->bias_term_) + { + //bwdw_bias_diff->set_mkldnn_primitive(ipBwdWeights); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwdw_bias_diff_memory_transfer(bwdw_bias_diff_memory); + bwdw_bias_diff->set_mkldnn_primitive(bwdw_bias_diff_memory_transfer); + } +} + + + +template +void MKLDNNInnerProductLayer::Backward_cpu(const vector*>& top + , const vector& propagate_down + , const vector*>& bottom) +{ + VLOG(1) << "MKLDNNInnerProductLayer::Backward_cpu: " << this->layer_param_.name(); +#ifdef DEBUG + LOG(INFO) << "MKLDNNInnerProductLayer::Backward_cpu: " << this->layer_param_.name(); +#endif + + if( ipBwdData_pd == NULL) + InitInnerProductBwd(top, propagate_down, bottom); + if (propagate_down[0]) { + // making reorders if needed. 
+ bwdd_top_diff->sync_before_read(); + bwdd_weights_data->sync_before_read(); + bwdd_bottom_diff->sync_before_write(); + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_, PERFORMANCE_MKLDNN_NAME("BW")); + PERFORMANCE_MEASUREMENT_BEGIN(); +#ifdef DEBUG + if (bottom[0]->prv_data() != NULL) + { + LOG(INFO) << "Debug: Bottom prv data: " << *bottom[0]->prv_data(); + } + else + { + LOG(INFO) << "Debug: Bottom prv data is NULL!"; + //LOG(INFO) << "Debug: Bottom cpu data: " << *bottom[0]->cpu_data(); + //Chong: if don't have this LOG print, will cause: this->_cpu_ptr == cpu_ptr crash, without the fix in dropout_layer.cpp + } + + if (top[0]->prv_diff() != NULL) + { + LOG(INFO) << "Debug: Top prv diff: " << *top[0]->prv_diff(); + } + else + { + LOG(INFO) << "Debug: Top prv diff is NULL!"; + LOG(INFO) << "Debug: Top cpu diff: " << *top[0]->cpu_diff(); + } + + if (this->blobs_[0]->prv_data() != NULL) + { + LOG(INFO) << "Debug: Weights prv data from blobs_[0]: " << *this->blobs_[0]->prv_data(); + } + else + { + LOG(INFO) << "Debug: Weights prv data is NULL!"; + LOG(INFO) << "Debug: Weights cpu data: " << *this->blobs_[0]->cpu_data(); + } + //Before submit, so get_prv_ptr() always has the value + LOG(INFO) << "Debug: Weights prv data from get_prv_ptr: " << *bwdd_weights_data->get_prv_ptr(); +#endif + ipBwdData.submit(); +#ifdef DEBUG + if (bottom[0]->prv_diff() != NULL) + { + LOG(INFO) << "Debug: Bottom prv diff: " << *bottom[0]->prv_diff(); + } + else + { + LOG(INFO) << "Debug: Bottom prv diff is NULL!"; + LOG(INFO) << "Debug: Bottom cpu diff: " << *bottom[0]->cpu_diff(); + } +#endif + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_); + } + if (this->param_propagate_down(0)) { + // making reorders if needed. 
+    bwdw_top_diff->sync_before_read();
+    bwdw_bottom_data->sync_before_read();
+    // update top that head at prv
+    bwdw_weights_diff->sync_before_write();
+    if (this->param_propagate_down(1)) {
+      CHECK(bwdw_bias_diff);
+      bwdw_bias_diff->sync_before_write();
+    }
+    PERFORMANCE_EVENT_ID_INIT(perf_id_bw_weights_,
+      PERFORMANCE_MKLDNN_NAME_DETAILED("BW", "_weights"));
+    PERFORMANCE_MEASUREMENT_BEGIN();
+    ipBwdWeights.submit();
+    PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_weights_);
+  }
+}
+
+#ifdef CPU_ONLY
+STUB_GPU(MKLDNNInnerProductLayer);
+#else
+
+template <typename Dtype>
+void MKLDNNInnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom
+                                , const vector<Blob<Dtype>*>& top)
+{
+    NOT_IMPLEMENTED;
+}
+
+template <typename Dtype>
+void MKLDNNInnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top
+                                , const vector<bool>& propagate_down
+                                , const vector<Blob<Dtype>*>& bottom)
+{
+    NOT_IMPLEMENTED;
+}
+#endif
+
+INSTANTIATE_CLASS(MKLDNNInnerProductLayer);
+} // namespace caffe
+#endif // #ifdef MKLDNN_SUPPORTED
diff --git a/src/caffe/layers/mkldnn_lrn_layer.cpp b/src/caffe/layers/mkldnn_lrn_layer.cpp
new file mode 100644
index 00000000000..6c589c73e63
--- /dev/null
+++ b/src/caffe/layers/mkldnn_lrn_layer.cpp
@@ -0,0 +1,394 @@
+/*
+All modification made by Intel Corporation: © 2016 Intel Corporation
+
+All contributions by the University of California:
+Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
+All rights reserved.
+
+All other contributions:
+Copyright (c) 2014, 2015, the respective contributors
+All rights reserved.
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
+
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef MKLDNN_SUPPORTED +#include + +#include "caffe/layer.hpp" +#include "caffe/layers/mkldnn_layers.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +MKLDNNLRNLayer::MKLDNNLRNLayer(const LayerParameter& param) + : MKLDNNLayer(), Layer(param) + , fwd_top_data(NULL), fwd_bottom_data(NULL) + , bwd_top_diff(NULL), bwd_bottom_diff(NULL) + , lrnFwd_pd(NULL), lrnBwd_pd(NULL) + , fwd_top_data_memory(NULL), bwd_bottom_diff_memory(NULL) + , scratch_memory(NULL) + , fwd_bottom_data_primitive(NULL), bwd_top_diff_primitive(NULL) + , alpha_(0), beta_(0), k_(0) + , size_(0), num_(0), width_(0), height_(0), channels_(0) +{ + PERFORMANCE_EVENT_ID_RESET(perf_id_fw_); + PERFORMANCE_EVENT_ID_RESET(perf_id_bw_); +} + +template +void MKLDNNLRNLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) +{ + VLOG(1) << "MKLDNNLRNLayer::LayerSetUp: " << this->layer_param_.name(); + + Layer::LayerSetUp(bottom, top); + + size_ = this->layer_param_.lrn_param().local_size(); + CHECK_EQ(size_ % 2, 1) << "LRN only supports odd values for local_size"; + + // Fwd, Bwd primitives and lrn_buffer_ are allocated in "Lazy" + // mode, because here we don't know + // what layout is used by neighbours. 
+} + +template +void MKLDNNLRNLayer::Reshape(const vector*>& bottom + ,const vector*>& top) +{ + VLOG(1) << "MKLDNNLRNLayer::Reshape: " << this->layer_param_.name(); + alpha_ = this->layer_param_.lrn_param().alpha(); + beta_ = this->layer_param_.lrn_param().beta(); + + // TODO: k_ is not used now in mkldnn + k_ = this->layer_param_.lrn_param().k(); + + width_ = bottom[0]->width(); + height_ = bottom[0]->height(); + num_ = bottom[0]->num(); + channels_ = bottom[0]->channels(); + + CHECK_EQ(4, bottom[0]->num_axes()) + << "Input must have 4 axes, corresponding to (num, channels, height, width)"; + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + top[0]->Reshape(num_, channels_, height_, width_); + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + top[0]->Reshape(num_, channels_, height_, width_); + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } +} + +template +void MKLDNNLRNLayer::InitLRNFwd(const vector*>& bottom, const vector*>& top) +{ + if (std::is_same::value) NOT_IMPLEMENTED; + auto propagation = this->phase_ == TEST ? 
prop_kind::forward_scoring : prop_kind::forward_training; + + algorithm lrn_algorithm; + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + lrn_algorithm = algorithm::lrn_across_channels; + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + if (this->phase_ == TEST) + lrn_algorithm = algorithm::lrn_within_channel; + else + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } + + int32_t n = this->num_; + int32_t iw = this->width_; + int32_t ih = this->height_; + int32_t ic = this->channels_; + + bool bottom_data_is_prv = (const_cast(bottom[0]->prv_data()) != NULL); + + engine cpu_engine = CpuEngine::Instance().get_engine(); + memory::data_type mpcsn = memory::data_type::f32; + // ---- Initialize memory descriptors ------------- + memory::dims tz = {n, ic, ih, iw}; + shared_ptr top_md; + shared_ptr usr_mpd, prv_mpd; + if (bottom_data_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(bottom[0]); + bottom_md.reset(new memory::desc(mem_descr->prv_memory_pd()->desc())); + usr_mpd = mem_descr->usr_memory_pd(); + prv_mpd = mem_descr->prv_memory_pd(); + } else { + bottom_md.reset(new memory::desc({tz}, mpcsn, memory::format::nchw)); + usr_mpd.reset(new memory::primitive_desc(*bottom_md, cpu_engine)); + } + top_md = bottom_md; + + // ---- Initialize LRN primitive descriptor ------------- + lrn_forward::desc lrnFwd_desc(propagation, lrn_algorithm, *bottom_md, + size_, alpha_, beta_); + // ---- Determining engine to use ----------------------- + std::string subengines = this->layer_param_.engine(); + if (subengines == "" || subengines == "MKLDNN") + subengines = "MKLDNN:CPU"; + EngineParser ep(subengines); + unsigned subEngineIndex = 0; + for(; subEngineIndex < ep.getNumberOfSubEngines(); subEngineIndex++) { + try { + lrnFwd_pd.reset(new lrn_forward::primitive_desc(lrnFwd_desc, + ep.getMKLDNNSubEngine(subEngineIndex))); + } + catch(...) 
{ + continue; + } + break; + } + + CHECK(lrnFwd_pd); + // ---- Create priv memory primitive descriptors stored as class members ------------- + typedef typename memory::primitive_desc MemPD; // short name for memory::primitive_desc + shared_ptr prv_fwd_bottom_data_memory_pd(new MemPD(lrnFwd_pd->src_primitive_desc())); + shared_ptr prv_fwd_top_data_memory_pd(new MemPD(lrnFwd_pd->dst_primitive_desc())); + + // ---- Create usr memory primitive descriptors ------------- + memory::format mfmt_nchw = memory::format::nchw; + + shared_ptr usr_data_memory_pd(new MemPD({{tz}, mpcsn, mfmt_nchw}, cpu_engine)); + + // --- init primitive and prv_memory descriptors ---------------------- + fwd_bottom_data.reset(new MKLDNNData(usr_data_memory_pd, prv_fwd_bottom_data_memory_pd, bottom[0], this)); + fwd_bottom_data->name = "fwd_bottom_data @ " + this->layer_param_.name(); + fwd_bottom_data_primitive = fwd_bottom_data->create_input(false); + fwd_top_data.reset(new MKLDNNData(usr_mpd, prv_fwd_top_data_memory_pd, top[0], this)); + fwd_top_data->name = "fwd_top_data @ " + this->layer_param_.name(); + fwd_top_data_memory = fwd_top_data->create_output_memory(); + + if ( propagation == prop_kind::forward_training ) { + memory::primitive_desc scratch_mpd(lrnFwd_pd->workspace_primitive_desc()); + scratch_memory.reset(new memory(scratch_mpd)); + lrnFwd.reset(new lrn_forward(*lrnFwd_pd, *fwd_bottom_data_primitive, *scratch_memory, *fwd_top_data_memory)); + } else { + lrnFwd.reset(new lrn_forward(*lrnFwd_pd, *fwd_bottom_data_primitive, *fwd_top_data_memory)); + } + //fwd_bottom_data->set_mkldnn_primitive(lrnFwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive fwd_bottom_data_primitive_transfer(fwd_bottom_data_primitive); + fwd_bottom_data->set_mkldnn_primitive(fwd_bottom_data_primitive_transfer); + + //fwd_top_data->set_mkldnn_primitive(lrnFwd); //Wrong passed primitive! (TODO: Checking!) 
+ MKLDNNPrimitive fwd_top_data_memory_transfer(fwd_top_data_memory); + fwd_top_data->set_mkldnn_primitive(fwd_top_data_memory_transfer); +} + + +template +void MKLDNNLRNLayer::Forward_cpu(const vector*>& bottom + ,const vector*>& top) +{ + VLOG(1) << "MKLDNNLRNLayer::Forward_cpu: " << this->layer_param_.name(); + if( lrnFwd_pd == NULL) + InitLRNFwd(bottom, top); + // making reorders if needed. + fwd_bottom_data->sync_before_read(); + // update top that head at prv + fwd_top_data->sync_before_write(); + + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKLDNN_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + lrnFwd.submit(); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); +} + +template +void MKLDNNLRNLayer::InitLRNBwd(const vector*>& top + ,const vector& propagate_down + ,const vector*>& bottom) +{ + if (std::is_same::value) NOT_IMPLEMENTED; + + algorithm lrn_algorithm; + switch (this->layer_param_.lrn_param().norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + lrn_algorithm = algorithm::lrn_across_channels; + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } + + int32_t n = this->num_; + int32_t iw = this->width_; + int32_t ih = this->height_; + int32_t ic = this->channels_; + + bool top_diff_is_prv = (const_cast(top[0]->prv_diff()) != NULL); + + engine cpu_engine = CpuEngine::Instance().get_engine(); + memory::data_type mpcsn = memory::data_type::f32; + // ---- Initialize memory descriptors ------------- + memory::dims tz = {n, ic, ih, iw}; + shared_ptr bottom_diff_md, top_diff_md; + shared_ptr usr_diff_mpd, prv_diff_mpd; + if (top_diff_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(top[0]); + memory::format bwd_prv_top_diff_mfmt = static_cast(mem_descr->prv_memory_pd()->desc().data.format); +#ifdef DEBUG + LOG(INFO) << "MKLDNNLRNLayer::InitLRNBwd: memory format of prv top diff is: " << bwd_prv_top_diff_mfmt; +#endif + 
top_diff_md.reset(new memory::desc(mem_descr->prv_memory_pd()->desc())); + usr_diff_mpd = mem_descr->usr_memory_pd(); + prv_diff_mpd = mem_descr->prv_memory_pd(); + + bool bottom_data_is_prv = (const_cast(bottom[0]->prv_data()) != NULL); + if (bottom_data_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(bottom[0]); + memory::format fwd_prv_bottom_data_mfmt = static_cast(mem_descr->prv_memory_pd()->desc().data.format); +#ifdef DEBUG + LOG(INFO) << "MKLDNNLRNLayer::InitLRNBwd: memory format of prv bottom data is: " << fwd_prv_bottom_data_mfmt; +#endif + if (bwd_prv_top_diff_mfmt != fwd_prv_bottom_data_mfmt) + { +#ifdef DEBUG + LOG(INFO) << "MKLDNNLRNLayer::InitLRNBwd: Reorder the prv top/bottom diff to the format of prv bottom data! (Performance consideration)"; +#endif + top_diff_md.reset(new memory::desc({tz}, mpcsn, fwd_prv_bottom_data_mfmt)); + } + //top[0]->set_prv_diff_descriptor(NULL); + } + } else { + memory::format bwd_cmfmt = memory::format::nchw; + bool bottom_data_is_prv = (const_cast(bottom[0]->prv_data()) != NULL); + if (bottom_data_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(bottom[0]); + memory::format fwd_prv_bottom_data_mfmt = static_cast(mem_descr->prv_memory_pd()->desc().data.format); +#ifdef DEBUG + LOG(INFO) << "MKLDNNLRNLayer::InitLRNBwd: memory format of prv bottom data is: " << fwd_prv_bottom_data_mfmt; + LOG(INFO) << "MKLDNNLRNLayer::InitLRNBwd: Reorder the usr top/bottom diff to the format of prv bottom data! 
(Performance consideration)"; +#endif + bwd_cmfmt = fwd_prv_bottom_data_mfmt; + //top[0]->set_prv_diff_descriptor(NULL); + } + + top_diff_md.reset(new memory::desc({tz}, mpcsn, bwd_cmfmt)); + usr_diff_mpd.reset(new memory::primitive_desc(*top_diff_md, cpu_engine)); + } + bottom_diff_md = top_diff_md; + + // ---- Initialize LRN primitive descriptor ------------- + lrn_backward::desc lrnBwd_desc(lrn_algorithm, *bottom_md, *top_diff_md, + size_, alpha_, beta_); + // ---- Determining engine to use ----------------------- + std::string subengines = this->layer_param_.engine(); + if (subengines == "" || subengines == "MKLDNN") + subengines = "MKLDNN:CPU"; + EngineParser ep(subengines); + unsigned subEngineIndex = 0; + for(; subEngineIndex < ep.getNumberOfSubEngines(); subEngineIndex++) { + try { + lrnBwd_pd.reset(new lrn_backward::primitive_desc(lrnBwd_desc, + ep.getMKLDNNSubEngine(subEngineIndex), *lrnFwd_pd)); + } + catch(...) { + continue; + } + break; + } + CHECK(lrnBwd_pd); + // ---- Create priv memory primitive descriptors stored as class members ------------- + typedef typename memory::primitive_desc MemPD; // short name for memory::primitive_desc + shared_ptr prv_bwd_bottom_diff_memory_pd(new MemPD(lrnBwd_pd->diff_src_primitive_desc())); + shared_ptr prv_bwd_top_diff_memory_pd(new MemPD(lrnBwd_pd->diff_dst_primitive_desc())); + + // ---- Create usr memory primitive descriptors ------------- + memory::format mfmt_nchw = memory::format::nchw; + + shared_ptr usr_data_memory_pd(new MemPD({{tz}, mpcsn, mfmt_nchw}, cpu_engine)); + + // --- init primitive and prv_memory descriptors ---------------------- + bwd_bottom_diff.reset(new MKLDNNDiff(usr_data_memory_pd, prv_bwd_bottom_diff_memory_pd, bottom[0], this)); + bwd_bottom_diff->name = "bwd_bottom_diff_data @ " + this->layer_param_.name(); + bwd_bottom_diff_memory = bwd_bottom_diff->create_output_memory(); + bwd_top_diff.reset(new MKLDNNDiff(usr_diff_mpd, prv_bwd_top_diff_memory_pd, top[0], this)); + bwd_top_diff->name 
= "bwd_top_diff_data @ " + this->layer_param_.name(); + bwd_top_diff_primitive = bwd_top_diff->create_input(false); + + lrnBwd.reset(new lrn_backward(*lrnBwd_pd, *fwd_bottom_data_primitive, *bwd_top_diff_primitive, *scratch_memory, *bwd_bottom_diff_memory)); + //bwd_bottom_diff->set_mkldnn_primitive(lrnBwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwd_bottom_diff_memory_transfer(bwd_bottom_diff_memory); + bwd_bottom_diff->set_mkldnn_primitive(bwd_bottom_diff_memory_transfer); + + //bwd_top_diff->set_mkldnn_primitive(lrnBwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwd_top_diff_primitive_transfer(bwd_top_diff_primitive); + bwd_top_diff->set_mkldnn_primitive(bwd_top_diff_primitive_transfer); +} + + +template +void MKLDNNLRNLayer::Backward_cpu(const vector*>& top + ,const vector& propagate_down + ,const vector*>& bottom) +{ + VLOG(1) << "MKLDNNLRNLayer::Backward_cpu: " << this->layer_param_.name(); + if (!propagate_down[0]) { + return; + } + if( lrnBwd_pd == NULL) + InitLRNBwd(top, propagate_down, bottom); + bwd_top_diff->sync_before_read(); + bwd_bottom_diff->sync_before_write(); + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_, PERFORMANCE_MKLDNN_NAME("BW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + lrnBwd.submit(); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_); +} + +#ifdef CPU_ONLY +STUB_GPU(MKLDNNLRNLayer); +#else +template +void MKLDNNLRNLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) +{NOT_IMPLEMENTED;} +template +void MKLDNNLRNLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down + ,const vector*>& bottom) +{NOT_IMPLEMENTED;} +#endif + +INSTANTIATE_CLASS(MKLDNNLRNLayer); +} // namespace caffe +#endif // #ifdef MKLDNN_SUPPORTED diff --git a/src/caffe/layers/mkldnn_pooling_layer.cpp b/src/caffe/layers/mkldnn_pooling_layer.cpp new file mode 100644 index 00000000000..40f5cf228a4 --- /dev/null +++ b/src/caffe/layers/mkldnn_pooling_layer.cpp @@ -0,0 +1,546 @@ +/* +All modification made by Intel 
Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef MKLDNN_SUPPORTED +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/mkldnn_layers.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" + + +namespace caffe { + +template +void MKLDNNPoolingLayer::LayerSetUp(const vector*>& bottom + ,const vector*>& top) +{ + VLOG(1) << "MKLDNNPoolingLayer::LayerSetUp: " << this->layer_param_.name(); + + Layer::LayerSetUp(bottom, top); + PoolingParameter pool_param = this->layer_param_.pooling_param(); + + if (pool_param.global_pooling()) { + CHECK(!(pool_param.has_kernel_size() || pool_param.has_kernel_h() || pool_param.has_kernel_w())) + << "With Global_pooling: true Filter size cannot specified"; + } else { + CHECK(!pool_param.has_kernel_size() != !(pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "Filter size is kernel_size OR kernel_h and kernel_w; not both"; + CHECK(pool_param.has_kernel_size() ||(pool_param.has_kernel_h() && pool_param.has_kernel_w())) + << "For non-square filters both kernel_h and kernel_w are required."; + } + CHECK((!pool_param.has_pad() && pool_param.has_pad_h() && pool_param.has_pad_w()) + || (!pool_param.has_pad_h() && !pool_param.has_pad_w())) + << "pad is pad OR pad_h and pad_w are required."; + CHECK((!pool_param.has_stride() && pool_param.has_stride_h() && pool_param.has_stride_w()) + || (!pool_param.has_stride_h() && !pool_param.has_stride_w())) + << "Stride is stride OR stride_h and stride_w are required."; + + global_pooling_ = pool_param.global_pooling(); + if (global_pooling_) { + kernel_h_ = bottom[0]->height(); + kernel_w_ = bottom[0]->width(); + } else { + if (pool_param.has_kernel_size()) { + kernel_h_ = kernel_w_ = pool_param.kernel_size(); + } else { + kernel_h_ = pool_param.kernel_h(); + kernel_w_ = pool_param.kernel_w(); + } + } + CHECK_GT(kernel_h_, 0) << "Filter dimensions cannot be zero."; + CHECK_GT(kernel_w_, 0) << "Filter dimensions cannot be zero."; + if 
(!pool_param.has_pad_h()) { + pad_t_ = pad_b_ = pad_l_ = pad_r_ = pool_param.pad(); + } else { + pad_t_ = pad_b_ = pool_param.pad_h(); + pad_l_ = pad_r_ = pool_param.pad_w(); + } + if (!pool_param.has_stride_h()) { + stride_h_ = stride_w_ = pool_param.stride(); + } else { + stride_h_ = pool_param.stride_h(); + stride_w_ = pool_param.stride_w(); + } + if (global_pooling_) { + CHECK(pad_t_ == 0 && pad_l_ == 0 && stride_h_ == 1 && stride_w_ == 1) + << "With Global_pooling: true; only pad = 0 and stride = 1"; + } + if (pad_t_ != 0 || pad_l_ != 0) { + CHECK(this->layer_param_.pooling_param().pool() == PoolingParameter_PoolMethod_AVE + || this->layer_param_.pooling_param().pool() == PoolingParameter_PoolMethod_MAX) + << "Padding implemented only for average and max pooling."; + CHECK_LT(pad_t_, kernel_h_); + CHECK_LT(pad_l_, kernel_w_); + } + + height_out_ = static_cast(ceil(static_cast( + bottom[0]->height() + pad_t_ + pad_b_ - kernel_h_) / stride_h_)) + 1; + width_out_ = static_cast(ceil(static_cast( + bottom[0]->width() + pad_r_ + pad_l_ - kernel_w_) / stride_w_)) + 1; + + if (pad_t_ || pad_b_ || pad_r_ || pad_l_) { + // If we have padding, ensure that the last pooling starts strictly + // inside the image (instead of at the padding); otherwise clip the last. 
+ if ((height_out_ - 1) * stride_h_ >= bottom[0]->height() + pad_t_) { + --height_out_; + } + if ((width_out_ - 1) * stride_w_ >= bottom[0]->width() + pad_l_) { + --width_out_; + } + CHECK_LT((height_out_ - 1) * stride_h_, bottom[0]->height() + pad_t_); + CHECK_LT((width_out_ - 1) * stride_w_, bottom[0]->width() + pad_l_); + } + else + { + // If user did not define padding, just use the exclude padding + force_exclude_padding_flag_ = true; + } + + //Add the pad to make sure h/w + kernel_h/w_ can be exact division by stride_h/w_ + auto h = bottom[0]->height() + pad_t_; + while (h + pad_b_ < stride_h_ * (height_out_ - 1) + kernel_h_) pad_b_++; + + auto w = bottom[0]->width() + pad_l_; + while (w + pad_r_ < stride_w_ * (width_out_ - 1) + kernel_w_) pad_r_++; +} + +template +void MKLDNNPoolingLayer::Reshape(const vector*>& bottom + ,const vector*>& top) +{ + VLOG(1) << "MKLDNNPoolingLayer::Reshape: " << this->layer_param_.name(); + + num_ = bottom[0]->num(); + channels_ = bottom[0]->channels(); + height_ = bottom[0]->height(); + width_ = bottom[0]->width(); + + CHECK_EQ(4, bottom[0]->num_axes()) << "Input must have 4 axes, " + << "corresponding to (num, channels, height, width)"; + + top[0]->Reshape(bottom[0]->num(), channels_, height_out_, width_out_); + + if (top.size() > 1) { + (reinterpret_cast* > (top[1]) )->Reshape(num_, + channels_, height_out_, width_out_); + } + if (top.size() == 1) { + max_idx_.Reshape(bottom[0]->num(), channels_, height_out_, width_out_); + } +} + +template +void MKLDNNPoolingLayer::InitPoolingFwd(const vector*>& bottom, const vector*>& top) +{ + if (std::is_same::value) NOT_IMPLEMENTED; + + auto propagation = this->phase_ == TEST ? 
prop_kind::forward_scoring : prop_kind::forward_training; + + algorithm pooling_algorithm; + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + pooling_algorithm = algorithm::pooling_max; + break; + case PoolingParameter_PoolMethod_AVE: + if (this->layer_param_.pooling_param().avg_include_pad()) { + pooling_algorithm = algorithm::pooling_avg_include_padding; + }else { + pooling_algorithm = algorithm::pooling_avg_exclude_padding; + } + // If user did not define padding + // bottom[0]->height/width() + kernel_h/w_ cannot be exact division by stride_h/w_ + // use the exclude padding to align with the result of Caffe + // for exact division situation, exclude padding and include padding will have the same results + if (force_exclude_padding_flag_ == true) + { + pooling_algorithm = algorithm::pooling_avg_exclude_padding; + } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } + + int32_t n = this->num_; + int32_t c = this->channels_; + int32_t ih = this->height_; + int32_t iw = this->width_; + int32_t oh = this->height_out_; + int32_t ow = this->width_out_; + + int32_t kh = this->kernel_h_; + int32_t kw = this->kernel_w_; + + int32_t sh = this->stride_h_; + int32_t sw = this->stride_w_; + + int32_t pt = this->pad_t_; + int32_t pb = this->pad_b_; + int32_t pl = this->pad_l_; + int32_t pr = this->pad_r_; + + bool bottom_data_is_prv = (const_cast(bottom[0]->prv_data()) != NULL); + + engine cpu_engine = CpuEngine::Instance().get_engine(); + memory::data_type mpcsn = memory::data_type::f32; + memory::dims bottom_tz = {n, c, ih, iw}; + memory::dims top_tz = {n, c, oh, ow}; + memory::format mfmt_nchw = memory::format::nchw; + + // ---- Initialize memory descriptors ------------- + typedef typename memory::primitive_desc MemPD; // short name for memory::primitive_desc + memory::format cmfmt = mfmt_nchw; + + shared_ptr usr_bottom_data_mpd(new 
MemPD({{bottom_tz}, mpcsn, mfmt_nchw}, cpu_engine)); + shared_ptr usr_top_data_mpd(new MemPD({{top_tz}, mpcsn, mfmt_nchw}, cpu_engine)); + + if (bottom_data_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(bottom[0]); + cmfmt = static_cast(mem_descr->prv_memory_pd()->desc().data.format); + mpcsn = static_cast(mem_descr->prv_memory_pd()->desc().data.data_type); + } + shared_ptr init_fwd_bottom_md(new memory::desc({bottom_tz}, mpcsn, cmfmt)); + shared_ptr init_fwd_top_md(new memory::desc({top_tz}, mpcsn, cmfmt)); + + // ---- Initialize pooling primitive descriptor ------------- + pooling_forward::desc poolingFwd_desc(propagation, pooling_algorithm, *init_fwd_bottom_md,*init_fwd_top_md + , {sh, sw}, {kh, kw}, {pt, pl}, {pb, pr}, padding_kind::zero); + // ---- Determining engine to use ----------------------- + std::string subengines = this->layer_param_.engine(); + if (subengines == "" || subengines == "MKLDNN") + subengines = "MKLDNN:CPU"; + EngineParser ep(subengines); + unsigned subEngineIndex = 0; + for(; subEngineIndex < ep.getNumberOfSubEngines(); subEngineIndex++) { + try { + poolingFwd_pd.reset(new pooling_forward::primitive_desc(poolingFwd_desc, + ep.getMKLDNNSubEngine(subEngineIndex))); + } + catch(...) { + continue; + } + break; + } + + CHECK(poolingFwd_pd); + engine engine = ep.getMKLDNNSubEngine(subEngineIndex); + + // ---- Initialize remaining memory descriptors ------------- + shared_ptr prv_fwd_bottom_data_mpd; + shared_ptr prv_fwd_top_data_mpd; + if (bottom_data_is_prv) { + prv_fwd_bottom_data_mpd.reset(new MemPD(*init_fwd_bottom_md, engine)); + prv_fwd_top_data_mpd.reset(new MemPD(*init_fwd_top_md, engine)); + } + + // ---- Create prv memory --------------------- + + // We'll output the mask to top[1] if it's of size >1. + uint32_t* mask = NULL; // suppress warnings about uninitalized variables + // We'll output the mask to top[1] if it's of size >1. + const bool use_top_mask = top.size() > 1; + mask = (use_top_mask) ? 
reinterpret_cast(top[1]->mutable_cpu_data()) + : max_idx_.mutable_cpu_data(); + + // --- init primitive and prv_memory descriptors ---------------------- + fwd_bottom_data.reset(new MKLDNNData(usr_bottom_data_mpd, prv_fwd_bottom_data_mpd, bottom[0], this)); + fwd_bottom_data_primitive = fwd_bottom_data->create_input(false); + + fwd_top_data.reset(new MKLDNNData(usr_top_data_mpd, prv_fwd_top_data_mpd, top[0], this)); + fwd_top_data_memory = fwd_top_data->create_output_memory(); + + if (propagation == prop_kind::forward_training && + pooling_algorithm != algorithm::pooling_avg_exclude_padding && + pooling_algorithm != algorithm::pooling_avg_include_padding) { + indices_pd.reset(new MemPD(poolingFwd_pd->workspace_primitive_desc())); + indices_memory.reset(new memory(*indices_pd, reinterpret_cast(mask))); + poolingFwd.reset(new pooling_forward(*poolingFwd_pd, *fwd_bottom_data_primitive, *fwd_top_data_memory, *indices_memory)); + } else { + poolingFwd.reset(new pooling_forward(*poolingFwd_pd, *fwd_bottom_data_primitive, *fwd_top_data_memory)); + } + //fwd_bottom_data->set_mkldnn_primitive(poolingFwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive fwd_bottom_data_primitive_transfer(fwd_bottom_data_primitive); + fwd_bottom_data->set_mkldnn_primitive(fwd_bottom_data_primitive_transfer); + + //fwd_top_data->set_mkldnn_primitive(poolingFwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive fwd_top_data_memory_transfer(fwd_top_data_memory); + fwd_top_data->set_mkldnn_primitive(fwd_top_data_memory_transfer); +} + +// TODO(Yangqing): Is there a faster way to do pooling in the channel-first +// case? 
+template +void MKLDNNPoolingLayer::Forward_cpu(const vector*>& bottom + ,const vector*>& top) +{ + VLOG(1) << "MKLDNNPoolingLayer::Forward_cpu: " << this->layer_param_.name(); +#ifdef DEBUG + LOG(INFO) << "MKLDNNPoolingLayer::Forward_cpu: " << this->layer_param_.name(); +#endif + + if (NULL == poolingFwd_pd) + InitPoolingFwd(bottom, top); + // making reorders if needed. + fwd_bottom_data->sync_before_read(); + // update top that head at prv + fwd_top_data->sync_before_write(); + + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKLDNN_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + poolingFwd.submit(); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); +} + +template +void MKLDNNPoolingLayer::InitPoolingBwd(const vector*>& top + , const vector& propagate_down + , const vector*>& bottom) +{ + if (std::is_same::value) NOT_IMPLEMENTED; + + algorithm pooling_algorithm; + switch (this->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + pooling_algorithm = algorithm::pooling_max; + break; + case PoolingParameter_PoolMethod_AVE: + if (this->layer_param_.pooling_param().avg_include_pad()) { + pooling_algorithm = algorithm::pooling_avg_include_padding; + }else { + pooling_algorithm = algorithm::pooling_avg_exclude_padding; + } + + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } + + int32_t n = this->num_; + int32_t c = this->channels_; + int32_t ih = this->height_; + int32_t iw = this->width_; + int32_t oh = this->height_out_; + int32_t ow = this->width_out_; + + int32_t kh = this->kernel_h_; + int32_t kw = this->kernel_w_; + + int32_t sh = this->stride_h_; + int32_t sw = this->stride_w_; + + int32_t pt = this->pad_t_; + int32_t pb = this->pad_b_; + + int32_t pr = this->pad_r_; + int32_t pl = this->pad_l_; + + bool top_diff_is_prv = (const_cast(top[0]->prv_diff()) != NULL); + + engine cpu_engine = CpuEngine::Instance().get_engine(); + memory::data_type 
mpcsn = memory::data_type::f32; + memory::dims bottom_tz = {n, c, ih, iw}; + memory::dims top_tz = {n, c, oh, ow}; + memory::format mfmt_nchw = memory::format::nchw; + + // ---- Initialize memory descriptors ------------- + typedef typename memory::primitive_desc MemPD; // short name for memory::primitive_desc + + memory::format bwd_cmfmt = mfmt_nchw; + if (top_diff_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(top[0]); + bwd_cmfmt = static_cast(mem_descr->prv_memory_pd()->desc().data.format); + } + + bool bottom_data_is_prv = (const_cast(bottom[0]->prv_data()) != NULL); + if (bottom_data_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(bottom[0]); + memory::format fwd_prv_bottom_data_mfmt = static_cast(mem_descr->prv_memory_pd()->desc().data.format); +#ifdef DEBUG + LOG(INFO) << "MKLDNNPoolingLayer::InitPoolingBwd: memory format of prv bottom data is: " << fwd_prv_bottom_data_mfmt; + LOG(INFO) << "MKLDNNPoolingLayer::InitPoolingBwd: Reorder the top and bottom diff to the format of prv bottom data! 
(Performance consideration)"; +#endif + bwd_cmfmt = fwd_prv_bottom_data_mfmt; + } + + shared_ptr init_bwd_bottom_md(new memory::desc({bottom_tz}, mpcsn, bwd_cmfmt)); + shared_ptr init_bwd_top_md(new memory::desc({top_tz}, mpcsn, bwd_cmfmt)); + + shared_ptr usr_bottom_data_mpd(new MemPD({{bottom_tz}, mpcsn, mfmt_nchw}, cpu_engine)); + shared_ptr usr_top_data_mpd(new MemPD({{top_tz}, mpcsn, mfmt_nchw}, cpu_engine)); + // ---- Initialize pooling primitive descriptor ------------- + pooling_backward::desc poolingBwd_desc(pooling_algorithm, *init_bwd_bottom_md,*init_bwd_top_md + , {sh, sw}, {kh, kw}, {pt, pl}, {pb, pr}, padding_kind::zero); + // ---- Determining engine to use ----------------------- + std::string subengines = this->layer_param_.engine(); + if (subengines == "" || subengines == "MKLDNN") + subengines = "MKLDNN:CPU"; + EngineParser ep(subengines); + unsigned subEngineIndex = 0; + for(; subEngineIndex < ep.getNumberOfSubEngines(); subEngineIndex++) { + try { + poolingBwd_pd.reset(new pooling_backward::primitive_desc(poolingBwd_desc, + ep.getMKLDNNSubEngine(subEngineIndex), *poolingFwd_pd)); + } + catch(...) 
{ + continue; + } + break; + } + + CHECK(poolingBwd_pd); + engine engine = ep.getMKLDNNSubEngine(subEngineIndex); + + // ---- Initialize remaining memory descriptors ------------- + shared_ptr prv_bwd_bottom_diff_mpd, prv_bwd_top_diff_mpd; + if (top_diff_is_prv || bottom_data_is_prv) { + prv_bwd_bottom_diff_mpd.reset(new MemPD(*init_bwd_bottom_md, engine)); + prv_bwd_top_diff_mpd.reset(new MemPD(*init_bwd_top_md, engine)); + } + + // --- init primitive and prv_memory descriptors ---------------------- + bwd_bottom_diff.reset(new MKLDNNDiff(usr_bottom_data_mpd, prv_bwd_bottom_diff_mpd, bottom[0], this)); + bwd_bottom_diff->name = "bwd_bottom_diff_data @ " + this->layer_param_.name(); + bwd_bottom_diff_memory = bwd_bottom_diff->create_output_memory(); + + bwd_top_diff.reset(new MKLDNNDiff(usr_top_data_mpd, prv_bwd_top_diff_mpd, top[0], this)); + bwd_top_diff->name = "bwd_top_diff_data @ " + this->layer_param_.name(); + bwd_top_diff_primitive = bwd_top_diff->create_input(false); + + if (pooling_algorithm != algorithm::pooling_avg_include_padding && + pooling_algorithm != algorithm::pooling_avg_exclude_padding) + poolingBwd.reset(new pooling_backward(*poolingBwd_pd, + *bwd_top_diff_primitive, *indices_memory, + *bwd_bottom_diff_memory)); + else + poolingBwd.reset(new pooling_backward(*poolingBwd_pd, + *bwd_top_diff_primitive, *bwd_bottom_diff_memory)); + //bwd_bottom_diff->set_mkldnn_primitive(poolingBwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwd_bottom_diff_memory_transfer(bwd_bottom_diff_memory); + bwd_bottom_diff->set_mkldnn_primitive(bwd_bottom_diff_memory_transfer); + + //bwd_top_diff->set_mkldnn_primitive(poolingBwd); //Wrong passed primitive! (TODO: Checking!) 
+ MKLDNNPrimitive bwd_top_diff_primitive_transfer(bwd_top_diff_primitive); + bwd_top_diff->set_mkldnn_primitive(bwd_top_diff_primitive_transfer); +} + +template +void MKLDNNPoolingLayer::Backward_cpu(const vector*>& top + , const vector& propagate_down + , const vector*>& bottom) +{ + VLOG(1) << "MKLDNNPoolingLayer::Backward_cpu: " << this->layer_param_.name(); +#ifdef DEBUG + LOG(INFO) << "MKLDNNPoolingLayer::Backward_cpu: " << this->layer_param_.name(); +#endif + + if (!propagate_down[0]) { + return; + } + if (NULL == poolingBwd_pd) + InitPoolingBwd(top, propagate_down, bottom); + + bwd_top_diff->sync_before_read(); + bwd_bottom_diff->sync_before_write(); + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_, PERFORMANCE_MKLDNN_NAME("BW")); + PERFORMANCE_MEASUREMENT_BEGIN(); +#ifdef DEBUG + if (bottom[0]->prv_data() != NULL) + { + LOG(INFO) << "Debug: Bottom prv data: " << *bottom[0]->prv_data(); + } + else + { + LOG(INFO) << "Debug: Bottom prv data is NULL!"; + //LOG(INFO) << "Debug: Bottom cpu data: " << *bottom[0]->cpu_data(); + } + + if (top[0]->prv_diff() != NULL) + { + LOG(INFO) << "Debug: Top prv diff: " << *top[0]->prv_diff(); + } + else + { + LOG(INFO) << "Debug: Top prv diff is NULL!"; + //LOG(INFO) << "Debug: Top cpu diff: " << *top[0]->cpu_diff(); + } +#endif + poolingBwd.submit(); +#ifdef DEBUG + if (bottom[0]->prv_diff() != NULL) + { + LOG(INFO) << "Debug: Bottom prv diff: " << *bottom[0]->prv_diff(); + } + else + { + LOG(INFO) << "Debug: Bottom prv diff is NULL!"; + } +#endif + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_); +} + +#ifdef CPU_ONLY +STUB_GPU(MKLDNNPoolingLayer); +#else +template +void MKLDNNPoolingLayer::Forward_gpu(const vector*>& bottom + ,const vector*>& top) +{ NOT_IMPLEMENTED; } + +template +void MKLDNNPoolingLayer::Backward_gpu(const vector*>& top + ,const vector& propagate_down + ,const vector*>& bottom) +{ NOT_IMPLEMENTED; } +#endif + +INSTANTIATE_CLASS(MKLDNNPoolingLayer); +} // namespace caffe +#endif // #ifdef MKLDNN_SUPPORTED diff --git 
a/src/caffe/layers/mkldnn_relu_layer.cpp b/src/caffe/layers/mkldnn_relu_layer.cpp new file mode 100644 index 00000000000..7eb46612ac0 --- /dev/null +++ b/src/caffe/layers/mkldnn_relu_layer.cpp @@ -0,0 +1,374 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef MKLDNN_SUPPORTED +#include +#include + +#include "caffe/layers/mkldnn_layers.hpp" + +namespace caffe { + +template +void MKLDNNReLULayer::LayerSetUp(const vector*>& bottom + ,const vector*>& top) +{ + VLOG(1) << "MKLDNNReLULayer::LayerSetUp: " << this->layer_param_.name(); + + NeuronLayer::LayerSetUp(bottom, top); +} + +template +void MKLDNNReLULayer::Reshape(const vector*>& bottom + ,const vector*>& top) +{ + VLOG(1) << "MKLDNNReLULayer::Reshape: " << this->layer_param_.name(); + + NeuronLayer::Reshape(bottom, top); + + this->width_ = bottom[0]->width(); + this->height_ = bottom[0]->height(); + this->num_ = bottom[0]->num(); + this->channels_ = bottom[0]->channels(); + +} + +template +void MKLDNNReLULayer::InitReLUFwd(const vector*>& bottom, const vector*>& top) +{ + if (std::is_same::value) NOT_IMPLEMENTED; + auto propagation = this->phase_ == TEST ? 
prop_kind::forward_scoring : prop_kind::forward_training; + int32_t n = this->num_; + int32_t iw = this->width_; + int32_t ih = this->height_; + int32_t ic = this->channels_; + + Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); + bool bottom_data_is_prv = (const_cast(bottom[0]->prv_data()) != NULL); + bool inplace = (bottom[0] == top[0]); + + engine cpu_engine = CpuEngine::Instance().get_engine(); + memory::data_type mpcsn = memory::data_type::f32; + // ---- Initialize memory descriptors ------------- + shared_ptr bottom_data_md, top_data_md; + shared_ptr usr_data_mpd(NULL), prv_data_mpd(NULL); + if (bottom_data_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(bottom[0]); + bottom_data_md.reset(new memory::desc(mem_descr->prv_memory_pd()->desc())); + usr_data_mpd = mem_descr->usr_memory_pd(); + prv_data_mpd = mem_descr->prv_memory_pd(); + } else { + bottom_data_md.reset(new memory::desc({{n, ic, ih, iw}}, mpcsn, memory::format::nchw)); + usr_data_mpd.reset(new memory::primitive_desc(*bottom_data_md, cpu_engine)); + } + top_data_md = bottom_data_md; + + // ---- Initialize relu primitive descriptor ------------- + //relu_forward::desc reluFwd_desc(propagation, *bottom_data_md, negative_slope); + // MKLDNN is deprecating standalone relu primitive in MKL-DNN. + // Now MKLDNN has eltwise primitive with eltwise_relu algorithm inside. + eltwise_forward::desc eltwise_reluFwd_desc(propagation, eltwise_relu, *bottom_data_md, negative_slope); + + // ---- Determining engine to use ----------------------- + std::string subengines = this->layer_param_.engine(); + if (subengines == "" || subengines == "MKLDNN") + subengines = "MKLDNN:CPU"; + EngineParser ep(subengines); + unsigned subEngineIndex = 0; + for(; subEngineIndex < ep.getNumberOfSubEngines(); subEngineIndex++) { + try { + reluFwd_pd.reset(new relu_forward::primitive_desc(eltwise_reluFwd_desc, + ep.getMKLDNNSubEngine(subEngineIndex))); + } + catch(...) 
{ + continue; + } + break; + } + CHECK(reluFwd_pd); + + // --- init primitive and prv_memory descriptors ---------------------- + fwd_bottom_data.reset(new MKLDNNData(usr_data_mpd, prv_data_mpd, bottom[0], this)); + fwd_bottom_data->name = "fwd_bottom_data @ " + this->layer_param_.name(); + fwd_bottom_data_primitive = fwd_bottom_data->create_input(false); + + fwd_top_data.reset(new MKLDNNData(usr_data_mpd, prv_data_mpd, top[0], this)); + fwd_top_data->name = "fwd_top_data @ " + this->layer_param_.name(); + + fwd_top_data_memory = fwd_top_data->create_output_memory(inplace); + + reluFwd.reset(new relu_forward(*reluFwd_pd, *fwd_bottom_data_primitive, *fwd_top_data_memory)); + //fwd_bottom_data->set_mkldnn_primitive(reluFwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive fwd_bottom_data_primitive_transfer(fwd_bottom_data_primitive); + fwd_bottom_data->set_mkldnn_primitive(fwd_bottom_data_primitive_transfer); + + //fwd_top_data->set_mkldnn_primitive(reluFwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive fwd_top_data_memory_transfer(fwd_top_data_memory); + fwd_top_data->set_mkldnn_primitive(fwd_top_data_memory_transfer); +} + + +template +void MKLDNNReLULayer::Forward_cpu(const vector*>& bottom + ,const vector*>& top) +{ + VLOG(1) << "MKLDNNReLULayer::Forward_cpu: " << this->layer_param_.name(); +#ifdef DEBUG + LOG(INFO) << "MKLDNNReLULayer::Forward_cpu: " << this->layer_param_.name(); +#endif + + bool inplace = (bottom[0] == top[0]); + if( reluFwd_pd == NULL) + InitReLUFwd(bottom, top); + + if(this->layer_param_.relu_param().fuse()) { + top[0]->ShareData(*bottom[0]); + return; + } + // making reorders if needed. 
+ fwd_bottom_data->sync_before_read(); + // update top that head at prv + fwd_top_data->sync_before_write(inplace); + + PERFORMANCE_EVENT_ID_INIT(perf_id_fw_, PERFORMANCE_MKLDNN_NAME("FW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + reluFwd.submit(); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_fw_); +} + +template +void MKLDNNReLULayer::InitReLUBwd(const vector*>& top + ,const vector& propagate_down + ,const vector*>& bottom) +{ + if (std::is_same::value) NOT_IMPLEMENTED; + + int32_t n = this->num_; + int32_t iw = this->width_; + int32_t ih = this->height_; + int32_t ic = this->channels_; + + Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); + bool top_diff_is_prv = top[0]->prv_diff() != NULL; + bool inplace = (bottom[0] == top[0]); + + engine cpu_engine = CpuEngine::Instance().get_engine(); + memory::data_type mpcsn = memory::data_type::f32; + + // ---- Initialize memory descriptors ------------- + shared_ptr bottom_diff_md; + shared_ptr top_diff_md; + shared_ptr top_data_md; + + shared_ptr usr_diff_mpd; + shared_ptr prv_diff_mpd; + + if (top_diff_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(top[0]); + memory::format bwd_prv_top_diff_mfmt = static_cast(mem_descr->prv_memory_pd()->desc().data.format); +#ifdef DEBUG + LOG(INFO) << "MKLDNNReLULayer::InitReLUBwd: memory format of prv top diff is: " << bwd_prv_top_diff_mfmt; +#endif + top_diff_md.reset(new memory::desc(mem_descr->prv_memory_pd()->desc())); + usr_diff_mpd = mem_descr->usr_memory_pd(); + prv_diff_mpd = mem_descr->prv_memory_pd(); + + bool bottom_data_is_prv = (const_cast(bottom[0]->prv_data()) != NULL); + if (bottom_data_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(bottom[0]); + memory::format fwd_prv_bottom_data_mfmt = static_cast(mem_descr->prv_memory_pd()->desc().data.format); +#ifdef DEBUG + LOG(INFO) << "MKLDNNReLULayer::InitReLUBwd: memory format of prv bottom data is: " << fwd_prv_bottom_data_mfmt; +#endif + if (bwd_prv_top_diff_mfmt != 
fwd_prv_bottom_data_mfmt) + { +#ifdef DEBUG + LOG(INFO) << "MKLDNNReLULayer::InitReLUBwd: Reorder the prv top/bottom diff to the format of prv bottom data! (Performance consideration)"; +#endif + prv_diff_mpd = mem_descr->prv_memory_pd(); + } + //top[0]->set_prv_diff_descriptor(NULL); + } + } else { + bool bottom_data_is_prv = (const_cast(bottom[0]->prv_data()) != NULL); + if (bottom_data_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(bottom[0]); +#ifdef DEBUG + memory::format fwd_prv_bottom_data_mfmt = static_cast(mem_descr->prv_memory_pd()->desc().data.format); + LOG(INFO) << "MKLDNNReLULayer::InitReLUBwd: memory format of prv bottom data is: " << fwd_prv_bottom_data_mfmt; + LOG(INFO) << "MKLDNNReLULayer::InitReLUBwd: Reorder the usr top/bottom diff to the format of prv bottom data! (Performance consideration)"; +#endif + prv_diff_mpd = mem_descr->prv_memory_pd(); + //top[0]->prv_data() is empty, however top[0]->get_prv_diff_descriptor() has value. + //Find root cause in the mkldnn_memory: create_output_memory() and sync_before_write() functions. + //But that a major fix, will lead the nan in the AlexNet training. + //So need investigation further, however, this will fix ICL-84. + top[0]->set_prv_diff_descriptor(NULL); + } + + top_diff_md.reset(new memory::desc({{n, ic, ih, iw}}, mpcsn, memory::format::nchw)); + usr_diff_mpd.reset(new memory::primitive_desc(*top_diff_md, cpu_engine)); + } + + top_data_md = top_diff_md; + bottom_diff_md = top_diff_md; + + // ---- Initialize relu primitive descriptor ------------- + //relu_backward::desc reluBwd_desc(*top_diff_md, *top_data_md, negative_slope); + // MKLDNN is deprecating standalone relu primitive in MKL-DNN. + // Now MKLDNN has eltwise primitive with eltwise_relu algorithm inside. 
+ eltwise_backward::desc eltwise_reluBwd_desc(eltwise_relu, *top_diff_md, *top_data_md, negative_slope); + + // ---- Determining engine to use ----------------------- + std::string subengines = this->layer_param_.engine(); + if (subengines == "" || subengines == "MKLDNN") + subengines = "MKLDNN:CPU"; + EngineParser ep(subengines); + unsigned subEngineIndex = 0; + for(; subEngineIndex < ep.getNumberOfSubEngines(); subEngineIndex++) { + try { + reluBwd_pd.reset(new relu_backward::primitive_desc(eltwise_reluBwd_desc, + ep.getMKLDNNSubEngine(subEngineIndex), *reluFwd_pd)); + } + catch(...) { + continue; + } + break; + } + CHECK(reluBwd_pd); + + // --- init primitive and prv_memory descriptors ---------------------- + bwd_top_diff.reset(new MKLDNNDiff(usr_diff_mpd, prv_diff_mpd, top[0], this)); + bwd_top_diff->name = "bwd_top_diff_data @ " + this->layer_param_.name(); + bwd_top_diff_primitive = bwd_top_diff->create_input(/* set_prv_ptr */ false); + + bwd_bottom_diff.reset(new MKLDNNDiff(usr_diff_mpd, prv_diff_mpd, bottom[0], this)); + bwd_bottom_diff->name = "bwd_bottom_diff_data @ " + this->layer_param_.name(); + bwd_bottom_diff_memory = bwd_bottom_diff->create_output_memory(inplace); + + reluBwd.reset(new relu_backward(*reluBwd_pd, *fwd_bottom_data_primitive, *bwd_top_diff_primitive, *bwd_bottom_diff_memory)); + //bwd_top_diff->set_mkldnn_primitive(reluBwd); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwd_top_diff_primitive_transfer(bwd_top_diff_primitive); + bwd_top_diff->set_mkldnn_primitive(bwd_top_diff_primitive_transfer); + + //bwd_bottom_diff->set_mkldnn_primitive(reluBwd); //Wrong passed primitive! (TODO: Checking!) 
+ MKLDNNPrimitive bwd_bottom_diff_memory_transfer(bwd_bottom_diff_memory); + bwd_bottom_diff->set_mkldnn_primitive(bwd_bottom_diff_memory_transfer); +} + +template +void MKLDNNReLULayer::Backward_cpu(const vector*>& top + , const vector& propagate_down + , const vector*>& bottom) +{ + VLOG(1) << "MKLDNNReLULayer::Backward_cpu: " << this->layer_param_.name(); +#ifdef DEBUG + LOG(INFO) << "MKLDNNReLULayer::Backward_cpu: " << this->layer_param_.name(); +#endif + + //bool inplace = (bottom[0] == top[0]); + if (!propagate_down[0]) { + return; + } + if (reluBwd_pd == NULL) { + InitReLUBwd(top, propagate_down, bottom); + } + + bwd_top_diff->sync_before_read(); + //For MKLDNN, it always create two memory for input and output + //For Intel Caffe, if we set the inplace flag to true, input and output will use one same buffer + //Then the update of output will not pass to MKLDNN + //bwd_bottom_diff->sync_before_write(inplace); //Wrong due to the MKLDNN API design. + bwd_bottom_diff->sync_before_write(); + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_, PERFORMANCE_MKLDNN_NAME("BW")); + PERFORMANCE_MEASUREMENT_BEGIN(); +#ifdef DEBUG + if (bottom[0]->prv_data() != NULL) + { + LOG(INFO) << "Debug: Bottom prv data: " << *bottom[0]->prv_data(); + } + else + { + LOG(INFO) << "Debug: Bottom prv data is NULL!"; + } + + if (top[0]->prv_diff() != NULL) + { + LOG(INFO) << "Debug: Top prv diff: " << *top[0]->prv_diff(); + } + else + { + LOG(INFO) << "Debug: Top prv diff is NULL!"; + } +#endif + reluBwd.submit(); +#ifdef DEBUG + if (bottom[0]->prv_diff() != NULL) + { + LOG(INFO) << "Debug: Bottom prv diff: " << *bottom[0]->prv_diff(); + } + else + { + LOG(INFO) << "Debug: Bottom prv diff is NULL!"; + } +#endif + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_); +} + +#ifdef CPU_ONLY +STUB_GPU(MKLDNNReLULayer); +#else +template +void MKLDNNReLULayer::Forward_gpu(const vector*>& bottom + ,const vector*>& top) +{ NOT_IMPLEMENTED; } + +template +void MKLDNNReLULayer::Backward_gpu(const vector*>& top + 
,const vector& propagate_down + ,const vector*>& bottom) +{ NOT_IMPLEMENTED; } +#endif + +INSTANTIATE_CLASS(MKLDNNReLULayer); +} // namespace caffe +#endif // #ifdef MKLDNN_SUPPORTED diff --git a/src/caffe/layers/mkldnn_split_layer.cpp b/src/caffe/layers/mkldnn_split_layer.cpp new file mode 100644 index 00000000000..ab2c5156a8a --- /dev/null +++ b/src/caffe/layers/mkldnn_split_layer.cpp @@ -0,0 +1,233 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ +#ifdef MKLDNN_SUPPORTED +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/layer.hpp" +#include "caffe/layers/mkldnn_layers.hpp" +#include "caffe/syncedmem.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +MKLDNNSplitLayer::~MKLDNNSplitLayer() { } + +template +void MKLDNNSplitLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + int count = bottom[0]->count(); + for (int i = 0; i < top.size(); ++i) { + // Do not allow in-place computation in the SplitLayer. Instead, share data + // by reference in the forward pass, and keep separate diff allocations in + // the backward pass. (Technically, it should be possible to share the diff + // blob of the first split output with the input, but this seems to cause + // some strange effects in practice...) + CHECK_NE(top[i], bottom[0]) << this->type() << " Layer does not " + "allow in-place computation."; + top[i]->ReshapeLike(*bottom[0]); + CHECK_EQ(count, top[i]->count()); + } + size_t dim_src = bottom[0]->shape().size(); + this->sizes_src_.resize(dim_src); + this->strides_src_.resize(dim_src); + for (size_t d = 0; d < dim_src; ++d) { + this->sizes_src_[d] = bottom[0]->shape()[d]; + this->strides_src_[d] = (d == 0) ? 
+ 1 : this->strides_src_[d-1]*this->sizes_src_[d-1]; + } + + // TODO: Add checking to reinitialize Backward, to be + // done when Reshape is to be supported by MKLDNN layers +} + +template +void MKLDNNSplitLayer::InitSplitBwd(const vector*>& bottom, + const vector*>& top) { + if (std::is_same::value) NOT_IMPLEMENTED; + + // We just do simple adding so scale is 1.0 for all inputs we have + std::vector scale(top.size(), 1.0); + engine cpu_engine = CpuEngine::Instance().get_engine(); + memory::data_type data_type = memory::data_type::f32; + // TODO: shouldn't we have format here that is well suited for earlier layer. + // eg. Netcompiler should some of knowledge provided + memory::format mfmt_nchw = memory::format::nchw; + memory::format diff_dst_mfmt = mfmt_nchw; + + // Dimensions of bottom and top blobs. There is a number of + // top blobs each of the same size as the bottom one + memory::dims bottom_tz = {static_cast(this->sizes_src_[0]), + static_cast(this->sizes_src_[1]), + static_cast(this->sizes_src_[2]), + static_cast(this->sizes_src_[3])}; + + shared_ptr prv_diff_dst_mpd; + shared_ptr usr_diff_dst_mpd( + new memory::primitive_desc({bottom_tz, data_type, mfmt_nchw}, + cpu_engine)); + + // We will get final destination layout of bottom diff after first top... 
+ bool first_top_diff_is_prv = (const_cast(top[0]->prv_diff()) != NULL); + + if (first_top_diff_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(top[0]); + diff_dst_mfmt = static_cast( + mem_descr->prv_memory_pd()->desc().data.format); + } + prv_diff_dst_mpd.reset(new memory::primitive_desc( + {bottom_tz, data_type, diff_dst_mfmt}, cpu_engine)); + + bwd_bottom_diff_.reset(new MKLDNNDiff( + usr_diff_dst_mpd, prv_diff_dst_mpd, bottom[0], this)); + bwd_bottom_diff_memory_ = bwd_bottom_diff_->create_output_memory(); + + memory::dims top_tz = bottom_tz; + shared_ptr usr_diff_src_mpd( + new memory::primitive_desc({top_tz, data_type, mfmt_nchw}, + cpu_engine)); + + // Gather diff descriptors of top difs (inputs for BW) + std::vector prv_diff_srcs_mpd; + boost::shared_ptr mpd_ptr; + for (int i = 0; i < top.size(); ++i) { + // If diff is in private layout then copy descriptor from it + memory::format diff_src_mfmt = mfmt_nchw; + bool top_diff_is_prv = top[i]->prv_diff() != NULL; + if (top_diff_is_prv) { + shared_ptr > mem_descr + = get_mkldnn_prv_descriptor(top[i]); + diff_src_mfmt = static_cast( + mem_descr->prv_memory_pd()->desc().data.format); + } + prv_diff_srcs_mpd.push_back(memory::primitive_desc( + {top_tz, data_type, diff_src_mfmt}, cpu_engine)); + + mpd_ptr.reset(new memory::primitive_desc({top_tz, data_type, diff_src_mfmt}, + cpu_engine) ); + bwd_top_diffs_.push_back(boost::shared_ptr >()); + bwd_top_diffs_[i].reset(new MKLDNNDiff( + usr_diff_src_mpd, mpd_ptr, top[i], this)); + bwd_top_diff_primitives_.push_back(bwd_top_diffs_[i]->create_input(false)); + bwd_top_diffs_primitives_at_.push_back(*bwd_top_diff_primitives_[i]); + } + + // ---- Determining engine to use ----------------------- + std::string subengines = this->layer_param_.engine(); + if (subengines == "" || subengines == "MKLDNN") + subengines = "MKLDNN:CPU"; + splitBwd_pd_.reset(new sum::primitive_desc({bottom_tz, data_type, diff_dst_mfmt},scale, prv_diff_srcs_mpd)); + 
CHECK(splitBwd_pd_); + + splitBwd_.reset(new sum(*splitBwd_pd_, bwd_top_diffs_primitives_at_, *bwd_bottom_diff_memory_)); + + // Descriptors need to have Split primitive referenced as + // there may be reorders to be done for inputs(tops' diffs) + // so it matches SplitBwd primitive inputs format expectations + for(int i = 0; i < top.size(); ++i) { + //bwd_top_diffs_[i]->set_mkldnn_primitive(splitBwd_); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwd_top_diff_primitive_transfer(bwd_top_diff_primitives_[i]); + bwd_top_diffs_[i]->set_mkldnn_primitive(bwd_top_diff_primitive_transfer); + } + + //bwd_bottom_diff_->set_mkldnn_primitive(splitBwd_); //Wrong passed primitive! (TODO: Checking!) + MKLDNNPrimitive bwd_bottom_diff_memory_transfer(bwd_bottom_diff_memory_); + bwd_bottom_diff_->set_mkldnn_primitive(bwd_bottom_diff_memory_transfer); +} + + +template +void MKLDNNSplitLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + // TODO: consider doing something +} + +template +void MKLDNNSplitLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + for (int i = 0; i < top.size(); ++i) { + top[i]->ShareData(*bottom[0]); + } +} + +template +void MKLDNNSplitLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) +{ + VLOG(1) << "MKLDNNSplitLayer::Backward_cpu: " << this->layer_param_.name(); + // If no gradient is to be computed for earlier layers then we do not need + // to do any computation + if (!propagate_down[0]) { + return; + } + if (splitBwd_pd_ == NULL) { + InitSplitBwd(bottom, top); + } + + for(int i = 0; i < top.size(); ++i) { + bwd_top_diffs_[i]->sync_before_read(); + } + + bwd_bottom_diff_->sync_before_write(); + + PERFORMANCE_EVENT_ID_INIT(perf_id_bw_, PERFORMANCE_MKLDNN_NAME("BW")); + PERFORMANCE_MEASUREMENT_BEGIN(); + splitBwd_.submit(); + PERFORMANCE_MEASUREMENT_END_ID(perf_id_bw_); +} + +#ifdef CPU_ONLY +STUB_GPU(MKLDNNSplitLayer); +#else +template +void 
MKLDNNSplitLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) {NOT_IMPLEMENTED;} +template +void MKLDNNSplitLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) + {NOT_IMPLEMENTED;} +#endif + +INSTANTIATE_CLASS(MKLDNNSplitLayer); + +} // namespace caffe + +#endif diff --git a/src/caffe/layers/multibox_loss_layer.cpp b/src/caffe/layers/multibox_loss_layer.cpp new file mode 100644 index 00000000000..c25109e3fc4 --- /dev/null +++ b/src/caffe/layers/multibox_loss_layer.cpp @@ -0,0 +1,421 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include +#include + +#include "caffe/layers/multibox_loss_layer.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/util/performance.hpp" + +namespace caffe { + +template +void MultiBoxLossLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + if (this->layer_param_.propagate_down_size() == 0) { + this->layer_param_.add_propagate_down(true); + this->layer_param_.add_propagate_down(true); + this->layer_param_.add_propagate_down(false); + this->layer_param_.add_propagate_down(false); + } + const MultiBoxLossParameter& multibox_loss_param = + this->layer_param_.multibox_loss_param(); + multibox_loss_param_ = this->layer_param_.multibox_loss_param(); + + num_ = bottom[0]->num(); + num_priors_ = bottom[2]->height() / 4; + // Get other parameters. + CHECK(multibox_loss_param.has_num_classes()) << "Must provide num_classes."; + num_classes_ = multibox_loss_param.num_classes(); + CHECK_GE(num_classes_, 1) << "num_classes should not be less than 1."; + share_location_ = multibox_loss_param.share_location(); + loc_classes_ = share_location_ ? 
1 : num_classes_; + background_label_id_ = multibox_loss_param.background_label_id(); + use_difficult_gt_ = multibox_loss_param.use_difficult_gt(); + mining_type_ = multibox_loss_param.mining_type(); + if (multibox_loss_param.has_do_neg_mining()) { + LOG(WARNING) << "do_neg_mining is deprecated, use mining_type instead."; + do_neg_mining_ = multibox_loss_param.do_neg_mining(); + CHECK_EQ(do_neg_mining_, + mining_type_ != MultiBoxLossParameter_MiningType_NONE); + } + do_neg_mining_ = mining_type_ != MultiBoxLossParameter_MiningType_NONE; + + if (!this->layer_param_.loss_param().has_normalization() && + this->layer_param_.loss_param().has_normalize()) { + normalization_ = this->layer_param_.loss_param().normalize() ? + LossParameter_NormalizationMode_VALID : + LossParameter_NormalizationMode_BATCH_SIZE; + } else { + normalization_ = this->layer_param_.loss_param().normalization(); + } + + if (do_neg_mining_) { + CHECK(share_location_) + << "Currently only support negative mining if share_location is true."; + } + + vector loss_shape(1, 1); + // Set up localization loss layer. + loc_weight_ = multibox_loss_param.loc_weight(); + loc_loss_type_ = multibox_loss_param.loc_loss_type(); + // fake shape. 
+ vector loc_shape(1, 1); + loc_shape.push_back(4); + loc_pred_.Reshape(loc_shape); + loc_gt_.Reshape(loc_shape); + loc_bottom_vec_.push_back(&loc_pred_); + loc_bottom_vec_.push_back(&loc_gt_); + loc_loss_.Reshape(loss_shape); + loc_top_vec_.push_back(&loc_loss_); + if (loc_loss_type_ == MultiBoxLossParameter_LocLossType_L2) { + LayerParameter layer_param; + layer_param.set_name(this->layer_param_.name() + "_l2_loc"); + layer_param.set_type("EuclideanLoss"); + layer_param.add_loss_weight(loc_weight_); + loc_loss_layer_ = LayerRegistry::CreateLayer(layer_param); + loc_loss_layer_->SetUp(loc_bottom_vec_, loc_top_vec_); + } else if (loc_loss_type_ == MultiBoxLossParameter_LocLossType_SMOOTH_L1) { + LayerParameter layer_param; + layer_param.set_name(this->layer_param_.name() + "_smooth_L1_loc"); + layer_param.set_type("SmoothL1Loss"); + layer_param.add_loss_weight(loc_weight_); + loc_loss_layer_ = LayerRegistry::CreateLayer(layer_param); + loc_loss_layer_->SetUp(loc_bottom_vec_, loc_top_vec_); + } else { + LOG(FATAL) << "Unknown localization loss type."; + } + // Set up confidence loss layer. 
+ conf_loss_type_ = multibox_loss_param.conf_loss_type(); + conf_bottom_vec_.push_back(&conf_pred_); + conf_bottom_vec_.push_back(&conf_gt_); + conf_loss_.Reshape(loss_shape); + conf_top_vec_.push_back(&conf_loss_); + if (conf_loss_type_ == MultiBoxLossParameter_ConfLossType_SOFTMAX) { + CHECK_GE(background_label_id_, 0) + << "background_label_id should be within [0, num_classes) for Softmax."; + CHECK_LT(background_label_id_, num_classes_) + << "background_label_id should be within [0, num_classes) for Softmax."; + LayerParameter layer_param; + layer_param.set_name(this->layer_param_.name() + "_softmax_conf"); + layer_param.set_type("SoftmaxWithLoss"); + layer_param.add_loss_weight(Dtype(1.)); + layer_param.mutable_loss_param()->set_normalization( + LossParameter_NormalizationMode_NONE); + SoftmaxParameter* softmax_param = layer_param.mutable_softmax_param(); + softmax_param->set_axis(1); + // Fake reshape. + vector conf_shape(1, 1); + conf_gt_.Reshape(conf_shape); + conf_shape.push_back(num_classes_); + conf_pred_.Reshape(conf_shape); + conf_loss_layer_ = LayerRegistry::CreateLayer(layer_param); + conf_loss_layer_->SetUp(conf_bottom_vec_, conf_top_vec_); + } else if (conf_loss_type_ == MultiBoxLossParameter_ConfLossType_LOGISTIC) { + LayerParameter layer_param; + layer_param.set_name(this->layer_param_.name() + "_logistic_conf"); + layer_param.set_type("SigmoidCrossEntropyLoss"); + layer_param.add_loss_weight(Dtype(1.)); + // Fake reshape. 
+ vector conf_shape(1, 1); + conf_shape.push_back(num_classes_); + conf_gt_.Reshape(conf_shape); + conf_pred_.Reshape(conf_shape); + conf_loss_layer_ = LayerRegistry::CreateLayer(layer_param); + conf_loss_layer_->SetUp(conf_bottom_vec_, conf_top_vec_); + } else { + LOG(FATAL) << "Unknown confidence loss type."; + } +} + +template +void MultiBoxLossLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + LossLayer::Reshape(bottom, top); + num_ = bottom[0]->num(); + num_priors_ = bottom[2]->height() / 4; + num_gt_ = bottom[3]->height(); + CHECK_EQ(bottom[0]->num(), bottom[1]->num()); + CHECK_EQ(num_priors_ * loc_classes_ * 4, bottom[0]->channels()) + << "Number of priors must match number of location predictions."; + CHECK_EQ(num_priors_ * num_classes_, bottom[1]->channels()) + << "Number of priors must match number of confidence predictions."; +} + +template +void MultiBoxLossLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* loc_data = bottom[0]->cpu_data(); + const Dtype* conf_data = bottom[1]->cpu_data(); + const Dtype* prior_data = bottom[2]->cpu_data(); + const Dtype* gt_data = bottom[3]->cpu_data(); + + // Retrieve all ground truth. + map > all_gt_bboxes; + GetGroundTruth(gt_data, num_gt_, background_label_id_, use_difficult_gt_, + &all_gt_bboxes); + + // Retrieve all prior bboxes. It is same within a batch since we assume all + // images in a batch are of same dimension. + vector prior_bboxes; + vector > prior_variances; + GetPriorBBoxes(prior_data, num_priors_, &prior_bboxes, &prior_variances); + + // Retrieve all predictions. + vector all_loc_preds; + GetLocPredictions(loc_data, num_, num_priors_, loc_classes_, share_location_, + &all_loc_preds); + + // Find matches between source bboxes and ground truth bboxes. 
+ vector > > all_match_overlaps; + FindMatches(all_loc_preds, all_gt_bboxes, prior_bboxes, prior_variances, + multibox_loss_param_, &all_match_overlaps, &all_match_indices_); + + num_matches_ = 0; + int num_negs = 0; + // Sample hard negative (and positive) examples based on mining type. + MineHardExamples(*bottom[1], all_loc_preds, all_gt_bboxes, prior_bboxes, + prior_variances, all_match_overlaps, multibox_loss_param_, + &num_matches_, &num_negs, &all_match_indices_, + &all_neg_indices_); + + if (num_matches_ >= 1) { + // Form data to pass on to loc_loss_layer_. + vector loc_shape(2); + loc_shape[0] = 1; + loc_shape[1] = num_matches_ * 4; + loc_pred_.Reshape(loc_shape); + loc_gt_.Reshape(loc_shape); + Dtype* loc_pred_data = loc_pred_.mutable_cpu_data(); + Dtype* loc_gt_data = loc_gt_.mutable_cpu_data(); + EncodeLocPrediction(all_loc_preds, all_gt_bboxes, all_match_indices_, + prior_bboxes, prior_variances, multibox_loss_param_, + loc_pred_data, loc_gt_data); + {PERFORMANCE_MEASUREMENT_BEGIN(); + loc_loss_layer_->Reshape(loc_bottom_vec_, loc_top_vec_); + loc_loss_layer_->Forward(loc_bottom_vec_, loc_top_vec_); + PERFORMANCE_MEASUREMENT_END_STATIC("FW_Smooth_L1");} + } else { + loc_loss_.mutable_cpu_data()[0] = 0; + } + + // Form data to pass on to conf_loss_layer_. + if (do_neg_mining_) { + num_conf_ = num_matches_ + num_negs; + } else { + num_conf_ = num_ * num_priors_; + } + if (num_conf_ >= 1) { + // Reshape the confidence data. 
+ vector conf_shape; + if (conf_loss_type_ == MultiBoxLossParameter_ConfLossType_SOFTMAX) { + conf_shape.push_back(num_conf_); + conf_gt_.Reshape(conf_shape); + conf_shape.push_back(num_classes_); + conf_pred_.Reshape(conf_shape); + } else if (conf_loss_type_ == MultiBoxLossParameter_ConfLossType_LOGISTIC) { + conf_shape.push_back(1); + conf_shape.push_back(num_conf_); + conf_shape.push_back(num_classes_); + conf_gt_.Reshape(conf_shape); + conf_pred_.Reshape(conf_shape); + } else { + LOG(FATAL) << "Unknown confidence loss type."; + } + if (!do_neg_mining_) { + // Consider all scores. + // Share data and diff with bottom[1]. + CHECK_EQ(conf_pred_.count(), bottom[1]->count()); + conf_pred_.ShareData(*(bottom[1])); + } + Dtype* conf_pred_data = conf_pred_.mutable_cpu_data(); + Dtype* conf_gt_data = conf_gt_.mutable_cpu_data(); + caffe_set(conf_gt_.count(), Dtype(background_label_id_), conf_gt_data); + EncodeConfPrediction(conf_data, num_, num_priors_, multibox_loss_param_, + all_match_indices_, all_neg_indices_, all_gt_bboxes, + conf_pred_data, conf_gt_data); + {PERFORMANCE_MEASUREMENT_BEGIN(); + conf_loss_layer_->Reshape(conf_bottom_vec_, conf_top_vec_); + conf_loss_layer_->Forward(conf_bottom_vec_, conf_top_vec_); + PERFORMANCE_MEASUREMENT_END_STATIC("FW_Softmax");} + } else { + conf_loss_.mutable_cpu_data()[0] = 0; + } + + top[0]->mutable_cpu_data()[0] = 0; + if (this->layer_param_.propagate_down(0)) { + Dtype normalizer = LossLayer::GetNormalizer( + normalization_, num_, num_priors_, num_matches_); + top[0]->mutable_cpu_data()[0] += + loc_weight_ * loc_loss_.cpu_data()[0] / normalizer; + } + if (this->layer_param_.propagate_down(1)) { + Dtype normalizer = LossLayer::GetNormalizer( + normalization_, num_, num_priors_, num_matches_); + top[0]->mutable_cpu_data()[0] += conf_loss_.cpu_data()[0] / normalizer; + } +} + +template +void MultiBoxLossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, + const vector*>& bottom) { + + if 
(propagate_down[2]) { + LOG(FATAL) << this->type() + << " Layer cannot backpropagate to prior inputs."; + } + if (propagate_down[3]) { + LOG(FATAL) << this->type() + << " Layer cannot backpropagate to label inputs."; + } + + // Back propagate on location prediction. + if (propagate_down[0]) { + Dtype* loc_bottom_diff = bottom[0]->mutable_cpu_diff(); + caffe_set(bottom[0]->count(), Dtype(0), loc_bottom_diff); + if (num_matches_ >= 1) { + vector loc_propagate_down; + // Only back propagate on prediction, not ground truth. + loc_propagate_down.push_back(true); + loc_propagate_down.push_back(false); + {PERFORMANCE_MEASUREMENT_BEGIN(); + loc_loss_layer_->Backward(loc_top_vec_, loc_propagate_down, + loc_bottom_vec_); + PERFORMANCE_MEASUREMENT_END_STATIC("BW_Smooth_L1");} + // Scale gradient. + Dtype normalizer = LossLayer::GetNormalizer( + normalization_, num_, num_priors_, num_matches_); + Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer; + caffe_scal(loc_pred_.count(), loss_weight, loc_pred_.mutable_cpu_diff()); + // Copy gradient back to bottom[0]. + const Dtype* loc_pred_diff = loc_pred_.cpu_diff(); + int count = 0; + for (int i = 0; i < num_; ++i) { + for (map >::iterator it = + all_match_indices_[i].begin(); + it != all_match_indices_[i].end(); ++it) { + const int label = share_location_ ? 0 : it->first; + const vector& match_index = it->second; + for (int j = 0; j < match_index.size(); ++j) { + if (match_index[j] <= -1) { + continue; + } + // Copy the diff to the right place. + int start_idx = loc_classes_ * 4 * j + label * 4; + caffe_copy(4, loc_pred_diff + count * 4, + loc_bottom_diff + start_idx); + ++count; + } + } + loc_bottom_diff += bottom[0]->offset(1); + } + } + } + + // Back propagate on confidence prediction. 
+ if (propagate_down[1]) { + Dtype* conf_bottom_diff = bottom[1]->mutable_cpu_diff(); + caffe_set(bottom[1]->count(), Dtype(0), conf_bottom_diff); + if (num_conf_ >= 1) { + vector conf_propagate_down; + // Only back propagate on prediction, not ground truth. + conf_propagate_down.push_back(true); + conf_propagate_down.push_back(false); + {PERFORMANCE_MEASUREMENT_BEGIN(); + conf_loss_layer_->Backward(conf_top_vec_, conf_propagate_down, + conf_bottom_vec_); + PERFORMANCE_MEASUREMENT_END_STATIC("BW_Softmax");} + // Scale gradient. + Dtype normalizer = LossLayer::GetNormalizer( + normalization_, num_, num_priors_, num_matches_); + Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer; + caffe_scal(conf_pred_.count(), loss_weight, + conf_pred_.mutable_cpu_diff()); + // Copy gradient back to bottom[1]. + const Dtype* conf_pred_diff = conf_pred_.cpu_diff(); + if (do_neg_mining_) { + int count = 0; + for (int i = 0; i < num_; ++i) { + // Copy matched (positive) bboxes scores' diff. + const map >& match_indices = all_match_indices_[i]; + for (map >::const_iterator it = + match_indices.begin(); it != match_indices.end(); ++it) { + const vector& match_index = it->second; + CHECK_EQ(match_index.size(), num_priors_); + for (int j = 0; j < num_priors_; ++j) { + if (match_index[j] <= -1) { + continue; + } + // Copy the diff to the right place. + caffe_copy(num_classes_, + conf_pred_diff + count * num_classes_, + conf_bottom_diff + j * num_classes_); + ++count; + } + } + // Copy negative bboxes scores' diff. + for (int n = 0; n < all_neg_indices_[i].size(); ++n) { + int j = all_neg_indices_[i][n]; + CHECK_LT(j, num_priors_); + caffe_copy(num_classes_, + conf_pred_diff + count * num_classes_, + conf_bottom_diff + j * num_classes_); + ++count; + } + conf_bottom_diff += bottom[1]->offset(1); + } + } else { + // The diff is already computed and stored. + bottom[1]->ShareDiff(conf_pred_); + } + } + } + + // After backward, remove match statistics. 
+ all_match_indices_.clear(); + all_neg_indices_.clear(); +} + +INSTANTIATE_CLASS(MultiBoxLossLayer); +REGISTER_LAYER_CLASS(MultiBoxLoss); + +} // namespace caffe diff --git a/src/caffe/layers/multinomial_logistic_loss_layer.cpp b/src/caffe/layers/multinomial_logistic_loss_layer.cpp index 65664998d2c..87be37910cb 100644 --- a/src/caffe/layers/multinomial_logistic_loss_layer.cpp +++ b/src/caffe/layers/multinomial_logistic_loss_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include #include diff --git a/src/caffe/layers/mvn_layer.cpp b/src/caffe/layers/mvn_layer.cpp index 8fe4ef8c0a8..09542b105fa 100644 --- a/src/caffe/layers/mvn_layer.cpp +++ b/src/caffe/layers/mvn_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/mvn_layer.hpp" diff --git a/src/caffe/layers/neuron_layer.cpp b/src/caffe/layers/neuron_layer.cpp index d7b5f389310..a005c0b2672 100644 --- a/src/caffe/layers/neuron_layer.cpp +++ b/src/caffe/layers/neuron_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/neuron_layer.hpp" diff --git a/src/caffe/layers/normalize_layer.cpp b/src/caffe/layers/normalize_layer.cpp new file mode 100644 index 00000000000..33d776304a2 --- /dev/null +++ b/src/caffe/layers/normalize_layer.cpp @@ -0,0 +1,271 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include + +#include "caffe/filler.hpp" +#include "caffe/layers/normalize_layer.hpp" + +namespace caffe { + +template +void NormalizeLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + CHECK_GE(bottom[0]->num_axes(), 2) + << "Number of axes of bottom blob must be >=2."; + buffer_.Reshape(1, bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + buffer_channel_.Reshape(1, bottom[0]->channels(), 1, 1); + buffer_spatial_.Reshape(1, 1, bottom[0]->height(), bottom[0]->width()); + NormalizeParameter norm_param = this->layer_param().norm_param(); + across_spatial_ = norm_param.across_spatial(); + if (across_spatial_) { + norm_.Reshape(bottom[0]->num(), 1, 1, 1); + } else { + norm_.Reshape(bottom[0]->num(), 1, bottom[0]->height(), bottom[0]->width()); + } + eps_ = norm_param.eps(); + int channels = bottom[0]->channels(); + int spatial_dim = bottom[0]->width() * bottom[0]->height(); + sum_channel_multiplier_.Reshape(1, channels, 1, 1); + caffe_set(channels, Dtype(1), sum_channel_multiplier_.mutable_cpu_data()); + sum_spatial_multiplier_.Reshape( + 1, 1, bottom[0]->height(), bottom[0]->width()); + caffe_set(spatial_dim, Dtype(1), sum_spatial_multiplier_.mutable_cpu_data()); + channel_shared_ = norm_param.channel_shared(); + if (this->blobs_.size() > 0) { + LOG(INFO) << "Skipping parameter initialization"; + } else { + this->blobs_.resize(1); + if (channel_shared_) { + this->blobs_[0].reset(new Blob(vector(0))); + } else { + this->blobs_[0].reset(new Blob(vector(1, channels))); + } + shared_ptr > scale_filler; + if (norm_param.has_scale_filler()) { + scale_filler.reset(GetFiller(norm_param.scale_filler())); + } else { + FillerParameter filler_param; + filler_param.set_type("constant"); + filler_param.set_value(1.0); + scale_filler.reset(GetFiller(filler_param)); + } + scale_filler->Fill(this->blobs_[0].get()); + } + if (channel_shared_) { + CHECK_EQ(this->blobs_[0]->count(), 1) + << "Scale size is inconsistent with prototxt config"; + } 
else { + CHECK_EQ(this->blobs_[0]->count(), channels) + << "Scale size is inconsistent with prototxt config"; + } + this->param_propagate_down_.resize(this->blobs_.size(), true); +} + +template +void NormalizeLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + CHECK_GE(bottom[0]->num_axes(), 2) + << "Number of axes of bottom blob must be >=2."; + top[0]->ReshapeLike(*bottom[0]); + buffer_.Reshape(1, bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + if (!across_spatial_) { + norm_.Reshape(bottom[0]->num(), 1, bottom[0]->height(), bottom[0]->width()); + } + int spatial_dim = bottom[0]->height() * bottom[0]->width(); + if (spatial_dim != sum_spatial_multiplier_.count()) { + sum_spatial_multiplier_.Reshape( + 1, 1, bottom[0]->height(), bottom[0]->width()); + caffe_set(spatial_dim, Dtype(1), + sum_spatial_multiplier_.mutable_cpu_data()); + buffer_spatial_.Reshape(1, 1, bottom[0]->height(), bottom[0]->width()); + } +} + +template +void NormalizeLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + const Dtype* scale = this->blobs_[0]->cpu_data(); + Dtype* buffer_data = buffer_.mutable_cpu_data(); + Dtype* norm_data = norm_.mutable_cpu_data(); + // add eps to avoid overflow + caffe_set(norm_.count(), Dtype(eps_), norm_data); + const Dtype* sum_channel_multiplier = sum_channel_multiplier_.cpu_data(); + const Dtype* sum_spatial_multiplier = sum_spatial_multiplier_.cpu_data(); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / num; + int spatial_dim = bottom[0]->height() * bottom[0]->width(); + int channels = bottom[0]->channels(); + for (int n = 0; n < num; ++n) { + caffe_sqr(dim, bottom_data, buffer_data); + if (across_spatial_) { + // add eps to avoid overflow + norm_data[n] = pow(caffe_cpu_asum(dim, buffer_data)+eps_, + Dtype(0.5)); + caffe_cpu_scale(dim, Dtype(1.0 / norm_data[n]), bottom_data, + top_data); + } 
else { + caffe_cpu_gemv(CblasTrans, channels, spatial_dim, Dtype(1), + buffer_data, sum_channel_multiplier, Dtype(1), + norm_data); + // compute norm + caffe_powx(spatial_dim, norm_data, Dtype(0.5), norm_data); + // scale the layer + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, spatial_dim, + 1, Dtype(1), sum_channel_multiplier, norm_data, + Dtype(0), buffer_data); + caffe_div(dim, bottom_data, buffer_data, top_data); + norm_data += spatial_dim; + } + // scale the output + if (channel_shared_) { + caffe_scal(dim, scale[0], top_data); + } else { + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, spatial_dim, + 1, Dtype(1), scale, sum_spatial_multiplier, + Dtype(0), + buffer_data); + caffe_mul(dim, top_data, buffer_data, top_data); + } + bottom_data += dim; + top_data += dim; + } +} + +template +void NormalizeLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* top_diff = top[0]->cpu_diff(); + const Dtype* top_data = top[0]->cpu_data(); + const Dtype* bottom_data = bottom[0]->cpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const Dtype* scale = this->blobs_[0]->cpu_data(); + const Dtype* norm_data = norm_.cpu_data(); + Dtype* buffer_data = buffer_.mutable_cpu_data(); + Dtype* buffer_channel = buffer_channel_.mutable_cpu_data(); + Dtype* buffer_spatial = buffer_spatial_.mutable_cpu_data(); + const Dtype* sum_channel_multiplier = sum_channel_multiplier_.cpu_data(); + const Dtype* sum_spatial_multiplier = sum_spatial_multiplier_.cpu_data(); + int count = top[0]->count(); + int num = top[0]->num(); + int dim = count / num; + int spatial_dim = top[0]->height() * top[0]->width(); + int channels = top[0]->channels(); + + // Propagate to param + if (this->param_propagate_down_[0]) { + Dtype* scale_diff = this->blobs_[0]->mutable_cpu_diff(); + if (channel_shared_) { + scale_diff[0] += + caffe_cpu_dot(count, top_data, top_diff) / scale[0]; + } else { + for (int n = 0; n < num; ++n) { + 
caffe_mul(dim, top_data+n*dim, top_diff+n*dim, buffer_data); + caffe_cpu_gemv(CblasNoTrans, channels, spatial_dim, Dtype(1), + buffer_data, sum_spatial_multiplier, Dtype(0), + buffer_channel); + // store a / scale[i] in buffer_data temporary + caffe_div(channels, buffer_channel, scale, buffer_channel); + caffe_add(channels, buffer_channel, scale_diff, scale_diff); + } + } + } + + // Propagate to bottom + if (propagate_down[0]) { + for (int n = 0; n < num; ++n) { + if (across_spatial_) { + Dtype a = caffe_cpu_dot(dim, bottom_data, top_diff); + caffe_cpu_scale(dim, a / norm_data[n] / norm_data[n], + bottom_data, bottom_diff); + caffe_sub(dim, top_diff, bottom_diff, bottom_diff); + caffe_scal(dim, Dtype(1.0 / norm_data[n]), bottom_diff); + } else { + // dot product between bottom_data and top_diff + caffe_mul(dim, bottom_data, top_diff, buffer_data); + caffe_cpu_gemv(CblasTrans, channels, spatial_dim, Dtype(1), + buffer_data, sum_channel_multiplier, Dtype(0), + buffer_spatial); + // scale bottom_diff + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, spatial_dim, + 1, Dtype(1), sum_channel_multiplier, + buffer_spatial, Dtype(0), buffer_data); + caffe_mul(dim, bottom_data, buffer_data, bottom_diff); + // divide by square of norm + caffe_powx(spatial_dim, norm_data, Dtype(2), buffer_spatial); + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, spatial_dim, + 1, Dtype(1), sum_channel_multiplier, + buffer_spatial, Dtype(0), buffer_data); + caffe_div(dim, bottom_diff, buffer_data, bottom_diff); + // subtract + caffe_sub(dim, top_diff, bottom_diff, bottom_diff); + // divide by norm + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, spatial_dim, + 1, Dtype(1), sum_channel_multiplier, norm_data, + Dtype(0), buffer_data); + caffe_div(dim, bottom_diff, buffer_data, bottom_diff); + norm_data += spatial_dim; + } + // scale the diff + if (channel_shared_) { + caffe_scal(dim, scale[0], bottom_diff); + } else { + caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, 
spatial_dim, + 1, Dtype(1), scale, sum_spatial_multiplier, + Dtype(0), buffer_data); + caffe_mul(dim, bottom_diff, buffer_data, bottom_diff); + } + bottom_data += dim; + top_diff += dim; + bottom_diff += dim; + } + } +} + + +#ifdef CPU_ONLY +STUB_GPU(NormalizeLayer); +#endif + +INSTANTIATE_CLASS(NormalizeLayer); +REGISTER_LAYER_CLASS(Normalize); + +} // namespace caffe diff --git a/src/caffe/layers/normalize_layer.cu b/src/caffe/layers/normalize_layer.cu new file mode 100644 index 00000000000..659f3eb4faa --- /dev/null +++ b/src/caffe/layers/normalize_layer.cu @@ -0,0 +1,220 @@ +#include +#include +#include + +#include "thrust/device_vector.h" + +#include "caffe/filler.hpp" +#include "caffe/layers/normalize_layer.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +// divid a matrix with vector +template +__global__ void DivBsx(const int nthreads, const Dtype* A, + const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans, + Dtype* B) { + CUDA_KERNEL_LOOP(index, nthreads) { + int c = index % cols; + int r = (index / cols) % rows; + if (trans == CblasNoTrans) { + B[index] = A[index] / v[c]; + } else { + B[index] = A[index] / v[r]; + } + } +} + +template +__global__ void MulBsx(const int nthreads, const Dtype* A, + const Dtype* v, const int rows, const int cols, const CBLAS_TRANSPOSE trans, + Dtype* B) { + CUDA_KERNEL_LOOP(index, nthreads) { + int c = index % cols; + int r = (index / cols) % rows; + if (trans == CblasNoTrans) { + B[index] = A[index] * v[c]; + } else { + B[index] = A[index] * v[r]; + } + } +} + +template +void NormalizeLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + const Dtype* bottom_data = bottom[0]->gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + Dtype* buffer_data = buffer_.mutable_gpu_data(); + Dtype* norm_data; + if (across_spatial_) { + // need to index it + norm_data = norm_.mutable_cpu_data(); + } else { + norm_data = norm_.mutable_gpu_data(); + // add eps to avoid 
overflow + caffe_gpu_set(norm_.count(), Dtype(eps_), norm_data); + } + const Dtype* scale; + if (channel_shared_) { + scale = this->blobs_[0]->cpu_data(); + } else { + scale = this->blobs_[0]->gpu_data(); + } + const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data(); + int num = bottom[0]->num(); + int dim = bottom[0]->count() / num; + int spatial_dim = bottom[0]->height() * bottom[0]->width(); + int channels = bottom[0]->channels(); + for (int n = 0; n < num; ++n) { + caffe_gpu_powx(dim, bottom_data, Dtype(2), buffer_data); + if (across_spatial_) { + Dtype normsqr; + caffe_gpu_asum(dim, buffer_data, &normsqr); + // add eps to avoid overflow + norm_data[n] = pow(normsqr+eps_, Dtype(0.5)); + caffe_gpu_scale(dim, Dtype(1.0 / norm_data[n]), bottom_data, + top_data); + } else { + // compute norm + caffe_gpu_gemv(CblasTrans, channels, spatial_dim, Dtype(1), + buffer_data, sum_channel_multiplier, Dtype(1), + norm_data); + caffe_gpu_powx(spatial_dim, norm_data, Dtype(0.5), norm_data); + // scale the layer + // NOLINT_NEXT_LINE(whitespace/operators) + DivBsx <<>>( + dim, bottom_data, norm_data, channels, spatial_dim, CblasNoTrans, + top_data); + CUDA_POST_KERNEL_CHECK; + norm_data += spatial_dim; + } + // scale the output + if (channel_shared_) { + caffe_gpu_scal(dim, scale[0], top_data); + } else { + // NOLINT_NEXT_LINE(whitespace/operators) + MulBsx <<>>( + dim, top_data, scale, channels, spatial_dim, CblasTrans, + top_data); + CUDA_POST_KERNEL_CHECK; + } + bottom_data += dim; + top_data += dim; + } +} + +template +void NormalizeLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + const Dtype* top_diff = top[0]->gpu_diff(); + const Dtype* top_data = top[0]->gpu_data(); + const Dtype* bottom_data = bottom[0]->mutable_gpu_data(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const Dtype* norm_data; + if (across_spatial_) { + // need to index it + norm_data = norm_.cpu_data(); + } else { + norm_data 
= norm_.gpu_data(); + } + const Dtype* scale; + if (channel_shared_) { + scale = this->blobs_[0]->cpu_data(); + } else { + scale = this->blobs_[0]->gpu_data(); + } + Dtype* buffer_data = buffer_.mutable_gpu_data(); + Dtype* buffer_channel = buffer_channel_.mutable_gpu_data(); + Dtype* buffer_spatial = buffer_spatial_.mutable_gpu_data(); + const Dtype* sum_channel_multiplier = sum_channel_multiplier_.gpu_data(); + const Dtype* sum_spatial_multiplier = sum_spatial_multiplier_.gpu_data(); + int count = top[0]->count(); + int num = top[0]->num(); + int dim = count / num; + int spatial_dim = top[0]->height() * top[0]->width(); + int channels = top[0]->channels(); + + // Propagate to param + if (this->param_propagate_down_[0]) { + if (channel_shared_) { + Dtype* scale_diff = this->blobs_[0]->mutable_cpu_diff(); + Dtype a; + caffe_gpu_dot(count, top_data, top_diff, &a); + scale_diff[0] += a / scale[0]; + } else { + Dtype* scale_diff = this->blobs_[0]->mutable_gpu_diff(); + for (int n = 0; n < num; ++n) { + // compute a + caffe_gpu_mul(dim, top_data+n*dim, top_diff+n*dim, buffer_data); + caffe_gpu_gemv(CblasNoTrans, channels, spatial_dim, Dtype(1), + buffer_data, sum_spatial_multiplier, Dtype(0), + buffer_channel); + // store a / scale[i] in buffer_data temporary + caffe_gpu_div(channels, buffer_channel, scale, buffer_channel); + caffe_gpu_add(channels, buffer_channel, scale_diff, scale_diff); + } + } + } + + // Propagate to bottom + if (propagate_down[0]) { + for (int n = 0; n < num; ++n) { + if (across_spatial_) { + Dtype a; + caffe_gpu_dot(dim, bottom_data, top_diff, &a); + caffe_gpu_scale(dim, a / norm_data[n] / norm_data[n], + bottom_data, bottom_diff); + caffe_gpu_sub(dim, top_diff, bottom_diff, bottom_diff); + caffe_gpu_scale(dim, Dtype(1.0 / norm_data[n]), bottom_diff, + bottom_diff); + } else { + // dot product between bottom_data and top_diff + caffe_gpu_mul(dim, bottom_data, top_diff, buffer_data); + caffe_gpu_gemv(CblasTrans, channels, spatial_dim, Dtype(1), + 
buffer_data, sum_channel_multiplier, Dtype(0), + buffer_spatial); + // scale botom_diff + // NOLINT_NEXT_LINE(whitespace/operators) + MulBsx <<>>( + dim, bottom_data, buffer_spatial, channels, spatial_dim, + CblasNoTrans, bottom_diff); + CUDA_POST_KERNEL_CHECK; + // divide by square of norm + caffe_gpu_powx(spatial_dim, norm_data, Dtype(2), buffer_spatial); + // NOLINT_NEXT_LINE(whitespace/operators) + DivBsx <<>>( + dim, bottom_diff, buffer_spatial, channels, spatial_dim, + CblasNoTrans, bottom_diff); + CUDA_POST_KERNEL_CHECK; + caffe_gpu_sub(dim, top_diff, bottom_diff, bottom_diff); + // divide by norm + // NOLINT_NEXT_LINE(whitespace/operators) + DivBsx <<>>( + dim, bottom_diff, norm_data, channels, spatial_dim, CblasNoTrans, + bottom_diff); + CUDA_POST_KERNEL_CHECK; + norm_data += spatial_dim; + } + // scale the diff + if (channel_shared_) { + caffe_gpu_scal(dim, scale[0], bottom_diff); + } else { + // NOLINT_NEXT_LINE(whitespace/operators) + MulBsx <<>>( + dim, bottom_diff, scale, channels, spatial_dim, CblasTrans, + bottom_diff); + CUDA_POST_KERNEL_CHECK; + } + bottom_data += dim; + top_diff += dim; + bottom_diff += dim; + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(NormalizeLayer); + + +} // namespace caffe diff --git a/src/caffe/layers/parameter_layer.cpp b/src/caffe/layers/parameter_layer.cpp index fbd326f8469..2397a22a826 100644 --- a/src/caffe/layers/parameter_layer.cpp +++ b/src/caffe/layers/parameter_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include "caffe/layers/parameter_layer.hpp" namespace caffe { diff --git a/src/caffe/layers/permute_layer.cpp b/src/caffe/layers/permute_layer.cpp new file mode 100644 index 00000000000..0dc793351d6 --- /dev/null +++ b/src/caffe/layers/permute_layer.cpp @@ -0,0 +1,180 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +#include "caffe/layers/permute_layer.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void Permute(const int count, Dtype* bottom_data, const bool forward, + const int* permute_order, const int* old_steps, const int* new_steps, + const int num_axes, Dtype* top_data) { +#pragma omp parallel for + for (int i = 0; i < count; ++i) { + int old_idx = 0; + int idx = i; + for (int j = 0; j < num_axes; ++j) { + int order = permute_order[j]; + old_idx += (idx / new_steps[j]) * old_steps[order]; + idx %= new_steps[j]; + } + if (forward) { + top_data[i] = bottom_data[old_idx]; + } else { + bottom_data[old_idx] = top_data[i]; + } + } +} + +template +void PermuteLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + PermuteParameter permute_param = this->layer_param_.permute_param(); + CHECK_EQ(bottom.size(), 1); + num_axes_ = bottom[0]->num_axes(); + vector orders; + // Push the specified new orders. + for (int i = 0; i < permute_param.order_size(); ++i) { + int order = permute_param.order(i); + CHECK_LT(order, num_axes_) + << "order should be less than the input dimension."; + if (std::find(orders.begin(), orders.end(), order) != orders.end()) { + LOG(FATAL) << "there are duplicate orders"; + } + orders.push_back(order); + } + // Push the rest orders. And save original step sizes for each axis. 
+ for (int i = 0; i < num_axes_; ++i) { + if (std::find(orders.begin(), orders.end(), i) == orders.end()) { + orders.push_back(i); + } + } + CHECK_EQ(num_axes_, orders.size()); + // Check if we need to reorder the data or keep it. + need_permute_ = false; + for (int i = 0; i < num_axes_; ++i) { + if (orders[i] != i) { + // As long as there is one order which is different from the natural order + // of the data, we need to permute. Otherwise, we share the data and diff. + need_permute_ = true; + break; + } + } + + vector top_shape(num_axes_, 1); + permute_order_.Reshape(num_axes_, 1, 1, 1); + old_steps_.Reshape(num_axes_, 1, 1, 1); + new_steps_.Reshape(num_axes_, 1, 1, 1); + for (int i = 0; i < num_axes_; ++i) { + permute_order_.mutable_cpu_data()[i] = orders[i]; + top_shape[i] = bottom[0]->shape(orders[i]); + } + top[0]->Reshape(top_shape); +} + +template +void PermuteLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + vector top_shape; + for (int i = 0; i < num_axes_; ++i) { + if (i == num_axes_ - 1) { + old_steps_.mutable_cpu_data()[i] = 1; + } else { + old_steps_.mutable_cpu_data()[i] = bottom[0]->count(i + 1); + } + top_shape.push_back(bottom[0]->shape(permute_order_.cpu_data()[i])); + } + top[0]->Reshape(top_shape); + + for (int i = 0; i < num_axes_; ++i) { + if (i == num_axes_ - 1) { + new_steps_.mutable_cpu_data()[i] = 1; + } else { + new_steps_.mutable_cpu_data()[i] = top[0]->count(i + 1); + } + } +} + +template +void PermuteLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + if (need_permute_) { + Dtype* bottom_data = bottom[0]->mutable_cpu_data(); + Dtype* top_data = top[0]->mutable_cpu_data(); + const int top_count = top[0]->count(); + const int* permute_order = permute_order_.cpu_data(); + const int* old_steps = old_steps_.cpu_data(); + const int* new_steps = new_steps_.cpu_data(); + bool forward = true; + Permute(top_count, bottom_data, forward, permute_order, old_steps, + new_steps, num_axes_, top_data); + } else { + // 
If there is no need to permute, we share data to save memory. + top[0]->ShareData(*bottom[0]); + } +} + +template +void PermuteLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (need_permute_) { + Dtype* top_diff = top[0]->mutable_cpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const int top_count = top[0]->count(); + const int* permute_order = permute_order_.cpu_data(); + const int* old_steps = old_steps_.cpu_data(); + const int* new_steps = new_steps_.cpu_data(); + bool forward = false; + Permute(top_count, bottom_diff, forward, permute_order, old_steps, + new_steps, num_axes_, top_diff); + } else { + // If there is no need to permute, we share diff to save memory. + bottom[0]->ShareDiff(*top[0]); + } +} + +#ifdef CPU_ONLY +STUB_GPU(PermuteLayer); +#endif + +INSTANTIATE_CLASS(PermuteLayer); +REGISTER_LAYER_CLASS(Permute); + +} // namespace caffe diff --git a/src/caffe/layers/permute_layer.cu b/src/caffe/layers/permute_layer.cu new file mode 100644 index 00000000000..82b1edbf68c --- /dev/null +++ b/src/caffe/layers/permute_layer.cu @@ -0,0 +1,78 @@ +#include +#include +#include + +#include "caffe/layers/permute_layer.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +__global__ void PermuteKernel(const int nthreads, + Dtype* const bottom_data, const bool forward, const int* permute_order, + const int* old_steps, const int* new_steps, const int num_axes, + Dtype* const top_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + int temp_idx = index; + int old_idx = 0; + for (int i = 0; i < num_axes; ++i) { + int order = permute_order[i]; + old_idx += (temp_idx / new_steps[i]) * old_steps[order]; + temp_idx %= new_steps[i]; + } + if (forward) { + top_data[index] = bottom_data[old_idx]; + } else { + bottom_data[old_idx] = top_data[index]; + } + } +} + +template +void PermuteLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + if (need_permute_) { + Dtype* 
bottom_data = bottom[0]->mutable_gpu_data(); + Dtype* top_data = top[0]->mutable_gpu_data(); + int count = top[0]->count(); + const int* permute_order = permute_order_.gpu_data(); + const int* new_steps = new_steps_.gpu_data(); + const int* old_steps = old_steps_.gpu_data(); + bool foward = true; + // NOLINT_NEXT_LINE(whitespace/operators) + PermuteKernel<<>>( + count, bottom_data, foward, permute_order, old_steps, new_steps, + num_axes_, top_data); + CUDA_POST_KERNEL_CHECK; + } else { + // If there is no need to permute, we share data to save memory. + top[0]->ShareData(*bottom[0]); + } +} + + +template +void PermuteLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (need_permute_) { + Dtype* top_diff = top[0]->mutable_gpu_diff(); + Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); + const int count = bottom[0]->count(); + const int* permute_order = permute_order_.gpu_data(); + const int* new_steps = new_steps_.gpu_data(); + const int* old_steps = old_steps_.gpu_data(); + bool foward = false; + // NOLINT_NEXT_LINE(whitespace/operators) + PermuteKernel<<>>( + count, bottom_diff, foward, permute_order, old_steps, new_steps, + num_axes_, top_diff); + CUDA_POST_KERNEL_CHECK; + } else { + // If there is no need to permute, we share diff to save memory. + bottom[0]->ShareDiff(*top[0]); + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(PermuteLayer); + +} // namespace caffe diff --git a/src/caffe/layers/pooling_layer.cpp b/src/caffe/layers/pooling_layer.cpp index 90897db0f45..1f4f342adca 100644 --- a/src/caffe/layers/pooling_layer.cpp +++ b/src/caffe/layers/pooling_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include #include @@ -7,9 +44,6 @@ namespace caffe { -using std::min; -using std::max; - template void PoolingLayer::LayerSetUp(const vector*>& bottom, const vector*>& top) { @@ -132,98 +166,36 @@ void PoolingLayer::Forward_cpu(const vector*>& bottom, const int top_count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. 
const bool use_top_mask = top.size() > 1; - int* mask = NULL; // suppress warnings about uninitalized variables - Dtype* top_mask = NULL; - // Different pooling methods. We explicitly do the switch outside the for - // loop to save time, although this results in more code. - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - // Initialize - if (use_top_mask) { - top_mask = top[1]->mutable_cpu_data(); - caffe_set(top_count, Dtype(-1), top_mask); - } else { - mask = max_idx_.mutable_cpu_data(); - caffe_set(top_count, -1, mask); - } - caffe_set(top_count, Dtype(-FLT_MAX), top_data); - // The main loop - for (int n = 0; n < bottom[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - int hstart = ph * stride_h_ - pad_h_; - int wstart = pw * stride_w_ - pad_w_; - int hend = min(hstart + kernel_h_, height_); - int wend = min(wstart + kernel_w_, width_); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - const int pool_index = ph * pooled_width_ + pw; - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { - const int index = h * width_ + w; - if (bottom_data[index] > top_data[pool_index]) { - top_data[pool_index] = bottom_data[index]; - if (use_top_mask) { - top_mask[pool_index] = static_cast(index); - } else { - mask[pool_index] = index; - } - } - } - } - } - } - // compute offset - bottom_data += bottom[0]->offset(0, 1); - top_data += top[0]->offset(0, 1); - if (use_top_mask) { - top_mask += top[0]->offset(0, 1); - } else { - mask += top[0]->offset(0, 1); - } - } - } - break; - case PoolingParameter_PoolMethod_AVE: - for (int i = 0; i < top_count; ++i) { - top_data[i] = 0; - } - // The main loop - for (int n = 0; n < bottom[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - int hstart = ph * 
stride_h_ - pad_h_; - int wstart = pw * stride_w_ - pad_w_; - int hend = min(hstart + kernel_h_, height_ + pad_h_); - int wend = min(wstart + kernel_w_, width_ + pad_w_); - int pool_size = (hend - hstart) * (wend - wstart); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - hend = min(hend, height_); - wend = min(wend, width_); - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { - top_data[ph * pooled_width_ + pw] += - bottom_data[h * width_ + w]; - } - } - top_data[ph * pooled_width_ + pw] /= pool_size; - } - } - // compute offset - bottom_data += bottom[0]->offset(0, 1); - top_data += top[0]->offset(0, 1); - } - } - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - NOT_IMPLEMENTED; - break; - default: - LOG(FATAL) << "Unknown pooling method."; + + typename PoolingCodeGeneratorForward::Callback_t* generator_func = + Forward_code_generator.Get_callback(this, top[0], use_top_mask); + // We are getting top_mask here as mutable_cpu_data is not thread safe + // and doing it inside parallel region creates of risk of race condition + void* mask = NULL; + if (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX ) { + mask = (use_top_mask) ? static_cast(top[1]->mutable_cpu_data()) : + static_cast(max_idx_.mutable_cpu_data()); } + + const int batch_size = bottom[0]->num(); + const int num_channels = bottom[0]->channels(); + +#ifdef _OPENMP + #pragma omp parallel for collapse(2) +#endif + for (int image = 0; image < batch_size; ++image) + for (int channel = 0; channel < num_channels; ++channel) + generator_func(bottom_data, + top_data, + top_count, + image, + image+1, + mask, + channel, + channel+1, + this, + use_top_mask); } template @@ -234,76 +206,39 @@ void PoolingLayer::Backward_cpu(const vector*>& top, } const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - // Different pooling methods. 
We explicitly do the switch outside the for - // loop to save time, although this results in more codes. caffe_set(bottom[0]->count(), Dtype(0), bottom_diff); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; - const int* mask = NULL; // suppress warnings about uninitialized variables - const Dtype* top_mask = NULL; - switch (this->layer_param_.pooling_param().pool()) { - case PoolingParameter_PoolMethod_MAX: - // The main loop - if (use_top_mask) { - top_mask = top[1]->cpu_data(); - } else { - mask = max_idx_.cpu_data(); - } - for (int n = 0; n < top[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - const int index = ph * pooled_width_ + pw; - const int bottom_index = - use_top_mask ? top_mask[index] : mask[index]; - bottom_diff[bottom_index] += top_diff[index]; - } - } - bottom_diff += bottom[0]->offset(0, 1); - top_diff += top[0]->offset(0, 1); - if (use_top_mask) { - top_mask += top[0]->offset(0, 1); - } else { - mask += top[0]->offset(0, 1); - } - } - } - break; - case PoolingParameter_PoolMethod_AVE: - // The main loop - for (int n = 0; n < top[0]->num(); ++n) { - for (int c = 0; c < channels_; ++c) { - for (int ph = 0; ph < pooled_height_; ++ph) { - for (int pw = 0; pw < pooled_width_; ++pw) { - int hstart = ph * stride_h_ - pad_h_; - int wstart = pw * stride_w_ - pad_w_; - int hend = min(hstart + kernel_h_, height_ + pad_h_); - int wend = min(wstart + kernel_w_, width_ + pad_w_); - int pool_size = (hend - hstart) * (wend - wstart); - hstart = max(hstart, 0); - wstart = max(wstart, 0); - hend = min(hend, height_); - wend = min(wend, width_); - for (int h = hstart; h < hend; ++h) { - for (int w = wstart; w < wend; ++w) { - bottom_diff[h * width_ + w] += - top_diff[ph * pooled_width_ + pw] / pool_size; - } - } - } - } - // offset - bottom_diff += bottom[0]->offset(0, 1); - top_diff += top[0]->offset(0, 1); - } 
- } - break; - case PoolingParameter_PoolMethod_STOCHASTIC: - NOT_IMPLEMENTED; - break; - default: - LOG(FATAL) << "Unknown pooling method."; + + typename PoolingCodeGeneratorBackward::Callback_t* generator_func = + Backward_code_generator.Get_callback(this, top[0]); + + // We are getting top_mask here as mutable_cpu_data is not thread safe + // and doing it inside parallel region creates of risk of race condition + void* mask = NULL; + if (this->layer_param_.pooling_param().pool() == + PoolingParameter_PoolMethod_MAX ) { + mask = (use_top_mask) ? static_cast(top[1]->mutable_cpu_data()) : + static_cast(max_idx_.mutable_cpu_data()); } + + const int batch_size = bottom[0]->num(); + const int num_channels = bottom[0]->channels(); + +#ifdef _OPENMP + #pragma omp parallel for collapse(2) +#endif + for (int image = 0; image < batch_size; ++image) + for (int channel = 0; channel < num_channels; ++channel) + generator_func(top_diff, + bottom_diff, + image, + image+1, + channel, + channel+1, + use_top_mask, + mask, + this); } diff --git a/src/caffe/layers/pooling_layer_impl.cpp b/src/caffe/layers/pooling_layer_impl.cpp new file mode 100644 index 00000000000..2cc416f613c --- /dev/null +++ b/src/caffe/layers/pooling_layer_impl.cpp @@ -0,0 +1,874 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include + +#include "caffe/layers/pooling_layer.hpp" + +namespace caffe { +using std::min; +using std::max; + +template +PoolingCodeGeneratorForward::PoolingCodeGeneratorForward() { + Callback = NULL; +} + +template +PoolingCodeGeneratorForward::~PoolingCodeGeneratorForward() {} + +template +typename PoolingCodeGeneratorForward::Callback_t* + PoolingCodeGeneratorForward::Get_callback( + PoolingLayer* layer, + Blob* top, + bool use_top_mask) { + // Wrapper for lazy initialization. + // Also check if top shape din't change. + // TODO: do we need to check all blobs' shapes? + // In future we may add cache for all already found options. + // Currently there is only one code for last used shape. 
+ if (Callback == NULL || + top->shape() != Layer_output_shape_signature || + Use_top_mask != use_top_mask || + Method != layer->layer_param_.pooling_param().pool()) { + Method = layer->layer_param_.pooling_param().pool(); + Use_top_mask = use_top_mask; + Layer_output_shape_signature = top->shape(); + Create_callback(layer); + } + return Callback; +} + +// Implementation of CodeGenerator classes for Pooling. +template +void PoolingCodeGeneratorForward::Naive( + const Dtype* bottom_data, + Dtype* top_data, + int top_count, + int batch_start, + int batch_end, + void* mask_ptr, + int64_t channel_start, + int64_t channel_end, + PoolingLayer* layer, + bool use_top_mask) { + int* mask = NULL; // suppress warnings about uninitalized variables + Dtype* top_mask = static_cast(mask_ptr); + + int pooled_fm_size = layer->pooled_height_ * layer->pooled_width_; + int fm_size = layer->height_ * layer->width_; + + // Different pooling methods. We explicitly do the switch outside the for + // loop to save time, although this results in more code. + switch (layer->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + // Initialize + if (!use_top_mask) { + mask = static_cast(mask_ptr); + } + + // Set starting point eg. 
memory offset corressponding to + // requested starting batch and requested starting channel + bottom_data += fm_size*(layer->channels_*batch_start + channel_start); + top_data += pooled_fm_size*(layer->channels_*batch_start + channel_start); + if (use_top_mask) { + top_mask += pooled_fm_size*(layer->channels_*batch_start + channel_start); + } else { + mask += pooled_fm_size*(layer->channels_*batch_start + channel_start); + } + + // The main loop + for (int n = batch_start; n < batch_end; ++n) { + for (int c = channel_start; c < channel_end; ++c) { + for (int ph = 0; ph < layer->pooled_height_; ++ph) { + for (int pw = 0; pw < layer->pooled_width_; ++pw) { + int hstart = ph * layer->stride_h_ - layer->pad_h_; + int wstart = pw * layer->stride_w_ - layer->pad_w_; + int hend = min(hstart + layer->kernel_h_, layer->height_); + int wend = min(wstart + layer->kernel_w_, layer->width_); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + const int pool_index = ph * layer->pooled_width_ + pw; + Dtype acc = -FLT_MAX; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + const int index = h * layer->width_ + w; + if (bottom_data[index] > acc) { + acc = bottom_data[index]; + if (use_top_mask) { + top_mask[pool_index] = static_cast(index); + } else { + mask[pool_index] = index; + } + } + } + } + top_data[pool_index] = acc; + } + } + // compute offset + bottom_data += fm_size; + top_data += pooled_fm_size; + if (use_top_mask) { + top_mask += pooled_fm_size; + } else { + mask += pooled_fm_size; + } + } + } + break; + case PoolingParameter_PoolMethod_AVE: + + bottom_data += fm_size*(layer->channels_*batch_start + channel_start); + top_data += pooled_fm_size*(layer->channels_*batch_start + channel_start); + + // The main loop + for (int n = batch_start; n < batch_end; ++n) { + for (int c = channel_start; c < channel_end; ++c) { + for (int ph = 0; ph < layer->pooled_height_; ++ph) { + for (int pw = 0; pw < layer->pooled_width_; ++pw) { + int hstart = 
ph * layer->stride_h_ - layer->pad_h_; + int wstart = pw * layer->stride_w_ - layer->pad_w_; + int hend = + min(hstart + layer->kernel_h_, layer->height_ + layer->pad_h_); + int wend = + min(wstart + layer->kernel_w_, layer->width_ + layer->pad_w_); + int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, layer->height_); + wend = min(wend, layer->width_); + Dtype acc = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + acc += bottom_data[h * layer->width_ + w]; + } + } + top_data[ph * layer->pooled_width_ + pw] = acc / pool_size; + } + } + // compute offset + bottom_data += fm_size; + top_data += pooled_fm_size; + } + } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } +} + +// Generic datatypes - use naive versions. +template +void PoolingCodeGeneratorForward::Create_callback( + PoolingLayer* layer) { + Callback = Naive; +} + +#if defined __x86_64__ || defined _M_X64 +// Here we have specialized versions for supported formats in x64 architectures. +template <> +void PoolingCodeGeneratorForward::Create_callback( + PoolingLayer* layer) { + using Xbyak::util::Cpu; + using Xbyak::Reg64; + using Xbyak::Reg32; + using Xbyak::Address; + Cpu Current_cpu; + const LayerParameter& param = layer->layer_param(); + if (Current_cpu.has(Cpu::tAVX2) && + (param.pooling_param().pool() == PoolingParameter_PoolMethod_AVE || + param.pooling_param().pool() == PoolingParameter_PoolMethod_MAX)) { + // AVX2 optimized version. + // Runtime constants. 
+ const int pooled_fm_size = layer->pooled_height_ * layer->pooled_width_; + const int fm_size = layer->height_ * layer->width_; + + const int pooled_batch_size = pooled_fm_size * layer->channels_; + const int batch_size = fm_size * layer->channels_; + + const int height_kernel_h_ = layer->height_ - layer->kernel_h_; + const int width_kernel_w_ = layer->width_ - layer->kernel_w_; + + const int height_kernel_h_pad_h_ = height_kernel_h_ + layer->pad_h_; + const int width_kernel_w_pad_w_ = width_kernel_w_ + layer->pad_w_; + + int64_t internal_mask_ptr = + (Use_top_mask + || param.pooling_param().pool() == PoolingParameter_PoolMethod_AVE) ? + 0 : reinterpret_cast(layer->max_idx_.cpu_data()); + + bool optimal_version = false; + + if (layer->pad_h_ == 0 && + layer->pad_w_ == 0 && + (layer->pooled_height_-1) * layer->stride_h_ + layer->kernel_h_ + == layer->height_ && + (layer->pooled_width_-1) * layer->stride_w_ + layer->kernel_w_ + == layer->width_) + optimal_version = true; + + // Register names. + const Reg64& reg_input_ptr = rsi; + const Reg64& reg_index_acc = rcx; + const Reg32& reg_index_acc_l = ecx; + const Reg64& reg_index_cnt = rdi; + + const Reg64& reg_scratch0 = r8; + const Reg64& reg_scratch1 = r9; + const Reg64& reg_scratch2 = r10; + const Reg64& reg_scratch3 = r11; + const Reg64& reg_scratch4 = r12; + const Reg64& reg_scratch5 = r13; + const Reg64& reg_scratch6 = r14; + const Reg64& reg_scratch7 = r15; + const Reg64& reg_scratch8 = rbx; + + const Reg64& reg_arg0 = rdi; + const Reg64& reg_arg1 = rsi; + const Reg64& reg_arg2 = rdx; + const Reg64& reg_arg3 = rcx; + const Reg64& reg_arg4 = r8; + const Reg64& reg_arg5 = r9; + const Address& stackarg_channel_start = qword[rbp + 16]; + const Address& stackarg_channel_end = qword[rbp + 24]; + + const Reg64& reg_mul_param = rax; + const Reg64& reg_mul_result_l = rax; + + // Stack variable names residing inside red zone. 
+ int stack_qwords = 0; + const Address& stack_bottom_data_ptr = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_top_data_ptr = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_top_count = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_batch_start = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_batch_end = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_top_mask_ptr = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_batch_cnt = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_channel_cnt = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_out_w_cnt = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_out_h_cnt = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_pool_index = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_top_mask_orig = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_top_orig = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_bottom_orig = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_min_float = dword[rbp - (++stack_qwords * 8)]; + const Address& stack_wstart0 = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_index0 = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_true_height = qword[rbp - (++stack_qwords * 8)]; + const Address& stack_true_width = qword[rbp - (++stack_qwords * 8)]; + + // ASSEMBLY STARTS HERE. + // It seems we are regenerating the code due to output reshape + if (Callback) + reset(); + + // Prologue. + push(rbp); + mov(rbp, rsp); + sub(rsp, stack_qwords * 8); + + // Save r12-r15 and rbx registers. + push(r12); push(r13); push(r14); push(r15); push(rbx); + + // Prepare default accumulator. (and bypass memory aliasing warning...) 
+ float min_float = -FLT_MAX; + void* min_float_ptr = &min_float; + uint32_t min_float_cast = *reinterpret_cast(min_float_ptr); + mov(stack_min_float, min_float_cast); + vbroadcastss(xmm15, stack_min_float); + + // Move register arguments to the stack, + // we gonna need the registers for other purposes. + mov(stack_bottom_orig, reg_arg0); + mov(stack_top_orig, reg_arg1); + mov(stack_top_count, reg_arg2); + mov(stack_batch_start, reg_arg3); + mov(stack_batch_end, reg_arg4); + + if (param.pooling_param().pool() == PoolingParameter_PoolMethod_MAX) { + if (Use_top_mask) { + mov(stack_top_mask_orig, reg_arg5); + } else { + mov(reg_scratch0, internal_mask_ptr); + mov(stack_top_mask_orig, reg_scratch0); + } + } + + // Further arguments on stack + // (layer_ptr and use_top_mask) are ingored in this implementation. + + // Iterate through batches. + mov(reg_scratch0, stack_batch_start); + mov(stack_batch_cnt, reg_scratch0); + L("batch_loop_start"); + mov(reg_scratch0, stack_batch_end); + cmp(stack_batch_cnt, reg_scratch0); + jae("batch_loop_end", T_NEAR); + + // Iterate through channels. + mov(reg_scratch0, stackarg_channel_start); + mov(stack_channel_cnt, reg_scratch0); + L("channel_loop_start"); + mov(reg_scratch0, stack_channel_cnt); + cmp(reg_scratch0, stackarg_channel_end); + jae("channel_loop_end", T_NEAR); + + // Compute batch/channel offsets for buffers. 
+ // input: (batch_size*batch+fm_size*fm)*4 + ptr + mov(reg_mul_param, batch_size); + mul(stack_batch_cnt); + mov(reg_scratch0, reg_mul_result_l); + mov(reg_mul_param, fm_size); + mul(stack_channel_cnt); + add(reg_scratch0, reg_mul_result_l); + shl(reg_scratch0, 2); + add(reg_scratch0, stack_bottom_orig); + mov(stack_bottom_data_ptr, reg_scratch0); + + // output: (pooled_batch_size*batch+pooled_fm_size*fm)*4 + ptr + // mask same + mov(reg_mul_param, pooled_batch_size); + mul(stack_batch_cnt); + mov(reg_scratch0, reg_mul_result_l); + mov(reg_mul_param, pooled_fm_size); + mul(stack_channel_cnt); + add(reg_scratch0, reg_mul_result_l); + shl(reg_scratch0, 2); + mov(reg_scratch1, reg_scratch0); + + add(reg_scratch0, stack_top_orig); + mov(stack_top_data_ptr, reg_scratch0); + + if (param.pooling_param().pool() == PoolingParameter_PoolMethod_MAX) { + add(reg_scratch1, stack_top_mask_orig); + mov(stack_top_mask_ptr, reg_scratch1); + } + + // Iterate through output height. + mov(stack_out_h_cnt, 0); + L("out_h_loop_start"); + cmp(stack_out_h_cnt, layer->pooled_height_); + jae("out_h_loop_end", T_NEAR); + + if (optimal_version) { + // index_in = ph * stride_h_ * width_; stored in reg_scratch7 + mov(reg_mul_param, layer->stride_h_ * layer->width_); + imul(stack_out_h_cnt); + mov(reg_scratch7, reg_mul_result_l); + + // effective_in = index_in*4 + input_ptr; stored in reg_scratch5 + mov(reg_scratch0, stack_bottom_data_ptr); + lea(reg_scratch5, ptr[reg_scratch7*4 + reg_scratch0]); + + // index_out = ph * pooled_width_; stored in reg_scratch6 + mov(reg_mul_param, layer->pooled_width_); + imul(stack_out_h_cnt); + mov(reg_scratch6, reg_mul_result_l); + + // effective_out = index_out*4 + output_ptr; stored in reg_scratch4 + mov(reg_scratch0, stack_top_data_ptr); + lea(reg_scratch4, ptr[reg_scratch6*4 + reg_scratch0]); + + if (param.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) { + // effective_mask = index_out*4 + mask_ptr; stored in reg_scratch3 + mov(reg_scratch0, 
stack_top_mask_ptr); + lea(reg_scratch3, ptr[reg_scratch6*4 + reg_scratch0]); + } + + // Iterate through output width. + mov(reg_scratch8, 0); + L("out_w_loop_start"); + cmp(reg_scratch8, layer->pooled_width_); + jae("out_w_loop_end", T_NEAR); + + if (param.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) { + mov(reg_index_cnt, reg_scratch7); + movaps(xmm0, xmm15); + for (int kernel_h = 0; + kernel_h < layer->kernel_h_; ++kernel_h) { + for (int kernel_w = 0; + kernel_w < layer->kernel_w_; ++kernel_w) { + ucomiss(xmm0, + ptr[reg_scratch5 + + kernel_w*sizeof(float) + + kernel_h*layer->width_*sizeof(float)]); + maxss(xmm0, + ptr[reg_scratch5 + + kernel_w*sizeof(float) + + kernel_h*layer->width_*sizeof(float)]); + cmovb(reg_index_acc, reg_index_cnt); + + if (kernel_w+1 < layer->kernel_w_) + inc(reg_index_cnt); + } + add(reg_index_cnt, layer->width_ - layer->kernel_w_ + 1); + } + + if (Use_top_mask) { + // Use float mask, convert value to float first. + cvtsi2ss(xmm1, reg_index_acc); + movss(ptr[reg_scratch3], xmm1); + } else { + // Use 32bit integer mask. 
+ mov(dword[reg_scratch3], reg_index_acc_l); + } + } else if (param.pooling_param().pool() + == PoolingParameter_PoolMethod_AVE) { + xorps(xmm0, xmm0); + for (int kernel_h = 0; + kernel_h < layer->kernel_h_; ++kernel_h) + for (int kernel_w = 0; + kernel_w < layer->kernel_w_; ++kernel_w) + addss(xmm0, + ptr[reg_scratch5 + + kernel_w*sizeof(float) + + kernel_h*layer->width_*sizeof(float)]); + + mov(reg_scratch0, layer->kernel_h_*layer->kernel_w_); + cvtsi2ss(xmm1, reg_scratch0); + divss(xmm0, xmm1); + } + + movss(ptr[reg_scratch4], xmm0); + + add(reg_scratch5, layer->stride_w_*sizeof(float)); + add(reg_scratch4, sizeof(float)); + + if (param.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) { + add(reg_scratch7, layer->stride_w_); + add(reg_scratch3, sizeof(float)); + } + + inc(reg_scratch8); + jmp("out_w_loop_start", T_NEAR); + L("out_w_loop_end"); + } else { + // hstart = ph * stride_h_ - pad_h_; stored in reg_scratch3 + mov(reg_mul_param, layer->stride_h_); + imul(stack_out_h_cnt); + sub(reg_mul_result_l, layer->pad_h_); + mov(reg_scratch3, reg_mul_result_l); + + if (param.pooling_param().pool() + == PoolingParameter_PoolMethod_AVE) { + // true_height = + // min(hstart, height_ - kernel_h_ + pad_h_) + kernel_h_ - hstart; + // required to compute pooling size + mov(reg_scratch0, reg_scratch3); + mov(reg_scratch1, height_kernel_h_pad_h_); + cmp(reg_scratch0, reg_scratch1); + cmovg(reg_scratch0, reg_scratch1); + add(reg_scratch0, layer->kernel_h_); + sub(reg_scratch0, reg_scratch3); + mov(stack_true_height, reg_scratch0); + } + + // int hend + // = min(hstart, height_ - kernel_h_) + kernel_h_; + // stored in reg_scratch6 + mov(reg_scratch0, reg_scratch3); + mov(reg_scratch1, height_kernel_h_); + cmp(reg_scratch0, reg_scratch1); + cmovg(reg_scratch0, reg_scratch1); + add(reg_scratch0, layer->kernel_h_); + mov(reg_scratch6, reg_scratch0); + + // hstart = max(hstart, 0); stored in reg_scratch3 + mov(reg_scratch0, reg_scratch3); + mov(reg_scratch1, 0); + 
cmp(reg_scratch0, reg_scratch1); + cmovl(reg_scratch0, reg_scratch1); + mov(reg_scratch3, reg_scratch0); + + // wstart0 = -pad_w_ + mov(stack_wstart0, -layer->pad_w_); + + // pool_index = (ph * pooled_width_)*4; + mov(reg_mul_param, layer->pooled_width_ * sizeof(float)); + imul(stack_out_h_cnt); + mov(stack_pool_index, reg_mul_result_l); + + // index0 = hstart * width_; + mov(reg_mul_param, layer->width_); + imul(reg_scratch3); + mov(stack_index0, reg_mul_result_l); + + // Iterate through output width. + mov(stack_out_w_cnt, 0); + L("out_w_loop_start"); + cmp(stack_out_w_cnt, layer->pooled_width_); + jae("out_w_loop_end", T_NEAR); + + // wend = min(wstart0, width_ - kernel_w_) + kernel_w_; + // stored in reg_scratch7 + mov(reg_scratch7, stack_wstart0); + mov(reg_scratch1, width_kernel_w_); + cmp(reg_scratch7, reg_scratch1); + cmovg(reg_scratch7, reg_scratch1); + add(reg_scratch7, layer->kernel_w_); + + if (param.pooling_param().pool() + == PoolingParameter_PoolMethod_AVE) { + // true_width = + // min(wstart0, width_ - kernel_w_ + pad_h_) + // + kernel_w_ - wstart0; + // required to compute pooling size + mov(reg_scratch0, stack_wstart0); + mov(reg_scratch1, width_kernel_w_pad_w_); + cmp(reg_scratch0, reg_scratch1); + cmovg(reg_scratch0, reg_scratch1); + add(reg_scratch0, layer->kernel_w_); + sub(reg_scratch0, stack_wstart0); + mov(stack_true_width, reg_scratch0); + } + + // wstart = max(wstart0, 0); stored in reg_scratch8 + mov(reg_scratch8, stack_wstart0); + mov(reg_scratch1, 0); + cmp(reg_scratch8, reg_scratch1); + cmovl(reg_scratch8, reg_scratch1); + + // wstart0 += stride_w_ + add(stack_wstart0, layer->stride_w_); + + // num_elements_to_do = wend - wstart; stored in reg_scratch7 + sub(reg_scratch7, reg_scratch8); + + // const int index = index0 + wstart; stored in reg_scratch5 + mov(reg_scratch5, stack_index0); + add(reg_scratch5, reg_scratch8); + + // const int effective_ptr = (index)*4 + input_ptr; + // stored in reg_scratch4 + lea(reg_scratch4, 
ptr[reg_scratch5*4]); + add(reg_scratch4, stack_bottom_data_ptr); + + // Prepare accumulators. + if (param.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) { + movaps(xmm0, xmm15); + mov(reg_index_acc, -1); + } else if (param.pooling_param().pool() + == PoolingParameter_PoolMethod_AVE) { + xorps(xmm0, xmm0); + } + + // Iterate through kernel height. + // We are reusing mulparam reg here (rax) + // but we won't be MULing inside. + mov(reg_mul_param, reg_scratch3); + align(8); + L("kern_h_loop_start"); + cmp(reg_mul_param, reg_scratch6); + jae("kern_h_loop_end", T_NEAR); + + mov(reg_input_ptr, reg_scratch4); + mov(reg_index_cnt, reg_scratch5); + lea(reg_scratch2, ptr[reg_scratch5 + reg_scratch7]); + + // Iterate through kernel width. + align(8); + L("kern_w_loop_start"); + cmp(reg_index_cnt, reg_scratch2); + jae("kern_w_loop_end", T_NEAR); + + if (param.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) { + movss(xmm1, ptr[reg_input_ptr]); + ucomiss(xmm1, xmm0); + maxss(xmm0, xmm1); + cmova(reg_index_acc, reg_index_cnt); + } else if (param.pooling_param().pool() + == PoolingParameter_PoolMethod_AVE) { + addss(xmm0, ptr[reg_input_ptr]); + } + + add(reg_input_ptr, sizeof(float)); + + inc(reg_index_cnt); + jmp("kern_w_loop_start", T_NEAR); + L("kern_w_loop_end"); + + add(reg_scratch4, layer->width_ * sizeof(float)); + add(reg_scratch5, layer->width_); + + inc(reg_mul_param); + jmp("kern_h_loop_start", T_NEAR); + L("kern_h_loop_end"); + + // Save accumulators. + if (param.pooling_param().pool() + == PoolingParameter_PoolMethod_AVE) { + mov(reg_mul_param, stack_true_height); + imul(stack_true_width); + cvtsi2ss(xmm1, reg_mul_result_l); + divss(xmm0, xmm1); + } + + mov(reg_scratch0, stack_pool_index); + mov(reg_scratch1, stack_top_data_ptr); + movss(ptr[reg_scratch0 + reg_scratch1], xmm0); + + if (param.pooling_param().pool() + == PoolingParameter_PoolMethod_MAX) { + if (Use_top_mask) { + // Use float mask, convert value to float first. 
+ cvtsi2ss(xmm0, reg_index_acc); + mov(reg_scratch1, stack_top_mask_ptr); + movss(ptr[reg_scratch0 + reg_scratch1], xmm0); + } else { // Use 32bit integer mask. + mov(reg_scratch1, stack_top_mask_ptr); + mov(dword[reg_scratch0 + reg_scratch1], reg_index_acc_l); + } + } + + // Update pool_index. + add(stack_pool_index, sizeof(float)); + + inc(stack_out_w_cnt); + jmp("out_w_loop_start", T_NEAR); + L("out_w_loop_end"); + } + + inc(stack_out_h_cnt); + jmp("out_h_loop_start", T_NEAR); + L("out_h_loop_end"); + + inc(stack_channel_cnt); + jmp("channel_loop_start", T_NEAR); + L("channel_loop_end"); + + inc(stack_batch_cnt); + jmp("batch_loop_start", T_NEAR); + L("batch_loop_end"); + + // Restore r12-r15 and rbx registers. + pop(rbx); pop(r15); pop(r14); pop(r13); pop(r12); + + add(rsp, stack_qwords * 8); + pop(rbp); + ret(); + + Callback = getCode(); + } else { // Take naive path. + Callback = Naive; + } +} +#endif + +template +PoolingCodeGeneratorBackward::PoolingCodeGeneratorBackward() { + Callback = NULL; +} + +template +PoolingCodeGeneratorBackward::~PoolingCodeGeneratorBackward() { +} + +template +typename PoolingCodeGeneratorBackward::Callback_t* + PoolingCodeGeneratorBackward::Get_callback( + PoolingLayer* layer, Blob* top) { + // Wrapper for lazy initialization. + // Also check if top shape din't change. + // TODO: do we need to check all blobs' shapes? + // In future we may add cache for all already found options. + // Currently there is only one code for last used shape. 
+ if (Callback == NULL || top->shape() != layer_output_shape_signature) { + layer_output_shape_signature = top->shape(); + Create_callback(layer); + } + + return Callback; +} + +template +void PoolingCodeGeneratorBackward::Naive( + const Dtype* top_diff, + Dtype* bottom_diff, + int batch_start, + int batch_end, + int64_t channel_start, + int64_t channel_end, + bool use_top_mask, + const void* mask_ptr, + PoolingLayer* layer) { + const int* mask = NULL; // suppress warnings about uninitialized variables + const Dtype* top_mask = static_cast(mask_ptr); + + int pooled_fm_size = layer->pooled_height_ * layer->pooled_width_; + int fm_size = layer->height_ * layer->width_; + + switch (layer->layer_param_.pooling_param().pool()) { + case PoolingParameter_PoolMethod_MAX: + // The main loop + if (!use_top_mask) { + mask = static_cast(mask_ptr); + } + bottom_diff += fm_size*(layer->channels_*batch_start + channel_start); + top_diff += pooled_fm_size*(layer->channels_*batch_start + channel_start); + if (use_top_mask) { + top_mask += pooled_fm_size*(layer->channels_*batch_start + channel_start); + } else { + mask += pooled_fm_size*(layer->channels_*batch_start + channel_start); + } + + for (int n = batch_start; n < batch_end; ++n) { + if (use_top_mask) { + for (int c = channel_start; c < channel_end; ++c) { + int index0 = 0; + for (int ph = 0; ph < layer->pooled_height_; ++ph) { + int index = index0; + index0 += layer->pooled_width_; + for (int pw = 0; pw < layer->pooled_width_; ++pw) { + const int bottom_index = top_mask[index]; + bottom_diff[bottom_index] += top_diff[index]; + ++index; + } + } + bottom_diff += fm_size; + top_diff += pooled_fm_size; + top_mask += pooled_fm_size; + } + } else { + for (int c = channel_start; c < channel_end; ++c) { + int index0 = 0; + for (int ph = 0; ph < layer->pooled_height_; ++ph) { + int index = index0; + index0 += layer->pooled_width_; + for (int pw = 0; pw < layer->pooled_width_; ++pw) { + const int bottom_index = mask[index]; + 
bottom_diff[bottom_index] += top_diff[index]; + ++index; + } + } + bottom_diff += fm_size; + top_diff += pooled_fm_size; + mask += pooled_fm_size; + } + } + } + break; + case PoolingParameter_PoolMethod_AVE: + + bottom_diff += fm_size*(layer->channels_*batch_start + channel_start); + top_diff += pooled_fm_size*(layer->channels_*batch_start + channel_start); + // The main loop + for (int n = batch_start; n < batch_end; ++n) { + for (int c = channel_start; c < channel_end; ++c) { + for (int ph = 0; ph < layer->pooled_height_; ++ph) { + for (int pw = 0; pw < layer->pooled_width_; ++pw) { + int hstart = ph * layer->stride_h_ - layer->pad_h_; + int wstart = pw * layer->stride_w_ - layer->pad_w_; + int hend = + min(hstart + layer->kernel_h_, layer->height_ + layer->pad_h_); + int wend = + min(wstart + layer->kernel_w_, layer->width_ + layer->pad_w_); + int pool_size = (hend - hstart) * (wend - wstart); + hstart = max(hstart, 0); + wstart = max(wstart, 0); + hend = min(hend, layer->height_); + wend = min(wend, layer->width_); + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + bottom_diff[h * layer->width_ + w] += + top_diff[ph * layer->pooled_width_ + pw] / pool_size; + } + } + } + } + // offset + bottom_diff += fm_size; + top_diff += pooled_fm_size; + } + } + break; + case PoolingParameter_PoolMethod_STOCHASTIC: + NOT_IMPLEMENTED; + break; + default: + LOG(FATAL) << "Unknown pooling method."; + } +} + +template +void PoolingCodeGeneratorBackward::Create_callback( + PoolingLayer* layer) { + Callback = Naive; +} + +#if defined __x86_64__ || defined _M_X64 +// Here we have specialized versions for supported formats in x64 architectures. +/* +template <> +void PoolingCodeGeneratorBackward::Create_callback( + PoolingLayer* layer) +{ + using namespace ::Xbyak; + util::Cpu Current_cpu; + if (Current_cpu.has(util::Cpu::tAVX2)) + { // AVX2 optimized version. 
+ const LayerParameter& param = layer->layer_param(); + + // It seems we are regenerating the code due to output reshape. + if (Callback) + reset(); + + Callback = getCode(); + } + else + { // Take naive path. + Callback = Naive; + } +}*/ + +#endif + +INSTANTIATE_CLASS(PoolingCodeGeneratorForward); +INSTANTIATE_CLASS(PoolingCodeGeneratorBackward); + +} // namespace caffe + diff --git a/src/caffe/layers/power_layer.cpp b/src/caffe/layers/power_layer.cpp index d99b77ca839..f1b9d131bed 100644 --- a/src/caffe/layers/power_layer.cpp +++ b/src/caffe/layers/power_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/power_layer.hpp" diff --git a/src/caffe/layers/prelu_layer.cpp b/src/caffe/layers/prelu_layer.cpp index 853181bd5a2..aa102d7f667 100644 --- a/src/caffe/layers/prelu_layer.cpp +++ b/src/caffe/layers/prelu_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/layers/prior_box_layer.cpp b/src/caffe/layers/prior_box_layer.cpp new file mode 100644 index 00000000000..151e010b322 --- /dev/null +++ b/src/caffe/layers/prior_box_layer.cpp @@ -0,0 +1,261 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include +#include + +#include "caffe/layers/prior_box_layer.hpp" + +namespace caffe { + +template +void PriorBoxLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + const PriorBoxParameter& prior_box_param = + this->layer_param_.prior_box_param(); + CHECK_GT(prior_box_param.min_size_size(), 0) << "must provide min_size."; + for (int i = 0; i < prior_box_param.min_size_size(); ++i) { + min_sizes_.push_back(prior_box_param.min_size(i)); + CHECK_GT(min_sizes_.back(), 0) << "min_size must be positive."; + } + aspect_ratios_.clear(); + aspect_ratios_.push_back(1.); + flip_ = prior_box_param.flip(); + for (int i = 0; i < prior_box_param.aspect_ratio_size(); ++i) { + float ar = prior_box_param.aspect_ratio(i); + bool already_exist = false; + for (int j = 0; j < aspect_ratios_.size(); ++j) { + if (fabs(ar - aspect_ratios_[j]) < 1e-6) { + already_exist = true; + break; + } + } + if (!already_exist) { + aspect_ratios_.push_back(ar); + if (flip_) { + aspect_ratios_.push_back(1./ar); + } + } 
+ } + num_priors_ = aspect_ratios_.size() * min_sizes_.size(); + if (prior_box_param.max_size_size() > 0) { + CHECK_EQ(prior_box_param.min_size_size(), prior_box_param.max_size_size()); + for (int i = 0; i < prior_box_param.max_size_size(); ++i) { + max_sizes_.push_back(prior_box_param.max_size(i)); + CHECK_GT(max_sizes_[i], min_sizes_[i]) + << "max_size must be greater than min_size."; + num_priors_ += 1; + } + } + clip_ = prior_box_param.clip(); + if (prior_box_param.variance_size() > 1) { + // Must and only provide 4 variance. + CHECK_EQ(prior_box_param.variance_size(), 4); + for (int i = 0; i < prior_box_param.variance_size(); ++i) { + CHECK_GT(prior_box_param.variance(i), 0); + variance_.push_back(prior_box_param.variance(i)); + } + } else if (prior_box_param.variance_size() == 1) { + CHECK_GT(prior_box_param.variance(0), 0); + variance_.push_back(prior_box_param.variance(0)); + } else { + // Set default to 0.1. + variance_.push_back(0.1); + } + + if (prior_box_param.has_img_h() || prior_box_param.has_img_w()) { + CHECK(!prior_box_param.has_img_size()) + << "Either img_size or img_h/img_w should be specified; not both."; + img_h_ = prior_box_param.img_h(); + CHECK_GT(img_h_, 0) << "img_h should be larger than 0."; + img_w_ = prior_box_param.img_w(); + CHECK_GT(img_w_, 0) << "img_w should be larger than 0."; + } else if (prior_box_param.has_img_size()) { + const int img_size = prior_box_param.img_size(); + CHECK_GT(img_size, 0) << "img_size should be larger than 0."; + img_h_ = img_size; + img_w_ = img_size; + } else { + img_h_ = 0; + img_w_ = 0; + } + + if (prior_box_param.has_step_h() || prior_box_param.has_step_w()) { + CHECK(!prior_box_param.has_step()) + << "Either step or step_h/step_w should be specified; not both."; + step_h_ = prior_box_param.step_h(); + CHECK_GT(step_h_, 0.) << "step_h should be larger than 0."; + step_w_ = prior_box_param.step_w(); + CHECK_GT(step_w_, 0.) 
<< "step_w should be larger than 0."; + } else if (prior_box_param.has_step()) { + const float step = prior_box_param.step(); + CHECK_GT(step, 0) << "step should be larger than 0."; + step_h_ = step; + step_w_ = step; + } else { + step_h_ = 0; + step_w_ = 0; + } + + offset_ = prior_box_param.offset(); +} + +template +void PriorBoxLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + const int layer_width = bottom[0]->width(); + const int layer_height = bottom[0]->height(); + vector top_shape(3, 1); + // Since all images in a batch have same height and width, we only need to + // generate one set of priors which can be shared across all images. + top_shape[0] = 1; + // 2 channels. First channel stores the mean of each prior coordinate. + // Second channel stores the variance of each prior coordinate. + top_shape[1] = 2; + top_shape[2] = layer_width * layer_height * num_priors_ * 4; + CHECK_GT(top_shape[2], 0); + top[0]->Reshape(top_shape); +} + +template +void PriorBoxLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + const int layer_width = bottom[0]->width(); + const int layer_height = bottom[0]->height(); + int img_width, img_height; + if (img_h_ == 0 || img_w_ == 0) { + img_width = bottom[1]->width(); + img_height = bottom[1]->height(); + } else { + img_width = img_w_; + img_height = img_h_; + } + float step_w, step_h; + if (step_w_ == 0 || step_h_ == 0) { + step_w = static_cast(img_width) / layer_width; + step_h = static_cast(img_height) / layer_height; + } else { + step_w = step_w_; + step_h = step_h_; + } + Dtype* top_data = top[0]->mutable_cpu_data(); + int dim = layer_height * layer_width * num_priors_ * 4; + int idx = 0; + for (int h = 0; h < layer_height; ++h) { + for (int w = 0; w < layer_width; ++w) { + float center_x = (w + offset_) * step_w; + float center_y = (h + offset_) * step_h; + float box_width, box_height; + for (int s = 0; s < min_sizes_.size(); ++s) { + int min_size_ = min_sizes_[s]; + // first prior: 
aspect_ratio = 1, size = min_size + box_width = box_height = min_size_; + // xmin + top_data[idx++] = (center_x - box_width / 2.) / img_width; + // ymin + top_data[idx++] = (center_y - box_height / 2.) / img_height; + // xmax + top_data[idx++] = (center_x + box_width / 2.) / img_width; + // ymax + top_data[idx++] = (center_y + box_height / 2.) / img_height; + + if (max_sizes_.size() > 0) { + CHECK_EQ(min_sizes_.size(), max_sizes_.size()); + int max_size_ = max_sizes_[s]; + // second prior: aspect_ratio = 1, size = sqrt(min_size * max_size) + box_width = box_height = sqrt(min_size_ * max_size_); + // xmin + top_data[idx++] = (center_x - box_width / 2.) / img_width; + // ymin + top_data[idx++] = (center_y - box_height / 2.) / img_height; + // xmax + top_data[idx++] = (center_x + box_width / 2.) / img_width; + // ymax + top_data[idx++] = (center_y + box_height / 2.) / img_height; + } + + // rest of priors + for (int r = 0; r < aspect_ratios_.size(); ++r) { + float ar = aspect_ratios_[r]; + if (fabs(ar - 1.) < 1e-6) { + continue; + } + box_width = min_size_ * sqrt(ar); + box_height = min_size_ / sqrt(ar); + // xmin + top_data[idx++] = (center_x - box_width / 2.) / img_width; + // ymin + top_data[idx++] = (center_y - box_height / 2.) / img_height; + // xmax + top_data[idx++] = (center_x + box_width / 2.) / img_width; + // ymax + top_data[idx++] = (center_y + box_height / 2.) / img_height; + } + } + } + } + // clip the prior's coordinate such that it is within [0, 1] + if (clip_) { + for (int d = 0; d < dim; ++d) { + top_data[d] = std::min(std::max(top_data[d], 0.), 1.); + } + } + // set the variance. 
+ top_data += top[0]->offset(0, 1); + if (variance_.size() == 1) { + caffe_set(dim, Dtype(variance_[0]), top_data); + } else { + int count = 0; + for (int h = 0; h < layer_height; ++h) { + for (int w = 0; w < layer_width; ++w) { + for (int i = 0; i < num_priors_; ++i) { + for (int j = 0; j < 4; ++j) { + top_data[count] = variance_[j]; + ++count; + } + } + } + } + } +} + +INSTANTIATE_CLASS(PriorBoxLayer); +REGISTER_LAYER_CLASS(PriorBox); + +} // namespace caffe diff --git a/src/caffe/layers/recurrent_layer.cpp b/src/caffe/layers/recurrent_layer.cpp index e0c82773392..3e3323f6cf0 100644 --- a/src/caffe/layers/recurrent_layer.cpp +++ b/src/caffe/layers/recurrent_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/layers/reduction_layer.cpp b/src/caffe/layers/reduction_layer.cpp index fa46487e6a3..8d63e2d2940 100644 --- a/src/caffe/layers/reduction_layer.cpp +++ b/src/caffe/layers/reduction_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/reduction_layer.hpp" diff --git a/src/caffe/layers/relu_layer.cpp b/src/caffe/layers/relu_layer.cpp index 92a729c81bd..4369a1a412b 100644 --- a/src/caffe/layers/relu_layer.cpp +++ b/src/caffe/layers/relu_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include @@ -12,6 +49,9 @@ void ReLULayer::Forward_cpu(const vector*>& bottom, Dtype* top_data = top[0]->mutable_cpu_data(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); +#ifdef _OPENMP +#pragma omp parallel for +#endif for (int i = 0; i < count; ++i) { top_data[i] = std::max(bottom_data[i], Dtype(0)) + negative_slope * std::min(bottom_data[i], Dtype(0)); @@ -28,6 +68,9 @@ void ReLULayer::Backward_cpu(const vector*>& top, Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); +#ifdef _OPENMP +#pragma omp parallel for +#endif for (int i = 0; i < count; ++i) { bottom_diff[i] = top_diff[i] * ((bottom_data[i] > 0) + negative_slope * (bottom_data[i] <= 0)); diff --git a/src/caffe/layers/reshape_layer.cpp b/src/caffe/layers/reshape_layer.cpp index 45dd0902d6a..20854ee24ab 100644 --- a/src/caffe/layers/reshape_layer.cpp +++ b/src/caffe/layers/reshape_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/reshape_layer.hpp" diff --git a/src/caffe/layers/rnn_layer.cpp b/src/caffe/layers/rnn_layer.cpp index f62ae8c77de..7f67b4f9667 100644 --- a/src/caffe/layers/rnn_layer.cpp +++ b/src/caffe/layers/rnn_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/layers/scale_layer.cpp b/src/caffe/layers/scale_layer.cpp index ecdbb123e31..a28c175f325 100644 --- a/src/caffe/layers/scale_layer.cpp +++ b/src/caffe/layers/scale_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include diff --git a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp index 10ac9470832..c5695d81fd7 100644 --- a/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/layers/sigmoid_cross_entropy_loss_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp" diff --git a/src/caffe/layers/sigmoid_layer.cpp b/src/caffe/layers/sigmoid_layer.cpp index 85fd9676812..e58397a86c3 100644 --- a/src/caffe/layers/sigmoid_layer.cpp +++ b/src/caffe/layers/sigmoid_layer.cpp @@ -1,8 +1,49 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include #include "caffe/layers/sigmoid_layer.hpp" +#ifdef _OPENMP +#include +#endif + namespace caffe { template @@ -16,6 +57,9 @@ void SigmoidLayer::Forward_cpu(const vector*>& bottom, const Dtype* bottom_data = bottom[0]->cpu_data(); Dtype* top_data = top[0]->mutable_cpu_data(); const int count = bottom[0]->count(); +#ifdef _OPENMP + #pragma omp parallel for +#endif for (int i = 0; i < count; ++i) { top_data[i] = sigmoid(bottom_data[i]); } @@ -30,6 +74,9 @@ void SigmoidLayer::Backward_cpu(const vector*>& top, const Dtype* top_diff = top[0]->cpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); const int count = bottom[0]->count(); +#ifdef _OPENMP + #pragma omp parallel for +#endif for (int i = 0; i < count; ++i) { const Dtype sigmoid_x = top_data[i]; bottom_diff[i] = top_diff[i] * sigmoid_x * (1. 
- sigmoid_x); diff --git a/src/caffe/layers/silence_layer.cpp b/src/caffe/layers/silence_layer.cpp index b2f85c52a0f..6a0d9c4f368 100644 --- a/src/caffe/layers/silence_layer.cpp +++ b/src/caffe/layers/silence_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/silence_layer.hpp" diff --git a/src/caffe/layers/slice_layer.cpp b/src/caffe/layers/slice_layer.cpp index 759beafe0d9..71f87a9a3bb 100644 --- a/src/caffe/layers/slice_layer.cpp +++ b/src/caffe/layers/slice_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/layers/smooth_L1_loss_layer.cpp b/src/caffe/layers/smooth_L1_loss_layer.cpp new file mode 100644 index 00000000000..384ce3c9f92 --- /dev/null +++ b/src/caffe/layers/smooth_L1_loss_layer.cpp @@ -0,0 +1,145 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +// ------------------------------------------------------------------ +// Fast R-CNN +// copyright (c) 2015 Microsoft +// Licensed under The MIT License [see fast-rcnn/LICENSE for details] +// Written by Ross Girshick +// Modified by Wei Liu +// ------------------------------------------------------------------ + +#include + +#include "caffe/layers/smooth_L1_loss_layer.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +void SmoothL1LossLayer::LayerSetUp( + const vector*>& bottom, const vector*>& top) { + LossLayer::LayerSetUp(bottom, top); + has_weights_ = (bottom.size() == 3); +} + +template +void SmoothL1LossLayer::Reshape( + const vector*>& bottom, const vector*>& top) { + LossLayer::Reshape(bottom, top); + CHECK_EQ(bottom[0]->channels(), bottom[1]->channels()); + CHECK_EQ(bottom[0]->height(), bottom[1]->height()); + CHECK_EQ(bottom[0]->width(), bottom[1]->width()); + if (has_weights_) { + CHECK_EQ(bottom[0]->channels(), bottom[2]->channels()); + CHECK_EQ(bottom[0]->height(), 
bottom[2]->height()); + CHECK_EQ(bottom[0]->width(), bottom[2]->width()); + } + diff_.Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); + errors_.Reshape(bottom[0]->num(), bottom[0]->channels(), + bottom[0]->height(), bottom[0]->width()); +} + +template +void SmoothL1LossLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + int count = bottom[0]->count(); + caffe_sub( + count, + bottom[0]->cpu_data(), + bottom[1]->cpu_data(), + diff_.mutable_cpu_data()); + if (has_weights_) { + caffe_mul( + count, + bottom[2]->cpu_data(), + diff_.cpu_data(), + diff_.mutable_cpu_data()); // d := w * (b0 - b1) + } + const Dtype* diff_data = diff_.cpu_data(); + Dtype* error_data = errors_.mutable_cpu_data(); + for (int i = 0; i < count; ++i) { + Dtype val = diff_data[i]; + Dtype abs_val = fabs(val); + if (abs_val < 1.) { + error_data[i] = 0.5 * val * val; + } else { + error_data[i] = abs_val - 0.5; + } + } + top[0]->mutable_cpu_data()[0] = + caffe_cpu_asum(count, errors_.cpu_data()) / bottom[0]->num(); +} + +template +void SmoothL1LossLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + int count = diff_.count(); + Dtype* diff_data = diff_.mutable_cpu_data(); + for (int i = 0; i < count; ++i) { + Dtype val = diff_data[i]; + // f'(x) = x if |x| < 1 + // = sign(x) otherwise + if (fabs(val) < 1.) { + diff_data[i] = val; + } else { + diff_data[i] = (Dtype(0) < val) - (val < Dtype(0)); + } + } + for (int i = 0; i < 2; ++i) { + if (propagate_down[i]) { + const Dtype sign = (i == 0) ? 
1 : -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); + caffe_cpu_axpby( + bottom[i]->count(), // count + alpha, // alpha + diff_.cpu_data(), // a + Dtype(0), // beta + bottom[i]->mutable_cpu_diff()); // b + } + } +} + +#ifdef CPU_ONLY +STUB_GPU(SmoothL1LossLayer); +#endif + +INSTANTIATE_CLASS(SmoothL1LossLayer); +REGISTER_LAYER_CLASS(SmoothL1Loss); + +} // namespace caffe diff --git a/src/caffe/layers/smooth_L1_loss_layer.cu b/src/caffe/layers/smooth_L1_loss_layer.cu new file mode 100644 index 00000000000..1dbf9f4dcdf --- /dev/null +++ b/src/caffe/layers/smooth_L1_loss_layer.cu @@ -0,0 +1,96 @@ +// ------------------------------------------------------------------ +// Fast R-CNN +// copyright (c) 2015 Microsoft +// Licensed under The MIT License [see fast-rcnn/LICENSE for details] +// Written by Ross Girshick +// Modified by Wei Liu +// ------------------------------------------------------------------ + +#include + +#include "caffe/layers/smooth_L1_loss_layer.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +template +__global__ void SmoothL1Forward(const int n, const Dtype* in, Dtype* out) { + // f(x) = 0.5 * x^2 if |x| < 1 + // |x| - 0.5 otherwise + CUDA_KERNEL_LOOP(index, n) { + Dtype val = in[index]; + Dtype abs_val = abs(val); + if (abs_val < 1) { + out[index] = 0.5 * val * val; + } else { + out[index] = abs_val - 0.5; + } + } +} + +template +void SmoothL1LossLayer::Forward_gpu(const vector*>& bottom, + const vector*>& top) { + int count = bottom[0]->count(); + caffe_gpu_sub( + count, + bottom[0]->gpu_data(), + bottom[1]->gpu_data(), + diff_.mutable_gpu_data()); // d := b0 - b1 + if (has_weights_) { + caffe_gpu_mul( + count, + bottom[2]->gpu_data(), + diff_.gpu_data(), + diff_.mutable_gpu_data()); // d := w * (b0 - b1) + } + // NOLINT_NEXT_LINE(whitespace/operators) + SmoothL1Forward<<>>( + count, diff_.gpu_data(), errors_.mutable_gpu_data()); + CUDA_POST_KERNEL_CHECK; + + Dtype loss; + caffe_gpu_asum(count, 
errors_.gpu_data(), &loss); + top[0]->mutable_cpu_data()[0] = loss / bottom[0]->num(); +} + +template +__global__ void SmoothL1Backward(const int n, const Dtype* in, Dtype* out) { + // f'(x) = x if |x| < 1 + // = sign(x) otherwise + CUDA_KERNEL_LOOP(index, n) { + Dtype val = in[index]; + Dtype abs_val = abs(val); + if (abs_val < 1) { + out[index] = val; + } else { + out[index] = (Dtype(0) < val) - (val < Dtype(0)); + } + } +} + +template +void SmoothL1LossLayer::Backward_gpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + int count = diff_.count(); + // NOLINT_NEXT_LINE(whitespace/operators) + SmoothL1Backward<<>>( + count, diff_.gpu_data(), diff_.mutable_gpu_data()); + CUDA_POST_KERNEL_CHECK; + for (int i = 0; i < 2; ++i) { + if (propagate_down[i]) { + const Dtype sign = (i == 0) ? 1 : -1; + const Dtype alpha = sign * top[0]->cpu_diff()[0] / bottom[i]->num(); + caffe_gpu_axpby( + bottom[i]->count(), // count + alpha, // alpha + diff_.gpu_data(), // x + Dtype(0), // beta + bottom[i]->mutable_gpu_diff()); // y + } + } +} + +INSTANTIATE_LAYER_GPU_FUNCS(SmoothL1LossLayer); + +} // namespace caffe diff --git a/src/caffe/layers/softmax_layer.cpp b/src/caffe/layers/softmax_layer.cpp index f60e9b03ebf..e8ce5374a83 100644 --- a/src/caffe/layers/softmax_layer.cpp +++ b/src/caffe/layers/softmax_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include @@ -24,6 +61,45 @@ void SoftmaxLayer::Reshape(const vector*>& bottom, } template +void SoftmaxLayer::Forward_cpu_fast_case( + const vector*>& bottom, + const vector*>& top) { + int channels = bottom[0]->shape(softmax_axis_); + int dim = bottom[0]->count() / outer_num_; + // assert(dim == channels); +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (int i = 0; i < outer_num_; ++i) { + const Dtype* bottom_data = bottom[0]->cpu_data() + i*dim; + Dtype *top_data = top[0]->mutable_cpu_data() + channels*i; + + Dtype scale_data = bottom_data[0]; + for (int j = 1; j < channels; ++j) { + scale_data = std::max(scale_data, bottom_data[j]); + } + + // subtraction + for (int j = 0; j < channels; j++) { + top_data[j] = bottom_data[j] - scale_data; + } + + // exponentiation + // FIXME_valgrind: caffe_exp(dim, top_data, top_data); + caffe_exp(dim, top_data, top_data); + + // sum after exp + scale_data = top_data[0]; + for (int j = 1; j < channels; j++) { + scale_data += top_data[j]; + } + + // division + caffe_scal(dim, Dtype(1)/scale_data, top_data); + } +} + +template void SoftmaxLayer::Forward_cpu(const vector*>& bottom, const vector*>& top) { const Dtype* bottom_data = bottom[0]->cpu_data(); @@ -31,31 +107,45 @@ void SoftmaxLayer::Forward_cpu(const vector*>& bottom, Dtype* scale_data = scale_.mutable_cpu_data(); int channels = bottom[0]->shape(softmax_axis_); int dim = bottom[0]->count() / outer_num_; + + if (inner_num_ == 1 && channels == dim) { + Forward_cpu_fast_case(bottom, top); + return; + } + caffe_copy(bottom[0]->count(), bottom_data, top_data); // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. 
for (int i = 0; i < outer_num_; ++i) { - // initialize scale_data to the first plane - caffe_copy(inner_num_, bottom_data + i * dim, scale_data); - for (int j = 0; j < channels; j++) { - for (int k = 0; k < inner_num_; k++) { - scale_data[k] = std::max(scale_data[k], - bottom_data[i * dim + j * inner_num_ + k]); +#ifdef _OPENMP +#pragma omp parallel for +#endif + for (int k = 0; k < inner_num_; k++) { + Dtype max_val = bottom_data[i * dim + k]; + for (int j = 1; j < channels; j++) { + Dtype value = bottom_data[i * dim + k + j * inner_num_]; + if (max_val < value) max_val = value; } + scale_data[k] = max_val; } // subtraction caffe_cpu_gemm(CblasNoTrans, CblasNoTrans, channels, inner_num_, 1, -1., sum_multiplier_.cpu_data(), scale_data, 1., top_data); // exponentiation + // FIXME_valgrind: caffe_exp(dim, top_data, top_data); caffe_exp(dim, top_data, top_data); // sum after exp caffe_cpu_gemv(CblasTrans, channels, inner_num_, 1., top_data, sum_multiplier_.cpu_data(), 0., scale_data); // division +#ifdef _OPENMP +#pragma omp parallel for +#endif for (int j = 0; j < channels; j++) { - caffe_div(inner_num_, top_data, scale_data, top_data); - top_data += inner_num_; + caffe_div(inner_num_, top_data + j*inner_num_, scale_data, + top_data + j*inner_num_); } + top_data += channels*inner_num_; } } @@ -72,6 +162,9 @@ void SoftmaxLayer::Backward_cpu(const vector*>& top, caffe_copy(top[0]->count(), top_diff, bottom_diff); for (int i = 0; i < outer_num_; ++i) { // compute dot(top_diff, top_data) and subtract them from the bottom diff +#ifdef _OPENMP +#pragma omp parallel for +#endif for (int k = 0; k < inner_num_; ++k) { scale_data[k] = caffe_cpu_strided_dot(channels, bottom_diff + i * dim + k, inner_num_, diff --git a/src/caffe/layers/softmax_loss_layer.cpp b/src/caffe/layers/softmax_loss_layer.cpp index dddb7606573..0aeab4ee453 100644 --- a/src/caffe/layers/softmax_loss_layer.cpp +++ b/src/caffe/layers/softmax_loss_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel 
Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include #include @@ -95,22 +132,73 @@ void SoftmaxWithLossLayer::Forward_cpu( int dim = prob_.count() / outer_num_; int count = 0; Dtype loss = 0; - for (int i = 0; i < outer_num_; ++i) { - for (int j = 0; j < inner_num_; j++) { - const int label_value = static_cast(label[i * inner_num_ + j]); - if (has_ignore_label_ && label_value == ignore_label_) { - continue; + if (bottom.size() == 3) { + const Dtype* weights = bottom[2]->cpu_data(); + Dtype weighted_sum = 0; + Dtype weighted_sum_local = 0; + Dtype loss_local = 0; + + for (int i = 0; i < outer_num_; ++i) { + weighted_sum_local = 0; + loss_local = 0; + + #ifdef _OPENMP + #pragma omp parallel for reduction(+: loss_local, weighted_sum_local) if(inner_num_ > 1) + #endif + for (int j = 0; j < inner_num_; j++) { + const int label_value = static_cast(label[i * inner_num_ + j]); + if (has_ignore_label_ && label_value == ignore_label_) { + continue; + } + + DCHECK_GE(label_value, 0); + DCHECK_LT(label_value, prob_.shape(softmax_axis_)); + Dtype p = prob_data[i * dim + label_value * inner_num_ + j]; + loss_local += weights[i * inner_num_ + j] * log(std::max(Dtype(FLT_MIN), std::min(p, Dtype(1.0 - FLT_MIN)))); + weighted_sum_local += weights[i * inner_num_ + j]; + } + + weighted_sum += weighted_sum_local; + loss -= loss_local; + } + + top[0]->mutable_cpu_data()[0] = loss / weighted_sum; + if (top.size() == 2) { + top[1]->ShareData(prob_); + } + } else { + int count_local = 0; + Dtype loss_local = 0; + + for (int i = 0; i < outer_num_; ++i) { + count_local = 0; + loss_local = 0; + + #ifdef _OPENMP + #pragma omp parallel for reduction(+: loss_local, count_local) if(inner_num_ > 1) + #endif + for (int j = 0; j < inner_num_; j++) { + const int label_value = static_cast(label[i * inner_num_ + j]); + if (has_ignore_label_ && label_value == ignore_label_) { + continue; + } + + DCHECK_GE(label_value, 0); + DCHECK_LT(label_value, prob_.shape(softmax_axis_)); + Dtype p = prob_data[i * dim + label_value * inner_num_ 
+ j]; + loss_local += log(std::max(Dtype(FLT_MIN), std::min(p, Dtype(1.0 - FLT_MIN)))); + ++count_local; + } + + count += count_local; + loss -= loss_local; + } + + Dtype normalizer = LossLayer::GetNormalizer(normalization_, outer_num_, inner_num_, count); + top[0]->mutable_cpu_data()[0] = loss / normalizer; + if (top.size() == 2) { + top[1]->ShareData(prob_); } - DCHECK_GE(label_value, 0); - DCHECK_LT(label_value, prob_.shape(softmax_axis_)); - loss -= log(std::max(prob_data[i * dim + label_value * inner_num_ + j], - Dtype(FLT_MIN))); - ++count; - } - } - top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, count); - if (top.size() == 2) { - top[1]->ShareData(prob_); } } @@ -122,29 +210,58 @@ void SoftmaxWithLossLayer::Backward_cpu(const vector*>& top, << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { - Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); - const Dtype* prob_data = prob_.cpu_data(); - caffe_copy(prob_.count(), prob_data, bottom_diff); - const Dtype* label = bottom[1]->cpu_data(); - int dim = prob_.count() / outer_num_; - int count = 0; - for (int i = 0; i < outer_num_; ++i) { - for (int j = 0; j < inner_num_; ++j) { - const int label_value = static_cast(label[i * inner_num_ + j]); - if (has_ignore_label_ && label_value == ignore_label_) { - for (int c = 0; c < bottom[0]->shape(softmax_axis_); ++c) { - bottom_diff[i * dim + c * inner_num_ + j] = 0; + if (bottom.size() == 3) { + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const Dtype* prob_data = prob_.cpu_data(); + caffe_copy(prob_.count(), prob_data, bottom_diff); + const Dtype* label = bottom[1]->cpu_data(); + int dim = prob_.count() / outer_num_; + Dtype weight_sum = Dtype(0); + const Dtype* weights = bottom[2]->cpu_data(); + for (int i = 0; i < outer_num_; ++i) { + for (int j = 0; j < inner_num_; ++j) { + const int label_value = static_cast(label[i * inner_num_ + j]); + if (has_ignore_label_ && label_value == ignore_label_) { + for (int c = 
0; c < bottom[0]->shape(softmax_axis_); ++c) { + bottom_diff[i * dim + c * inner_num_ + j] = 0; + } + } else { + bottom_diff[i * dim + label_value * inner_num_ + j] -= 1; + for (int c = 0; c < bottom[0]->shape(1); ++c) { + bottom_diff[i * dim + c * inner_num_ + j] *= weights[i * inner_num_ + j]; + } + weight_sum += weights[i * inner_num_ + j]; + } } - } else { - bottom_diff[i * dim + label_value * inner_num_ + j] -= 1; - ++count; } - } + + Dtype loss_weight = top[0]->cpu_diff()[0] / weight_sum; + caffe_scal(prob_.count(), loss_weight, bottom_diff); + } else { + Dtype* bottom_diff = bottom[0]->mutable_cpu_diff(); + const Dtype* prob_data = prob_.cpu_data(); + caffe_copy(prob_.count(), prob_data, bottom_diff); + const Dtype* label = bottom[1]->cpu_data(); + int dim = prob_.count() / outer_num_; + int count = 0; + for (int i = 0; i < outer_num_; ++i) { + for (int j = 0; j < inner_num_; ++j) { + const int label_value = static_cast(label[i * inner_num_ + j]); + if (has_ignore_label_ && label_value == ignore_label_) { + for (int c = 0; c < bottom[0]->shape(softmax_axis_); ++c) { + bottom_diff[i * dim + c * inner_num_ + j] = 0; + } + } else { + bottom_diff[i * dim + label_value * inner_num_ + j] -= 1; + ++count; + } + } + } + // Scale gradient + Dtype normalizer = LossLayer::GetNormalizer(normalization_, outer_num_, inner_num_, count); + Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer; + caffe_scal(prob_.count(), loss_weight, bottom_diff); } - // Scale gradient - Dtype loss_weight = top[0]->cpu_diff()[0] / - get_normalizer(normalization_, count); - caffe_scal(prob_.count(), loss_weight, bottom_diff); } } diff --git a/src/caffe/layers/softmax_loss_layer.cu b/src/caffe/layers/softmax_loss_layer.cu index 660e1b39fe0..2fd31ea81f4 100644 --- a/src/caffe/layers/softmax_loss_layer.cu +++ b/src/caffe/layers/softmax_loss_layer.cu @@ -56,8 +56,9 @@ void SoftmaxWithLossLayer::Forward_gpu( has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } - 
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, - valid_count); + Dtype normalizer = LossLayer::GetNormalizer( + normalization_, outer_num_, inner_num_, valid_count); + top[0]->mutable_cpu_data()[0] = loss / normalizer; if (top.size() == 2) { top[1]->ShareData(prob_); } @@ -117,8 +118,9 @@ void SoftmaxWithLossLayer::Backward_gpu(const vector*>& top, has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } - const Dtype loss_weight = top[0]->cpu_diff()[0] / - get_normalizer(normalization_, valid_count); + Dtype normalizer = LossLayer::GetNormalizer( + normalization_, outer_num_, inner_num_, valid_count); + const Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer; caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } diff --git a/src/caffe/layers/split_layer.cpp b/src/caffe/layers/split_layer.cpp index 1a27a9af0a1..1c823768c7c 100644 --- a/src/caffe/layers/split_layer.cpp +++ b/src/caffe/layers/split_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/split_layer.hpp" @@ -54,6 +91,5 @@ STUB_GPU(SplitLayer); #endif INSTANTIATE_CLASS(SplitLayer); -REGISTER_LAYER_CLASS(Split); } // namespace caffe diff --git a/src/caffe/layers/spp_layer.cpp b/src/caffe/layers/spp_layer.cpp index b9af8e8af0e..28a3f35ff41 100644 --- a/src/caffe/layers/spp_layer.cpp +++ b/src/caffe/layers/spp_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include @@ -14,6 +51,11 @@ using std::min; using std::max; template +SPPLayer::~SPPLayer() { + std::for_each(split_top_vec_.begin(),split_top_vec_.end(), + [](Blob* p){delete p;}); +} +template LayerParameter SPPLayer::GetPoolingParam(const int pyramid_level, const int bottom_h, const int bottom_w, const SPPParameter spp_param) { LayerParameter pooling_param; @@ -75,6 +117,8 @@ void SPPLayer::LayerSetUp(const vector*>& bottom, CHECK_GT(bottom_w_, 0) << "Input dimensions cannot be zero."; pyramid_height_ = spp_param.pyramid_height(); + std::for_each(split_top_vec_.begin(),split_top_vec_.end(), + [](Blob* p){delete p;}); split_top_vec_.clear(); pooling_bottom_vecs_.clear(); pooling_layers_.clear(); @@ -106,13 +150,15 @@ void SPPLayer::LayerSetUp(const vector*>& bottom, for (int i = 0; i < pyramid_height_; i++) { // pooling layer input holders setup - pooling_bottom_vecs_.push_back(new vector*>); + pooling_bottom_vecs_.push_back(shared_ptr*> > + (new vector*>)); pooling_bottom_vecs_[i]->push_back(split_top_vec_[i]); // pooling layer output holders setup - pooling_outputs_.push_back(new Blob()); - pooling_top_vecs_.push_back(new vector*>); - pooling_top_vecs_[i]->push_back(pooling_outputs_[i]); + pooling_outputs_.push_back(shared_ptr >(new Blob())); + pooling_top_vecs_.push_back(shared_ptr*> >( + new vector*>)); + pooling_top_vecs_[i]->push_back(pooling_outputs_[i].get()); // pooling layer setup LayerParameter pooling_param = GetPoolingParam( @@ -123,17 +169,19 @@ void SPPLayer::LayerSetUp(const vector*>& bottom, pooling_layers_[i]->SetUp(*pooling_bottom_vecs_[i], *pooling_top_vecs_[i]); // flatten layer output holders setup - flatten_outputs_.push_back(new Blob()); - flatten_top_vecs_.push_back(new vector*>); - flatten_top_vecs_[i]->push_back(flatten_outputs_[i]); + flatten_outputs_.push_back(shared_ptr >(new Blob())); + flatten_top_vecs_.push_back(shared_ptr*> > + (new vector*>)); + flatten_top_vecs_[i]->push_back(flatten_outputs_[i].get()); // 
flatten layer setup LayerParameter flatten_param; - flatten_layers_.push_back(new FlattenLayer(flatten_param)); + flatten_layers_.push_back(shared_ptr > + (new FlattenLayer(flatten_param))); flatten_layers_[i]->SetUp(*pooling_top_vecs_[i], *flatten_top_vecs_[i]); // concat layer input holders setup - concat_bottom_vec_.push_back(flatten_outputs_[i]); + concat_bottom_vec_.push_back(flatten_outputs_[i].get()); } // concat layer setup diff --git a/src/caffe/layers/tanh_layer.cpp b/src/caffe/layers/tanh_layer.cpp index 184e926d22a..e707cf6aaf9 100644 --- a/src/caffe/layers/tanh_layer.cpp +++ b/src/caffe/layers/tanh_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + // TanH neuron activation function layer. // Adapted from ReLU layer code written by Yangqing Jia diff --git a/src/caffe/layers/threshold_layer.cpp b/src/caffe/layers/threshold_layer.cpp index 63822ee5520..43764d508f1 100644 --- a/src/caffe/layers/threshold_layer.cpp +++ b/src/caffe/layers/threshold_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/layers/threshold_layer.hpp" diff --git a/src/caffe/layers/tile_layer.cpp b/src/caffe/layers/tile_layer.cpp index cf0c187005c..77d8280f608 100644 --- a/src/caffe/layers/tile_layer.cpp +++ b/src/caffe/layers/tile_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include "caffe/layers/tile_layer.hpp" diff --git a/src/caffe/layers/video_data_layer.cpp b/src/caffe/layers/video_data_layer.cpp new file mode 100644 index 00000000000..b53cf3e09ff --- /dev/null +++ b/src/caffe/layers/video_data_layer.cpp @@ -0,0 +1,202 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef USE_OPENCV +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "caffe/data_transformer.hpp" +#include "caffe/layers/video_data_layer.hpp" +#include "caffe/util/benchmark.hpp" + +namespace caffe { + +template +VideoDataLayer::VideoDataLayer(const LayerParameter& param) + : BasePrefetchingDataLayer(param) { +} + +template +VideoDataLayer::~VideoDataLayer() { + this->StopInternalThread(); + if (cap_.isOpened()) { + cap_.release(); + } +} + +template +void VideoDataLayer::DataLayerSetUp( + const vector*>& bottom, const vector*>& top) { + const int batch_size = this->layer_param_.data_param().batch_size(); + const VideoDataParameter& video_data_param = + this->layer_param_.video_data_param(); + video_type_ = video_data_param.video_type(); + skip_frames_ = video_data_param.skip_frames(); + CHECK_GE(skip_frames_, 0); + + // Read an image, and use it to initialize the top blob. 
+ cv::Mat cv_img; + if (video_type_ == VideoDataParameter_VideoType_WEBCAM) { + const int device_id = video_data_param.device_id(); + if (!cap_.open(device_id)) { + LOG(FATAL) << "Failed to open webcam: " << device_id; + } + cap_ >> cv_img; + } else if (video_type_ == VideoDataParameter_VideoType_VIDEO) { + CHECK(video_data_param.has_video_file()) << "Must provide video file!"; + const string& video_file = video_data_param.video_file(); + if (!cap_.open(video_file)) { + LOG(FATAL) << "Failed to open video: " << video_file; + } + total_frames_ = cap_.get(CV_CAP_PROP_FRAME_COUNT); + processed_frames_ = 0; + // Read image to infer shape. + cap_ >> cv_img; + // Set index back to the first frame. + cap_.set(CV_CAP_PROP_POS_FRAMES, 0); + } else { + LOG(FATAL) << "Unknow video type!"; + } + CHECK(cv_img.data) << "Could not load image!"; + // Use data_transformer to infer the expected blob shape from a cv_image. + top_shape_ = this->data_transformer_->InferBlobShape(cv_img); + this->transformed_data_.Reshape(top_shape_); + top_shape_[0] = batch_size; + top[0]->Reshape(top_shape_); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].data_.Reshape(top_shape_); + } + LOG(INFO) << "output data size: " << top[0]->num() << "," + << top[0]->channels() << "," << top[0]->height() << "," + << top[0]->width(); + // label + if (this->output_labels_) { + vector label_shape(1, batch_size); + top[1]->Reshape(label_shape); + for (int i = 0; i < this->PREFETCH_COUNT; ++i) { + this->prefetch_[i].label_.Reshape(label_shape); + } + } +} + +// This function is called on prefetch thread +template +void VideoDataLayer::load_batch(Batch* batch) { + CPUTimer batch_timer; + batch_timer.Start(); + double read_time = 0; + double trans_time = 0; + CPUTimer timer; + CHECK(batch->data_.count()); + CHECK(this->transformed_data_.count()); + + // Reshape according to the first anno_datum of each batch + // on single input batches allows for inputs of varying dimension. 
+ const int batch_size = this->layer_param_.data_param().batch_size(); + top_shape_[0] = 1; + this->transformed_data_.Reshape(top_shape_); + // Reshape batch according to the batch_size. + top_shape_[0] = batch_size; + batch->data_.Reshape(top_shape_); + + Dtype* top_data = batch->data_.mutable_cpu_data(); + Dtype* top_label = NULL; // suppress warnings about uninitialized variables + if (this->output_labels_) { + top_label = batch->label_.mutable_cpu_data(); + } + + int skip_frames = skip_frames_; + for (int item_id = 0; item_id < batch_size; ++item_id) { + timer.Start(); + cv::Mat cv_img; + if (video_type_ == VideoDataParameter_VideoType_WEBCAM) { + cap_ >> cv_img; + } else if (video_type_ == VideoDataParameter_VideoType_VIDEO) { + if (processed_frames_ >= total_frames_) { + LOG(INFO) << "Finished processing video."; + raise(SIGINT); + } + ++processed_frames_; + cap_ >> cv_img; + } else { + LOG(FATAL) << "Unknown video type."; + } + CHECK(cv_img.data) << "Could not load image!"; + read_time += timer.MicroSeconds(); + if (skip_frames > 0) { + --skip_frames; + --item_id; + } else { + skip_frames = skip_frames_; + timer.Start(); + // Apply transformations (mirror, crop...) to the image + int offset = batch->data_.offset(item_id); + this->transformed_data_.set_cpu_data(top_data + offset); + this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); + trans_time += timer.MicroSeconds(); + } + CHECK(cv_img.data) << "Could not load image!"; + read_time += timer.MicroSeconds(); + timer.Start(); + // Apply transformations (mirror, crop...) 
to the image + int offset = batch->data_.offset(item_id); + this->transformed_data_.set_cpu_data(top_data + offset); + this->data_transformer_->Transform(cv_img, &(this->transformed_data_)); + trans_time += timer.MicroSeconds(); + if (this->output_labels_) { + top_label[item_id] = 0; + } + } + timer.Stop(); + batch_timer.Stop(); + DLOG(INFO) << "Prefetch batch: " << batch_timer.MilliSeconds() << " ms."; + DLOG(INFO) << " Read time: " << read_time / 1000 << " ms."; + DLOG(INFO) << "Transform time: " << trans_time / 1000 << " ms."; +} + +INSTANTIATE_CLASS(VideoDataLayer); +REGISTER_LAYER_CLASS(VideoData); + +} // namespace caffe +#endif // USE_OPENCV diff --git a/src/caffe/layers/window_data_layer.cpp b/src/caffe/layers/window_data_layer.cpp index 103dd4b6af8..12a19dfe64d 100644 --- a/src/caffe/layers/window_data_layer.cpp +++ b/src/caffe/layers/window_data_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifdef USE_OPENCV #include #include diff --git a/src/caffe/mkl_memory.cpp b/src/caffe/mkl_memory.cpp new file mode 100644 index 00000000000..d25afd24aa9 --- /dev/null +++ b/src/caffe/mkl_memory.cpp @@ -0,0 +1,423 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef MKL2017_SUPPORTED +#include "caffe/util/performance.hpp" + +#include "caffe/mkl_memory.hpp" + +// Uncomment to see where the layout conversions are done +// #undef DLOG +#ifndef DLOG +#define DLOG LOG +#endif + +namespace caffe { + +template +void MKLMemoryDescriptorBase::create_conversions() { + int status; + this->remove_conversions(); + if (layout_int + && !dnnLayoutCompare(layout_usr, layout_int)) { + CHECK(layout_usr); + status = dnnConversionCreate(&convert_to_int, layout_usr, + layout_int); + CHECK_EQ(status, E_SUCCESS) + << "Failed creation convert_to_int with status " + << status << " for buffer: " << this->name << "\n"; + status = dnnConversionCreate(&convert_from_int, layout_int, + layout_usr); + CHECK_EQ(status, E_SUCCESS) + << "Failed creation convert_from_int with status " + << status << " for buffer: " << this->name << "\n"; + } +} + +template +void MKLMemoryDescriptorBase::remove_conversions() { + int status; + if (this->convert_from_int) { + DLOG(INFO) << "convert_from_int layout already created, recreating for" + << this->name; + status = dnnDelete(this->convert_from_int); + CHECK_EQ(status, E_SUCCESS); + } + if (this->convert_to_int) { + DLOG(INFO) << "convert_to_int layout already created, recreating for" + << this->name; + status = dnnDelete(this->convert_to_int); + CHECK_EQ(status, E_SUCCESS); + } +} + +template +void MKLMemoryDescriptorBase::create_internal_layout( + const dnnPrimitive_t primitive, dnnResourceType_t type) { + int status; + this->remove_internal_layout(); + status = dnnLayoutCreateFromPrimitive( + &this->layout_int, primitive, type); + CHECK_EQ(status, E_SUCCESS) + << "Failed dnnLayoutCreateFromPrimitive with status " + << status << " for buffer: " << this->name << "\n"; + + if (this->layout_usr) + this->create_conversions(); +} + +template +void MKLMemoryDescriptorBase::create_user_layout( + size_t dimension, + const size_t size[], + const size_t strides[], + bool create_conversion_if_possible) { + int status; + 
this->remove_user_layout(); + status = dnnLayoutCreate( + &this->layout_usr, dimension, size, strides); + CHECK_EQ(status, E_SUCCESS) << "Failed dnnLayoutCreate with status " + << status << " for buffer: " << this->name << "\n"; + + // If conversion creation is to happen + // then if only we have internal layout already in place + // we can proceed with conversion creation. + // Otherwise we make sure that existing conversions are deleted + // as with new layout creation they are being instantly invalidated + if (create_conversion_if_possible) { + if (this->layout_int) { + this->create_conversions(); + } + } else { + this->remove_conversions(); + } +} + +template +void MKLMemoryDescriptorBase::remove_internal_layout() { + int status; + if (this->layout_int) { + DLOG(INFO) << "Internal layout already created, recreating for" + << this->name; + status = dnnLayoutDelete(this->layout_int); + CHECK_EQ(status, E_SUCCESS); + + // with layout invalidated we should also remove Allocation + // as next layout may declare diffrent sizes of allocation + status = dnnReleaseBuffer(this->internal_ptr); + this->internal_ptr = NULL; + CHECK_EQ(status, E_SUCCESS); + } +} + +template +void MKLMemoryDescriptorBase::remove_user_layout() { + int status; + if (this->layout_usr) { + DLOG(INFO) << "Internal layout already created, recreating for" + << this->name; + status = dnnLayoutDelete(this->layout_usr); + CHECK_EQ(status, E_SUCCESS); + + // with layout invalidated we should also remove Allocation + // as next layout may declare diffrent sizes of allocation + status = dnnReleaseBuffer(this->internal_ptr); + this->internal_ptr = NULL; + CHECK_EQ(status, E_SUCCESS); + } +} + +template +void MKLMemoryDescriptorBase::create_layouts( + const dnnPrimitive_t primitive, dnnResourceType_t type, + size_t dimension, const size_t size[], const size_t strides[]) { + // To avoid creating conversion among potentialiy diffrent + // (in terms of size) layouts we need to destroy existing layouts here + + 
if (this->layout_usr) { + DLOG(INFO) << "User layout already created, recreating for" + << this->name; + int status = dnnLayoutDelete(this->layout_usr); + CHECK_EQ(status, E_SUCCESS); + } + this->create_internal_layout(primitive, type); + this->create_user_layout(dimension, size, strides); +} + +template +void MKLMemoryDescriptorBase::convert_from_prv(void* cpu_ptr) { + CHECK(cpu_ptr); + // When no conversion is available then + // recreate them if layouts are available + if (this-> convert_from_int == NULL) { + this->create_conversions(); + } + CHECK(this->convert_from_int); + int status; + void *convert_resources[dnnResourceNumber]; + + DLOG(INFO) << "convert priv => " << this->name << " =>"; + + convert_resources[dnnResourceFrom] = this->prv_ptr(); + convert_resources[dnnResourceTo] = cpu_ptr; + + PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(this->convert_from_int, convert_resources); + PERFORMANCE_MEASUREMENT_END_STATIC("mkl_conversion"); + + CHECK_EQ(status, 0) << "Conversion from prv failed with status " << status; +} + +template +void MKLMemoryDescriptorBase::convert_to_prv(void* cpu_ptr) { + CHECK(cpu_ptr); + CHECK(this->convert_to_int); + int status; + void *convert_resources[dnnResourceNumber]; + + DLOG(INFO) << "convert => priv => " + << this->name; + + convert_resources[dnnResourceFrom] = cpu_ptr; + convert_resources[dnnResourceTo] = this->prv_ptr(); + + PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(this->convert_to_int, convert_resources); + PERFORMANCE_MEASUREMENT_END_STATIC("mkl_conversion"); + + CHECK_EQ(status, 0) << "Conversion from prv failed with status " << status; +} + + +template +bool MKLMemoryDescriptorBase::layout_compare( + shared_ptr other) { + CHECK_EQ(other->get_descr_type(), + PrvMemDescr::PRV_DESCR_MKL2017); + + shared_ptr > other_descr = + boost::static_pointer_cast > + (other); + + if (dnnLayoutCompare(other_descr->layout_int, + this->layout_int)) + return true; + else + return false; +} + +template +void 
MKLMemoryDescriptorBase::convert_from_other( + shared_ptr other) { + shared_ptr > other_descr = + boost::static_pointer_cast > + (other); + + DLOG(INFO) << "convert other => priv " << other_descr->name + << " => " << this->name; + + int status; + dnnPrimitive_t convert; + // TODO: cache this primitive? + status = dnnConversionCreate(&convert, + other_descr->layout_int, this->layout_int); + + void *convert_resources[dnnResourceNumber]; + convert_resources[dnnResourceFrom] = other_descr->prv_ptr(); + convert_resources[dnnResourceTo] = this->prv_ptr(); + + PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(convert, convert_resources); + PERFORMANCE_MEASUREMENT_END_STATIC("mkl_conversion"); + + CHECK_EQ(status, 0) << "Conversion from other failed with status " + << status; + + dnnDelete(convert); +} + +template +Dtype* MKLMemoryDescriptor::get_converted_prv( + Blob* blob, bool set_prv_ptr, + MKLMemoryDescriptor* converted_in_fwd) { + if (this->convert_to_int) { + int status; + void *convert_resources[dnnResourceNumber]; + const Dtype* prv_ptr = is_diff ? blob->prv_diff() : blob->prv_data(); + if (prv_ptr == NULL) { + if (converted_in_fwd) { + // hack for reusing previously done conversion + // if(dnnLayoutCompare(converted_in_fwd->layout_int , this->layout_int)) + if (1) { + DLOG(INFO) << "reusing fwd " + << converted_in_fwd->name << " == " << this->name; + return converted_in_fwd->internal_ptr; + } else { + DLOG(INFO) << "layout doesn't match " + << converted_in_fwd->name << " != " << this->name; + } + } + + DLOG(INFO) << "convert => priv => " + << this->name; + + this->allocate(); + convert_resources[dnnResourceFrom] = + is_diff ? 
+ reinterpret_cast(const_cast(blob->cpu_diff())) + : reinterpret_cast(const_cast(blob->cpu_data())); + convert_resources[dnnResourceTo] = + reinterpret_cast(this->internal_ptr); + + PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(this->convert_to_int, convert_resources); + PERFORMANCE_MEASUREMENT_END_STATIC("mkl_conversion"); + + CHECK_EQ(status, 0) << "Conversion failed with status " << status; + + if (set_prv_ptr) { + if (is_diff) + blob->set_prv_diff_descriptor(this->get_shared_ptr(), true); + else + blob->set_prv_data_descriptor(this->get_shared_ptr(), true); + } + return this->internal_ptr; + } else { + // This section helps if padding needs to be added (or removed...) + // TODO: consider removing when no longer needed. + shared_ptr prv_mem_descriptor = + is_diff ? (blob->get_prv_diff_descriptor()) : + (blob->get_prv_data_descriptor()); + + CHECK_EQ(prv_mem_descriptor->get_descr_type(), + PrvMemDescr::PRV_DESCR_MKL2017); + + shared_ptr > current_descr = + boost::static_pointer_cast > + (prv_mem_descriptor); + + if (!dnnLayoutCompare(current_descr->layout_int, + this->layout_int)) { + if (converted_in_fwd) { + // hack for reusing previously done conversion + // if(dnnLayoutCompare(converted_in_fwd->layout_int,this->layout_int)) + if (1) { + DLOG(INFO) << "reusing fwd " + << converted_in_fwd->name << " == " << this->name; + return converted_in_fwd->internal_ptr; + } else { + DLOG(INFO) << "layout doesn't match " + << converted_in_fwd->name << " != " << this->name; + } + } + DLOG(INFO) << "convert priv => priv " + << current_descr->name << " => " << this->name; + + if (this->convert_prv2prv) { + CHECK_EQ(dnnLayoutCompare( + this->descr_prv2prv_conversion->layout_int, + this->layout_int), 0); + status = 0; + } else { + status = dnnConversionCreate(&this->convert_prv2prv, + current_descr->layout_int , this->layout_int); + if (status == 0) + this->descr_prv2prv_conversion = current_descr; + } + + if (status != 0) { + // TODO: Very weird that we end up here 
for conv1. No idea why.... + DLOG(INFO) << "!!!! Failed creation convert_prv2prv with status " + << status << "\n"; + + this->allocate(); + convert_resources[dnnResourceFrom] = is_diff ? + reinterpret_cast(const_cast(blob->cpu_diff())) : + reinterpret_cast(const_cast(blob->cpu_data())); + convert_resources[dnnResourceTo] = + reinterpret_cast(this->internal_ptr); + + PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(this->convert_to_int, convert_resources); + PERFORMANCE_MEASUREMENT_END_STATIC("mkl_conversion"); + + CHECK_EQ(status, 0) << "Conversion failed with status " << status; + + } else { + this->allocate(); + + convert_resources[dnnResourceFrom] = is_diff ? + reinterpret_cast(const_cast(blob->prv_diff())) : + reinterpret_cast(const_cast(blob->prv_data())); + convert_resources[dnnResourceTo] = + reinterpret_cast(this->internal_ptr); + + PERFORMANCE_MEASUREMENT_BEGIN(); + status = dnnExecute(this->convert_prv2prv, convert_resources); + PERFORMANCE_MEASUREMENT_END_STATIC("mkl_conversion"); + + CHECK_EQ(status, 0) << "Conversion failed with status " << status; + } + + if (set_prv_ptr) { + if (is_diff) + blob->set_prv_diff_descriptor(this->get_shared_ptr(), true); + else + blob->set_prv_data_descriptor(this->get_shared_ptr(), true); + } + return this->internal_ptr; + } else if (current_descr.get() != this) { + DLOG(INFO) << "layout OK " + << current_descr->name << " == " << this->name; + } + } + + return const_cast(prv_ptr); + } + + return (is_diff ? 
const_cast(blob->cpu_diff()) : + const_cast(blob->cpu_data())); +} + +template class MKLMemoryDescriptor; +template class MKLMemoryDescriptor; +template class MKLMemoryDescriptor; +template class MKLMemoryDescriptor; +template class MKLMemoryDescriptorBase; +template class MKLMemoryDescriptorBase; +} // namespace caffe +#endif // #ifdef MKL2017_SUPPORTED diff --git a/src/caffe/mkldnn_base.cpp b/src/caffe/mkldnn_base.cpp new file mode 100644 index 00000000000..7f07478ba1f --- /dev/null +++ b/src/caffe/mkldnn_base.cpp @@ -0,0 +1,74 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef MKLDNN_SUPPORTED +#include "caffe/mkldnn_memory.hpp" + +namespace caffe { + + +shared_ptr StreamHolder::get_stream() +{ + if (this->_current_stream == NULL || !this->_current_stream->ready()) { + _current_stream.reset(new MKLDNNStream()); + } + return _current_stream; +} + +template +shared_ptr MKLDNNPrimitive::get_mkldnn_stream() { + if(mkldnn_stream == NULL) + mkldnn_stream = StreamHolder::Instance().get_stream(); + else + StreamHolder::Instance().prepare_mkldnn_stream(mkldnn_stream); + return mkldnn_stream; + +} + +template +shared_ptr MKLDNNPrimitive::submit() { + CHECK(this->aprimitive); + this->get_mkldnn_stream()->submit({*(this->aprimitive)}); + return mkldnn_stream; +} + +template class MKLDNNLayer; +template class MKLDNNLayer; +template class MKLDNNPrimitive; +template class MKLDNNPrimitive; +} // namespace caffe +#endif // #ifdef MKLDNN_SUPPORTED diff --git a/src/caffe/mkldnn_memory.cpp b/src/caffe/mkldnn_memory.cpp new file mode 100644 index 00000000000..bacb6ae6191 --- /dev/null +++ b/src/caffe/mkldnn_memory.cpp @@ -0,0 +1,486 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef MKLDNN_SUPPORTED +#include "caffe/mkldnn_memory.hpp" +#include "caffe/util/performance.hpp" + +namespace caffe { + + +template +MKLDNNMemoryDescriptorBase::MKLDNNMemoryDescriptorBase(shared_ptr usr_memory_pd + , shared_ptr prv_memory_pd + , Blob* blob + , MKLDNNLayer* mkldnn_layer) + : name("MKLDNNMemoryDescriptorBase") + , _reorder_usr2prv_pd(), _reorder_prv2usr_pd(), _reorder_extprv2prv_pd() + ,_prv_memory(), _internal_ptr(NULL), _usr_memory(), _cpu_ptr(NULL) + , _mkldnn_layer(NULL) +{ + set_usr_memory_pd(usr_memory_pd); + set_prv_memory_pd(prv_memory_pd); + set_mkldnn_layer(mkldnn_layer); + this->_blob = blob; +} + +template +void MKLDNNMemoryDescriptorBase::check_usr_with_prv_descriptors() +{ + CHECK(_usr_memory_pd); + CHECK(_prv_memory_pd); + int32_t ndims = _usr_memory_pd->desc().data.ndims; + CHECK_EQ(ndims, _prv_memory_pd->desc().data.ndims) + << "MKLDNNMemoryDescriptorBase: Usr and Prv memory must have same dimensions number"; + for (int32_t dim = 0; dim < ndims; ++dim) { + CHECK_EQ(_usr_memory_pd->desc().data.dims[dim] + , _prv_memory_pd->desc().data.dims[dim]) + << "MKLDNNMemoryDescriptorBase: Usr and Prv memory must have same dimensions"; + } +} + +template +void MKLDNNMemoryDescriptorBase::create_reorder_descriptors() +{ + CHECK(_usr_memory_pd); + CHECK(_prv_memory_pd); + if ( *_usr_memory_pd != *_prv_memory_pd) { + _reorder_usr2prv_pd = shared_ptr( + new reorder::primitive_desc(*_usr_memory_pd, *_prv_memory_pd)); + + _reorder_prv2usr_pd = shared_ptr( + new reorder::primitive_desc(*_prv_memory_pd, *_usr_memory_pd)); + } + if ( _extprv_memory_pd && *_prv_memory_pd != *_extprv_memory_pd) { + _reorder_extprv2prv_pd = shared_ptr( + new reorder::primitive_desc(*_extprv_memory_pd, *_prv_memory_pd)); + } +} + + +template + MKLDNNMemoryDescriptor::MKLDNNMemoryDescriptor(shared_ptr usr_memory_pd + , shared_ptr prv_memory_pd + , Blob* blob, MKLDNNLayer* mkldnn_layer) + : MKLDNNMemoryDescriptorBase(usr_memory_pd, prv_memory_pd, blob, mkldnn_layer) 
+{ + const Dtype* prv_ptr = is_diff ? blob->prv_diff() : blob->prv_data(); + + if (prv_ptr != NULL) { + shared_ptr > blob_prv_mkldnn_mem_descr = get_mkldnn_prv_descriptor(blob); +#ifdef DEBUG + LOG(INFO) << "Format of blob-prv-memory-pd: " << blob_prv_mkldnn_mem_descr->prv_memory_pd()->desc().data.format; + LOG(INFO) << "Format of this-prv-memory-pd: " << this->prv_memory_pd()->desc().data.format; +#endif + if (*blob_prv_mkldnn_mem_descr->prv_memory_pd() != *this->prv_memory_pd()) { +#ifdef DEBUG + LOG(INFO) << "Formats of blob-prv-memory-pd and this-prv-memory-pd are not equal !"; +#endif + this->set_extprv_memory_pd(blob_prv_mkldnn_mem_descr->prv_memory_pd()); + } + } +} + +template +void MKLDNNMemoryDescriptor::create_reorder_to_prv(void* cpu_ptr) +{ + CHECK(cpu_ptr); + CHECK(this->_usr_memory_pd); + CHECK(this->_prv_memory_pd); + CHECK(this->_reorder_usr2prv_pd); + if (this->_cpu_ptr == NULL) + this->_cpu_ptr = cpu_ptr; + else + CHECK_EQ(this->_cpu_ptr, cpu_ptr); + if(this->_usr_memory == NULL) + this->_usr_memory.reset(new memory(*this->_usr_memory_pd, cpu_ptr)); + if(this->_reorder_usr2prv.aprimitive == NULL) + this->_reorder_usr2prv.reset(new reorder(*this->_reorder_usr2prv_pd, *this->_usr_memory, *this->get_prv_memory())); +} + +template +void MKLDNNMemoryDescriptor::convert_to_prv(void* cpu_ptr) +{ +#ifdef DEBUG + LOG(INFO) << "--- MKLDNNMemoryDescriptorBase::convert_to_prv --- " << this->name; +#endif + CHECK(cpu_ptr); + CHECK_EQ(this->_cpu_ptr, cpu_ptr); + create_reorder_to_prv(cpu_ptr); + VLOG(1) << "--- MKLDNNMemoryDescriptorBase::convert_to_prv --- " << this->name; +#ifdef DEBUG + LOG(INFO) << "Reorder: from usr to prv."; + LOG(INFO) << "Format of _usr_memory_pd: " << this->_usr_memory_pd->desc().data.format; + LOG(INFO) << "Format of _prv_memory_pd: " << this->_prv_memory_pd->desc().data.format; +#endif + PERFORMANCE_MEASUREMENT_BEGIN(); + this->_reorder_usr2prv.submit(); + PERFORMANCE_MEASUREMENT_END_STATIC("mkldnn_conversion"); +} + +template +void 
MKLDNNMemoryDescriptor::create_reorder_from_prv(void* cpu_ptr) +{ + CHECK(cpu_ptr); + CHECK(this->_usr_memory_pd); + CHECK(this->_prv_memory_pd); + CHECK(this->_reorder_prv2usr_pd); + if (this->_cpu_ptr == NULL) + this->_cpu_ptr = cpu_ptr; + else + CHECK_EQ(this->_cpu_ptr, cpu_ptr); + if(this->_usr_memory == NULL) + this->_usr_memory.reset(new memory(*this->_usr_memory_pd, cpu_ptr)); + if(this->_reorder_prv2usr.aprimitive == NULL) { + CHECK(this->aprimitive()); + this->_reorder_prv2usr.aprimitive.reset(new reorder(*this->_reorder_prv2usr_pd, *this->aprimitive(), *this->_usr_memory)); + } +} + +template +void MKLDNNMemoryDescriptor::convert_from_prv(void* cpu_ptr) +{ +#ifdef DEBUG + LOG(INFO) << "--- MKLDNNMemoryDescriptorBase::convert_from_prv --- " << this->name; +#endif + CHECK(cpu_ptr); + if(this->_reorder_prv2usr_pd == NULL) + return; + create_reorder_from_prv(cpu_ptr); + VLOG(1) << "--- MKLDNNMemoryDescriptorBase::convert_from_prv --- " << this->name; +#ifdef DEBUG + LOG(INFO) << "Reorder: from prv to usr."; + LOG(INFO) << "Format of _prv_memory_pd: " << this->_prv_memory_pd->desc().data.format; + LOG(INFO) << "Format of _usr_memory_pd: " << this->_usr_memory_pd->desc().data.format; +#endif + PERFORMANCE_MEASUREMENT_BEGIN(); + this->_reorder_prv2usr.submit(); + PERFORMANCE_MEASUREMENT_END_STATIC("mkldnn_conversion"); +} + +template +void MKLDNNMemoryDescriptor::create_reorder_from_extprv(shared_ptr aprimitive) +{ + CHECK(aprimitive); + CHECK(this->_extprv_memory_pd); + CHECK(this->_prv_memory_pd); + CHECK(this->_reorder_extprv2prv_pd); + if(this->_reorder_extprv2prv.aprimitive == NULL) + this->_reorder_extprv2prv.reset(new reorder(*this->_reorder_extprv2prv_pd, *aprimitive, *this->get_prv_memory())); +} + +template +void MKLDNNMemoryDescriptor::convert_from_extprv(shared_ptr aprimitive) +{ +#ifdef DEBUG + LOG(INFO) << "--- MKLDNNMemoryDescriptorBase::convert_from_extprv --- " << this->name; +#endif + CHECK(aprimitive); + if(this->_reorder_extprv2prv_pd == 
NULL) + return; + if (this->_extprv_memory_pd->desc().data.format == this->_prv_memory_pd->desc().data.format && + this->_extprv_memory_pd->desc().data.data_type == this->_prv_memory_pd->desc().data.data_type) + { +#ifdef DEBUG + LOG(INFO) << "The format and data_type of _extprv_memory_pd and _prv_memory_pd is same, no need do conversion."; +#endif + return; + } + create_reorder_from_extprv(aprimitive); + VLOG(1) << "--- MKLDNNMemoryDescriptorBase::convert_from_extprv --- " << this->name; +#ifdef DEBUG + LOG(INFO) << "Reorder: from extprv to prv."; + LOG(INFO) << "Format of _extprv_memory_pd: " << this->_extprv_memory_pd->desc().data.format; + LOG(INFO) << "Format of _prv_memory_pd: " << this->_prv_memory_pd->desc().data.format; +#endif + PERFORMANCE_MEASUREMENT_BEGIN(); + this->_reorder_extprv2prv.submit(); + PERFORMANCE_MEASUREMENT_END_STATIC("mkldnn_conversion"); +} + + +template +bool MKLDNNMemoryDescriptor::on_to_cpu() +{ + CHECK(this->mkldnn_layer()); + if (StreamHolder::Instance().current_stream() != NULL && StreamHolder::Instance().current_stream()->ready()) { + VLOG(1) << "- MKLDNNMemoryDescriptorBase::" << __FUNCTION__ << ": stream.wait() - " << this->name; + StreamHolder::Instance().current_stream()->wait(); + } + return true; +} + +template +bool MKLDNNMemoryDescriptorBase::layout_compare(shared_ptr other) +{ + CHECK_EQ(other->get_descr_type(), + PrvMemDescr::PRV_DESCR_MKLDNN); + + shared_ptr > other_descr = + boost::static_pointer_cast >(other); + + return (*other_descr->prv_memory_pd() == *this->prv_memory_pd()); +} + +template +void MKLDNNMemoryDescriptorBase::convert_from_other(shared_ptr other) +{ + NOT_IMPLEMENTED; +} + +template +shared_ptr MKLDNNMemoryDescriptor::get_blob_prv_primitive(Blob* blob + ,bool set_prv_ptr, bool convert + ,MKLDNNMemoryDescriptor* converted_in_fwd) +{ + if (!this->conversion_needed()) { + return shared_ptr(); // TODO: may be CHECK ? + } + + // Conversion is needed + const Dtype* prv_ptr = is_diff ? 
blob->prv_diff() : blob->prv_data(); + if (prv_ptr == NULL) { + if (converted_in_fwd) { + // TODO: use previously done conversion on forward - needed for training + NOT_IMPLEMENTED; + } + if(convert) + this->convert_to_prv(const_cast(is_diff ? blob->cpu_diff() : blob->cpu_data())); + else + this->create_reorder_to_prv(const_cast(is_diff ? blob->cpu_diff() : blob->cpu_data())); + if (set_prv_ptr) { + if (is_diff) { + blob->set_prv_diff_descriptor(this->get_shared_ptr(), false); + // below line designated to set correspondent SyncedMemory->_head to HEAD_AT_CPU + // TODO: need to optimize + blob->set_prv_diff_descriptor(NULL); + } else { + blob->set_prv_data_descriptor(this->get_shared_ptr(), false); + // below line designated to set correspondent SyncedMemory->_head to HEAD_AT_CPU + // TODO: need to optimize + blob->set_prv_data_descriptor(NULL); + } + } + return this->reorder_usr2prv(); + } else { + shared_ptr > blob_prv_mkldnn_mem_descr = get_mkldnn_prv_descriptor(blob); + + if (*blob_prv_mkldnn_mem_descr->prv_memory_pd() != *this->prv_memory_pd()) { + // prv in blob and in this descrptor may have different layouts + if(convert) + this->convert_from_extprv(blob_prv_mkldnn_mem_descr->aprimitive()); + else + this->create_reorder_from_extprv(blob_prv_mkldnn_mem_descr->aprimitive()); + return this->reorder_extprv2prv(); + } else if (blob_prv_mkldnn_mem_descr.get() != this) { + VLOG(1) << "layout OK " << blob_prv_mkldnn_mem_descr->name << " == " << this->name; + } + return blob_prv_mkldnn_mem_descr->aprimitive(); + } + NOT_IMPLEMENTED; + return shared_ptr(); +} + +// TODO: explain what is happenning here!!! +template +void MKLDNNMemoryDescriptor::sync_before_read() +{ + // TODO: need to optimize code + if (!this->conversion_needed()) { + return; + } + + // Conversion is needed + const Dtype* prv_ptr = is_diff ? this->_blob->prv_diff() : this->_blob->prv_data(); + if (prv_ptr == NULL) { + this->convert_to_prv(const_cast(is_diff ? 
this->_blob->cpu_diff() : this->_blob->cpu_data())); + // if blob has not prv descriptor then set it to avoid conversions on next iterations + if (is_diff) { + this->_blob->set_prv_diff_descriptor(this->get_shared_ptr(), false); + // Original: + // below line designated to set correspondent SyncedMemory->_head to HEAD_AT_CPU + // TODO: need to optimize + //this->_blob->set_prv_diff_descriptor(NULL); + // It will lead the performance drop in two aspects: + // 1. FWD Conv: Reorder of weights from oihw to OIhw16i16o is executed for every iteration. This should be happening only once per convolution layer including all iterations. + // 2. BWD Conv: Reorder of weights is happening from oihw to OIhw16o16i format, where as expected, the reorder should happen from OIhw16i16o to OIhw16o16i for better performance. + } else { + this->_blob->set_prv_data_descriptor(this->get_shared_ptr(), true); //Change from false to true, suggested by Czaja, Jacek + // Original: + // below line designated to set correspondent SyncedMemory->_head to HEAD_AT_CPU + // TODO: need to optimize + //this->_blob->set_prv_data_descriptor(NULL); + // It will lead the performance drop in two aspects: + // 1. FWD Conv: Reorder of weights from oihw to OIhw16i16o is executed for every iteration. This should be happening only once per convolution layer including all iterations. + // 2. BWD Conv: Reorder of weights is happening from oihw to OIhw16o16i format, where as expected, the reorder should happen from OIhw16i16o to OIhw16o16i for better performance. 
+ } + } else { + shared_ptr > blob_prv_mkldnn_mem_descr = get_mkldnn_prv_descriptor(this->_blob); + + if (*blob_prv_mkldnn_mem_descr->prv_memory_pd() != *this->prv_memory_pd()) { + // prv in blob and in this descrptor may have different layouts + this->convert_from_extprv(blob_prv_mkldnn_mem_descr->aprimitive()); + } else { + if (is_diff) { + this->_blob->mutable_prv_diff(); + } else { + this->_blob->mutable_prv_data(); + } + } + } +} + +template +void MKLDNNMemoryDescriptor::sync_before_write(bool inplace) +{ + // TODO: need to optimize code + if(!inplace) { + if(is_diff) { + this->_blob->set_prv_diff_descriptor(this->get_shared_ptr(), this->conversion_needed() ? false : true); + } else { + this->_blob->set_prv_data_descriptor(this->get_shared_ptr(), this->conversion_needed() ? false : true); + } + } + //Fix me: this->conversion_needed() == false means diff/data is in the CPU, no need to set the prv_diff/data_descriptor + /* + if ((!inplace) && (this->conversion_needed())) { + if (is_diff) { + this->_blob->set_prv_diff_descriptor(this->get_shared_ptr(), false); + } else { + this->_blob->set_prv_data_descriptor(this->get_shared_ptr(), false); + } + } + */ +} + +template +shared_ptr MKLDNNMemoryDescriptor::create_input(Blob * blob, bool set_prv_ptr) +{ + shared_ptr pres; + if (this->conversion_needed()) { + pres = this->get_blob_prv_primitive(blob, set_prv_ptr, false); + } else { + pres.reset(new memory(*this->usr_memory_pd(), const_cast(is_diff ? blob->cpu_diff() : blob->cpu_data()))); + } + return pres; +} + +template +shared_ptr MKLDNNMemoryDescriptor::create_output_memory(Blob * blob, bool inplace) +{ + shared_ptr omem; + if (this->conversion_needed()) { + shared_ptr blob_prv_mem_descriptor = is_diff ? 
+ (blob->get_prv_diff_descriptor()) : (blob->get_prv_data_descriptor()); + + if(blob_prv_mem_descriptor != NULL) { + shared_ptr > current_descr = get_mkldnn_prv_descriptor(blob); + + omem = current_descr->get_prv_memory(); + this->set_prv_memory(omem); + } else { + omem = this->get_prv_memory(); + } + } else { + omem.reset(new memory(*this->usr_memory_pd(), is_diff ? blob->mutable_cpu_diff() : blob->mutable_cpu_data())); + } + return omem; +} + +template +shared_ptr MKLDNNMemoryDescriptor::create_input(bool set_prv_ptr) +{ + // TODO: need to iptimize code + return create_input(this->_blob, set_prv_ptr); +} + +template +shared_ptr MKLDNNMemoryDescriptor::create_output_memory(bool inplace) +{ + // TODO: need to optimize code + shared_ptr omem = create_output_memory(this->_blob); + if(!inplace) { + if(is_diff) { + this->_blob->set_prv_diff_descriptor(this->get_shared_ptr(), this->conversion_needed() ? false : true); + } else { + this->_blob->set_prv_data_descriptor(this->get_shared_ptr(), this->conversion_needed() ? false : true); + } + } + /* + //Fix me: this->conversion_needed() == false means diff/data is in the CPU, no need to set the prv_diff/data_descriptor + if ((!inplace) && (this->conversion_needed())) { + if (is_diff) { + this->_blob->set_prv_diff_descriptor(this->get_shared_ptr(), false); + } else { + this->_blob->set_prv_data_descriptor(this->get_shared_ptr(), false); + } + } + */ + return omem; +} + +template +shared_ptr > get_mkldnn_prv_descriptor(Blob* blob) +{ + shared_ptr blob_prv_mem_descriptor = is_diff ? 
+ (blob->get_prv_diff_descriptor()) : (blob->get_prv_data_descriptor()); + + CHECK_EQ(blob_prv_mem_descriptor->get_descr_type(), PrvMemDescr::PRV_DESCR_MKLDNN); + + shared_ptr > blob_prv_mkldnn_mem_descr = + boost::static_pointer_cast >(blob_prv_mem_descriptor); + CHECK(blob_prv_mkldnn_mem_descr != NULL); + return blob_prv_mkldnn_mem_descr; +} + +template class MKLDNNMemoryDescriptor; +template class MKLDNNMemoryDescriptor; +template class MKLDNNMemoryDescriptor; +template class MKLDNNMemoryDescriptor; +template class MKLDNNMemoryDescriptorBase; +template class MKLDNNMemoryDescriptorBase; + +template +shared_ptr > get_mkldnn_prv_descriptor(Blob* blob); +template +shared_ptr > get_mkldnn_prv_descriptor(Blob* blob); +template +shared_ptr > get_mkldnn_prv_descriptor(Blob* blob); +template +shared_ptr > get_mkldnn_prv_descriptor(Blob* blob); +} // namespace caffe +#endif // #ifdef MKLDNN_SUPPORTED diff --git a/src/caffe/multinode/apply_mn_param.cpp b/src/caffe/multinode/apply_mn_param.cpp new file mode 100644 index 00000000000..29e32079bbf --- /dev/null +++ b/src/caffe/multinode/apply_mn_param.cpp @@ -0,0 +1,341 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef USE_MLSL + +#include +#include +#include + +#include "caffe/common.hpp" +#include "caffe/blob.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/multinode/mlsl.hpp" +#include "caffe/multinode/apply_mn_param.hpp" + +namespace caffe { + +template +void ApplyMultinodeParams(const NetParameter& param, + NetParameter* param_with_mn) { + // save per-layer global parameter mapping being applied later + map net_layer_params; + // aux map for inserting MnActivationLayer + map blob_param_map; + MultinodeParameter mn_param = param.multinode(); + + // Step 1: Identify all the layers having global net params + for (int param_id = 0; param_id < mn_param.model_parallel_size(); param_id++) { + MnModelParallelParameter model_parallel_param = mn_param.model_parallel(param_id); + string layer_from = model_parallel_param.layer_from(); + string layer_to = model_parallel_param.layer_to(); + set marked_blobs; + for (int i = 0; i < param.layer_size(); i++) { + const LayerParameter& layer_param = param.layer(i); + bool layer_covered_by_global = false; + if (layer_param.name() == layer_from || + layer_param.name() == layer_to) { + layer_covered_by_global = true; + } else { + for (int j = 0; j < layer_param.bottom_size(); j++) { + if (marked_blobs.find(layer_param.bottom(j)) != + marked_blobs.end()) { + layer_covered_by_global = true; + break; + } + } + } + if (layer_covered_by_global) { + for (int j = 0; j < layer_param.top_size(); j++) { + marked_blobs.insert(layer_param.top(j)); + } + net_layer_params[layer_param.name()] = model_parallel_param; + // For cross-channel LRN, we assume there is always one model part + // for simple implementation. 
+ if (layer_param.type() == "LRN" && + layer_param.lrn_param().norm_region() == + LRNParameter_NormRegion_ACROSS_CHANNELS) { + net_layer_params[layer_param.name()].set_model_parts(1); + } + } + if (layer_param.name() == layer_to || + layer_param.top_size() == 0) { + break; + } + } + } + + // Step 2: Identify the places to insert activation layers + map blob_mdg_map; + for (int i = 0; i < param.layer_size(); i++) { + const LayerParameter& layer_param = param.layer(i); + string layer_name = layer_param.name(); + string layer_type = layer_param.type(); + const MultinodeLayerParameter& mn_layer_param = layer_param.multinode(); + int num_nodes = mn_layer_param.num_nodes(); + int model_parts = mn_layer_param.model_parts(); + if (net_layer_params.find(layer_name) != net_layer_params.end()) { + MnModelParallelParameter model_parallel_param = + net_layer_params[layer_name]; + num_nodes = model_parallel_param.num_nodes(); + model_parts = model_parallel_param.model_parts(); + } + for (int j = 0; j < layer_param.bottom_size(); j++) { + string bottom_name = layer_param.bottom(j); + if (blob_mdg_map.find(bottom_name) != blob_mdg_map.end()) { + MnActivationParameter mdg = blob_mdg_map[bottom_name]; + mdg.set_num_nodes_out(num_nodes); + mdg.set_model_parts_out(model_parts); + int num_nodes_in = mdg.num_nodes_in(); + int num_nodes_out = mdg.num_nodes_out(); + int model_parts_in = mdg.model_parts_in(); + int model_parts_out = mdg.model_parts_out(); + mn::GetCanonicalMnParam(num_nodes_in, model_parts_in); + mn::GetCanonicalMnParam(num_nodes_out, model_parts_out); + if ((model_parts_out > 1 && + (layer_type == "Convolution" || layer_type == "InnerProduct" || + layer_type == "Accuracy" || layer_type == "SoftmaxWithLoss")) || + num_nodes_in != num_nodes_out || + model_parts_in != model_parts_out) { + string layer_blob_name = layer_name + "/" + layer_param.bottom(j); + if (layer_type == "Accuracy" || layer_type == "SoftmaxWithLoss") { + mdg.set_need_reduce(false); + } + 
blob_param_map[layer_blob_name] = mdg; + } + blob_mdg_map.erase(bottom_name); + } + } + for (int j = 0; j < layer_param.top_size(); j++) { + MnActivationParameter mdg; + mdg.set_num_nodes_in(num_nodes); + mdg.set_model_parts_in(model_parts); + blob_mdg_map[layer_param.top(j)] = mdg; + } + } + + // Step 3: Create the new net, apply global mn setting to each layer, + // insert activation layers if needed + param_with_mn->CopyFrom(param); + param_with_mn->clear_layer(); + for (int i = 0; i < param.layer_size(); i++) { + const LayerParameter& orig_layer_param = param.layer(i); + map updated_blob_idx_to_name; + for (int j = 0; j < orig_layer_param.bottom_size(); j++) { + const string& bottom_blob_name = orig_layer_param.bottom(j); + string layer_blob_name = orig_layer_param.name() + "/" + bottom_blob_name; + if (blob_param_map.find(layer_blob_name) != blob_param_map.end()) { + LayerParameter* mn_activation_layer_param = + param_with_mn->add_layer(); + string new_name = "mn_activation/" + layer_blob_name; + mn_activation_layer_param->Clear(); + mn_activation_layer_param->set_name(new_name); + mn_activation_layer_param->set_type("MnActivation"); + mn_activation_layer_param->add_bottom(bottom_blob_name); + mn_activation_layer_param->add_top(new_name); + MnActivationParameter *mn_activation_param = + mn_activation_layer_param->mutable_mn_activation_param(); + *mn_activation_param = blob_param_map[layer_blob_name]; + updated_blob_idx_to_name[j] = new_name; + } + } + LayerParameter* layer_param = param_with_mn->add_layer(); + layer_param->CopyFrom(orig_layer_param); + // Apply global mn setting + if (net_layer_params.find(layer_param->name()) != net_layer_params.end()) { + MultinodeLayerParameter *mn_layer_param = layer_param->mutable_multinode(); + const MnModelParallelParameter &mn_param = net_layer_params[layer_param->name()]; + mn_layer_param->set_num_nodes(mn_param.num_nodes()); + mn_layer_param->set_model_parts(mn_param.model_parts()); + } + const 
MultinodeLayerParameter &mn_layer_param = layer_param->multinode(); + int num_nodes = mn_layer_param.num_nodes(); + int model_parts = mn_layer_param.model_parts(); + mn::GetCanonicalMnParam(num_nodes, model_parts); + if (model_parts > 1) { + // TODO: support transpose + // TODO: support undividible num_output + if (layer_param->type() == "Convolution") { + ConvolutionParameter *conv_param = layer_param->mutable_convolution_param(); + int new_num_output = conv_param->num_output() / model_parts; + CHECK_EQ(conv_param->num_output(), model_parts * new_num_output) + << "Convolution layer " << layer_param->name() + << ": Undividible num_output " << conv_param->num_output() + << " by model_parts " << model_parts; + conv_param->set_num_output(new_num_output); + } else if (layer_param->type() == "InnerProduct") { + InnerProductParameter *ip_param = layer_param->mutable_inner_product_param(); + int new_num_output = ip_param->num_output() / model_parts; + CHECK_EQ(ip_param->num_output(), model_parts * new_num_output) + << "InnerProduct layer " << layer_param->name() + << ": Undividible num_output " << ip_param->num_output() + << " by model_parts " << model_parts; + ip_param->set_num_output(ip_param->num_output() / model_parts); + CHECK(!ip_param->transpose()) << "Model parallelism does not support transpose!"; + } + for (int j = 0; j < layer_param->blobs_size(); j++) { + Blob blob; + Blob new_blob; + const BlobProto &proto = layer_param->blobs(j); + blob.FromProto(proto); + vector shape = blob.shape(); + new_blob.Reshape(shape); + if (shape.size() > 0) { + if (proto.has_num() || proto.has_channels() || + proto.has_height() || proto.has_width()) { + // deprecated 4D blob + if (layer_param->type() == "InnerProduct") { + CHECK_EQ(shape.size(), 4); + CHECK_EQ(shape[0], 1); + CHECK_EQ(shape[1], 1); + if (shape[2] == 1) { + shape.resize(1); + shape[0] = blob.shape(3); + } else { + shape.resize(2); + shape[0] = blob.shape(2); + shape[1] = blob.shape(3); + } + 
new_blob.Reshape(shape); + } + } + int count = blob.count() / model_parts; + int offset = count * (mn::get_node_id() % model_parts); + shape[0] /= model_parts; + new_blob.Reshape(shape); + caffe_copy(count, blob.cpu_data() + offset, new_blob.mutable_cpu_data()); + caffe_copy(count, blob.cpu_diff() + offset, new_blob.mutable_cpu_diff()); + BlobProto *updated_blob_proto = layer_param->mutable_blobs(j); + updated_blob_proto->Clear(); + new_blob.ToProto(updated_blob_proto, true); + } + } + } + for (int j = 0; j < orig_layer_param.bottom_size(); j++) { + if (updated_blob_idx_to_name.find(j) != updated_blob_idx_to_name.end()) { + layer_param->set_bottom(j, updated_blob_idx_to_name[j]); + } + } + } +} + +template +void CopyMultinodeParamsFromNet(const Net *net, NetParameter *param) { + // set per-layer multi-node parameters before adjusting net proto + for (int i = 0; i < param->layer_size(); i++) { + LayerParameter* source_layer = param->mutable_layer(i); + const string& source_layer_name = source_layer->name(); + int target_layer_id = 0; + while (target_layer_id != net->layer_names().size() && + net->layer_names()[target_layer_id] != source_layer_name) { + ++target_layer_id; + } + if (target_layer_id == net->layer_names().size()) continue; + *source_layer->mutable_multinode() = + net->layers()[target_layer_id]->layer_param().multinode(); + } +} + +template +void RevertMultinodeParams(NetParameter* param, bool write_diff) { + NetParameter orig_param; + orig_param.CopyFrom(*param); + param->clear_layer(); + for (int i = 0; i < orig_param.layer_size(); i++) { + const LayerParameter& orig_layer_param = orig_param.layer(i); + if (orig_layer_param.type() == "MnActivation") continue; + LayerParameter* layer_param = param->add_layer(); + layer_param->CopyFrom(orig_layer_param); + layer_param->clear_bottom(); + for (int j = 0; j < orig_layer_param.bottom_size(); j++) { + string bottom_name = orig_layer_param.bottom(j); + string prefix = "mn_activation/" + orig_layer_param.name() 
+ "/"; + if (bottom_name.find(prefix) == 0) { + bottom_name = bottom_name.substr(prefix.size()); + } + layer_param->add_bottom(bottom_name); + } + const MultinodeLayerParameter &mn_layer_param = orig_layer_param.multinode(); + int num_nodes = mn_layer_param.num_nodes(); + int model_parts = mn_layer_param.model_parts(); + mn::GetCanonicalMnParam(num_nodes, model_parts); + if (model_parts > 1) { + if (layer_param->type() == "Convolution") { + ConvolutionParameter *conv_param = layer_param->mutable_convolution_param(); + conv_param->set_num_output(conv_param->num_output() * model_parts); + } else if (layer_param->type() == "InnerProduct") { + InnerProductParameter *ip_param = layer_param->mutable_inner_product_param(); + ip_param->set_num_output(ip_param->num_output() * model_parts); + CHECK(!ip_param->transpose()) << "Model parallelism does not support transpose!"; + } + layer_param->clear_blobs(); + for (int j = 0; j < orig_layer_param.blobs_size(); j++) { + BlobProto *blob_proto = layer_param->add_blobs(); + Blob orig_blob; + orig_blob.FromProto(orig_layer_param.blobs(j)); + vector shape = orig_blob.shape(); + Blob new_blob; + if (shape.size() > 0) { + mn::Distribution *distrib = mn::get_distrib(num_nodes/model_parts, model_parts); + int count = orig_blob.count(); + shape[0] *= model_parts; + new_blob.Reshape(shape); + distrib->allgather( + orig_blob.mutable_cpu_data(), count, new_blob.mutable_cpu_data()); + if (write_diff) { + distrib->allgather( + orig_blob.mutable_cpu_diff(), count, new_blob.mutable_cpu_diff()); + } + } + new_blob.ToProto(blob_proto, write_diff); + } + } + layer_param->mutable_multinode()->Clear(); + } +} + +template void ApplyMultinodeParams(const NetParameter& param, + NetParameter* param_with_mn); +template void ApplyMultinodeParams(const NetParameter& param, + NetParameter* param_with_mn); +template void CopyMultinodeParamsFromNet(const Net *net, NetParameter *param); +template void CopyMultinodeParamsFromNet(const Net *net, NetParameter 
*param); +template void RevertMultinodeParams(NetParameter* param, bool write_diff); +template void RevertMultinodeParams(NetParameter* param, bool write_diff); +} // namespace caffe + +#endif // USE_MLSL diff --git a/src/caffe/multinode/mlsl.cpp b/src/caffe/multinode/mlsl.cpp new file mode 100644 index 00000000000..8a0f772af0e --- /dev/null +++ b/src/caffe/multinode/mlsl.cpp @@ -0,0 +1,92 @@ +/* + * All modification made by Intel Corporation: © 2016 Intel Corporation + * + * All contributions by the University of California: + * Copyright (c) 2014, 2015, The Regents of the University of California (Regents) + * All rights reserved. + * + * All other contributions: + * Copyright (c) 2014, 2015, the respective contributors + * All rights reserved. + * For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + * + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Intel Corporation nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifdef USE_MLSL + +#include +#include "boost/thread/mutex.hpp" +#include "caffe/multinode/mlsl.hpp" + +namespace caffe { + namespace mn { + boost::mutex distrib_lock; + std::map, boost::shared_ptr> *distrib_map; + + void init(int* argc, char **argv[]) { + static class initialize { + public: + initialize(int* argc, char** argv[]) { + MLSL::Environment::GetEnv().Init(argc, argv); + distrib_map = + new std::map, boost::shared_ptr>(); + } + ~initialize() { + delete distrib_map; + MLSL::Environment::GetEnv().Finalize(); + } + } __init{ argc, argv }; + } + + shared_ptr create_distrib( + int dataParts, int modelParts, int dataColor, int modelColor, + int dataColorMax, int modelColorMax) { + return shared_ptr( + new Distribution(dataParts, modelParts, dataColor, modelColor, + dataColorMax, modelColorMax)); + } + + Distribution * get_distrib(int dataParts, int modelParts) { + boost::mutex::scoped_lock l(distrib_lock); + std::pair key = std::make_pair(dataParts, modelParts); + if (distrib_map->find(key) == distrib_map->end()) { + int node_id = get_node_id(); + int num_nodes = get_nodes_count(); + int modelColor = node_id / modelParts; + int dataColor = node_id % (num_nodes / dataParts); + (*distrib_map)[key] = boost::shared_ptr( + new Distribution(dataParts, modelParts, dataColor, modelColor)); + } + return (*distrib_map)[key].get(); + } + + Distribution * get_distrib() { + return get_distrib(get_nodes_count(), 1); + } + } +} + +#endif /* 
USE_MLSL */ diff --git a/src/caffe/multinode/mn_activation_layer.cpp b/src/caffe/multinode/mn_activation_layer.cpp new file mode 100644 index 00000000000..7b4c7b8ca32 --- /dev/null +++ b/src/caffe/multinode/mn_activation_layer.cpp @@ -0,0 +1,254 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef USE_MLSL + +#include "caffe/multinode/mn_activation_layer.hpp" +#include "caffe/multinode/mlsl.hpp" + +namespace caffe { + +template +void MnActivationLayer::LayerSetUp(const vector*>& bottom, + const vector*>& top) { + MnActivationParameter param = this->layer_param_.mn_activation_param(); + num_nodes_in_ = param.num_nodes_in(); + num_nodes_out_ = param.num_nodes_out(); + model_parts_in_ = param.model_parts_in(); + model_parts_out_ = param.model_parts_out(); + mn::GetCanonicalMnParam(num_nodes_in_, model_parts_in_); + mn::GetCanonicalMnParam(num_nodes_out_, model_parts_out_); + data_parts_in_ = num_nodes_in_ / model_parts_in_; + data_parts_out_ = num_nodes_out_ / model_parts_out_; + + CHECK_EQ(num_nodes_in_, data_parts_in_ * model_parts_in_); + CHECK_EQ(num_nodes_out_, data_parts_out_ * model_parts_out_); + CHECK(data_parts_in_ != data_parts_out_ || + model_parts_in_ != model_parts_out_ || + model_parts_in_ > 1); + + distrib_in_ = mn::get_distrib(data_parts_in_, model_parts_in_); + distrib_out_ = mn::get_distrib(data_parts_out_, model_parts_out_); + + if (data_parts_in_ != data_parts_out_) { + int num_nodes = mn::get_nodes_count(); + int node_id = mn::get_node_id(); + int data_parts_max = std::max(data_parts_in_, data_parts_out_); + int data_parts_min = std::min(data_parts_in_, data_parts_out_); + int num_data_groups = num_nodes / data_parts_min; + // make sure data_color in-use starts from 0 and ends at data_parts_min-1 + int data_color = 
node_id / num_data_groups + + (node_id % (num_nodes / data_parts_max)) * data_parts_min; + LOG(INFO) << "Create data_in_out distribution: " + << data_parts_in_ << " ==> " << data_parts_out_ + << ", (" << data_parts_max / data_parts_min + << ",1), data color: " << data_color + << ", data color max: " << data_parts_min-1; + distrib_data_in_out_ = mn::create_distrib( + data_parts_max / data_parts_min, 1, data_color, MLSL_DEFAULT_COLOR, + data_parts_min-1, MLSL_DEFAULT_COLOR); + } +} + +template +void MnActivationLayer::Reshape(const vector*>& bottom, + const vector*>& top) { + const vector &bottom_shape = bottom[0]->shape(); + vector top_shape = bottom[0]->shape(); + // re-group and distribute the data parts + top_shape[0] = bottom_shape[0] * data_parts_in_ / data_parts_out_; + if (top_shape.size() > 1) { + // gather all the model parts split from previous output + top_shape[1] = bottom_shape[1] * model_parts_in_; + } + top[0]->Reshape(top_shape); + top_reduce_buf_.ReshapeLike(*top[0]); + vector bottom_gather_shape = bottom[0]->shape(); + if (bottom_shape.size() > 1) { + bottom_gather_shape[1] = bottom_shape[1] * model_parts_in_; + } + bottom_gather_buf_.Reshape(bottom_gather_shape); + bottom_gather_work_buf_.Reshape(bottom_gather_shape); +} + +template +void MnActivationLayer::Forward_cpu(const vector*>& bottom, + const vector*>& top) { + Dtype *bottom_work_buf = (Dtype*)bottom[0]->cpu_data(); + if (model_parts_in_ > 1) { + distrib_in_->gather( + (Dtype*)bottom[0]->cpu_data(), bottom[0]->count(), + bottom_gather_buf_.mutable_cpu_data()); + if (data_parts_in_ == data_parts_out_) { + bottom_work_buf = top[0]->mutable_cpu_data(); + } else { + bottom_work_buf = bottom_gather_work_buf_.mutable_cpu_data(); + } + Unpack( + bottom_gather_buf_.cpu_data(), + bottom[0]->shape(0), bottom[0]->shape(1), bottom[0]->count(2), + model_parts_in_, + bottom_work_buf); + } + if (data_parts_in_ > data_parts_out_) { + distrib_data_in_out_->gather( + bottom_work_buf, bottom[0]->count() * 
model_parts_in_, + top[0]->mutable_cpu_data()); + } else if (data_parts_in_ < data_parts_out_) { + distrib_data_in_out_->scatter( + bottom_work_buf, top[0]->mutable_cpu_data(), + top[0]->count()); + } else { + if (bottom_work_buf != top[0]->mutable_cpu_data()) { + caffe_copy( + top[0]->count(), bottom_work_buf, top[0]->mutable_cpu_data()); + } + } + distrib_out_->bcast( + top[0]->mutable_cpu_data(), top[0]->count()); +} + +template +bool MnActivationLayer::Backward_cpu_fast(const vector*>& top, + const vector*>& bottom) { + if (num_nodes_in_ == num_nodes_out_ && + model_parts_in_ == model_parts_out_ && + model_parts_in_ > 1) { + Pack(top[0]->cpu_diff(), bottom_gather_work_buf_.mutable_cpu_data(), + bottom[0]->shape(0), bottom[0]->shape(1), bottom[0]->count(2), + model_parts_in_); + distrib_out_->reducescatter( + bottom_gather_work_buf_.mutable_cpu_data(), + bottom[0]->mutable_cpu_diff(), bottom[0]->count()); + return true; + } + return false; +} + +template +void MnActivationLayer::Backward_cpu(const vector*>& top, + const vector& propagate_down, const vector*>& bottom) { + if (propagate_down[0]) { + if (Backward_cpu_fast(top, bottom)) return; + Dtype *top_work_buf = (Dtype*)top[0]->cpu_diff(); + if (model_parts_out_ > 1 && + this->layer_param_.mn_activation_param().need_reduce()) { + distrib_out_->reduce( + (Dtype*)top[0]->cpu_diff(), top_reduce_buf_.mutable_cpu_data(), + top_reduce_buf_.count()); + top_work_buf = top_reduce_buf_.mutable_cpu_data(); + } + Dtype *bottom_work_buf = bottom[0]->mutable_cpu_diff(); + if (model_parts_in_ > 1) { + bottom_work_buf = bottom_gather_buf_.mutable_cpu_data(); + } + if (data_parts_in_ > data_parts_out_) { + distrib_data_in_out_->scatter( + top_work_buf, bottom_work_buf, + bottom_gather_buf_.count()); + } else if (data_parts_in_ < data_parts_out_) { + distrib_data_in_out_->gather( + top_work_buf, top[0]->count(), + bottom_work_buf); + } else { + if (model_parts_in_ > 1) { + bottom_work_buf = top_work_buf; + } else { + 
caffe_copy( + bottom[0]->count(), top_work_buf, bottom_work_buf); + } + } + if (model_parts_in_ > 1) { + Pack(bottom_work_buf, bottom_gather_work_buf_.mutable_cpu_data(), + bottom[0]->shape(0), bottom[0]->shape(1), bottom[0]->count(2), + model_parts_in_); + distrib_in_->scatter( + bottom_gather_work_buf_.mutable_cpu_data(), + bottom[0]->mutable_cpu_diff(), bottom[0]->count()); + } + } +} + +template +void MnActivationLayer::Unpack(const Dtype *src, int N, int C, int HW, int numC, Dtype *dst) { + int dstC = numC * C; +#pragma omp parallel for collapse (2) + for (int iN = 0; iN < N; iN++) { + for (int iC = 0; iC < dstC; iC++) { + int iSrc = iC / C; + int iSrcC = iC % C; + for (int iHW = 0; iHW < HW; iHW++) { + dst[iN*dstC*HW + iC*HW + iHW] = + src[iSrc*N*C*HW + iN*C*HW + iSrcC*HW + iHW]; + } + } + } +} + +template +void MnActivationLayer::Pack(const Dtype *src, Dtype *dst, int N, int C, int HW, int numC) { + int srcC = numC * C; + for (int iDst = 0; iDst < numC; iDst++) { +#pragma omp parallel for collapse (2) + for (int iN = 0; iN < N; iN++) { + for (int iC = 0; iC < C; iC++) { + int iSrcC = iDst * C + iC; + for (int iHW = 0; iHW < HW; iHW++) { + dst[iDst*N*C*HW + iN*C*HW + iC*HW + iHW] = + src[iN*srcC*HW + iSrcC*HW + iHW]; + } + } + } + } +} + +template +bool MnActivationLayer::Bypass(const vector*>& bottom, + const vector*>& top) { + return distrib_in_->get_group_id() > 0 && distrib_out_->get_group_id() > 0; +} + +#ifdef CPU_ONLY +STUB_GPU(MnActivationLayer); +#endif + +INSTANTIATE_CLASS(MnActivationLayer); +REGISTER_LAYER_CLASS(MnActivation); +} // namespace caffe + +#endif + diff --git a/src/caffe/multinode/multi_solver.cpp b/src/caffe/multinode/multi_solver.cpp new file mode 100644 index 00000000000..13ad8da2b25 --- /dev/null +++ b/src/caffe/multinode/multi_solver.cpp @@ -0,0 +1,205 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the 
University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef USE_MLSL + +#include + +#include +#include + +#include "caffe/multinode/multi_solver.hpp" + +namespace caffe { + +#ifdef CAFFE_PER_LAYER_TIMINGS +#define LAYER_TIMING_START() do { \ + root_solver_->timer.Start(); \ +}while(0) + +#define LAYER_TIMING_STOP(name, index) do { \ + root_solver_->name##_time_per_layer[index] += root_solver_->timer.MicroSeconds(); \ +}while(0) +#else +#define LAYER_TIMING_START() + +#define LAYER_TIMING_STOP(name,index) +#endif + +template +inline bool MultiSolver::IsSkipWaitGradient(int layer_id) { + Net& net = *root_solver_->net(); + const std::vector>>& layers{ net.layers() }; + const std::vector& layer_need_backward{ net.layer_need_backward() }; + + if (!layer_need_backward[layer_id] || ((layers[layer_id]->layerOp != nullptr) + && !layers[layer_id]->layerOp->HasParameterSets())) { + DLOG(INFO) << "ForwardBackwardImpl: no need for apply_updates for layer # " + << layer_id << ", skip on_delwt_wait, apply_updates, on_wtinc_ready"; + return true; + } + return false; +} + +template +inline void MultiSolver::WaitAndUpdateGradient(int layer_id) { + LAYER_TIMING_START(); + for (int j = 0; j < callbacks_.size(); ++j) { + callbacks_[j]->on_delwt_wait(layer_id); + } + LAYER_TIMING_STOP(waitcomm, layer_id); + +#ifdef FW_OVERLAP_OPT + if (layer_finished_flags_[layer_id]) { +#endif + LAYER_TIMING_START(); + for (int j = 0; j < callbacks_.size(); ++j) { + callbacks_[j]->apply_updates(layer_id); + } + LAYER_TIMING_STOP(update, layer_id); +#ifdef FW_OVERLAP_OPT + } +#endif +} + +template +Dtype MultiSolver::ForwardBackwardImpl(bool first, bool last) { + Dtype loss = 0; + Net& net = *root_solver_->net(); + const std::vector>>& layers{ net.layers() }; + const std::vector& layer_need_backward{ net.layer_need_backward() }; + + for (int i = 0; i < layers.size(); ++i) { +#ifdef FW_OVERLAP_OPT + if (first && IsSkipWaitGradient(i) == false) { + while (layer_finished_flags_[i] == false) { + WaitAndUpdateGradient(i); + if (layer_finished_flags_[i]) 
+ break; + + for (int k=i+1; k= 0; --i) { + if (!layer_need_backward[i]) { + continue; + } + + LAYER_TIMING_START(); + net.BackwardFromTo(i, i); + LAYER_TIMING_STOP(backward, i); + + if (last && (layers[i]->layerOp != nullptr) + && layers[i]->layerOp->HasParameterSets()) { + LAYER_TIMING_START(); + for (int j = 0; j < callbacks_.size(); ++j) { + callbacks_[j]->on_iter_finished(i); + } + LAYER_TIMING_STOP(startcomm, i); + } + } + +#ifdef FW_OVERLAP_OPT + int iter = root_solver_->iter(); + int max_iter = root_solver_->param().max_iter(); + bool test = (root_solver_->param().test_interval() + && ((iter + 1) % root_solver_->param().test_interval() == 0)); + if (last && (test || (iter == max_iter - 1))) { + int finished_count = 0; + while (finished_count < layers.size()) { +#else + if (last) { +#endif + for (int i = 0; i < layers.size(); ++i) { + if (IsSkipWaitGradient(i)) { +#ifdef FW_OVERLAP_OPT + finished_count++; + layer_finished_flags_[i] = true; +#endif + continue; + } +#ifdef FW_OVERLAP_OPT + if (layer_finished_flags_[i]) + continue; +#endif + + WaitAndUpdateGradient(i); +#ifdef FW_OVERLAP_OPT + if (layer_finished_flags_[i]) + finished_count++; +#endif + } +#ifdef FW_OVERLAP_OPT + } +#endif + } + + DLOG(WARNING) << "iter " << root_solver_->iter() << ", loss " << loss; + return loss; +} + +template +Dtype MultiSolver::ForwardBackward() { + Dtype loss = 0; + root_solver_->net()->ClearParamDiffs(); + for (int i = 0; i < iter_size; ++i) { + loss += ForwardBackwardImpl( + (i == 0), (i + 1 == iter_size)); + } + return loss / iter_size; +} + +INSTANTIATE_CLASS(MultiSolver); + +} // namespace caffe + +#endif /* USE_MLSL */ diff --git a/src/caffe/multinode/multi_sync.cpp b/src/caffe/multinode/multi_sync.cpp new file mode 100644 index 00000000000..448172c7b60 --- /dev/null +++ b/src/caffe/multinode/multi_sync.cpp @@ -0,0 +1,75 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 
2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef USE_MLSL + +#include "caffe/multinode/multi_sync.hpp" + +namespace caffe { + +template +MultiSync::MultiSync(shared_ptr > root_solver) + : solver(boost::make_shared >(root_solver)), + layers(root_solver->net()->layers()), + net(root_solver->net()), + net_params(root_solver->net()->learnable_params()) { + root_solver->param().set_disabled_update(true); + + if (root_solver->iter() == 0) + root_solver->set_iter(1); + + layer_param_ids.resize(layers.size()); +#ifdef FW_OVERLAP_OPT + param_ids_finished_flags.resize(layers.size()); +#endif + + for (int layer_id = 0; layer_id < layers.size(); layer_id++) { + shared_ptr > layer = layers[layer_id]; + + /* cache param ids */ + layer_param_ids[layer_id] = net->get_layer_learnable_param_ids(layer_id); +#ifdef FW_OVERLAP_OPT + param_ids_finished_flags[layer_id].resize(layer_param_ids[layer_id].size()); + std::fill(param_ids_finished_flags[layer_id].begin(), param_ids_finished_flags[layer_id].end(), false); +#endif + } +} + + INSTANTIATE_CLASS(MultiSync); +} // namespace caffe + +#endif /* USE_MLSL */ diff --git a/src/caffe/net.cpp b/src/caffe/net.cpp index 644cb7e97ee..0a8aeb98129 100644 --- a/src/caffe/net.cpp +++ b/src/caffe/net.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include #include @@ -7,17 +44,26 @@ #include "hdf5.h" +#include "boost/algorithm/string.hpp" + #include "caffe/common.hpp" #include "caffe/layer.hpp" #include "caffe/net.hpp" #include "caffe/parallel.hpp" #include "caffe/proto/caffe.pb.h" +#include "caffe/util/cpu_info.hpp" #include "caffe/util/hdf5.hpp" #include "caffe/util/insert_splits.hpp" #include "caffe/util/math_functions.hpp" +#include "caffe/util/performance.hpp" #include "caffe/util/upgrade_proto.hpp" #include "caffe/test/test_caffe_main.hpp" +#include "caffe/multinode/mlsl.hpp" +#include "caffe/multinode/apply_mn_param.hpp" +#include "caffe/util/remove_batch_norm.hpp" + +PERFORMANCE_CREATE_MONITOR(); namespace caffe { @@ -30,7 +76,7 @@ Net::Net(const NetParameter& param, const Net* root_net) template Net::Net(const string& param_file, Phase phase, const int level, const vector* stages, - const Net* root_net) + const Net* root_net, std::string engine) : root_net_(root_net) { NetParameter param; ReadNetParamsFromTextFileOrDie(param_file, ¶m); @@ -42,6 +88,8 @@ Net::Net(const string& param_file, Phase phase, } } param.mutable_state()->set_level(level); + if (engine != "") + param.set_engine(engine); Init(param); } @@ -49,18 +97,76 @@ template void Net::Init(const NetParameter& in_param) { CHECK(Caffe::root_solver() || root_net_) << "root_net_ needs to be set for all non-root solvers"; + +#ifdef _OPENMP + static bool executed = false; + if (!executed) { + if (Caffe::mode() == Caffe::GPU) { + caffe::cpu::OpenMpManager::setGpuEnabled(); + } else { + caffe::cpu::OpenMpManager::setGpuDisabled(); + } + + caffe::cpu::OpenMpManager::bindOpenMpThreads(); + caffe::cpu::OpenMpManager::printVerboseInformation(); + } +#endif + // Set phase from the state. phase_ = in_param.state().phase(); // Filter layers based on their include/exclude rules and // the current NetState. 
NetParameter filtered_param; FilterNet(in_param, &filtered_param); - LOG_IF(INFO, Caffe::root_solver()) - << "Initializing net from parameters: " << std::endl - << filtered_param.DebugString(); + + // Backward compatibility for obsolete compile-time flags +#ifdef USE_MKL2017_AS_DEFAULT_ENGINE + if (filtered_param.engine() == "") + filtered_param.set_engine("MKL2017"); +#endif +#ifdef USE_MKLDNN_AS_DEFAULT_ENGINE + if (filtered_param.engine() == "") + filtered_param.set_engine("MKLDNN"); +#endif + engine_name_ = filtered_param.engine(); + + NetParameter& param = filtered_param; // Create a copy of filtered_param with splits added where necessary. - NetParameter param; - InsertSplits(filtered_param, ¶m); + NetParameter param_with_splits; + InsertSplits(param, ¶m_with_splits); + param = param_with_splits; + + NetParameter compiled_param; + // Transform Net (merge layers etc.) improve computational performance + CompileNet(param, &compiled_param); + param = compiled_param; + this->bn_scale_remove_ = param.compile_net_state().bn_scale_remove(); + this->bn_scale_merge_ = param.compile_net_state().bn_scale_merge(); + int kept_bn_layers_num = param.compile_net_state().kept_bn_layers_size(); + for (int idx = 0; idx < kept_bn_layers_num; ++idx) { + this->kept_bn_layers_.push_back(param.compile_net_state().kept_bn_layers(idx)); + } + +#ifdef USE_MLSL + NetParameter param_with_mn; + if (mn::is_multinode()) { + ApplyMultinodeParams(param, ¶m_with_mn); + param = param_with_mn; + } +#endif + + // Printing processed model + if (Caffe::root_solver()) { + LOG(INFO) << "Initializing net from parameters: " << std::endl; + LOG(INFO).flush(); + fflush(0); + param.PrintDebugString(); + fflush(0); + } + +#ifdef USE_MLSL + int global_batch_size = -1; +#endif // Basically, build all the layers and set up their connections. name_ = param.name(); map blob_name_to_idx; @@ -83,6 +189,18 @@ void Net::Init(const NetParameter& in_param) { } // Setup layer. 
const LayerParameter& layer_param = param.layer(layer_id); + if (param.engine() != "") { + if (param.layer(layer_id).engine() == "") { + param.mutable_layer(layer_id)->set_engine(param.engine()); + } + else { + if ((!param.layer(layer_id).engine().compare("MKL2017") && !param.engine().compare("MKLDNN")) + || (!param.layer(layer_id).engine().compare("MKLDNN") && !param.engine().compare("MKL2017"))) { + param.mutable_layer(layer_id)->set_engine(param.engine()); + } + } + } + if (layer_param.propagate_down_size() > 0) { CHECK_EQ(layer_param.propagate_down_size(), layer_param.bottom_size()) @@ -133,6 +251,47 @@ void Net::Init(const NetParameter& in_param) { AppendTop(param, layer_id, num_top, NULL, NULL); } } + +#ifdef USE_MLSL + if (!layer_param.type().compare("Data") || + !layer_param.type().compare("DummyData") || + !layer_param.type().compare("ImageData") || + !layer_param.type().compare("HDF5Data") || + !layer_param.type().compare("MemoryData") || + !layer_param.type().compare("Input") || + !layer_param.type().compare("WindowData")) { + + // FIXME: retrieve batch_size from top[0]->shape[0] when MLSL stuff will be moved from LayerSetUp + //int batch_size = top_vecs_[layer_id][0]->shape(0); + + int batch_size = 0; + if (!layer_param.type().compare("Data")) + batch_size = layer_param.data_param().batch_size(); + else if (!layer_param.type().compare("DummyData")) + batch_size = layer_param.dummy_data_param().shape(0).dim(0); + else if (!layer_param.type().compare("ImageData")) + batch_size = layer_param.image_data_param().batch_size(); + else if (!layer_param.type().compare("HDF5Data")) + batch_size = layer_param.hdf5_data_param().batch_size(); + else if (!layer_param.type().compare("MemoryData")) + batch_size = layer_param.memory_data_param().batch_size(); + else if (!layer_param.type().compare("WindowData")) + batch_size = layer_param.window_data_param().batch_size(); + else if (!layer_param.type().compare("Input")) + batch_size = 
layer_param.input_param().shape(0).dim(0); + + if (caffe::TRAIN == param.state().phase()) { + LOG(WARNING) << "SetMinibatchSize " << batch_size; + if (global_batch_size < 0) { + global_batch_size = batch_size * mn::get_nodes_count(); + mn::train::set_global_minibatch_size(global_batch_size); + } else { + CHECK_EQ(global_batch_size, batch_size * mn::get_nodes_count()); + } + } + } +#endif /* USE_MLSL */ + // After this layer is connected, set it up. if (share_from_root) { // Set up size of top blobs using root_net_ @@ -188,6 +347,7 @@ void Net::Init(const NetParameter& in_param) { } } } + // Go through the net backwards to determine which blobs contribute to the // loss. We can skip backward computation for blobs that don't contribute // to the loss. @@ -280,6 +440,29 @@ void Net::Init(const NetParameter& in_param) { } ShareWeights(); debug_info_ = param.debug_info(); + +#ifdef USE_MLSL + if (this->phase_ == TRAIN) { + for (int layer_id = 0; layer_id < param.layer_size(); ++layer_id) { + boost::shared_ptr> layer{ layers_[layer_id] }; + if ((layer->layerOp != nullptr) && layer->layerOp->HasParameterSets()) { + vector param_ids = get_layer_learnable_param_ids(layer_id); + for (int i = 0; i < param_ids.size(); i++) { + int mlsl_weight_size = layer->layerOp->GetParameterSet(i)->GetLocalKernelCount() + * layer->layerOp->GetParameterSet(i)->GetKernelSize() + * sizeof(Dtype); + int caffe_weight_size = learnable_params_[param_ids[i]]->count() * sizeof(Dtype); + if (mlsl_weight_size < caffe_weight_size) + LOG(FATAL) << "InitNet: ERROR: check weight sizes for layer " << layer->type() << ", layer_id " << layer_id + << ", param_id " << param_ids[i] + << ", MLSL weight size in bytes " << mlsl_weight_size + << ", CAFFE weight size in bytes " << caffe_weight_size; + } + } + } + } +#endif /* USE_MLSL */ + LOG_IF(INFO, Caffe::root_solver()) << "Network initialization done."; } @@ -314,6 +497,286 @@ void Net::FilterNet(const NetParameter& param, } template +void Net::CompileNet(const 
NetParameter& param, + NetParameter* param_compiled) { + + NetParameter param_temp0; + param_temp0.CopyFrom(param); + param_temp0.clear_layer(); + RemoveBNScale(param, ¶m_temp0); + + NetParameter param_temp; // temporary compiled param + param_temp.CopyFrom(param_temp0); + param_temp.clear_layer(); // Remove layers + CompilationRuleOne(param_temp0, ¶m_temp); + + NetParameter param_temp2; // temporary compiled param + param_temp2.CopyFrom(param_temp); + param_temp2.clear_layer(); // Remove layers + + CompilationRuleTwo(param_temp, ¶m_temp2); + + param_compiled->CopyFrom(param_temp2); + param_compiled->clear_layer(); // Remove layers + CompilationRuleThree(param_temp2, param_compiled); +} + +template +void Net::CompilationRuleOne(const NetParameter& param, + NetParameter* param_compiled) { + + bool merge_bn_scale = false; + std::set layers_to_drop; + for (int i = 0; i < param.layer_size(); ++i) { + LayerParameter* layer_param = + (const_cast(param)).mutable_layer(i); + bool layer_included = true; + + // Optimization rule 1: + // - If we are having engine MKL2017 and Scale layer within a model + // and input bottom comes from BatchNorm of engine MKL2017 + // then we can remove Scale layer + // and rename BatchNorm top blob after deleted Scale's top + + // Extension of optimization rule 1: + // - If we are having engine MKLDNN and Scale layer within a model + // and input bottom comes from BatchNorm of engine MKLDNN + // then we can remove Scale layer + // and rename BatchNorm top blob after deleted Scale's top + + // If current layer is BatchNorm of MKL2017 engine.. + if (((layer_param->type().compare("BatchNorm") == 0) && + ((layer_param->batch_norm_param().engine() == + BatchNormParameter_Engine_MKL2017) + || ((layer_param->batch_norm_param().engine() == + BatchNormParameter_Engine_DEFAULT) && + param.engine().compare("MKL2017") == 0))) || + // If current layer is BatchNorm of MKLDNN engine.. 
+ ((layer_param->type().compare("BatchNorm") == 0) && + ((layer_param->batch_norm_param().engine() == BatchNormParameter_Engine_MKLDNN) + || (((layer_param->batch_norm_param().engine() == BatchNormParameter_Engine_DEFAULT) && + (param.engine().compare(0, 6, "MKLDNN") == 0)) || + (param.engine() == "" && + layer_param->engine().compare(0, 6, "MKLDNN") == 0))))) { + std::vector consumer_layer_params; + GetBlobConsumers(consumer_layer_params, + layer_param->top(0), + param, + i+1 < param.layer_size() ? i+1 : i); + const LayerParameter& consumer_layer_param = + consumer_layer_params.size() > 0 ? + *(consumer_layer_params[0]) : *layer_param; + // Consumer layer of blob produced by BN + // has to be Scale layer with one Input Blob + if ((consumer_layer_param.type().compare("Scale") == 0) && + (consumer_layer_param.bottom_size() == 1)) { + string& batchnorm_top_blob_name = + const_cast(layer_param->top(0)); + const string& scale_top_blob_name = consumer_layer_param.top(0); + // Mark Consumer layer (its name) as the one marked for dropping + layers_to_drop.insert(consumer_layer_param.name()); + if (!merge_bn_scale) merge_bn_scale = true; + + // Replace BatchNorm top name with Scale top name + batchnorm_top_blob_name.resize(scale_top_blob_name.size()); + batchnorm_top_blob_name.replace(0, + scale_top_blob_name.size(), + scale_top_blob_name); + // Read the bias_term param of Scale Layer and set bias_term param + // of MKLBatchNorm accordingly + bool scale_bias_term = consumer_layer_param. 
+ scale_param().bias_term(); + layer_param->mutable_batch_norm_param()-> + set_bias_term(scale_bias_term); + if (consumer_layer_param.blobs_size() == 2) { + layer_param->add_blobs()->CopyFrom(consumer_layer_param.blobs(0)); + layer_param->add_blobs()->CopyFrom(consumer_layer_param.blobs(1)); + } + } + } + + if (layers_to_drop.find(layer_param->name()) != layers_to_drop.end()) { + LOG_IF(INFO, Caffe::root_solver()) << "Dropped layer: " + << layer_param->name() << std::endl; + layer_included = false; + // Remove dropped layer from the list of layers to be dropped + layers_to_drop.erase(layers_to_drop.find(layer_param->name())); + } + + if (layer_included) { + param_compiled->add_layer()->CopyFrom(*layer_param); + } + } + param_compiled->mutable_compile_net_state()->set_bn_scale_merge(merge_bn_scale); +} + + +template +void Net::CompilationRuleTwo(const NetParameter& param, + NetParameter* param_compiled) { + std::set layers_to_drop; + for (int i = 0; i < param.layer_size(); ++i) { + LayerParameter* layer_param = + (const_cast(param)).mutable_layer(i); + bool layer_included = true; + + // Optimization rule 2: + // - If we are having engine MKLDNN and ReLU layer within a model + // and input bottom comes from Convolution of engine MKLDNN + // then we can remove ReLU layer + // and rename Convolution top blob after deleted ReLU's top + // Note: Currently merging of convolution and relu layers is feasible + // If current layer is Convolution of MKLDNN engine.. 
+ if ((layer_param->type().compare("Convolution") == 0) && + ((layer_param->convolution_param().engine() == ConvolutionParameter_Engine_MKLDNN) + || (((layer_param->convolution_param().engine() == ConvolutionParameter_Engine_DEFAULT) && + (param.engine().compare(0, 6, "MKLDNN") == 0 + && param.engine().find(":DLA", 6) == string::npos)) || + (param.engine() == "" && + layer_param->engine().compare(0, 6, "MKLDNN") == 0 && + layer_param->engine().find(":DLA", 6) == string::npos)))) { + std::vector consumer_layer_params; + GetBlobConsumers(consumer_layer_params, layer_param->top(0), + param, i+1 < param.layer_size() ? i+1 : i); + const LayerParameter& consumer_layer_param = + consumer_layer_params.size() > 0 ? + *(consumer_layer_params[0]) : *layer_param; + + // Consumer layer of blob produced by Conv + // has to be ReLU layer with one Input Blob + if ((consumer_layer_param.type().compare("ReLU") == 0) && + ((consumer_layer_param.relu_param().engine() == ReLUParameter_Engine_MKLDNN) + || (((consumer_layer_param.relu_param().engine() == ReLUParameter_Engine_DEFAULT) && + (param.engine().compare(0, 6, "MKLDNN") == 0 + && param.engine().find(":DLA", 6) == string::npos)) || + (param.engine() == "" && + layer_param->engine().compare(0, 6, "MKLDNN") == 0 && + layer_param->engine().find(":DLA", 6) == string::npos)))) { + string& convolution_top_blob_name = + const_cast(layer_param->top(0)); + + if(param.state().phase() == TEST) { + const string& scale_top_blob_name = consumer_layer_param.top(0); + // Mark Consumer layer (its name) as the one marked for dropping + layers_to_drop.insert(consumer_layer_param.name()); + + // Replace Convolution top name with ReLU top name + convolution_top_blob_name.resize(scale_top_blob_name.size()); + convolution_top_blob_name.replace(0, + scale_top_blob_name.size(), + scale_top_blob_name); + } + // set relu flag in convolution + layer_param->mutable_convolution_param()->set_relu(true); + float negative_slope1 = + 
consumer_layer_param.relu_param().negative_slope(); + layer_param->mutable_convolution_param()-> + set_negative_slope(negative_slope1); + + if(param.state().phase() == TRAIN) { + if(i+1 < param.layer_size()) { + LayerParameter* relu_layer_param = + (const_cast(param)).mutable_layer(i+1); + relu_layer_param->mutable_relu_param()->set_fuse(true); + } + } + } + } + + if(param.state().phase() == TEST) { + if (layers_to_drop.find(layer_param->name()) != layers_to_drop.end()) { + LOG_IF(INFO, Caffe::root_solver()) << "Dropped layer: " + << layer_param->name() << std::endl; + layer_included = false; + // Remove dropped layer from the list of layers to be dropped + layers_to_drop.erase(layers_to_drop.find(layer_param->name())); + } + } + + if (layer_included) { + param_compiled->add_layer()->CopyFrom(*layer_param); + } + } +} + +template +void Net::CompilationRuleThree(const NetParameter& param, + NetParameter* param_compiled) { + for (int i = 0; i < param.layer_size(); ++i) { + LayerParameter* layer_param = + (const_cast(param)).mutable_layer(i); + + // Optimization rule 3: + // - If we are having engine MKL2017 and Batch Normalization + // doing inplace computation then + // to improve performance we create another top buffer + // and make other layers consuming BatchNorm top to use new buffer + + // If current layer is BatchNorm of MKL2017 engine.. + if (((layer_param->type().compare("BatchNorm") == 0) && + ((layer_param->batch_norm_param().engine() == + BatchNormParameter_Engine_MKL2017) + || ((layer_param->batch_norm_param().engine() == + BatchNormParameter_Engine_DEFAULT) && + param.engine().compare("MKL2017") == 0))) && + (layer_param->top(0) == layer_param->bottom(0) )) { + std::string& batch_norm_top = const_cast(layer_param->top(0)); + std::vector consumer_layer_params; + GetBlobConsumers(consumer_layer_params, + batch_norm_top, + param, + i+1 < param.layer_size() ? 
i+1 : i); + + for (std::vector::iterator it = + consumer_layer_params.begin(); + it != consumer_layer_params.end(); ++it) { + // If consumer is computing inplace then modify top as well + if (((*it)->top_size() > 0 ) && + ((*it)->bottom(0).compare((*it)->top(0)) == 0)) { + // Modify consumer top + const_cast((*it)->top(0)).append("_x"); + } + + // Modify consumer bottom. Sometimes searched + // buffer is under higher bottom index than 0 eg. + // In case of Eltwise + for (unsigned int i = 0; i < (*it)->bottom_size(); ++i) { + if ((*it)->bottom(i).compare(batch_norm_top) == 0) { + const_cast((*it)->bottom(i)).append("_x"); + } + } + } + // Modify top so it is diffrent from bottom + batch_norm_top.append("_x"); + } + param_compiled->add_layer()->CopyFrom(*layer_param); + } + return; +} + +template +void Net::GetBlobConsumers( + std::vector& consumer_blobs, + const string& blob_name_to_find, + const NetParameter& param, + int layer_id_to_start_traversing_from) { + consumer_blobs.clear(); + // Validate values of ids of layers are <1..num_layers-1> + CHECK_GE(layer_id_to_start_traversing_from, 1); + CHECK_LT(layer_id_to_start_traversing_from, param.layer_size()); + + // Traverse through layers to search the layer that consumes blob_name_to_find + for (int i = layer_id_to_start_traversing_from; i < param.layer_size(); ++i) { + // check bottom blobs if any of them is consuming given blob + for (int j = 0; j < param.layer(i).bottom_size(); ++j) { + if (param.layer(i).bottom(j).compare(blob_name_to_find) == 0) { + consumer_blobs.push_back(¶m.layer(i)); + } + } + } +} + +template bool Net::StateMeetsRule(const NetState& state, const NetStateRule& rule, const string& layer_name) { // Check whether the rule is broken due to phase. 
@@ -540,14 +1003,21 @@ void Net::AppendParam(const NetParameter& param, const int layer_id, } } + + template Dtype Net::ForwardFromTo(int start, int end) { CHECK_GE(start, 0); CHECK_LT(end, layers_.size()); Dtype loss = 0; for (int i = start; i <= end; ++i) { + PERFORMANCE_MEASUREMENT_BEGIN(); + // LOG(ERROR) << "Forwarding " << layer_names_[i]; Dtype layer_loss = layers_[i]->Forward(bottom_vecs_[i], top_vecs_[i]); + + PERFORMANCE_MEASUREMENT_END((std::string("FW_") + layer_names_[i]).c_str()); + loss += layer_loss; if (debug_info_) { ForwardDebugInfo(i); } } @@ -592,8 +1062,13 @@ void Net::BackwardFromTo(int start, int end) { CHECK_LT(start, layers_.size()); for (int i = start; i >= end; --i) { if (layer_need_backward_[i]) { + PERFORMANCE_MEASUREMENT_BEGIN(); + layers_[i]->Backward( top_vecs_[i], bottom_need_backward_[i], bottom_vecs_[i]); + + PERFORMANCE_MEASUREMENT_END((std::string("BW_")+layer_names_[i]).c_str()); + if (debug_info_) { BackwardDebugInfo(i); } } } @@ -680,6 +1155,28 @@ void Net::UpdateDebugInfo(const int param_id) { template void Net::ShareTrainedLayersWith(const Net* other) { + + + if (this->bn_scale_remove_) { + //This path shows testing network's blobs(weight & bias) has been adjusted + //We can't share weights & blobs with training net! 
We will save current + //training net to a temp model file and load to memory later + NetParameter temp_net_param; + NetParameter complete_net_param; + other->ToProto(&temp_net_param, false); + //Copy this->remained_bn_layer_names to temp_net_param + for (vector::iterator it = kept_bn_layers_.begin(); it != kept_bn_layers_.end(); it++) { + temp_net_param.mutable_compile_net_state()->add_kept_bn_layers(*it); + } + //temp_net_param.mutable_compile_net_state()->set_bn_top_rename(other->bn_top_rename_); + complete_net_param.CopyFrom(temp_net_param); + if (other->bn_scale_merge_) { + complete_net_param.clear_layer(); + RecoverBNScaleMergedNet(&temp_net_param, &complete_net_param); + } + CopyTrainedLayersFrom(complete_net_param); + return ; + } int num_source_layers = other->layers().size(); for (int i = 0; i < num_source_layers; ++i) { Layer* source_layer = other->layers()[i].get(); @@ -747,7 +1244,27 @@ void Net::Reshape() { } template -void Net::CopyTrainedLayersFrom(const NetParameter& param) { +void Net::CopyTrainedLayersFrom(const NetParameter& param_inp) { + NetParameter param_tmp = param_inp; + NetParameter ¶m = param_tmp; + param.set_engine(engine_name_); + param_tmp.mutable_state()->set_phase(phase_); + param_tmp.mutable_compile_net_state()->set_is_init(false); + for (vector::iterator it = this->kept_bn_layers_.begin(); it != this->kept_bn_layers_.end(); it++) { + param_tmp.mutable_compile_net_state()->add_kept_bn_layers(*it); + } + NetParameter param_compiled; + CompileNet(param, ¶m_compiled); + param = param_compiled; +#ifdef USE_MLSL + NetParameter param_mn; + if (mn::is_multinode()) { + CopyMultinodeParamsFromNet(this, ¶m); + ApplyMultinodeParams(param, ¶m_mn); + param = param_mn; + } +#endif + int num_source_layers = param.layer_size(); for (int i = 0; i < num_source_layers; ++i) { const LayerParameter& source_layer = param.layer(i); @@ -843,8 +1360,29 @@ void Net::CopyTrainedLayersFromHDF5(const string trained_filename) { << source_layer_name; } } +#ifdef 
USE_MLSL + const MultinodeLayerParameter &mn_layer_param = + layers_[target_layer_id]->layer_param().multinode(); + int num_nodes = mn_layer_param.num_nodes(); + int model_parts = mn_layer_param.model_parts(); + mn::GetCanonicalMnParam(num_nodes, model_parts); + Blob orig_blob; + vector shape = target_blobs[j]->shape(); + CHECK_GT(shape.size(), 0); + int offset = 0; + if (model_parts > 1) { + shape[0] *= model_parts; + offset = target_blobs[j]->count() * (mn::get_node_id() % model_parts); + } + orig_blob.Reshape(shape); + hdf5_load_nd_dataset(layer_hid, dataset_name.c_str(), 0, kMaxBlobAxes, + &orig_blob); + caffe_copy(target_blobs[j]->count(), orig_blob.cpu_data() + offset, + target_blobs[j]->mutable_cpu_data()); +#else hdf5_load_nd_dataset(layer_hid, dataset_name.c_str(), 0, kMaxBlobAxes, target_blobs[j].get()); +#endif } H5Gclose(layer_hid); } @@ -862,6 +1400,14 @@ void Net::ToProto(NetParameter* param, bool write_diff) const { LayerParameter* layer_param = param->add_layer(); layers_[i]->ToProto(layer_param, write_diff); } + // TODO: Should implement the param adjustment for ToHDF5 as well + // TODO: Decompile net to BVLC compatibility + // DecompileNet(param); +#ifdef USE_MLSL + if (mn::is_multinode()) { + RevertMultinodeParams(param, write_diff); + } +#endif } template @@ -881,6 +1427,9 @@ void Net::ToHDF5(const string& filename, bool write_diff) const { } for (int layer_id = 0; layer_id < layers_.size(); ++layer_id) { const LayerParameter& layer_param = layers_[layer_id]->layer_param(); +#ifdef USE_MLSL + if (layer_param.type() == "MnActivation") continue; +#endif string layer_name = layer_param.name(); hid_t layer_data_hid = H5Gcreate2(data_hid, layer_name.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); @@ -898,6 +1447,48 @@ void Net::ToHDF5(const string& filename, bool write_diff) const { ostringstream dataset_name; dataset_name << param_id; const int net_param_id = param_id_vecs_[layer_id][param_id]; +#ifdef USE_MLSL + const MultinodeLayerParameter 
&mn_layer_param = layer_param.multinode(); + int num_nodes = mn_layer_param.num_nodes(); + int model_parts = mn_layer_param.model_parts(); + mn::GetCanonicalMnParam(num_nodes, model_parts); + Blob new_blob; + vector shape = params_[net_param_id]->shape(); + CHECK_GT(shape.size(), 0); + if (model_parts > 1) { + mn::Distribution *distrib = mn::get_distrib(num_nodes/model_parts, model_parts); + shape[0] *= model_parts; + new_blob.Reshape(shape); + distrib->allgather( + params_[net_param_id]->mutable_cpu_data(), + params_[net_param_id]->count(), + new_blob.mutable_cpu_data()); + if (write_diff) { + distrib->allgather( + params_[net_param_id]->mutable_cpu_diff(), + params_[net_param_id]->count(), + new_blob.mutable_cpu_diff()); + } + } else { + new_blob.Reshape(shape); + caffe_copy(new_blob.count(), params_[net_param_id]->cpu_data(), + new_blob.mutable_cpu_data()); + if (write_diff) { + caffe_copy(new_blob.count(), params_[net_param_id]->cpu_diff(), + new_blob.mutable_cpu_diff()); + } + } + if (param_owners_[net_param_id] == -1) { + // Only save params that own themselves + hdf5_save_nd_dataset(layer_data_hid, dataset_name.str(), + new_blob); + } + if (write_diff) { + // Write diffs regardless of weight-sharing + hdf5_save_nd_dataset(layer_diff_hid, dataset_name.str(), + new_blob, true); + } +#else if (param_owners_[net_param_id] == -1) { // Only save params that own themselves hdf5_save_nd_dataset(layer_data_hid, dataset_name.str(), @@ -908,6 +1499,7 @@ void Net::ToHDF5(const string& filename, bool write_diff) const { hdf5_save_nd_dataset(layer_diff_hid, dataset_name.str(), *params_[net_param_id], true); } +#endif } H5Gclose(layer_data_hid); if (write_diff) { @@ -929,23 +1521,32 @@ void Net::Update() { } template -void Net::ClearParamDiffs() { - for (int i = 0; i < learnable_params_.size(); ++i) { - Blob* blob = learnable_params_[i]; - switch (Caffe::mode()) { - case Caffe::CPU: - caffe_set(blob->count(), static_cast(0), - blob->mutable_cpu_diff()); - break; - case 
Caffe::GPU: +void Net::ClearParamDiffs(int learnable_param_id) { + Blob* blob = learnable_params_[learnable_param_id]; + switch (Caffe::mode()) { + case Caffe::CPU: + if (blob->prv_diff()) + caffe_set(blob->prv_diff_count(), static_cast(0), + blob->mutable_prv_diff()); + else + caffe_set(blob->count(), static_cast(0), + blob->mutable_cpu_diff()); + break; + case Caffe::GPU: #ifndef CPU_ONLY - caffe_gpu_set(blob->count(), static_cast(0), - blob->mutable_gpu_diff()); + caffe_gpu_set(blob->count(), static_cast(0), + blob->mutable_gpu_diff()); #else - NO_GPU; + NO_GPU; #endif - break; - } + break; + } +} + +template +void Net::ClearParamDiffs() { + for (int i = 0; i < learnable_params_.size(); ++i) { + ClearParamDiffs(i); } } @@ -959,6 +1560,19 @@ void Net::ShareWeights() { } template +vector Net::get_layer_learnable_param_ids(int layer_id) const { + CHECK_GE(layer_id, 0); + CHECK(layer_id < param_id_vecs_.size()); + const vector& layer_param_ids = param_id_vecs_[layer_id]; + vector ret; + for (int i = 0; i < layer_param_ids.size(); ++i) { + ret.push_back(learnable_param_ids_[layer_param_ids[i]]); + CHECK(params_[layer_param_ids[i]].get() == learnable_params_[ret.back()]); + } + return ret; +} + +template bool Net::has_blob(const string& blob_name) const { return blob_names_index_.find(blob_name) != blob_names_index_.end(); } diff --git a/src/caffe/parallel.cpp b/src/caffe/parallel.cpp index 5bc41c6a6e5..559150571cf 100644 --- a/src/caffe/parallel.cpp +++ b/src/caffe/parallel.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CPU_ONLY #include #endif diff --git a/src/caffe/proto/caffe.proto b/src/caffe/proto/caffe.proto index 1556781cbc2..eaf9b6e6bd3 100644 --- a/src/caffe/proto/caffe.proto +++ b/src/caffe/proto/caffe.proto @@ -40,6 +40,120 @@ message Datum { optional bool encoded = 7 [default = false]; } +// The label (display) name and label id. +message LabelMapItem { + // Both name and label are required. 
+ optional string name = 1; + optional int32 label = 2; + // display_name is optional. + optional string display_name = 3; +} + +message LabelMap { + repeated LabelMapItem item = 1; +} + +// Sample a bbox in the normalized space [0, 1] with provided constraints. +message Sampler { + // Minimum scale of the sampled bbox. + optional float min_scale = 1 [default = 1.]; + // Maximum scale of the sampled bbox. + optional float max_scale = 2 [default = 1.]; + + // Minimum aspect ratio of the sampled bbox. + optional float min_aspect_ratio = 3 [default = 1.]; + // Maximum aspect ratio of the sampled bbox. + optional float max_aspect_ratio = 4 [default = 1.]; +} + +// Constraints for selecting sampled bbox. +message SampleConstraint { + // Minimum Jaccard overlap between sampled bbox and all bboxes in + // AnnotationGroup. + optional float min_jaccard_overlap = 1; + // Maximum Jaccard overlap between sampled bbox and all bboxes in + // AnnotationGroup. + optional float max_jaccard_overlap = 2; + + // Minimum coverage of sampled bbox by all bboxes in AnnotationGroup. + optional float min_sample_coverage = 3; + // Maximum coverage of sampled bbox by all bboxes in AnnotationGroup. + optional float max_sample_coverage = 4; + + // Minimum coverage of all bboxes in AnnotationGroup by sampled bbox. + optional float min_object_coverage = 5; + // Maximum coverage of all bboxes in AnnotationGroup by sampled bbox. + optional float max_object_coverage = 6; +} + +// Sample a batch of bboxes with provided constraints. +message BatchSampler { + // Use original image as the source for sampling. + optional bool use_original_image = 1 [default = true]; + + // Constraints for sampling bbox. + optional Sampler sampler = 2; + + // Constraints for determining if a sampled bbox is positive or negative. + optional SampleConstraint sample_constraint = 3; + + // If provided, break when found certain number of samples satisfing the + // sample_constraint. 
+ optional uint32 max_sample = 4; + + // Maximum number of trials for sampling to avoid infinite loop. + optional uint32 max_trials = 5 [default = 100]; +} + +// Condition for emitting annotations. +message EmitConstraint { + enum EmitType { + CENTER = 0; + MIN_OVERLAP = 1; + } + optional EmitType emit_type = 1 [default = CENTER]; + // If emit_type is MIN_OVERLAP, provide the emit_overlap. + optional float emit_overlap = 2; +} + +// The normalized bounding box [0, 1] w.r.t. the input image size. +message NormalizedBBox { + optional float xmin = 1; + optional float ymin = 2; + optional float xmax = 3; + optional float ymax = 4; + optional int32 label = 5; + optional bool difficult = 6; + optional float score = 7; + optional float size = 8; +} + +// Annotation for each object instance. +message Annotation { + optional int32 instance_id = 1 [default = 0]; + optional NormalizedBBox bbox = 2; +} + +// Group of annotations for a particular label. +message AnnotationGroup { + optional int32 group_label = 1; + repeated Annotation annotation = 2; +} + +// An extension of Datum which contains "rich" annotations. +message AnnotatedDatum { + enum AnnotationType { + BBOX = 0; + } + optional Datum datum = 1; + // If there are "rich" annotations, specify the type of annotation. + // Currently it only supports bounding box. + // If there are no "rich" annotations, use label in datum instead. + optional AnnotationType type = 2; + // Each group contains annotation for a particular class. + repeated AnnotationGroup annotation_group = 3; +} + message FillerParameter { // The filler type. optional string type = 1 [default = 'constant']; @@ -83,22 +197,61 @@ message NetParameter { // specified in the layers' include and exclude fields. optional NetState state = 6; + //The CompileNet will do some layer-fusion optimization to current network if it + //finds something can be optimized, compile_net_state records which Compilation Rule + //really works. 
+ optional CompileNetState compile_net_state = 10; + // Print debugging information about results while running Net::Forward, // Net::Backward, and Net::Update. optional bool debug_info = 7 [default = false]; + optional string engine = 9 [default = ""]; + // The layers that make up the net. Each of their configurations, including // connectivity and behavior, is specified as a LayerParameter. repeated LayerParameter layer = 100; // ID 100 so layers are printed last. // DEPRECATED: use 'layer' instead. repeated V1LayerParameter layers = 2; + + // Multinode settings + optional MultinodeParameter multinode = 101; +} + + + +message CompileNetState { + optional bool is_init = 1 [default = true]; + optional bool bn_scale_remove = 2 [default = false]; + optional bool bn_scale_merge = 3 [default = false]; + repeated string kept_bn_layers = 4; +} + +message MultinodeParameter { + repeated MnModelParallelParameter model_parallel = 1; +} + +message MnModelParallelParameter { + required string layer_from = 1; + optional string layer_to = 2; + optional uint32 num_nodes = 3; // 0 means all nodes + optional uint32 model_parts = 4; // 0 or >= num_nodes, means all nodes +} + +message MultiPhaseSolverParameter { + repeated SolverBatchSizePair params_pair = 1; +} + +message SolverBatchSizePair { + optional SolverParameter solver_params = 1; + optional uint32 batch_size = 2; } // NOTE // Update the next available ID when you add a new SolverParameter field. // -// SolverParameter next available ID: 41 (last added: type) +// SolverParameter next available ID: 50 (last added: warm_up_start_lr) message SolverParameter { ////////////////////////////////////////////////////////////////////////////// // Specifying the train and test networks @@ -135,6 +288,17 @@ message SolverParameter { optional NetState train_state = 26; repeated NetState test_state = 27; + // Evaluation type. 
+ optional string eval_type = 41 [default = "classification"]; + // ap_version: different ways of computing Average Precision. + // Check https://sanchom.wordpress.com/tag/average-precision/ for details. + // 11point: the 11-point interpolated average precision. Used in VOC2007. + // MaxIntegral: maximally interpolated AP. Used in VOC2012/ILSVRC. + // Integral: the natural integral of the precision-recall curve. + optional string ap_version = 42 [default = "Integral"]; + // If true, display per class result. + optional bool show_per_class_result = 43 [default = false]; + // The number of iterations for each test net. repeated int32 test_iter = 3; @@ -166,6 +330,8 @@ message SolverParameter { // zero by the max_iter. return base_lr (1 - iter/max_iter) ^ (power) // - sigmoid: the effective learning rate follows a sigmod decay // return base_lr ( 1/(1 + exp(-gamma * (iter - stepsize)))) + // - plateau: decreases lr + // if the minimum loss isn't updated for 'plateau_winsize' iters // // where base_lr, max_iter, gamma, step, stepvalue and power are defined // in the solver parameter protocol buffer, and iter is the current iteration. @@ -181,6 +347,8 @@ message SolverParameter { optional int32 stepsize = 13; // the stepsize for learning rate policy "multistep" repeated int32 stepvalue = 34; + // the stepsize for learning rate policy "plateau" + repeated int32 plateau_winsize = 44; // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm, // whenever their actual L2 norm is larger. @@ -219,7 +387,7 @@ message SolverParameter { // RMSProp decay value // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) - optional float rms_decay = 38; + optional float rms_decay = 38 [default = 0.99]; // If true, print information about the state of the net that may help with // debugging learning problems. 
@@ -239,6 +407,11 @@ message SolverParameter { } // DEPRECATED: use type instead of solver_type optional SolverType solver_type = 30 [default = SGD]; + + optional bool disabled_update = 46 [default = false]; + optional string engine = 47 [default = ""]; + optional int32 warmup_iter = 48 [default = 0]; + optional float warmup_start_lr = 49 [default = 0]; } // A message that stores the solver snapshots @@ -247,6 +420,8 @@ message SolverState { optional string learned_net = 2; // The file that stores the learned net. repeated BlobProto history = 3; // The history for sgd solvers optional int32 current_step = 4 [default = 0]; // The current step for learning rate + optional float minimum_loss = 5 [default = 1E38]; // Historical minimum loss + optional int32 iter_last_event = 6 [default = 0]; // The iteration when last lr-update or min_loss-update happened } enum Phase { @@ -306,7 +481,7 @@ message ParamSpec { // NOTE // Update the next available ID when you add a new LayerParameter field. // -// LayerParameter next available layer-specific ID: 147 (last added: recurrent_param) +// LayerParameter next available layer-specific ID: 152 (last added: mn_activation_param) message LayerParameter { optional string name = 1; // the layer name optional string type = 2; // the layer type @@ -359,6 +534,7 @@ message LayerParameter { // engine parameter for selecting the implementation. // The default for the engine is set by the ENGINE switch at compile-time. 
optional AccuracyParameter accuracy_param = 102; + optional AnnotatedDataParameter annotated_data_param = 200; optional ArgMaxParameter argmax_param = 103; optional BatchNormParameter batch_norm_param = 139; optional BiasParameter bias_param = 141; @@ -367,6 +543,8 @@ message LayerParameter { optional ConvolutionParameter convolution_param = 106; optional CropParameter crop_param = 144; optional DataParameter data_param = 107; + optional DetectionEvaluateParameter detection_evaluate_param = 205; + optional DetectionOutputParameter detection_output_param = 204; optional DropoutParameter dropout_param = 108; optional DummyDataParameter dummy_data_param = 109; optional EltwiseParameter eltwise_param = 110; @@ -384,11 +562,15 @@ message LayerParameter { optional LogParameter log_param = 134; optional LRNParameter lrn_param = 118; optional MemoryDataParameter memory_data_param = 119; + optional MultiBoxLossParameter multibox_loss_param = 201; optional MVNParameter mvn_param = 120; + optional NormalizeParameter norm_param = 206; optional ParameterParameter parameter_param = 145; + optional PermuteParameter permute_param = 202; optional PoolingParameter pooling_param = 121; optional PowerParameter power_param = 122; optional PReLUParameter prelu_param = 131; + optional PriorBoxParameter prior_box_param = 203; optional PythonParameter python_param = 130; optional RecurrentParameter recurrent_param = 146; optional ReductionParameter reduction_param = 136; @@ -398,11 +580,37 @@ message LayerParameter { optional SigmoidParameter sigmoid_param = 124; optional SoftmaxParameter softmax_param = 125; optional SPPParameter spp_param = 132; + optional SplitParameter split_param = 147; optional SliceParameter slice_param = 126; optional TanHParameter tanh_param = 127; optional ThresholdParameter threshold_param = 128; optional TileParameter tile_param = 138; + optional VideoDataParameter video_data_param = 207; optional WindowDataParameter window_data_param = 129; + + optional string 
engine = 149 [default = ""]; + + optional MultinodeLayerParameter multinode = 150; + optional MnActivationParameter mn_activation_param = 151; +} + +message MultinodeLayerParameter { + // 0 means all nodes + optional uint32 num_nodes = 1; + // 0 or > num_nodes, means all nodes + optional uint32 model_parts = 2 [default = 1]; +} + +message MnActivationParameter { + // 0 means all nodes + optional uint32 num_nodes_in = 1; + // 0 means all nodes + optional uint32 num_nodes_out = 2; + // 0 or > num_nodes, means all nodes + optional uint32 model_parts_in = 3 [default = 1]; + // 0 or > num_nodes, means all nodes + optional uint32 model_parts_out = 4 [default = 1]; + optional bool need_reduce = 5 [default = true]; } // Message that stores parameters used to apply transformation @@ -416,6 +624,9 @@ message TransformationParameter { optional bool mirror = 2 [default = false]; // Specify if we would like to randomly crop an image. optional uint32 crop_size = 3 [default = 0]; + optional uint32 crop_h = 11 [default = 0]; + optional uint32 crop_w = 12 [default = 0]; + // mean_file and mean_value cannot be specified at the same time optional string mean_file = 4; // if specified can be repeated once (would substract it from all the channels) @@ -426,6 +637,157 @@ message TransformationParameter { optional bool force_color = 6 [default = false]; // Force the decoded image to have 1 color channels. optional bool force_gray = 7 [default = false]; + // Resize policy + optional ResizeParameter resize_param = 8; + // Noise policy + optional NoiseParameter noise_param = 9; + // Distortion policy + optional DistortionParameter distort_param = 13; + // Expand policy + optional ExpansionParameter expand_param = 14; + // Constraint for emitting the annotation after transformation. 
+ optional EmitConstraint emit_constraint = 10; + // Resize the input randomly + optional RandomResizeParameter random_resize_param = 15; + optional RandomAspectRatioParameter random_aspect_ratio_param = 16; +} + +message RandomResizeParameter { + optional uint32 min_size = 1 [default = 0]; + optional uint32 max_size = 2 [default = 0]; + optional ResizeParameter resize_param = 3; +} + +message RandomAspectRatioParameter { + optional float min_area_ratio = 1 [default = 0.5]; + optional float max_area_ratio = 2 [default = 1]; + optional float aspect_ratio_change = 3 [default = 1]; + optional ResizeParameter resize_param = 5; +} + +// Message that stores parameters used by data transformer for resize policy +message ResizeParameter { + //Probability of using this resize policy + optional float prob = 1 [default = 1]; + + enum Resize_mode { + WARP = 1; + FIT_SMALL_SIZE = 2; + FIT_LARGE_SIZE_AND_PAD = 3; + } + optional Resize_mode resize_mode = 2 [default = WARP]; + optional uint32 height = 3 [default = 0]; + optional uint32 width = 4 [default = 0]; + // A parameter used to update bbox in FIT_SMALL_SIZE mode. 
+ optional uint32 height_scale = 8 [default = 0]; + optional uint32 width_scale = 9 [default = 0]; + + enum Pad_mode { + CONSTANT = 1; + MIRRORED = 2; + REPEAT_NEAREST = 3; + } + // Padding mode for FIT_LARGE_SIZE_AND_PAD mode and object centering + optional Pad_mode pad_mode = 5 [default = CONSTANT]; + // if specified can be repeated once (would fill all the channels) + // or can be repeated the same number of times as channels + // (would use them for the corresponding channel) + repeated float pad_value = 6; + + enum Interp_mode { //Same as in OpenCV + LINEAR = 1; + AREA = 2; + NEAREST = 3; + CUBIC = 4; + LANCZOS4 = 5; + } + //interpolation for resizing + repeated Interp_mode interp_mode = 7; +} + +message SaltPepperParameter { + //Percentage of pixels + optional float fraction = 1 [default = 0]; + repeated float value = 2; +} + +// Message that stores parameters used by data transformer for transformation +// policy +message NoiseParameter { + //Probability of using this noise policy + optional float prob = 1 [default = 0]; + // Histogram equalized + optional bool hist_eq = 2 [default = false]; + // Color inversion + optional bool inverse = 3 [default = false]; + // Grayscale + optional bool decolorize = 4 [default = false]; + // Gaussian blur + optional bool gauss_blur = 5 [default = false]; + + // JPEG compression quality (-1 = no compression) + optional float jpeg = 6 [default = -1]; + + // Posterization + optional bool posterize = 7 [default = false]; + + // Erosion + optional bool erode = 8 [default = false]; + + // Salt-and-pepper noise + optional bool saltpepper = 9 [default = false]; + + optional SaltPepperParameter saltpepper_param = 10; + + // Local histogram equalization + optional bool clahe = 11 [default = false]; + + // Color space conversion + optional bool convert_to_hsv = 12 [default = false]; + + // Color space conversion + optional bool convert_to_lab = 13 [default = false]; +} + +// Message that stores parameters used by data 
transformer for distortion policy +message DistortionParameter { + // The probability of adjusting brightness. + optional float brightness_prob = 1 [default = 0.0]; + // Amount to add to the pixel values within [-delta, delta]. + // The possible value is within [0, 255]. Recommend 32. + optional float brightness_delta = 2 [default = 0.0]; + + // The probability of adjusting contrast. + optional float contrast_prob = 3 [default = 0.0]; + // Lower bound for random contrast factor. Recommend 0.5. + optional float contrast_lower = 4 [default = 0.0]; + // Upper bound for random contrast factor. Recommend 1.5. + optional float contrast_upper = 5 [default = 0.0]; + + // The probability of adjusting hue. + optional float hue_prob = 6 [default = 0.0]; + // Amount to add to the hue channel within [-delta, delta]. + // The possible value is within [0, 180]. Recommend 36. + optional float hue_delta = 7 [default = 0.0]; + + // The probability of adjusting saturation. + optional float saturation_prob = 8 [default = 0.0]; + // Lower bound for the random saturation factor. Recommend 0.5. + optional float saturation_lower = 9 [default = 0.0]; + // Upper bound for the random saturation factor. Recommend 1.5. + optional float saturation_upper = 10 [default = 0.0]; + + // The probability of randomly order the image channels. + optional float random_order_prob = 11 [default = 0.0]; +} + +// Message that stores parameters used by data transformer for expansion policy +message ExpansionParameter { + //Probability of using this expansion policy + optional float prob = 1 [default = 1]; + + // The ratio to expand the image. + optional float max_expand_ratio = 2 [default = 1.]; } // Message that stores parameters shared by loss layers @@ -434,7 +796,7 @@ message LossParameter { optional int32 ignore_label = 1; // How to normalize the loss for loss layers that aggregate across batches, // spatial dimensions, or other dimensions. Currently only implemented in - // SoftmaxWithLoss layer. 
+ // SoftmaxWithLoss and SigmoidCrossEntropyLoss layers. enum NormalizationMode { // Divide by the number of examples in the batch times spatial dimensions. // Outputs that receive the ignore label will NOT be ignored in computing @@ -448,6 +810,8 @@ message LossParameter { // Do not normalize the loss. NONE = 3; } + // For historical reasons, the default normalization for + // SigmoidCrossEntropyLoss is BATCH_SIZE and *not* VALID. optional NormalizationMode normalization = 3 [default = VALID]; // Deprecated. Ignored if normalization is specified. If normalization // is not specified, then setting this to false will be equivalent to @@ -475,6 +839,16 @@ message AccuracyParameter { optional int32 ignore_label = 3; } +message AnnotatedDataParameter { + // Define the sampler. + repeated BatchSampler batch_sampler = 1; + // Store label name and label id in LabelMap format. + optional string label_map_file = 2; + // If provided, it will replace the AnnotationType stored in each + // AnnotatedDatum. + optional AnnotatedDatum.AnnotationType anno_type = 3; +} + message ArgMaxParameter { // If true produce pairs (argmax, maxval) optional bool out_max_val = 1 [default = false]; @@ -495,6 +869,14 @@ message ConcatParameter { // DEPRECATED: alias for "axis" -- does not support negative indexing. optional uint32 concat_dim = 1 [default = 1]; + + enum Engine { + DEFAULT = 0; + CAFFE = 1; + MKL2017 = 3; + MKLDNN = 4; + } + optional Engine engine = 3 [default = DEFAULT]; } message BatchNormParameter { @@ -507,6 +889,27 @@ message BatchNormParameter { // Small value to add to the variance estimate so that we don't divide by // zero. 
optional float eps = 3 [default = 1e-5]; + enum Engine { + DEFAULT = 0; + CAFFE = 1; + MKL2017 = 3; + MKLDNN = 4; + } + optional Engine engine = 4 [default = DEFAULT]; + optional bool use_weight_bias = 5 [default = true]; + optional bool bias_term = 6 [default = true]; // whether to have bias terms + optional FillerParameter filler = 7; // The filler for the weight + optional FillerParameter bias_filler = 8; // The filler for the bias +} + +message SplitParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + MKL2017 = 3; + MKLDNN = 4; + } + optional Engine engine = 1 [default = DEFAULT]; } message BiasParameter { @@ -579,11 +982,14 @@ message ConvolutionParameter { optional uint32 group = 5 [default = 1]; // The group size for group conv optional FillerParameter weight_filler = 7; // The filler for the weight + optional FillerParameter bias_filler = 8; // The filler for the bias enum Engine { DEFAULT = 0; CAFFE = 1; CUDNN = 2; + MKL2017 = 3; + MKLDNN = 4; } optional Engine engine = 15 [default = DEFAULT]; @@ -604,6 +1010,9 @@ message ConvolutionParameter { // implementation; for input blobs with num_axes != 2, this option is // ignored and the ND implementation will be used.) optional bool force_nd_im2col = 17 [default = false]; + optional bool relu = 19 [default = false]; + optional float negative_slope = 20 [default = 0]; + optional string conv_algorithm = 21 [default = "direct"]; } message CropParameter { @@ -656,6 +1065,97 @@ message DataParameter { // Prefetch queue (Number of batches to prefetch to host memory, increase if // data access bandwidth varies). optional uint32 prefetch = 10 [default = 4]; + // Whether or not DataLayer should shuffle the images at every epoch. + optional bool shuffle = 11 [default = false]; +} + +// Message that store parameters used by DetectionEvaluateLayer +message DetectionEvaluateParameter { + // Number of classes that are actually predicted. Required! + optional uint32 num_classes = 1; + // Label id for background class. 
Needed for sanity check so that + // background class is neither in the ground truth nor the detections. + optional uint32 background_label_id = 2 [default = 0]; + // Threshold for deciding true/false positive. + optional float overlap_threshold = 3 [default = 0.5]; + // If true, also consider difficult ground truth for evaluation. + optional bool evaluate_difficult_gt = 4 [default = true]; + // A file which contains a list of names and sizes with same order + // of the input DB. The file is in the following format: + // name height width + // ... + // If provided, we will scale the prediction and ground truth NormalizedBBox + // for evaluation. + optional string name_size_file = 5; + // The resize parameter used in converting NormalizedBBox to original image. + optional ResizeParameter resize_param = 6; +} + +message NonMaximumSuppressionParameter { + // Threshold to be used in nms. + optional float nms_threshold = 1 [default = 0.3]; + // Maximum number of results to be kept. + optional int32 top_k = 2; + // Parameter for adaptive nms. + optional float eta = 3 [default = 1.0]; +} + +message SaveOutputParameter { + // Output directory. If not empty, we will save the results. + optional string output_directory = 1; + // Output name prefix. + optional string output_name_prefix = 2; + // Output format. + // VOC - PASCAL VOC output format. + // COCO - MS COCO output format. + optional string output_format = 3; + // If you want to output results, must also provide the following two files. + // Otherwise, we will ignore saving results. + // label map file. + optional string label_map_file = 4; + // A file which contains a list of names and sizes with same order + // of the input DB. The file is in the following format: + // name height width + // ... + optional string name_size_file = 5; + // Number of test images. It can be less than the lines specified in + // name_size_file. For example, when we only want to evaluate on part + // of the test images. 
+ optional uint32 num_test_image = 6; + // The resize parameter used in saving the data. + optional ResizeParameter resize_param = 7; +} + +// Message that store parameters used by DetectionOutputLayer +message DetectionOutputParameter { + // Number of classes to be predicted. Required! + optional uint32 num_classes = 1; + // If true, bounding box are shared among different classes. + optional bool share_location = 2 [default = true]; + // Background label id. If there is no background class, + // set it as -1. + optional int32 background_label_id = 3 [default = 0]; + // Parameters used for non maximum suppression. + optional NonMaximumSuppressionParameter nms_param = 4; + // Parameters used for saving detection results. + optional SaveOutputParameter save_output_param = 5; + // Type of coding method for bbox. + optional PriorBoxParameter.CodeType code_type = 6 [default = CORNER]; + // If true, variance is encoded in target; otherwise we need to adjust the + // predicted offset accordingly. + optional bool variance_encoded_in_target = 8 [default = false]; + // Number of total bboxes to be kept per image after nms step. + // -1 means keeping all bboxes after nms step. + optional int32 keep_top_k = 7 [default = -1]; + // Only consider detections whose confidences are larger than a threshold. + // If not provided, consider all boxes. + optional float confidence_threshold = 9; + // If true, visualize the detection results. + optional bool visualize = 10 [default = false]; + // The threshold used to visualize the detection results. + optional float visualize_threshold = 11; + // If provided, save outputs to video file. + optional string save_file = 12; } message DropoutParameter { @@ -693,6 +1193,13 @@ message EltwiseParameter { // Whether to use an asymptotically slower (for >2 inputs) but stabler method // of computing the gradient for the PROD operation. (No effect for SUM op.) 
optional bool stable_prod_grad = 3 [default = true]; + enum Engine { + DEFAULT = 0; + CAFFE = 1; + MKL2017 = 3; + MKLDNN = 4; + } + optional Engine engine = 4 [default = DEFAULT]; } // Message that stores parameters used by ELULayer @@ -818,6 +1325,13 @@ message InnerProductParameter { // of the weight matrix. The weight matrix itself is not going to be transposed // but rather the transfer flag of operations will be toggled accordingly. optional bool transpose = 6 [default = false]; + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + MKLDNN = 3; + } + optional Engine engine = 7 [default = DEFAULT]; } message InputParameter { @@ -853,6 +1367,8 @@ message LRNParameter { DEFAULT = 0; CAFFE = 1; CUDNN = 2; + MKL2017 = 3; + MKLDNN = 4; } optional Engine engine = 6 [default = DEFAULT]; } @@ -864,6 +1380,78 @@ message MemoryDataParameter { optional uint32 width = 4; } +// Message that store parameters used by MultiBoxLossLayer +message MultiBoxLossParameter { + // Localization loss type. + enum LocLossType { + L2 = 0; + SMOOTH_L1 = 1; + } + optional LocLossType loc_loss_type = 1 [default = SMOOTH_L1]; + // Confidence loss type. + enum ConfLossType { + SOFTMAX = 0; + LOGISTIC = 1; + } + optional ConfLossType conf_loss_type = 2 [default = SOFTMAX]; + // Weight for localization loss. + optional float loc_weight = 3 [default = 1.0]; + // Number of classes to be predicted. Required! + optional uint32 num_classes = 4; + // If true, bounding box are shared among different classes. + optional bool share_location = 5 [default = true]; + // Matching method during training. + enum MatchType { + BIPARTITE = 0; + PER_PREDICTION = 1; + } + optional MatchType match_type = 6 [default = PER_PREDICTION]; + // If match_type is PER_PREDICTION, use overlap_threshold to + // determine the extra matching bboxes. + optional float overlap_threshold = 7 [default = 0.5]; + // Use prior for matching. + optional bool use_prior_for_matching = 8 [default = true]; + // Background label id. 
+ optional uint32 background_label_id = 9 [default = 0]; + // If true, also consider difficult ground truth. + optional bool use_difficult_gt = 10 [default = true]; + // If true, perform negative mining. + // DEPRECATED: use mining_type instead. + optional bool do_neg_mining = 11; + // The negative/positive ratio. + optional float neg_pos_ratio = 12 [default = 3.0]; + // The negative overlap upperbound for the unmatched predictions. + optional float neg_overlap = 13 [default = 0.5]; + // Type of coding method for bbox. + optional PriorBoxParameter.CodeType code_type = 14 [default = CORNER]; + // If true, encode the variance of prior box in the loc loss target instead of + // in bbox. + optional bool encode_variance_in_target = 16 [default = false]; + // If true, map all object classes to agnostic class. It is useful for learning + // objectness detector. + optional bool map_object_to_agnostic = 17 [default = false]; + // If true, ignore cross boundary bbox during matching. + // Cross boundary bbox is a bbox who is outside of the image region. + optional bool ignore_cross_boundary_bbox = 18 [default = false]; + // If true, only backpropagate on corners which are inside of the image + // region when encode_type is CORNER or CORNER_SIZE. + optional bool bp_inside = 19 [default = false]; + // Mining type during training. + // NONE : use all negatives. + // MAX_NEGATIVE : select negatives based on the score. + // HARD_EXAMPLE : select hard examples based on "Training Region-based Object Detectors with Online Hard Example Mining", Shrivastava et.al. + enum MiningType { + NONE = 0; + MAX_NEGATIVE = 1; + HARD_EXAMPLE = 2; + } + optional MiningType mining_type = 20 [default = MAX_NEGATIVE]; + // Parameters used for non maximum suppression during hard example mining. 
+ optional NonMaximumSuppressionParameter nms_param = 21; + optional int32 sample_size = 22 [default = 64]; + optional bool use_prior_for_nms = 23 [default = false]; +} + message MVNParameter { // This parameter can be set to false to normalize mean only optional bool normalize_variance = 1 [default = true]; @@ -875,16 +1463,35 @@ message MVNParameter { optional float eps = 3 [default = 1e-9]; } +// Message that stores parameters used by NormalizeLayer +message NormalizeParameter { + optional bool across_spatial = 1 [default = true]; + // Initial value of scale. Default is 1.0 for all + optional FillerParameter scale_filler = 2; + // Whether or not scale parameters are shared across channels. + optional bool channel_shared = 3 [default = true]; + // Epsilon for not dividing by zero while normalizing variance + optional float eps = 4 [default = 1e-10]; +} + message ParameterParameter { optional BlobShape shape = 1; } +message PermuteParameter { + // The new orders of the axes of data. Notice it should be with + // in the same range as the input data, and it starts from 0. + // Do not provide repeated order. + repeated uint32 order = 1; +} + message PoolingParameter { enum PoolMethod { MAX = 0; AVE = 1; STOCHASTIC = 2; } + optional bool avg_include_pad = 13 [default = true]; optional PoolMethod pool = 1 [default = MAX]; // The pooling method // Pad, kernel size, and stride are all given as a single value for equal // dimensions in height and width or as Y, X pairs. @@ -901,6 +1508,8 @@ message PoolingParameter { DEFAULT = 0; CAFFE = 1; CUDNN = 2; + MKL2017 = 3; + MKLDNN = 4; } optional Engine engine = 11 [default = DEFAULT]; // If global_pooling then it will pool over the size of the bottom by doing @@ -915,6 +1524,48 @@ message PowerParameter { optional float shift = 3 [default = 0.0]; } +// Message that store parameters used by PriorBoxLayer +message PriorBoxParameter { + // Encode/decode type. 
+ enum CodeType { + CORNER = 1; + CENTER_SIZE = 2; + CORNER_SIZE = 3; + } + // Minimum box size (in pixels). Required! + repeated float min_size = 1; + // Maximum box size (in pixels). Required! + repeated float max_size = 2; + // Various of aspect ratios. Duplicate ratios will be ignored. + // If none is provided, we use default ratio 1. + repeated float aspect_ratio = 3; + // If true, will flip each aspect ratio. + // For example, if there is aspect ratio "r", + // we will generate aspect ratio "1.0/r" as well. + optional bool flip = 4 [default = true]; + // If true, will clip the prior so that it is within [0, 1] + optional bool clip = 5 [default = false]; + // Variance for adjusting the prior bboxes. + repeated float variance = 6; + // By default, we calculate img_height, img_width, step_x, step_y based on + // bottom[0] (feat) and bottom[1] (img). Unless these values are explicitely + // provided. + // Explicitly provide the img_size. + optional uint32 img_size = 7; + // Either img_size or img_h/img_w should be specified; not both. + optional uint32 img_h = 8; + optional uint32 img_w = 9; + + // Explicitly provide the step size. + optional float step = 10; + // Either step or step_h/step_w should be specified; not both. + optional float step_h = 11; + optional float step_w = 12; + + // Offset to the top left corner of each cell. 
+ optional float offset = 13 [default = 0.5]; +} + message PythonParameter { optional string module = 1; optional string layer = 2; @@ -989,8 +1640,11 @@ message ReLUParameter { DEFAULT = 0; CAFFE = 1; CUDNN = 2; + MKL2017 = 3; + MKLDNN = 4; } optional Engine engine = 2 [default = DEFAULT]; + optional bool fuse = 3 [default = false]; } message ReshapeParameter { @@ -1152,6 +1806,18 @@ message ThresholdParameter { optional float threshold = 1 [default = 0]; // Strictly positive values } +message VideoDataParameter{ + enum VideoType { + WEBCAM = 0; + VIDEO = 1; + } + optional VideoType video_type = 1 [default = WEBCAM]; + optional int32 device_id = 2 [default = 0]; + optional string video_file = 3; + // Number of frames to be skipped before processing a frame. + optional uint32 skip_frames = 4 [default = 0]; +} + message WindowDataParameter { // Specify the data source. optional string source = 1; diff --git a/src/caffe/serialization/ProtoSerialize.cpp b/src/caffe/serialization/ProtoSerialize.cpp new file mode 100644 index 00000000000..9a30d9079e1 --- /dev/null +++ b/src/caffe/serialization/ProtoSerialize.cpp @@ -0,0 +1,66 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include +#include +#include "caffe/serialization/ProtoSerialize.hpp" + +namespace caffe { + +bool deserialize(const char* data, + size_t size, + ::google::protobuf::Message* msg) { + static const size_t max_decode_size = 300 * 1024 * 1024; + using google::protobuf::io::ArrayInputStream; + using google::protobuf::io::CodedInputStream; + ArrayInputStream zero_stream(data, size); + CodedInputStream coded_stream(&zero_stream); + coded_stream.SetTotalBytesLimit(max_decode_size, max_decode_size); + bool ret = msg->ParseFromCodedStream(&coded_stream); +// CHECK(ret); + return ret; +} + +string serialize(const ::google::protobuf::Message& msg) { + string str; + msg.SerializeToString(&str); + return str; +} + +} // namespace caffe + diff --git a/src/caffe/solver.cpp b/src/caffe/solver.cpp index ece3913e88a..3c8d1e66b25 100644 --- a/src/caffe/solver.cpp +++ b/src/caffe/solver.cpp @@ -1,14 +1,63 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include #include - +#include #include +#include #include +#include + +#include "boost/bind.hpp" #include "caffe/solver.hpp" +#include "caffe/util/bbox_util.hpp" #include "caffe/util/format.hpp" #include "caffe/util/hdf5.hpp" #include "caffe/util/io.hpp" +#include "caffe/util/performance.hpp" #include "caffe/util/upgrade_proto.hpp" +#ifdef USE_MLSL +#include "caffe/multinode/mlsl.hpp" +#endif + namespace caffe { template @@ -28,28 +77,40 @@ SolverAction::Enum Solver::GetRequestedAction() { template Solver::Solver(const SolverParameter& param, const Solver* root_solver) : net_(), callbacks_(), root_solver_(root_solver), - requested_early_exit_(false) { + requested_early_exit_(false), + forward_backward_(boost::bind(&Solver::ForwardBackward, this)) { Init(param); + Caffe::set_iter_size(param_.iter_size()); } template Solver::Solver(const string& param_file, const Solver* root_solver) : net_(), callbacks_(), root_solver_(root_solver), - requested_early_exit_(false) { + requested_early_exit_(false), + 
forward_backward_(boost::bind(&Solver::ForwardBackward, this)) { SolverParameter param; ReadSolverParamsFromTextFileOrDie(param_file, ¶m); Init(param); + Caffe::set_iter_size(param_.iter_size()); } template void Solver::Init(const SolverParameter& param) { CHECK(Caffe::root_solver() || root_solver_) << "root_solver_ needs to be set for all non-root solvers"; - LOG_IF(INFO, Caffe::root_solver()) << "Initializing solver from parameters: " - << std::endl << param.DebugString(); param_ = param; + +#ifdef USE_MLSL + ReplaceMultinodeSolverParams(¶m_); +#endif + + LOG_IF(INFO, Caffe::root_solver()) << "Initializing solver from parameters: " + << std::endl << param_.DebugString(); + CHECK_GE(param_.average_loss(), 1) << "average_loss should be non-negative."; +#ifndef USE_MLSL CheckSnapshotWritePermissions(); +#endif if (Caffe::root_solver() && param_.random_seed() >= 0) { Caffe::set_random_seed(param_.random_seed()); } @@ -61,6 +122,10 @@ void Solver::Init(const SolverParameter& param) { } iter_ = 0; current_step_ = 0; + +#ifdef CAFFE_PER_LAYER_TIMINGS + InitTimers(); +#endif } template @@ -92,6 +157,8 @@ void Solver::InitTrainNet() { << "Creating training net from net file: " << param_.net(); ReadNetParamsFromTextFileOrDie(param_.net(), &net_param); } + if (param_.engine() != "") + net_param.set_engine(param_.engine()); // Set the correct NetState. 
We start with the solver defaults (lowest // precedence); then, merge in any NetState specified by the net_param itself; // finally, merge in any NetState specified by the train_state (highest @@ -178,6 +245,10 @@ void Solver::InitTestNets() { net_state.MergeFrom(param_.test_state(i)); } net_params[i].mutable_state()->CopyFrom(net_state); + + if (param_.engine() != "") + net_params[i].set_engine(param_.engine()); + LOG(INFO) << "Creating test net (#" << i << ") specified by " << sources[i]; if (Caffe::root_solver()) { @@ -191,6 +262,21 @@ void Solver::InitTestNets() { } template +Dtype Solver::ForwardBackward() { + // zero-init the params + net_->ClearParamDiffs(); + + Dtype loss = Dtype(); + vector*> bottom_vec; + + // accumulate the loss and gradient + for (int i = 0; i < param_.iter_size(); ++i) { + loss += net_->ForwardBackward(); + } + return loss / param_.iter_size(); +} + +template void Solver::Step(int iters) { const int start_iter = iter_; const int stop_iter = iter_ + iters; @@ -199,8 +285,6 @@ void Solver::Step(int iters) { smoothed_loss_ = 0; while (iter_ < stop_iter) { - // zero-init the params - net_->ClearParamDiffs(); if (param_.test_interval() && iter_ % param_.test_interval() == 0 && (iter_ > 0 || param_.test_initialization()) && Caffe::root_solver()) { @@ -216,12 +300,15 @@ void Solver::Step(int iters) { } const bool display = param_.display() && iter_ % param_.display() == 0; net_->set_debug_info(display && param_.debug_info()); - // accumulate the loss and gradient - Dtype loss = 0; - for (int i = 0; i < param_.iter_size(); ++i) { - loss += net_->ForwardBackward(); - } - loss /= param_.iter_size(); + + Timer iter_timer; + double iter_time = 0; + iter_timer.Start(); + + Dtype loss = forward_backward_(); + + iter_time += iter_timer.MilliSeconds(); + // average the loss across iterations for smoothed reporting UpdateSmoothedLoss(loss, start_iter, average_loss); if (display) { @@ -246,11 +333,31 @@ void Solver::Step(int iters) { << result_vec[k] << 
loss_msg_stream.str(); } } + +#ifdef CAFFE_PER_LAYER_TIMINGS + PrintTimers(false); + ResetTimers(); +#endif } + + iter_timer.Start(); + for (int i = 0; i < callbacks_.size(); ++i) { callbacks_[i]->on_gradients_ready(); } - ApplyUpdate(); + if (!param().disabled_update()) { + PERFORMANCE_MEASUREMENT_BEGIN(); + ApplyUpdate(); + PERFORMANCE_MEASUREMENT_END_STATIC("weights_update"); + } + + iter_time += iter_timer.MilliSeconds(); + +#ifdef CAFFE_PER_LAYER_TIMINGS + if (mn::get_node_id() == 0) + LOG(INFO) << "iter " << iter_ << ", forward_backward_update_time: " + << iter_time << " ms"; +#endif // Increment the internal iter_ counter -- its value should always indicate // the number of times the weights have been updated. @@ -271,14 +378,184 @@ void Solver::Step(int iters) { break; } } + +#ifdef CAFFE_PER_LAYER_TIMINGS + ResetTimers(); + PrintTimers(true); +#endif +} + +#ifdef CAFFE_PER_LAYER_TIMINGS + +template +void Solver::InitTimers() { + int layer_count = net_->layers().size(); + + this->forward_time_per_layer.resize(layer_count, 0.0); + this->backward_time_per_layer.resize(layer_count, 0.0); + this->update_time_per_layer.resize(layer_count, 0.0); +#ifdef USE_MLSL + this->startcomm_time_per_layer.resize(layer_count, 0.0); + this->waitcomm_time_per_layer.resize(layer_count, 0.0); +#endif + this->forward_time_per_layer_total.resize(layer_count, 0.0); + this->backward_time_per_layer_total.resize(layer_count, 0.0); + this->update_time_per_layer_total.resize(layer_count, 0.0); +#ifdef USE_MLSL + this->startcomm_time_per_layer_total.resize(layer_count, 0.0); + this->waitcomm_time_per_layer_total.resize(layer_count, 0.0); +#endif } template +void Solver::ResetTimers() { + std::transform(this->forward_time_per_layer_total.begin(), + this->forward_time_per_layer_total.end(), + this->forward_time_per_layer.begin(), + this->forward_time_per_layer_total.begin(), + std::plus()); + + std::transform(this->backward_time_per_layer_total.begin(), + 
this->backward_time_per_layer_total.end(), + this->backward_time_per_layer.begin(), + this->backward_time_per_layer_total.begin(), + std::plus()); + + std::transform(this->update_time_per_layer_total.begin(), + this->update_time_per_layer_total.end(), + this->update_time_per_layer.begin(), + this->update_time_per_layer_total.begin(), + std::plus()); +#ifdef USE_MLSL + std::transform(this->startcomm_time_per_layer_total.begin(), + this->startcomm_time_per_layer_total.end(), + this->startcomm_time_per_layer.begin(), + this->startcomm_time_per_layer_total.begin(), + std::plus()); + + std::transform(this->waitcomm_time_per_layer_total.begin(), + this->waitcomm_time_per_layer_total.end(), + this->waitcomm_time_per_layer.begin(), + this->waitcomm_time_per_layer_total.begin(), + std::plus()); +#endif + + std::fill(this->forward_time_per_layer.begin(), + this->forward_time_per_layer.end(), 0); + std::fill(this->backward_time_per_layer.begin(), + this->backward_time_per_layer.end(), 0); + std::fill(this->update_time_per_layer.begin(), + this->update_time_per_layer.end(), 0); +#ifdef USE_MLSL + std::fill(this->startcomm_time_per_layer.begin(), + this->startcomm_time_per_layer.end(), 0); + std::fill(this->waitcomm_time_per_layer.begin(), + this->waitcomm_time_per_layer.end(), 0); +#endif +} + +template +void Solver::PrintTimers(bool printTotal) { +#ifdef USE_MLSL + if (mn::get_node_id() != 0) + return; +#endif + + LOG(WARNING) << std::endl; + LOG(WARNING) << "####################################################"; + + std::vector& forward_timers = printTotal ? + forward_time_per_layer_total : forward_time_per_layer; + std::vector& backward_timers = printTotal ? + backward_time_per_layer_total : backward_time_per_layer; + std::vector& update_timers = printTotal ? + update_time_per_layer_total : update_time_per_layer; +#ifdef USE_MLSL + std::vector& startcomm_timers = printTotal ? 
+ startcomm_time_per_layer_total : startcomm_time_per_layer; + std::vector& waitcomm_timers = printTotal ? + waitcomm_time_per_layer_total : waitcomm_time_per_layer; + std::string prefix = printTotal ? "TOTAL " : "DELTA "; +#endif + + double forward_time = std::accumulate(forward_timers.begin(), + forward_timers.end(), 0) / 1000; + LOG(WARNING) << prefix << "FORWARD TIME: " << forward_time << " ms"; + for (int layer_idx = 0; layer_idx < net_->layers().size(); layer_idx++) { + LOG(WARNING) << "LAYER-" << layer_idx << " " + << net_->layers()[layer_idx]->type() + << ": forward_time: " << forward_timers[layer_idx] / 1000 + << " ms"; + } + LOG(WARNING) << std::endl; + + double backward_time = std::accumulate(backward_timers.begin(), + backward_timers.end(), 0) / 1000; + LOG(WARNING) << prefix << "BACKWARD TIME: " << backward_time << " ms"; + for (int layer_idx = 0; layer_idx < net_->layers().size(); layer_idx++) { + LOG(WARNING) << "LAYER-" << layer_idx << " " + << net_->layers()[layer_idx]->type() + << ": backward_time: " << backward_timers[layer_idx] / 1000 + << " ms"; + } + LOG(WARNING) << std::endl; + + double update_time = std::accumulate(update_timers.begin(), + update_timers.end(), 0) / 1000; + LOG(WARNING) << prefix << "UPDATE TIME: " << update_time << " ms"; + for (int layer_idx = 0; layer_idx < net_->layers().size(); layer_idx++) { + LOG(WARNING) << "LAYER-" << layer_idx << " " + << net_->layers()[layer_idx]->type() + << ": update_time: " << update_timers[layer_idx] / 1000 + << " ms"; + } + LOG(WARNING) << std::endl; + +#ifdef USE_MLSL + double startcomm_time = std::accumulate(startcomm_timers.begin(), + startcomm_timers.end(), 0) / 1000; + LOG(WARNING) << prefix << "START COMMUNICATION TIME: " << startcomm_time << " ms"; + for (int layer_idx = 0; layer_idx < net_->layers().size(); layer_idx++) { + LOG(WARNING) << "LAYER-" << layer_idx << " " + << net_->layers()[layer_idx]->type() + << ": startcomm_time: " << startcomm_timers[layer_idx] / 1000 + << " ms"; + } 
+ LOG(WARNING) << std::endl; + + double waitcomm_time = std::accumulate(waitcomm_timers.begin(), + waitcomm_timers.end(), 0) / 1000; + LOG(WARNING) << prefix << "WAIT COMMUNICATION TIME: " << waitcomm_time << " ms"; + for (int layer_idx = 0; layer_idx < net_->layers().size(); layer_idx++) { + LOG(WARNING) << "LAYER-" << layer_idx << " " + << net_->layers()[layer_idx]->type() + << ": waitcomm_time: " << waitcomm_timers[layer_idx] / 1000 + << " ms"; + } + LOG(WARNING) << std::endl; + + LOG(WARNING) << prefix << "TIME (Computation + Communication): " << (forward_time + + backward_time + update_time + startcomm_time + waitcomm_time) / 1000 + << " sec"; +#else + LOG(WARNING) << prefix << "TIME (Computation): " << (forward_time + + backward_time + update_time) / 1000 << " sec"; +#endif + + LOG(WARNING) << "####################################################"; + LOG(WARNING) << std::endl; +} + +#endif /* CAFFE_PER_LAYER_TIMINGS */ + +template void Solver::Solve(const char* resume_file) { CHECK(Caffe::root_solver()); LOG(INFO) << "Solving " << net_->name(); LOG(INFO) << "Learning Rate Policy: " << param_.lr_policy(); + PERFORMANCE_INIT_MONITOR(); + // Initialize to false every time we start solving. 
requested_early_exit_ = false; @@ -313,26 +590,43 @@ void Solver::Solve(const char* resume_file) { net_->Forward(&loss); UpdateSmoothedLoss(loss, start_iter, average_loss); - LOG(INFO) << "Iteration " << iter_ << ", loss = " << smoothed_loss_; } - if (param_.test_interval() && iter_ % param_.test_interval() == 0) { + + // in multinode last test must be done after weights update + if (param_.test_interval() && iter_ % param_.test_interval() == 0) TestAll(); - } + LOG(INFO) << "Optimization Done."; } template void Solver::TestAll() { +#ifdef USE_MLSL + for (int i = 0; i < callbacks_.size(); ++i) { + callbacks_[i]->on_before_test(); + } +#endif for (int test_net_id = 0; test_net_id < test_nets_.size() && !requested_early_exit_; ++test_net_id) { - Test(test_net_id); + if (param_.eval_type() == "classification") { + TestClassification(test_net_id); + } else if (param_.eval_type() == "detection") { + TestDetection(test_net_id); + } else { + LOG(FATAL) << "Unknown evaluation type: " << param_.eval_type(); + } } +#ifdef USE_MLSL + for (int i = 0; i < callbacks_.size(); ++i) { + callbacks_[i]->on_after_test(); + } +#endif } template -void Solver::Test(const int test_net_id) { +void Solver::TestClassification(const int test_net_id) { CHECK(Caffe::root_solver()); LOG(INFO) << "Iteration " << iter_ << ", Testing net (#" << test_net_id << ")"; @@ -387,16 +681,33 @@ void Solver::Test(const int test_net_id) { return; } if (param_.test_compute_loss()) { +#ifdef USE_MLSL + mn::allreduce(&loss, 1); + loss /= (param_.test_iter(test_net_id) * mn::get_nodes_count()); + if (mn::get_node_id() == 0) { + LOG(INFO) << "Test loss: " << loss; + } +#else /* !USE_MLSL */ loss /= param_.test_iter(test_net_id); LOG(INFO) << "Test loss: " << loss; +#endif /* USE_MLSL */ } +#ifdef USE_MLSL + mn::allreduce(test_score.data(), test_score.size()); + if (mn::get_node_id() == 0) +#endif /* USE_MLSL */ for (int i = 0; i < test_score.size(); ++i) { const int output_blob_index = 
test_net->output_blob_indices()[test_score_output_id[i]]; const string& output_name = test_net->blob_names()[output_blob_index]; const Dtype loss_weight = test_net->blob_loss_weights()[output_blob_index]; ostringstream loss_msg_stream; +#ifdef USE_MLSL + const Dtype mean_score = + test_score[i] / (param_.test_iter(test_net_id) * mn::get_nodes_count()); +#else /* !USE_MLSL */ const Dtype mean_score = test_score[i] / param_.test_iter(test_net_id); +#endif /* USE_MLSL */ if (loss_weight) { loss_msg_stream << " (* " << loss_weight << " = " << loss_weight * mean_score << " loss)"; @@ -407,8 +718,132 @@ void Solver::Test(const int test_net_id) { } template +void Solver::TestDetection(const int test_net_id) { + CHECK(Caffe::root_solver()); + LOG(INFO) << "Iteration " << iter_ + << ", Testing net (#" << test_net_id << ")"; + CHECK_NOTNULL(test_nets_[test_net_id].get())-> + ShareTrainedLayersWith(net_.get()); + map > > > all_true_pos; + map > > > all_false_pos; + map > all_num_pos; + const shared_ptr >& test_net = test_nets_[test_net_id]; + Dtype loss = 0; + for (int i = 0; i < param_.test_iter(test_net_id); ++i) { + SolverAction::Enum request = GetRequestedAction(); + // Check to see if stoppage of testing/training has been requested. + while (request != SolverAction::NONE) { + if (SolverAction::SNAPSHOT == request) { + Snapshot(); + } else if (SolverAction::STOP == request) { + requested_early_exit_ = true; + } + request = GetRequestedAction(); + } + if (requested_early_exit_) { + // break out of test loop. 
+ break; + } + + Dtype iter_loss; + const vector*>& result = test_net->Forward(&iter_loss); + if (param_.test_compute_loss()) { + loss += iter_loss; + } + for (int j = 0; j < result.size(); ++j) { + CHECK_EQ(result[j]->width(), 5); + const Dtype* result_vec = result[j]->cpu_data(); + int num_det = result[j]->height(); + for (int k = 0; k < num_det; ++k) { + int item_id = static_cast(result_vec[k * 5]); + int label = static_cast(result_vec[k * 5 + 1]); + if (item_id == -1) { + // Special row of storing number of positives for a label. + if (all_num_pos[j].find(label) == all_num_pos[j].end()) { + all_num_pos[j][label] = static_cast(result_vec[k * 5 + 2]); + } else { + all_num_pos[j][label] += static_cast(result_vec[k * 5 + 2]); + } + } else { + // Normal row storing detection status. + float score = result_vec[k * 5 + 2]; + int tp = static_cast(result_vec[k * 5 + 3]); + int fp = static_cast(result_vec[k * 5 + 4]); + if (tp == 0 && fp == 0) { + // Ignore such case. It happens when a detection bbox is matched to + // a difficult gt bbox and we don't evaluate on difficult gt bbox. 
+ continue; + } + all_true_pos[j][label].push_back(std::make_pair(score, tp)); + all_false_pos[j][label].push_back(std::make_pair(score, fp)); + } + } + } + } + if (requested_early_exit_) { + LOG(INFO) << "Test interrupted."; + return; + } + if (param_.test_compute_loss()) { + loss /= param_.test_iter(test_net_id); + LOG(INFO) << "Test loss: " << loss; + } + for (int i = 0; i < all_true_pos.size(); ++i) { + if (all_true_pos.find(i) == all_true_pos.end()) { + LOG(FATAL) << "Missing output_blob true_pos: " << i; + } + const map > >& true_pos = + all_true_pos.find(i)->second; + if (all_false_pos.find(i) == all_false_pos.end()) { + LOG(FATAL) << "Missing output_blob false_pos: " << i; + } + const map > >& false_pos = + all_false_pos.find(i)->second; + if (all_num_pos.find(i) == all_num_pos.end()) { + LOG(FATAL) << "Missing output_blob num_pos: " << i; + } + const map& num_pos = all_num_pos.find(i)->second; + map APs; + float mAP = 0.; + // Sort true_pos and false_pos with descend scores. + for (map::const_iterator it = num_pos.begin(); + it != num_pos.end(); ++it) { + int label = it->first; + int label_num_pos = it->second; + if (true_pos.find(label) == true_pos.end()) { + LOG(WARNING) << "Missing true_pos for label: " << label; + continue; + } + const vector >& label_true_pos = + true_pos.find(label)->second; + if (false_pos.find(label) == false_pos.end()) { + LOG(WARNING) << "Missing false_pos for label: " << label; + continue; + } + const vector >& label_false_pos = + false_pos.find(label)->second; + vector prec, rec; + ComputeAP(label_true_pos, label_num_pos, label_false_pos, + param_.ap_version(), &prec, &rec, &(APs[label])); + mAP += APs[label]; + } + mAP /= num_pos.size(); + const int output_blob_index = test_net->output_blob_indices()[i]; + const string& output_name = test_net->blob_names()[output_blob_index]; + LOG(INFO) << " Test net output #" << i << ": " << output_name << " = " + << mAP; + } +} + +template void Solver::Snapshot() { 
CHECK(Caffe::root_solver()); + +#ifdef USE_MLSL + for (int i = 0; i < callbacks_.size(); ++i) { + callbacks_[i]->on_before_snapshot(); + } +#endif /* USE_MLSL */ string model_filename; switch (param_.snapshot_format()) { case caffe::SolverParameter_SnapshotFormat_BINARYPROTO: @@ -422,6 +857,12 @@ void Solver::Snapshot() { } SnapshotSolverState(model_filename); + +#ifdef USE_MLSL + for (int i = 0; i < callbacks_.size(); ++i) { + callbacks_[i]->on_after_snapshot(); + } +#endif } template @@ -451,10 +892,16 @@ string Solver::SnapshotFilename(const string extension) { template string Solver::SnapshotToBinaryProto() { string model_filename = SnapshotFilename(".caffemodel"); - LOG(INFO) << "Snapshotting to binary proto file " << model_filename; NetParameter net_param; net_->ToProto(&net_param, param_.snapshot_diff()); +#ifdef USE_MLSL + if (mn::is_root()) { +#endif + LOG(INFO) << "Snapshotting to binary proto file " << model_filename; WriteProtoToBinaryFile(net_param, model_filename); +#ifdef USE_MLSL + } +#endif return model_filename; } diff --git a/src/caffe/solvers/adadelta_solver.cpp b/src/caffe/solvers/adadelta_solver.cpp index fd30f19acac..aa6ea85b04d 100644 --- a/src/caffe/solvers/adadelta_solver.cpp +++ b/src/caffe/solvers/adadelta_solver.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/sgd_solvers.hpp" diff --git a/src/caffe/solvers/adagrad_solver.cpp b/src/caffe/solvers/adagrad_solver.cpp index e78eadca141..dc40592fcd3 100644 --- a/src/caffe/solvers/adagrad_solver.cpp +++ b/src/caffe/solvers/adagrad_solver.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include "caffe/sgd_solvers.hpp" diff --git a/src/caffe/solvers/adam_solver.cpp b/src/caffe/solvers/adam_solver.cpp index 4a91f00bd49..e92805bd509 100644 --- a/src/caffe/solvers/adam_solver.cpp +++ b/src/caffe/solvers/adam_solver.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/sgd_solvers.hpp" diff --git a/src/caffe/solvers/nesterov_solver.cpp b/src/caffe/solvers/nesterov_solver.cpp index 23ab2d4369a..d63ee7cc6cf 100644 --- a/src/caffe/solvers/nesterov_solver.cpp +++ b/src/caffe/solvers/nesterov_solver.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/sgd_solvers.hpp" diff --git a/src/caffe/solvers/rmsprop_solver.cpp b/src/caffe/solvers/rmsprop_solver.cpp index 3251ee423a7..6fd386a430d 100644 --- a/src/caffe/solvers/rmsprop_solver.cpp +++ b/src/caffe/solvers/rmsprop_solver.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/sgd_solvers.hpp" diff --git a/src/caffe/solvers/sgd_solver.cpp b/src/caffe/solvers/sgd_solver.cpp index f30f316d1a0..264ac954ff8 100644 --- a/src/caffe/solvers/sgd_solver.cpp +++ b/src/caffe/solvers/sgd_solver.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include @@ -7,7 +44,14 @@ #include "caffe/util/upgrade_proto.hpp" namespace caffe { - +template +Dtype SGDSolver::GetWarmUpLR(int cur_iter, int warmup_iter, Dtype warmup_start_lr) { + if (cur_iter < 0) { + cur_iter = 0; + } + return (cur_iter * this->param_.base_lr() + + (warmup_iter - cur_iter) * warmup_start_lr) / warmup_iter; +} // Return the current learning rate. The currently implemented learning rate // policies are as follows: // - fixed: always return base_lr. 
@@ -27,7 +71,13 @@ template Dtype SGDSolver::GetLearningRate() { Dtype rate; const string& lr_policy = this->param_.lr_policy(); - if (lr_policy == "fixed") { + + + if (this->param_.warmup_iter() > 0 && + this->iter_ < this->param_.warmup_iter()) { + rate = GetWarmUpLR(this->iter_, this->param_.warmup_iter(), + this->param_.warmup_start_lr()); + } else if (lr_policy == "fixed") { rate = this->param_.base_lr(); } else if (lr_policy == "step") { this->current_step_ = this->iter_ / this->param_.stepsize(); @@ -56,6 +106,35 @@ Dtype SGDSolver::GetLearningRate() { rate = this->param_.base_lr() * (Dtype(1.) / (Dtype(1.) + exp(-this->param_.gamma() * (Dtype(this->iter_) - Dtype(this->param_.stepsize()))))); + } else if (lr_policy == "plateau") { + // Update minimum loss if needed + if (this->smoothed_loss_ < this->minimum_loss_) { + this->minimum_loss_ = this->smoothed_loss_; + this->iter_last_event_ = this->iter_; + } + + // If sufficient iters have passed after the last event, then lower LR + // An event is defined an update of minimum loss or LR + if (this->current_step_ < this->param_.plateau_winsize_size()) { + int iter_next_update = this->iter_last_event_ + + this->param_.plateau_winsize(this->current_step_); + + if (this->iter_ >= iter_next_update) { + this->current_step_++; + this->iter_last_event_ = this->iter_; + LOG(INFO) << "Plateau Status: Iteration " << this->iter_ + << ", step = " << this->current_step_; + } + } + + if (this->param_.display() && this->iter_ % this->param_.display() == 0 + && this->iter_last_event_ > (this->iter_ - this->param_.display())) { + LOG(INFO) << "Plateau Status: Iteration " << this->iter_ + << ", current minimum_loss = " << this->minimum_loss_; + } + + rate = this->param_.base_lr() * + pow(this->param_.gamma(), this->current_step_); } else { LOG(FATAL) << "Unknown learning rate policy: " << lr_policy; } @@ -71,10 +150,14 @@ void SGDSolver::PreSolve() { temp_.clear(); for (int i = 0; i < net_params.size(); ++i) { const vector& 
shape = net_params[i]->shape(); + + // TODO: allocate these buffers taking into account owned_count to reduce memory footprint history_.push_back(shared_ptr >(new Blob(shape))); update_.push_back(shared_ptr >(new Blob(shape))); temp_.push_back(shared_ptr >(new Blob(shape))); } + + this->minimum_loss_ = std::numeric_limits::max(); } template @@ -108,23 +191,74 @@ void SGDSolver::ApplyUpdate() { ClipGradients(); for (int param_id = 0; param_id < this->net_->learnable_params().size(); ++param_id) { - Normalize(param_id); - Regularize(param_id); - ComputeUpdateValue(param_id, rate); + ApplyUpdate(param_id); + } +} + +template +void SGDSolver::ApplyUpdate(int param_id) { + CHECK(Caffe::root_solver()); + Dtype rate = GetLearningRate(); + + LOG_PARAM_BLOB(this->net_->learnable_params()[param_id], diff, param_id, "ApplyUpdate: raw delwt:"); + + // If Learning rate for this learnable params is zero then skip + // updating params + if (this->net_->params_lr()[param_id] == 0) { + return; } - this->net_->Update(); + + Normalize(param_id); + LOG_PARAM_BLOB(this->net_->learnable_params()[param_id], diff, param_id, "ApplyUpdate: delwt after Normalize:"); + + Regularize(param_id); + LOG_PARAM_BLOB(this->net_->learnable_params()[param_id], diff, param_id, "ApplyUpdate: delwt after Regularize:"); + + ComputeUpdateValue(param_id, rate); + LOG_PARAM_BLOB(this->net_->learnable_params()[param_id], diff, param_id, "ApplyUpdate: wtinc:"); + + LOG_PARAM_BLOB(this->net_->learnable_params()[param_id], data, param_id, "ApplyUpdate: weight before update:"); + + this->net_->learnable_params()[param_id]->Update(); + + LOG_PARAM_BLOB(this->net_->learnable_params()[param_id], data, param_id, "ApplyUpdate: weight after update:"); } template void SGDSolver::Normalize(int param_id) { + +#ifdef USE_MLSL + if ((this->param_.iter_size() == 1) && !mn::is_multinode()) { + return; + } +#else /* !USE_MLSL */ if (this->param_.iter_size() == 1) { return; } +#endif /* USE_MLSL */ + // Scale gradient to 
counterbalance accumulation. const vector*>& net_params = this->net_->learnable_params(); + +#ifdef USE_MLSL + const Dtype accum_normalization = Dtype(1.) / (this->param_.iter_size() * mn::get_nodes_count()); +#else /* !USE_MLSL */ const Dtype accum_normalization = Dtype(1.) / this->param_.iter_size(); +#endif /* USE_MLSL */ + switch (Caffe::mode()) { case Caffe::CPU: { - caffe_scal(net_params[param_id]->count(), accum_normalization, - net_params[param_id]->mutable_cpu_diff()); + + if (net_params[param_id]->prv_diff() + && (net_params[param_id]->prv_diff_count() + == net_params[param_id]->count())) { + + caffe_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_prv_diff()); + } + else { + caffe_scal(net_params[param_id]->count(), accum_normalization, + net_params[param_id]->mutable_cpu_diff()); + } + break; } case Caffe::GPU: { @@ -154,10 +288,23 @@ void SGDSolver::Regularize(int param_id) { if (local_decay) { if (regularization_type == "L2") { // add weight decay - caffe_axpy(net_params[param_id]->count(), - local_decay, - net_params[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); + if (net_params[param_id]->prv_data() + && (net_params[param_id]->prv_data_count() + == net_params[param_id]->count())) { + CHECK_EQ(true, + net_params[param_id]->get_prv_data_descriptor()->layout_compare( + net_params[param_id]->get_prv_diff_descriptor())); + + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->prv_data(), + net_params[param_id]->mutable_prv_diff()); + } else { + caffe_axpy(net_params[param_id]->count(), + local_decay, + net_params[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } } else if (regularization_type == "L1") { caffe_cpu_sign(net_params[param_id]->count(), net_params[param_id]->cpu_data(), @@ -215,15 +362,37 @@ void SGDSolver::ComputeUpdateValue(int param_id, Dtype rate) { const vector& net_params_lr = this->net_->params_lr(); Dtype momentum = 
this->param_.momentum(); Dtype local_rate = rate * net_params_lr[param_id]; + + if (this->param_.warmup_iter() > 0 && + this->iter_ < this->param_.warmup_iter()) { + // Momentum correction during warmup stage + Dtype prev_rate = GetWarmUpLR(this->iter_ - 1, this->param_.warmup_iter(), + this->param_.warmup_start_lr()); + momentum = momentum * (rate / prev_rate); + } // Compute the update to history, then copy it to the parameter diff. switch (Caffe::mode()) { case Caffe::CPU: { - caffe_cpu_axpby(net_params[param_id]->count(), local_rate, - net_params[param_id]->cpu_diff(), momentum, - history_[param_id]->mutable_cpu_data()); - caffe_copy(net_params[param_id]->count(), - history_[param_id]->cpu_data(), - net_params[param_id]->mutable_cpu_diff()); + if (net_params[param_id]->prv_diff() + && (net_params[param_id]->prv_diff_count() + == net_params[param_id]->count())) { + + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->prv_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + + caffe_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_prv_diff()); + } else { + caffe_cpu_axpby(net_params[param_id]->count(), local_rate, + net_params[param_id]->cpu_diff(), momentum, + history_[param_id]->mutable_cpu_data()); + + caffe_copy(net_params[param_id]->count(), + history_[param_id]->cpu_data(), + net_params[param_id]->mutable_cpu_diff()); + } break; } case Caffe::GPU: { @@ -263,6 +432,8 @@ void SGDSolver::SnapshotSolverStateToBinaryProto( state.set_iter(this->iter_); state.set_learned_net(model_filename); state.set_current_step(this->current_step_); + state.set_iter_last_event(this->iter_last_event_); + state.set_minimum_loss(this->minimum_loss_); state.clear_history(); for (int i = 0; i < history_.size(); ++i) { // Add history @@ -270,9 +441,15 @@ void SGDSolver::SnapshotSolverStateToBinaryProto( history_[i]->ToProto(history_blob); } string snapshot_filename = 
Solver::SnapshotFilename(".solverstate"); +#ifdef USE_MLSL + if (mn::is_root()) { +#endif LOG(INFO) << "Snapshotting solver state to binary proto file " << snapshot_filename; WriteProtoToBinaryFile(state, snapshot_filename.c_str()); +#ifdef USE_MLSL + } +#endif } template @@ -288,6 +465,8 @@ void SGDSolver::SnapshotSolverStateToHDF5( hdf5_save_int(file_hid, "iter", this->iter_); hdf5_save_string(file_hid, "learned_net", model_filename); hdf5_save_int(file_hid, "current_step", this->current_step_); + hdf5_save_int(file_hid, "iter_last_event", this->iter_last_event_); + hdf5_save_float(file_hid, "minimum_loss", this->minimum_loss_); hid_t history_hid = H5Gcreate2(file_hid, "history", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); CHECK_GE(history_hid, 0) @@ -313,6 +492,8 @@ void SGDSolver::RestoreSolverStateFromBinaryProto( this->net_->CopyTrainedLayersFrom(net_param); } this->current_step_ = state.current_step(); + this->iter_last_event_ = state.iter_last_event(); + this->minimum_loss_ = state.minimum_loss(); CHECK_EQ(state.history_size(), history_.size()) << "Incorrect length of history blobs."; LOG(INFO) << "SGDSolver: restoring history"; @@ -331,6 +512,8 @@ void SGDSolver::RestoreSolverStateFromHDF5(const string& state_file) { this->net_->CopyTrainedLayersFrom(learned_net); } this->current_step_ = hdf5_load_int(file_hid, "current_step"); + this->iter_last_event_ = hdf5_load_int(file_hid, "iter_last_event"); + this->minimum_loss_ = hdf5_load_float(file_hid, "minimum_loss"); hid_t history_hid = H5Gopen2(file_hid, "history", H5P_DEFAULT); CHECK_GE(history_hid, 0) << "Error reading history from " << state_file; int state_history_size = hdf5_get_num_links(history_hid); diff --git a/src/caffe/syncedmem.cpp b/src/caffe/syncedmem.cpp index 4d3564172ab..e825640b1e7 100644 --- a/src/caffe/syncedmem.cpp +++ b/src/caffe/syncedmem.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: 
+Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include "caffe/common.hpp" #include "caffe/syncedmem.hpp" #include "caffe/util/math_functions.hpp" @@ -42,6 +79,17 @@ inline void SyncedMemory::to_cpu() { NO_GPU; #endif break; + case HEAD_AT_PRV: + if (cpu_ptr_ == NULL) { + CaffeMallocHost(&cpu_ptr_, size_, &cpu_malloc_use_cuda_); + own_cpu_data_ = true; + } + CHECK(prv_descriptor_.get()); + prv_descriptor_->convert_from_prv(cpu_ptr_); + prv_descriptor_->on_to_cpu(); + head_ = SYNCED_PRV; + break; + case SYNCED_PRV: case HEAD_AT_CPU: case SYNCED: break; @@ -58,6 +106,8 @@ inline void SyncedMemory::to_gpu() { head_ = HEAD_AT_GPU; own_gpu_data_ = true; break; + case HEAD_AT_PRV: + to_cpu(); case HEAD_AT_CPU: if (gpu_ptr_ == NULL) { CUDA_CHECK(cudaGetDevice(&gpu_device_)); @@ -77,11 +127,13 @@ inline void SyncedMemory::to_gpu() { } const void* SyncedMemory::cpu_data() { + boost::mutex::scoped_lock lock(mtx); to_cpu(); return (const void*)cpu_ptr_; } void SyncedMemory::set_cpu_data(void* data) { + boost::mutex::scoped_lock lock(mtx); CHECK(data); if (own_cpu_data_) { CaffeFreeHost(cpu_ptr_, cpu_malloc_use_cuda_); @@ -92,6 +144,7 @@ void SyncedMemory::set_cpu_data(void* data) { } const void* SyncedMemory::gpu_data() { + boost::mutex::scoped_lock lock(mtx); #ifndef CPU_ONLY to_gpu(); return (const void*)gpu_ptr_; @@ -102,6 +155,7 @@ const void* SyncedMemory::gpu_data() { } void SyncedMemory::set_gpu_data(void* data) { + boost::mutex::scoped_lock lock(mtx); #ifndef CPU_ONLY CHECK(data); if (own_gpu_data_) { @@ -122,12 +176,14 @@ void SyncedMemory::set_gpu_data(void* data) { } void* SyncedMemory::mutable_cpu_data() { + boost::mutex::scoped_lock lock(mtx); to_cpu(); head_ = HEAD_AT_CPU; return cpu_ptr_; } void* SyncedMemory::mutable_gpu_data() { + boost::mutex::scoped_lock lock(mtx); #ifndef CPU_ONLY to_gpu(); head_ = HEAD_AT_GPU; @@ -140,6 +196,7 @@ void* SyncedMemory::mutable_gpu_data() { #ifndef CPU_ONLY void SyncedMemory::async_gpu_push(const cudaStream_t& stream) { + boost::mutex::scoped_lock lock(mtx); 
CHECK(head_ == HEAD_AT_CPU); if (gpu_ptr_ == NULL) { CUDA_CHECK(cudaGetDevice(&gpu_device_)); @@ -153,5 +210,39 @@ void SyncedMemory::async_gpu_push(const cudaStream_t& stream) { } #endif -} // namespace caffe +void SyncedMemory::set_prv_descriptor(shared_ptr descriptor, + bool same_data) { + // If it wasn't synced before, it won't be now. + if (descriptor == NULL) { + if (head_ != UNINITIALIZED) + head_ = HEAD_AT_CPU; + } else { + if ((head_ != HEAD_AT_PRV) && same_data) + head_ = SYNCED_PRV; + else + head_ = HEAD_AT_PRV; + } + + prv_descriptor_ = descriptor; +} + +const void* SyncedMemory::prv_data() { + if ((head_ != HEAD_AT_PRV) && + (head_ != SYNCED_PRV)) { + return NULL; + } + CHECK(prv_descriptor_.get()); + return (const void* ) prv_descriptor_->prv_ptr(); +} + +void* SyncedMemory::mutable_prv_data() { + CHECK(prv_descriptor_.get()); + if (head_ == HEAD_AT_CPU) { + prv_descriptor_->convert_to_prv(cpu_ptr_); + } + head_ = HEAD_AT_PRV; + return prv_descriptor_->prv_ptr(); +} + +} // namespace caffe diff --git a/src/caffe/test/CMakeLists.txt b/src/caffe/test/CMakeLists.txt index 35a803f2f41..f79f5109c5f 100644 --- a/src/caffe/test/CMakeLists.txt +++ b/src/caffe/test/CMakeLists.txt @@ -27,7 +27,7 @@ endif() # ---[ Adding test target add_executable(${the_target} EXCLUDE_FROM_ALL ${test_srcs}) -target_link_libraries(${the_target} gtest ${Caffe_LINK}) +target_link_libraries(${the_target} gtest gmock ${Caffe_LINK}) caffe_default_properties(${the_target}) caffe_set_runtime_directory(${the_target} "${PROJECT_BINARY_DIR}/test") diff --git a/src/caffe/test/test_accuracy_layer.cpp b/src/caffe/test/test_accuracy_layer.cpp index 6fe808bd5c5..54663615e80 100644 --- a/src/caffe/test/test_accuracy_layer.cpp +++ b/src/caffe/test/test_accuracy_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) 
+All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include diff --git a/src/caffe/test/test_annotated_data_layer.cpp b/src/caffe/test/test_annotated_data_layer.cpp new file mode 100644 index 00000000000..4992ff88e95 --- /dev/null +++ b/src/caffe/test/test_annotated_data_layer.cpp @@ -0,0 +1,771 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef USE_OPENCV +#include +#include +#include + +#include "boost/scoped_ptr.hpp" +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/annotated_data_layer.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/db.hpp" +#include "caffe/util/io.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +using boost::scoped_ptr; + +static bool kBoolChoices[] = {false, true}; +static int kNumChoices = 2; + +// Compute bounding box number. +int OneBBoxNum(int n) { + int sum = 0; + for (int g = 0; g < n; ++g) { + sum += g; + } + return sum; +} + +int BBoxNum(int n) { + int sum = 0; + for (int i = 0; i < n; ++i) { + for (int g = 0; g < i; ++g) { + sum += g; + } + } + return sum; +} + +template +class AnnotatedDataLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + AnnotatedDataLayerTest() + : backend_(DataParameter_DB_LEVELDB), + blob_top_data_(new Blob()), + blob_top_label_(new Blob()), + seed_(1701), + num_(6), + channels_(2), + height_(10), + width_(10), + eps_(1e-6) {} + + virtual void SetUp() { + spatial_dim_ = height_ * width_; + size_ = channels_ * spatial_dim_; + filename_.reset(new string()); + GetTempDirname(filename_.get()); + *filename_ += "/db"; + blob_top_vec_.push_back(blob_top_data_); + blob_top_vec_.push_back(blob_top_label_); + } + + // Fill the DB with data. 
+ // - backend: can be either LevelDB or LMDB + // - unique_pixel: if true, each pixel is unique but all images are the same; + // else each image is unique but all pixels within an image are the same. + // - unique_annotation: if true, each annotation in a group is unique but all + // groups are the same at the same positions; else each group is unique but + // all annotations within a group are the same. + // - use_rich_annotation: if false, use datum.label() instead. + // - type: type of rich annotation. + void Fill(DataParameter_DB backend, bool unique_pixel, bool unique_annotation, + bool use_rich_annotation, AnnotatedDatum_AnnotationType type) { + backend_ = backend; + unique_pixel_ = unique_pixel; + unique_annotation_ = unique_annotation; + use_rich_annotation_ = use_rich_annotation; + type_ = type; + GetTempDirname(filename_.get()); + LOG(INFO) << "Using temporary dataset " << *filename_; + scoped_ptr db(db::GetDB(backend)); + db->Open(*filename_, db::NEW); + scoped_ptr txn(db->NewTransaction()); + for (int i = 0; i < num_; ++i) { + AnnotatedDatum anno_datum; + // Fill data. + Datum* datum = anno_datum.mutable_datum(); + datum->set_channels(channels_); + datum->set_height(height_); + datum->set_width(width_); + std::string* data = datum->mutable_data(); + for (int j = 0; j < size_; ++j) { + int elem = unique_pixel ? j : i; + data->push_back(static_cast(elem)); + } + // Fill annotation. + if (use_rich_annotation) { + anno_datum.set_type(type); + for (int g = 0; g < i; ++g) { + AnnotationGroup* anno_group = anno_datum.add_annotation_group(); + anno_group->set_group_label(g); + for (int a = 0; a < g; ++a) { + Annotation* anno = anno_group->add_annotation(); + anno->set_instance_id(a); + if (type == AnnotatedDatum_AnnotationType_BBOX) { + NormalizedBBox* bbox = anno->mutable_bbox(); + int b = unique_annotation ? 
a : g; + bbox->set_xmin(b*0.1); + bbox->set_ymin(b*0.1); + bbox->set_xmax(std::min(b*0.1 + 0.2, 1.0)); + bbox->set_ymax(std::min(b*0.1 + 0.2, 1.0)); + bbox->set_difficult(a % 2); + } + } + } + } else { + datum->set_label(i); + } + stringstream ss; + ss << i; + string out; + CHECK(anno_datum.SerializeToString(&out)); + txn->Put(ss.str(), out); + } + txn->Commit(); + db->Close(); + } + + void TestRead() { + LayerParameter param; + param.set_phase(TRAIN); + DataParameter* data_param = param.mutable_data_param(); + data_param->set_batch_size(num_); + data_param->set_source(filename_->c_str()); + data_param->set_backend(backend_); + + const Dtype scale = 3; + TransformationParameter* transform_param = + param.mutable_transform_param(); + transform_param->set_scale(scale); + + AnnotatedDataLayer layer(param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_data_->num(), num_); + EXPECT_EQ(blob_top_data_->channels(), channels_); + EXPECT_EQ(blob_top_data_->height(), height_); + EXPECT_EQ(blob_top_data_->width(), width_); + if (use_rich_annotation_) { + switch (type_) { + case AnnotatedDatum_AnnotationType_BBOX: + EXPECT_EQ(blob_top_label_->num(), 1); + EXPECT_EQ(blob_top_label_->channels(), 1); + EXPECT_EQ(blob_top_label_->height(), 1); + EXPECT_EQ(blob_top_label_->width(), 8); + break; + default: + LOG(FATAL) << "Unknown annotation type."; + break; + } + } else { + EXPECT_EQ(blob_top_label_->num(), num_); + EXPECT_EQ(blob_top_label_->channels(), 1); + EXPECT_EQ(blob_top_label_->height(), 1); + EXPECT_EQ(blob_top_label_->width(), 1); + } + + for (int iter = 0; iter < 5; ++iter) { + layer.Forward(blob_bottom_vec_, blob_top_vec_); + // Check label. 
+ const Dtype* label_data = blob_top_label_->cpu_data(); + int cur_bbox = 0; + for (int i = 0; i < num_; ++i) { + if (use_rich_annotation_) { + if (type_ == AnnotatedDatum_AnnotationType_BBOX) { + EXPECT_EQ(blob_top_label_->num(), 1); + EXPECT_EQ(blob_top_label_->channels(), 1); + EXPECT_EQ(blob_top_label_->height(), BBoxNum(num_)); + EXPECT_EQ(blob_top_label_->width(), 8); + for (int g = 0; g < i; ++g) { + for (int a = 0; a < g; ++a) { + EXPECT_EQ(i, label_data[cur_bbox*8]); + EXPECT_EQ(g, label_data[cur_bbox*8+1]); + EXPECT_EQ(a, label_data[cur_bbox*8+2]); + int b = unique_annotation_ ? a : g; + for (int p = 3; p < 5; ++p) { + EXPECT_NEAR(b*0.1, label_data[cur_bbox*8+p], this->eps_); + } + for (int p = 5; p < 7; ++p) { + EXPECT_NEAR(std::min(b*0.1 + 0.2, 1.0), + label_data[cur_bbox*8+p], this->eps_); + } + EXPECT_EQ(a % 2, label_data[cur_bbox*8+7]); + cur_bbox++; + } + } + } else { + LOG(FATAL) << "Unknown annotation type."; + } + } else { + EXPECT_EQ(i, label_data[i]); + } + } + // Check data. + for (int i = 1; i < num_; ++i) { + for (int j = 0; j < size_; ++j) { + EXPECT_EQ(scale * (unique_pixel_ ? j : i), + blob_top_data_->cpu_data()[i * size_ + j]) + << "debug: iter " << iter << " i " << i << " j " << j; + } + } + } + } + + void TestReshape(DataParameter_DB backend, bool unique_pixel, + bool unique_annotation, bool use_rich_annotation, + AnnotatedDatum_AnnotationType type) { + // Save data of varying shapes. + GetTempDirname(filename_.get()); + LOG(INFO) << "Using temporary dataset " << *filename_; + scoped_ptr db(db::GetDB(backend)); + db->Open(*filename_, db::NEW); + scoped_ptr txn(db->NewTransaction()); + for (int i = 0; i < num_; ++i) { + AnnotatedDatum anno_datum; + // Fill data. 
+ Datum* datum = anno_datum.mutable_datum(); + datum->set_channels(channels_); + datum->set_height(i % 2 + 1); + datum->set_width(i % 4 + 1); + std::string* data = datum->mutable_data(); + const int data_size = + datum->channels() * datum->height() * datum->width(); + for (int j = 0; j < data_size; ++j) { + data->push_back(static_cast(j)); + } + // Fill annotation. + if (use_rich_annotation) { + anno_datum.set_type(type); + for (int g = 0; g < i; ++g) { + AnnotationGroup* anno_group = anno_datum.add_annotation_group(); + anno_group->set_group_label(g); + for (int a = 0; a < g; ++a) { + Annotation* anno = anno_group->add_annotation(); + anno->set_instance_id(a); + if (type == AnnotatedDatum_AnnotationType_BBOX) { + NormalizedBBox* bbox = anno->mutable_bbox(); + int b = unique_annotation ? a : g; + bbox->set_xmin(b*0.1); + bbox->set_ymin(b*0.1); + bbox->set_xmax(std::min(b*0.1 + 0.2, 1.0)); + bbox->set_ymax(std::min(b*0.1 + 0.2, 1.0)); + bbox->set_difficult(a % 2); + } + } + } + } else { + datum->set_label(i); + } + stringstream ss; + ss << i; + string out; + CHECK(anno_datum.SerializeToString(&out)); + txn->Put(ss.str(), out); + } + txn->Commit(); + db->Close(); + + // Load and check data of various shapes. 
+ LayerParameter param; + param.set_phase(TEST); + DataParameter* data_param = param.mutable_data_param(); + data_param->set_batch_size(1); + data_param->set_source(filename_->c_str()); + data_param->set_backend(backend); + + AnnotatedDataLayer layer(param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_data_->num(), 1); + EXPECT_EQ(blob_top_data_->channels(), channels_); + if (use_rich_annotation) { + switch (type) { + case AnnotatedDatum_AnnotationType_BBOX: + EXPECT_EQ(blob_top_label_->num(), 1); + EXPECT_EQ(blob_top_label_->channels(), 1); + EXPECT_EQ(blob_top_label_->height(), 1); + EXPECT_EQ(blob_top_label_->width(), 8); + break; + default: + LOG(FATAL) << "Unknown annotation type."; + break; + } + } else { + EXPECT_EQ(blob_top_label_->num(), 1); + EXPECT_EQ(blob_top_label_->channels(), 1); + EXPECT_EQ(blob_top_label_->height(), 1); + EXPECT_EQ(blob_top_label_->width(), 1); + } + + for (int iter = 0; iter < 3; ++iter) { + layer.Forward(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_data_->height(), iter % 2 + 1); + EXPECT_EQ(blob_top_data_->width(), iter % 4 + 1); + // Check label. + const Dtype* label_data = blob_top_label_->cpu_data(); + if (use_rich_annotation) { + if (type == AnnotatedDatum_AnnotationType_BBOX) { + if (iter <= 1) { + EXPECT_EQ(blob_top_label_->num(), 1); + EXPECT_EQ(blob_top_label_->channels(), 1); + EXPECT_EQ(blob_top_label_->height(), 1); + EXPECT_EQ(blob_top_label_->width(), 8); + for (int i = 0; i < 8; ++i) { + EXPECT_NEAR(label_data[i], -1, this->eps_); + } + } else { + int cur_bbox = 0; + EXPECT_EQ(blob_top_label_->num(), 1); + EXPECT_EQ(blob_top_label_->channels(), 1); + EXPECT_EQ(blob_top_label_->height(), OneBBoxNum(iter)); + EXPECT_EQ(blob_top_label_->width(), 8); + for (int g = 0; g < iter; ++g) { + for (int a = 0; a < g; ++a) { + EXPECT_EQ(0, label_data[cur_bbox*8]); + EXPECT_EQ(g, label_data[cur_bbox*8+1]); + EXPECT_EQ(a, label_data[cur_bbox*8+2]); + int b = unique_annotation ? 
a : g; + for (int p = 3; p < 5; ++p) { + EXPECT_NEAR(b*0.1, label_data[cur_bbox*8+p], this->eps_); + } + for (int p = 5; p < 7; ++p) { + EXPECT_NEAR(std::min(b*0.1 + 0.2, 1.0), + label_data[cur_bbox*8+p], this->eps_); + } + EXPECT_EQ(a % 2, label_data[cur_bbox*8+7]); + cur_bbox++; + } + } + } + } else { + LOG(FATAL) << "Unknown annotation type."; + } + } else { + EXPECT_EQ(iter, label_data[0]); + } + // Check data. + const int channels = blob_top_data_->channels(); + const int height = blob_top_data_->height(); + const int width = blob_top_data_->width(); + for (int c = 0; c < channels; ++c) { + for (int h = 0; h < height; ++h) { + for (int w = 0; w < width; ++w) { + const int idx = (c * height + h) * width + w; + EXPECT_EQ(idx, static_cast(blob_top_data_->cpu_data()[idx])) + << "debug: iter " << iter << " c " << c + << " h " << h << " w " << w; + } + } + } + } + } + + void TestReadCrop(Phase phase) { + const Dtype scale = 3; + LayerParameter param; + param.set_phase(phase); + Caffe::set_random_seed(1701); + + DataParameter* data_param = param.mutable_data_param(); + data_param->set_batch_size(num_); + data_param->set_source(filename_->c_str()); + data_param->set_backend(backend_); + + TransformationParameter* transform_param = + param.mutable_transform_param(); + transform_param->set_scale(scale); + transform_param->set_crop_size(1); + + AnnotatedDataLayer layer(param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_data_->num(), num_); + EXPECT_EQ(blob_top_data_->channels(), channels_); + EXPECT_EQ(blob_top_data_->height(), 1); + EXPECT_EQ(blob_top_data_->width(), 1); + EXPECT_EQ(blob_top_label_->num(), num_); + EXPECT_EQ(blob_top_label_->channels(), 1); + EXPECT_EQ(blob_top_label_->height(), 1); + EXPECT_EQ(blob_top_label_->width(), 1); + + for (int iter = 0; iter < 5; ++iter) { + layer.Forward(blob_bottom_vec_, blob_top_vec_); + for (int i = 0; i < num_; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + int num_with_center_value 
= 0; + for (int i = 0; i < num_; ++i) { + for (int j = 0; j < channels_; ++j) { + const Dtype center_value = + scale * ((ceil(height_ / 2.0) - 1) * width_ + + ceil(width_ / 2.0) - 1 + j * spatial_dim_); + num_with_center_value += + (center_value == blob_top_data_->cpu_data()[i * 2 + j]); + // At TEST time, check that we always get center value. + if (phase == caffe::TEST) { + EXPECT_EQ(center_value, + this->blob_top_data_->cpu_data()[i * channels_ + j]) + << "debug: iter " << iter << " i " << i << " j " << j; + } + } + } + // At TRAIN time, check that we did not get the center crop all 10 times. + // (This check fails with probability 1-1/12^10 in a correct + // implementation, so we call set_random_seed.) + if (phase == caffe::TRAIN) { + EXPECT_LT(num_with_center_value, 10); + } + } + } + + void TestReadCropTrainSequenceSeeded() { + LayerParameter param; + param.set_phase(TRAIN); + DataParameter* data_param = param.mutable_data_param(); + data_param->set_batch_size(num_); + data_param->set_source(filename_->c_str()); + data_param->set_backend(backend_); + + TransformationParameter* transform_param = + param.mutable_transform_param(); + transform_param->set_crop_size(1); + transform_param->set_mirror(true); + + // Get crop sequence with Caffe seed 1701. + Caffe::set_random_seed(seed_); + vector > crop_sequence; + { + AnnotatedDataLayer layer1(param); + layer1.SetUp(blob_bottom_vec_, blob_top_vec_); + for (int iter = 0; iter < 2; ++iter) { + layer1.Forward(blob_bottom_vec_, blob_top_vec_); + for (int i = 0; i < num_; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + vector iter_crop_sequence; + for (int i = 0; i < num_; ++i) { + for (int j = 0; j < channels_; ++j) { + iter_crop_sequence.push_back( + blob_top_data_->cpu_data()[i * channels_ + j]); + } + } + crop_sequence.push_back(iter_crop_sequence); + } + } // destroy 1st data layer and unlock the db + + // Get crop sequence after reseeding Caffe with 1701. 
+ // Check that the sequence is the same as the original. + Caffe::set_random_seed(seed_); + AnnotatedDataLayer layer2(param); + layer2.SetUp(blob_bottom_vec_, blob_top_vec_); + for (int iter = 0; iter < 2; ++iter) { + layer2.Forward(blob_bottom_vec_, blob_top_vec_); + for (int i = 0; i < num_; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + for (int i = 0; i < num_; ++i) { + for (int j = 0; j < channels_; ++j) { + EXPECT_EQ(crop_sequence[iter][i * channels_ + j], + blob_top_data_->cpu_data()[i * channels_ + j]) + << "debug: iter " << iter << " i " << i << " j " << j; + } + } + } + } + + void TestReadCropTrainSequenceUnseeded() { + LayerParameter param; + param.set_phase(TRAIN); + DataParameter* data_param = param.mutable_data_param(); + data_param->set_batch_size(num_); + data_param->set_source(filename_->c_str()); + data_param->set_backend(backend_); + + TransformationParameter* transform_param = + param.mutable_transform_param(); + transform_param->set_crop_size(1); + transform_param->set_mirror(true); + + // Get crop sequence with Caffe seed 1701, srand seed 1701. + Caffe::set_random_seed(seed_); + srand(seed_); + vector > crop_sequence; + { + AnnotatedDataLayer layer1(param); + layer1.SetUp(blob_bottom_vec_, blob_top_vec_); + for (int iter = 0; iter < 2; ++iter) { + layer1.Forward(blob_bottom_vec_, blob_top_vec_); + for (int i = 0; i < num_; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + vector iter_crop_sequence; + for (int i = 0; i < num_; ++i) { + for (int j = 0; j < channels_; ++j) { + iter_crop_sequence.push_back( + blob_top_data_->cpu_data()[i * channels_ + j]); + } + } + crop_sequence.push_back(iter_crop_sequence); + } + } // destroy 1st data layer and unlock the db + + // Get crop sequence continuing from previous Caffe RNG state; reseed + // srand with 1701. Check that the sequence differs from the original. 
+ srand(seed_); + AnnotatedDataLayer layer2(param); + layer2.SetUp(blob_bottom_vec_, blob_top_vec_); + for (int iter = 0; iter < 2; ++iter) { + layer2.Forward(blob_bottom_vec_, blob_top_vec_); + for (int i = 0; i < num_; ++i) { + EXPECT_EQ(i, blob_top_label_->cpu_data()[i]); + } + int num_sequence_matches = 0; + for (int i = 0; i < num_; ++i) { + for (int j = 0; j < channels_; ++j) { + num_sequence_matches += + (crop_sequence[iter][i * channels_ + j] == + blob_top_data_->cpu_data()[i * channels_ + j]); + } + } + EXPECT_LT(num_sequence_matches, num_ * channels_); + } + } + + virtual ~AnnotatedDataLayerTest() { + delete blob_top_data_; + delete blob_top_label_; + } + + DataParameter_DB backend_; + shared_ptr filename_; + Blob* const blob_top_data_; + Blob* const blob_top_label_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + int seed_; + int num_; + int channels_; + int height_; + int width_; + Dtype eps_; + int spatial_dim_; + int size_; + bool unique_pixel_; + bool unique_annotation_; + bool use_rich_annotation_; + AnnotatedDatum_AnnotationType type_; +}; + +TYPED_TEST_CASE(AnnotatedDataLayerTest, TestDtypesAndDevices); + +#ifdef USE_LEVELDB +TYPED_TEST(AnnotatedDataLayerTest, TestReadLevelDB) { + const AnnotatedDatum_AnnotationType type = AnnotatedDatum_AnnotationType_BBOX; + for (int p = 0; p < kNumChoices; ++p) { + bool unique_pixel = kBoolChoices[p]; + for (int r = 0; r < kNumChoices; ++r) { + bool use_rich_annotation = kBoolChoices[r]; + for (int a = 0; a < kNumChoices; ++a) { + if (!use_rich_annotation) { + continue; + } + bool unique_annotation = kBoolChoices[a]; + this->Fill(DataParameter_DB_LEVELDB, unique_pixel, unique_annotation, + use_rich_annotation, type); + this->TestRead(); + } + } + } +} + +TYPED_TEST(AnnotatedDataLayerTest, TestReshapeLevelDB) { + const AnnotatedDatum_AnnotationType type = AnnotatedDatum_AnnotationType_BBOX; + for (int p = 0; p < kNumChoices; ++p) { + bool unique_pixel = kBoolChoices[p]; + for (int r = 0; r < 
kNumChoices; ++r) { + bool use_rich_annotation = kBoolChoices[r]; + for (int a = 0; a < kNumChoices; ++a) { + if (!use_rich_annotation) { + continue; + } + bool unique_annotation = kBoolChoices[a]; + this->TestReshape(DataParameter_DB_LEVELDB, unique_pixel, + unique_annotation, use_rich_annotation, type); + } + } + } +} + +TYPED_TEST(AnnotatedDataLayerTest, TestReadCropTrainLevelDB) { + const bool unique_pixel = true; // all pixels the same; images different + const bool unique_annotation = false; // all anno the same; groups different + const bool use_rich_annotation = false; + AnnotatedDatum_AnnotationType type = AnnotatedDatum_AnnotationType_BBOX; + this->Fill(DataParameter_DB_LEVELDB, unique_pixel, unique_annotation, + use_rich_annotation, type); + this->TestReadCrop(TRAIN); +} + +// Test that the sequence of random crops is consistent when using +// Caffe::set_random_seed. +TYPED_TEST(AnnotatedDataLayerTest, TestReadCropTrainSequenceSeededLevelDB) { + const bool unique_pixel = true; // all pixels the same; images different + const bool unique_annotation = false; // all anno the same; groups different + const bool use_rich_annotation = false; + AnnotatedDatum_AnnotationType type = AnnotatedDatum_AnnotationType_BBOX; + this->Fill(DataParameter_DB_LEVELDB, unique_pixel, unique_annotation, + use_rich_annotation, type); + this->TestReadCropTrainSequenceSeeded(); +} + +// Test that the sequence of random crops differs across iterations when +// Caffe::set_random_seed isn't called (and seeds from srand are ignored). 
+TYPED_TEST(AnnotatedDataLayerTest, TestReadCropTrainSequenceUnseededLevelDB) { + const bool unique_pixel = true; // all pixels the same; images different + const bool unique_annotation = false; // all anno the same; groups different + const bool use_rich_annotation = false; + AnnotatedDatum_AnnotationType type = AnnotatedDatum_AnnotationType_BBOX; + this->Fill(DataParameter_DB_LEVELDB, unique_pixel, unique_annotation, + use_rich_annotation, type); + this->TestReadCropTrainSequenceUnseeded(); +} + +TYPED_TEST(AnnotatedDataLayerTest, TestReadCropTestLevelDB) { + const bool unique_pixel = true; // all pixels the same; images different + const bool unique_annotation = false; // all anno the same; groups different + const bool use_rich_annotation = false; + AnnotatedDatum_AnnotationType type = AnnotatedDatum_AnnotationType_BBOX; + this->Fill(DataParameter_DB_LEVELDB, unique_pixel, unique_annotation, + use_rich_annotation, type); + this->TestReadCrop(TEST); +} +#endif // USE_LEVELDB + +#ifdef USE_LMDB +TYPED_TEST(AnnotatedDataLayerTest, TestReadLMDB) { + const AnnotatedDatum_AnnotationType type = AnnotatedDatum_AnnotationType_BBOX; + for (int p = 0; p < kNumChoices; ++p) { + bool unique_pixel = kBoolChoices[p]; + for (int r = 0; r < kNumChoices; ++r) { + bool use_rich_annotation = kBoolChoices[r]; + for (int a = 0; a < kNumChoices; ++a) { + if (!use_rich_annotation) { + continue; + } + bool unique_annotation = kBoolChoices[a]; + this->Fill(DataParameter_DB_LMDB, unique_pixel, unique_annotation, + use_rich_annotation, type); + this->TestRead(); + } + } + } +} + +TYPED_TEST(AnnotatedDataLayerTest, TestReshapeLMDB) { + const AnnotatedDatum_AnnotationType type = AnnotatedDatum_AnnotationType_BBOX; + for (int p = 0; p < kNumChoices; ++p) { + bool unique_pixel = kBoolChoices[p]; + for (int r = 0; r < kNumChoices; ++r) { + bool use_rich_annotation = kBoolChoices[r]; + for (int a = 0; a < kNumChoices; ++a) { + if (!use_rich_annotation) { + continue; + } + bool unique_annotation 
= kBoolChoices[a]; + this->TestReshape(DataParameter_DB_LMDB, unique_pixel, + unique_annotation, use_rich_annotation, type); + } + } + } +} + +TYPED_TEST(AnnotatedDataLayerTest, TestReadCropTrainLMDB) { + const bool unique_pixel = true; // all pixels the same; images different + const bool unique_annotation = false; // all anno the same; groups different + const bool use_rich_annotation = false; + AnnotatedDatum_AnnotationType type = AnnotatedDatum_AnnotationType_BBOX; + this->Fill(DataParameter_DB_LMDB, unique_pixel, unique_annotation, + use_rich_annotation, type); + this->TestReadCrop(TRAIN); +} + +// Test that the sequence of random crops is consistent when using +// Caffe::set_random_seed. +TYPED_TEST(AnnotatedDataLayerTest, TestReadCropTrainSequenceSeededLMDB) { + const bool unique_pixel = true; // all pixels the same; images different + const bool unique_annotation = false; // all anno the same; groups different + const bool use_rich_annotation = false; + AnnotatedDatum_AnnotationType type = AnnotatedDatum_AnnotationType_BBOX; + this->Fill(DataParameter_DB_LMDB, unique_pixel, unique_annotation, + use_rich_annotation, type); + this->TestReadCropTrainSequenceSeeded(); +} + +// Test that the sequence of random crops differs across iterations when +// Caffe::set_random_seed isn't called (and seeds from srand are ignored). 
+TYPED_TEST(AnnotatedDataLayerTest, TestReadCropTrainSequenceUnseededLMDB) { + const bool unique_pixel = true; // all pixels the same; images different + const bool unique_annotation = false; // all anno the same; groups different + const bool use_rich_annotation = false; + AnnotatedDatum_AnnotationType type = AnnotatedDatum_AnnotationType_BBOX; + this->Fill(DataParameter_DB_LMDB, unique_pixel, unique_annotation, + use_rich_annotation, type); + this->TestReadCropTrainSequenceUnseeded(); +} + +TYPED_TEST(AnnotatedDataLayerTest, TestReadCropTestLMDB) { + const bool unique_pixel = true; // all pixels the same; images different + const bool unique_annotation = false; // all anno the same; groups different + const bool use_rich_annotation = false; + AnnotatedDatum_AnnotationType type = AnnotatedDatum_AnnotationType_BBOX; + this->Fill(DataParameter_DB_LMDB, unique_pixel, unique_annotation, + use_rich_annotation, type); + this->TestReadCrop(TEST); +} + +#endif // USE_LMDB +} // namespace caffe +#endif // USE_OPENCV diff --git a/src/caffe/test/test_argmax_layer.cpp b/src/caffe/test/test_argmax_layer.cpp index 472e6652239..56cbe29016e 100644 --- a/src/caffe/test/test_argmax_layer.cpp +++ b/src/caffe/test/test_argmax_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_batch_norm_layer.cpp b/src/caffe/test/test_batch_norm_layer.cpp index 936b93a1756..2db5f4d05bb 100644 --- a/src/caffe/test/test_batch_norm_layer.cpp +++ b/src/caffe/test/test_batch_norm_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include #include diff --git a/src/caffe/test/test_batch_reindex_layer.cpp b/src/caffe/test/test_batch_reindex_layer.cpp index 9ea1a2f6f47..d609d23d271 100644 --- a/src/caffe/test/test_batch_reindex_layer.cpp +++ b/src/caffe/test/test_batch_reindex_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_bbox_util.cpp b/src/caffe/test/test_bbox_util.cpp new file mode 100644 index 00000000000..83d20d9447b --- /dev/null +++ b/src/caffe/test/test_bbox_util.cpp @@ -0,0 +1,2187 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/util/bbox_util.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +static const float eps = 1e-6; + +void FillBBoxes(vector* gt_bboxes, + vector* pred_bboxes) { + gt_bboxes->clear(); + pred_bboxes->clear(); + NormalizedBBox bbox; + + // Fill in ground truth bboxes. + bbox.set_label(1); + bbox.set_xmin(0.1); + bbox.set_ymin(0.1); + bbox.set_xmax(0.3); + bbox.set_ymax(0.3); + gt_bboxes->push_back(bbox); + + bbox.set_label(2); + bbox.set_xmin(0.3); + bbox.set_ymin(0.3); + bbox.set_xmax(0.6); + bbox.set_ymax(0.5); + gt_bboxes->push_back(bbox); + + // Fill in prediction bboxes. 
+ // 4/9 with label 1 + // 0 with label 2 + bbox.set_xmin(0.1); + bbox.set_ymin(0); + bbox.set_xmax(0.4); + bbox.set_ymax(0.3); + pred_bboxes->push_back(bbox); + + // 2/6 with label 1 + // 0 with label 2 + bbox.set_xmin(0); + bbox.set_ymin(0.1); + bbox.set_xmax(0.2); + bbox.set_ymax(0.3); + pred_bboxes->push_back(bbox); + + // 2/8 with label 1 + // 1/11 with label 2 + bbox.set_xmin(0.2); + bbox.set_ymin(0.1); + bbox.set_xmax(0.4); + bbox.set_ymax(0.4); + pred_bboxes->push_back(bbox); + + // 0 with label 1 + // 4/8 with label 2 + bbox.set_xmin(0.4); + bbox.set_ymin(0.3); + bbox.set_xmax(0.7); + bbox.set_ymax(0.5); + pred_bboxes->push_back(bbox); + + // 0 with label 1 + // 1/11 with label 2 + bbox.set_xmin(0.5); + bbox.set_ymin(0.4); + bbox.set_xmax(0.7); + bbox.set_ymax(0.7); + pred_bboxes->push_back(bbox); + + // 0 with label 1 + // 0 with label 2 + bbox.set_xmin(0.7); + bbox.set_ymin(0.7); + bbox.set_xmax(0.8); + bbox.set_ymax(0.8); + pred_bboxes->push_back(bbox); +} + +template +class BBoxUtilTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; +}; + +class CPUBBoxUtilTest : public BBoxUtilTest > { +}; + +TEST_F(CPUBBoxUtilTest, TestIntersectBBox) { + NormalizedBBox bbox_ref; + bbox_ref.set_xmin(0.2); + bbox_ref.set_ymin(0.3); + bbox_ref.set_xmax(0.3); + bbox_ref.set_ymax(0.5); + + NormalizedBBox bbox_test; + NormalizedBBox bbox_intersect; + + // Partially overlapped. + bbox_test.set_xmin(0.1); + bbox_test.set_ymin(0.1); + bbox_test.set_xmax(0.3); + bbox_test.set_ymax(0.4); + IntersectBBox(bbox_ref, bbox_test, &bbox_intersect); + EXPECT_NEAR(bbox_intersect.xmin(), 0.2, eps); + EXPECT_NEAR(bbox_intersect.ymin(), 0.3, eps); + EXPECT_NEAR(bbox_intersect.xmax(), 0.3, eps); + EXPECT_NEAR(bbox_intersect.ymax(), 0.4, eps); + + // Fully contain. 
+ bbox_test.set_xmin(0.1); + bbox_test.set_ymin(0.1); + bbox_test.set_xmax(0.4); + bbox_test.set_ymax(0.6); + IntersectBBox(bbox_ref, bbox_test, &bbox_intersect); + EXPECT_NEAR(bbox_intersect.xmin(), 0.2, eps); + EXPECT_NEAR(bbox_intersect.ymin(), 0.3, eps); + EXPECT_NEAR(bbox_intersect.xmax(), 0.3, eps); + EXPECT_NEAR(bbox_intersect.ymax(), 0.5, eps); + + // Outside. + bbox_test.set_xmin(0); + bbox_test.set_ymin(0); + bbox_test.set_xmax(0.1); + bbox_test.set_ymax(0.1); + IntersectBBox(bbox_ref, bbox_test, &bbox_intersect); + EXPECT_NEAR(bbox_intersect.xmin(), 0, eps); + EXPECT_NEAR(bbox_intersect.ymin(), 0, eps); + EXPECT_NEAR(bbox_intersect.xmax(), 0, eps); + EXPECT_NEAR(bbox_intersect.ymax(), 0, eps); +} + +TEST_F(CPUBBoxUtilTest, TestBBoxSize) { + NormalizedBBox bbox; + float size; + + // Valid box. + bbox.set_xmin(0.2); + bbox.set_ymin(0.3); + bbox.set_xmax(0.3); + bbox.set_ymax(0.5); + size = BBoxSize(bbox); + EXPECT_NEAR(size, 0.02, eps); + + // A line. + bbox.set_xmin(0.2); + bbox.set_ymin(0.3); + bbox.set_xmax(0.2); + bbox.set_ymax(0.5); + size = BBoxSize(bbox); + EXPECT_NEAR(size, 0., eps); + + // Invalid box. 
+ bbox.set_xmin(0.2); + bbox.set_ymin(0.3); + bbox.set_xmax(0.1); + bbox.set_ymax(0.5); + size = BBoxSize(bbox); + EXPECT_NEAR(size, 0., eps); +} + +TEST_F(CPUBBoxUtilTest, TestScaleBBox) { + NormalizedBBox bbox; + bbox.set_xmin(0.21); + bbox.set_ymin(0.32); + bbox.set_xmax(0.33); + bbox.set_ymax(0.54); + NormalizedBBox scale_bbox; + float eps = 1e-5; + + int height = 10; + int width = 20; + ScaleBBox(bbox, height, width, &scale_bbox); + EXPECT_NEAR(scale_bbox.xmin(), 4.2, eps); + EXPECT_NEAR(scale_bbox.ymin(), 3.2, eps); + EXPECT_NEAR(scale_bbox.xmax(), 6.6, eps); + EXPECT_NEAR(scale_bbox.ymax(), 5.4, eps); + EXPECT_NEAR(scale_bbox.size(), 10.88, eps); + + height = 1; + width = 1; + ScaleBBox(bbox, height, width, &scale_bbox); + EXPECT_NEAR(bbox.xmin(), scale_bbox.xmin(), eps); + EXPECT_NEAR(bbox.ymin(), scale_bbox.ymin(), eps); + EXPECT_NEAR(bbox.xmax(), scale_bbox.xmax(), eps); + EXPECT_NEAR(bbox.ymax(), scale_bbox.ymax(), eps); + EXPECT_NEAR(scale_bbox.size(), 0.0264, eps); +} + +TEST_F(CPUBBoxUtilTest, TestClipBBox) { + NormalizedBBox bbox; + NormalizedBBox clip_bbox; + + bbox.set_xmin(0.2); + bbox.set_ymin(0.3); + bbox.set_xmax(0.3); + bbox.set_ymax(0.5); + ClipBBox(bbox, &clip_bbox); + EXPECT_NEAR(bbox.xmin(), clip_bbox.xmin(), eps); + EXPECT_NEAR(bbox.ymin(), clip_bbox.ymin(), eps); + EXPECT_NEAR(bbox.xmax(), clip_bbox.xmax(), eps); + EXPECT_NEAR(bbox.ymax(), clip_bbox.ymax(), eps); + EXPECT_NEAR(clip_bbox.size(), 0.02, eps); + + bbox.set_xmin(-0.2); + bbox.set_ymin(-0.3); + bbox.set_xmax(1.3); + bbox.set_ymax(1.5); + ClipBBox(bbox, &clip_bbox); + EXPECT_NEAR(clip_bbox.xmin(), 0., eps); + EXPECT_NEAR(clip_bbox.ymin(), 0., eps); + EXPECT_NEAR(clip_bbox.xmax(), 1., eps); + EXPECT_NEAR(clip_bbox.ymax(), 1., eps); + EXPECT_NEAR(clip_bbox.size(), 1., eps); +} + +TEST_F(CPUBBoxUtilTest, TestOutputBBox) { + NormalizedBBox bbox; + bbox.set_xmin(-0.1); + bbox.set_ymin(0.3); + bbox.set_xmax(0.3); + bbox.set_ymax(0.5); + pair img_size(300, 500); + bool has_resize = 
false; + ResizeParameter resize_param; + resize_param.set_height(300); + resize_param.set_width(300); + NormalizedBBox out_bbox; + + OutputBBox(bbox, img_size, has_resize, resize_param, &out_bbox); + CHECK_EQ(out_bbox.xmin(), 0.); + CHECK_EQ(out_bbox.ymin(), 90.); + CHECK_EQ(out_bbox.xmax(), 150.); + CHECK_EQ(out_bbox.ymax(), 150.); + + has_resize = true; + resize_param.set_resize_mode(ResizeParameter_Resize_mode_WARP); + OutputBBox(bbox, img_size, has_resize, resize_param, &out_bbox); + CHECK_EQ(out_bbox.xmin(), 0.); + CHECK_EQ(out_bbox.ymin(), 90.); + CHECK_EQ(out_bbox.xmax(), 150.); + CHECK_EQ(out_bbox.ymax(), 150.); + + resize_param.set_resize_mode(ResizeParameter_Resize_mode_FIT_SMALL_SIZE); + OutputBBox(bbox, img_size, has_resize, resize_param, &out_bbox); + CHECK_EQ(out_bbox.xmin(), 0.); + CHECK_EQ(out_bbox.ymin(), 90.); + CHECK_EQ(out_bbox.xmax(), 150.); + CHECK_EQ(out_bbox.ymax(), 150.); + + resize_param.set_resize_mode(ResizeParameter_Resize_mode_FIT_SMALL_SIZE); + resize_param.set_height_scale(300); + resize_param.set_width_scale(300); + OutputBBox(bbox, img_size, has_resize, resize_param, &out_bbox); + CHECK_EQ(out_bbox.xmin(), 0.); + CHECK_EQ(out_bbox.ymin(), 90.); + CHECK_EQ(out_bbox.xmax(), 90.); + CHECK_EQ(out_bbox.ymax(), 150.); + + resize_param.set_resize_mode( + ResizeParameter_Resize_mode_FIT_LARGE_SIZE_AND_PAD); + OutputBBox(bbox, img_size, has_resize, resize_param, &out_bbox); + CHECK_EQ(out_bbox.xmin(), 0.); + CHECK_EQ(out_bbox.ymin(), 50.); + CHECK_EQ(out_bbox.xmax(), 150.); + CHECK_EQ(out_bbox.ymax(), 150.); + + img_size.first = 500; + img_size.second = 300; + OutputBBox(bbox, img_size, has_resize, resize_param, &out_bbox); + CHECK_EQ(out_bbox.xmin(), 0.); + CHECK_EQ(out_bbox.ymin(), 150.); + CHECK_EQ(out_bbox.xmax(), 50.); + CHECK_EQ(out_bbox.ymax(), 250.); +} + +TEST_F(CPUBBoxUtilTest, TestJaccardOverlap) { + NormalizedBBox bbox1; + bbox1.set_xmin(0.2); + bbox1.set_ymin(0.3); + bbox1.set_xmax(0.3); + bbox1.set_ymax(0.5); + + 
NormalizedBBox bbox2; + float overlap; + + // Partially overlapped. + bbox2.set_xmin(0.1); + bbox2.set_ymin(0.1); + bbox2.set_xmax(0.3); + bbox2.set_ymax(0.4); + overlap = JaccardOverlap(bbox1, bbox2); + EXPECT_NEAR(overlap, 1./7, eps); + + // Fully contain. + bbox2.set_xmin(0.1); + bbox2.set_ymin(0.1); + bbox2.set_xmax(0.4); + bbox2.set_ymax(0.6); + overlap = JaccardOverlap(bbox1, bbox2); + EXPECT_NEAR(overlap, 2./15, eps); + + // Outside. + bbox2.set_xmin(0); + bbox2.set_ymin(0); + bbox2.set_xmax(0.1); + bbox2.set_ymax(0.1); + overlap = JaccardOverlap(bbox1, bbox2); + EXPECT_NEAR(overlap, 0., eps); +} + +TEST_F(CPUBBoxUtilTest, TestEncodeBBoxCorner) { + NormalizedBBox prior_bbox; + prior_bbox.set_xmin(0.1); + prior_bbox.set_ymin(0.1); + prior_bbox.set_xmax(0.3); + prior_bbox.set_ymax(0.3); + vector prior_variance(4, 0.1); + + NormalizedBBox bbox; + bbox.set_xmin(0); + bbox.set_ymin(0.2); + bbox.set_xmax(0.4); + bbox.set_ymax(0.5); + + CodeType code_type = PriorBoxParameter_CodeType_CORNER; + NormalizedBBox encode_bbox; + + bool encode_variance_in_target = true; + EncodeBBox(prior_bbox, prior_variance, code_type, encode_variance_in_target, + bbox, &encode_bbox); + EXPECT_NEAR(encode_bbox.xmin(), -0.1, eps); + EXPECT_NEAR(encode_bbox.ymin(), 0.1, eps); + EXPECT_NEAR(encode_bbox.xmax(), 0.1, eps); + EXPECT_NEAR(encode_bbox.ymax(), 0.2, eps); + + encode_variance_in_target = false; + EncodeBBox(prior_bbox, prior_variance, code_type, encode_variance_in_target, + bbox, &encode_bbox); + EXPECT_NEAR(encode_bbox.xmin(), -1, eps); + EXPECT_NEAR(encode_bbox.ymin(), 1, eps); + EXPECT_NEAR(encode_bbox.xmax(), 1, eps); + EXPECT_NEAR(encode_bbox.ymax(), 2, eps); +} + +TEST_F(CPUBBoxUtilTest, TestEncodeBBoxCenterSize) { + NormalizedBBox prior_bbox; + prior_bbox.set_xmin(0.1); + prior_bbox.set_ymin(0.1); + prior_bbox.set_xmax(0.3); + prior_bbox.set_ymax(0.3); + vector prior_variance; + prior_variance.push_back(0.1); + prior_variance.push_back(0.1); + prior_variance.push_back(0.2); 
+ prior_variance.push_back(0.2); + + NormalizedBBox bbox; + bbox.set_xmin(0); + bbox.set_ymin(0.2); + bbox.set_xmax(0.4); + bbox.set_ymax(0.5); + + CodeType code_type = PriorBoxParameter_CodeType_CENTER_SIZE; + NormalizedBBox encode_bbox; + + bool encode_variance_in_target = true; + EncodeBBox(prior_bbox, prior_variance, code_type, encode_variance_in_target, + bbox, &encode_bbox); + EXPECT_NEAR(encode_bbox.xmin(), 0, eps); + EXPECT_NEAR(encode_bbox.ymin(), 0.75, eps); + EXPECT_NEAR(encode_bbox.xmax(), log(2.), eps); + EXPECT_NEAR(encode_bbox.ymax(), log(3./2), eps); + + encode_variance_in_target = false; + EncodeBBox(prior_bbox, prior_variance, code_type, encode_variance_in_target, + bbox, &encode_bbox); + float eps = 1e-5; + EXPECT_NEAR(encode_bbox.xmin(), 0 / 0.1, eps); + EXPECT_NEAR(encode_bbox.ymin(), 0.75 / 0.1, eps); + EXPECT_NEAR(encode_bbox.xmax(), log(2.) / 0.2, eps); + EXPECT_NEAR(encode_bbox.ymax(), log(3./2) / 0.2, eps); +} + +TEST_F(CPUBBoxUtilTest, TestDecodeBBoxCorner) { + NormalizedBBox prior_bbox; + prior_bbox.set_xmin(0.1); + prior_bbox.set_ymin(0.1); + prior_bbox.set_xmax(0.3); + prior_bbox.set_ymax(0.3); + vector prior_variance(4, 0.1); + + NormalizedBBox bbox; + bbox.set_xmin(-1); + bbox.set_ymin(1); + bbox.set_xmax(1); + bbox.set_ymax(2); + + CodeType code_type = PriorBoxParameter_CodeType_CORNER; + NormalizedBBox decode_bbox; + + bool variance_encoded_in_target = false; + DecodeBBox(prior_bbox, prior_variance, code_type, variance_encoded_in_target, + false, bbox, &decode_bbox); + EXPECT_NEAR(decode_bbox.xmin(), 0, eps); + EXPECT_NEAR(decode_bbox.ymin(), 0.2, eps); + EXPECT_NEAR(decode_bbox.xmax(), 0.4, eps); + EXPECT_NEAR(decode_bbox.ymax(), 0.5, eps); + + variance_encoded_in_target = true; + DecodeBBox(prior_bbox, prior_variance, code_type, variance_encoded_in_target, + false, bbox, &decode_bbox); + EXPECT_NEAR(decode_bbox.xmin(), -0.9, eps); + EXPECT_NEAR(decode_bbox.ymin(), 1.1, eps); + EXPECT_NEAR(decode_bbox.xmax(), 1.3, eps); + 
EXPECT_NEAR(decode_bbox.ymax(), 2.3, eps); +} + +TEST_F(CPUBBoxUtilTest, TestDecodeBBoxCenterSize) { + NormalizedBBox prior_bbox; + prior_bbox.set_xmin(0.1); + prior_bbox.set_ymin(0.1); + prior_bbox.set_xmax(0.3); + prior_bbox.set_ymax(0.3); + vector prior_variance; + prior_variance.push_back(0.1); + prior_variance.push_back(0.1); + prior_variance.push_back(0.2); + prior_variance.push_back(0.2); + + NormalizedBBox bbox; + bbox.set_xmin(0); + bbox.set_ymin(0.75); + bbox.set_xmax(log(2)); + bbox.set_ymax(log(3./2)); + + CodeType code_type = PriorBoxParameter_CodeType_CENTER_SIZE; + NormalizedBBox decode_bbox; + + bool variance_encoded_in_target = true; + DecodeBBox(prior_bbox, prior_variance, code_type, variance_encoded_in_target, + false, bbox, &decode_bbox); + EXPECT_NEAR(decode_bbox.xmin(), 0, eps); + EXPECT_NEAR(decode_bbox.ymin(), 0.2, eps); + EXPECT_NEAR(decode_bbox.xmax(), 0.4, eps); + EXPECT_NEAR(decode_bbox.ymax(), 0.5, eps); + + bbox.set_xmin(0); + bbox.set_ymin(7.5); + bbox.set_xmax(log(2) * 5); + bbox.set_ymax(log(3./2) * 5); + variance_encoded_in_target = false; + DecodeBBox(prior_bbox, prior_variance, code_type, variance_encoded_in_target, + false, bbox, &decode_bbox); + EXPECT_NEAR(decode_bbox.xmin(), 0, eps); + EXPECT_NEAR(decode_bbox.ymin(), 0.2, eps); + EXPECT_NEAR(decode_bbox.xmax(), 0.4, eps); + EXPECT_NEAR(decode_bbox.ymax(), 0.5, eps); +} + +TEST_F(CPUBBoxUtilTest, TestDecodeBBoxesCorner) { + vector prior_bboxes; + vector > prior_variances; + vector bboxes; + for (int i = 1; i < 5; ++i) { + NormalizedBBox prior_bbox; + prior_bbox.set_xmin(0.1*i); + prior_bbox.set_ymin(0.1*i); + prior_bbox.set_xmax(0.1*i + 0.2); + prior_bbox.set_ymax(0.1*i + 0.2); + prior_bboxes.push_back(prior_bbox); + + vector prior_variance(4, 0.1); + prior_variances.push_back(prior_variance); + + NormalizedBBox bbox; + bbox.set_xmin(-1 * (i%2)); + bbox.set_ymin((i+1)%2); + bbox.set_xmax((i+1)%2); + bbox.set_ymax(i%2); + bboxes.push_back(bbox); + } + + CodeType code_type = 
PriorBoxParameter_CodeType_CORNER; + vector decode_bboxes; + + bool variance_encoded_in_target = false; + DecodeBBoxes(prior_bboxes, prior_variances, code_type, + variance_encoded_in_target, false, bboxes, &decode_bboxes); + EXPECT_EQ(decode_bboxes.size(), 4); + for (int i = 1; i < 5; ++i) { + EXPECT_NEAR(decode_bboxes[i-1].xmin(), 0.1*i + i%2 * -0.1, eps); + EXPECT_NEAR(decode_bboxes[i-1].ymin(), 0.1*i + (i+1)%2 * 0.1, eps); + EXPECT_NEAR(decode_bboxes[i-1].xmax(), 0.1*i + 0.2 + (i+1)%2 * 0.1, eps); + EXPECT_NEAR(decode_bboxes[i-1].ymax(), 0.1*i + 0.2 + i%2 * 0.1, eps); + } + + variance_encoded_in_target = true; + DecodeBBoxes(prior_bboxes, prior_variances, code_type, + variance_encoded_in_target, false, bboxes, &decode_bboxes); + EXPECT_EQ(decode_bboxes.size(), 4); + for (int i = 1; i < 5; ++i) { + EXPECT_NEAR(decode_bboxes[i-1].xmin(), 0.1*i + i%2 * -1, eps); + EXPECT_NEAR(decode_bboxes[i-1].ymin(), 0.1*i + (i+1)%2, eps); + EXPECT_NEAR(decode_bboxes[i-1].xmax(), 0.1*i + 0.2 + (i+1)%2, eps); + EXPECT_NEAR(decode_bboxes[i-1].ymax(), 0.1*i + 0.2 + i%2, eps); + } +} + +TEST_F(CPUBBoxUtilTest, TestDecodeBBoxesCenterSize) { + vector prior_bboxes; + vector > prior_variances; + vector bboxes; + for (int i = 1; i < 5; ++i) { + NormalizedBBox prior_bbox; + prior_bbox.set_xmin(0.1*i); + prior_bbox.set_ymin(0.1*i); + prior_bbox.set_xmax(0.1*i + 0.2); + prior_bbox.set_ymax(0.1*i + 0.2); + prior_bboxes.push_back(prior_bbox); + + vector prior_variance; + prior_variance.push_back(0.1); + prior_variance.push_back(0.1); + prior_variance.push_back(0.2); + prior_variance.push_back(0.2); + prior_variances.push_back(prior_variance); + + NormalizedBBox bbox; + bbox.set_xmin(0); + bbox.set_ymin(0.75); + bbox.set_xmax(log(2.)); + bbox.set_ymax(log(3./2)); + bboxes.push_back(bbox); + } + + CodeType code_type = PriorBoxParameter_CodeType_CENTER_SIZE; + vector decode_bboxes; + + bool variance_encoded_in_target = true; + DecodeBBoxes(prior_bboxes, prior_variances, code_type, + 
variance_encoded_in_target, false, bboxes, &decode_bboxes); + EXPECT_EQ(decode_bboxes.size(), 4); + float eps = 1e-5; + for (int i = 1; i < 5; ++i) { + EXPECT_NEAR(decode_bboxes[i-1].xmin(), 0 + (i - 1) * 0.1, eps); + EXPECT_NEAR(decode_bboxes[i-1].ymin(), 0.2 + (i - 1) * 0.1, eps); + EXPECT_NEAR(decode_bboxes[i-1].xmax(), 0.4 + (i - 1) * 0.1, eps); + EXPECT_NEAR(decode_bboxes[i-1].ymax(), 0.5 + (i - 1) * 0.1, eps); + } + + variance_encoded_in_target = false; + for (int i = 0; i < 4; ++i) { + NormalizedBBox bbox; + bboxes[i].set_xmin(0); + bboxes[i].set_ymin(7.5); + bboxes[i].set_xmax(log(2.) * 5); + bboxes[i].set_ymax(log(3./2) * 5); + } + DecodeBBoxes(prior_bboxes, prior_variances, code_type, + variance_encoded_in_target, false, bboxes, &decode_bboxes); + EXPECT_EQ(decode_bboxes.size(), 4); + for (int i = 1; i < 5; ++i) { + EXPECT_NEAR(decode_bboxes[i-1].xmin(), 0 + (i - 1) * 0.1, eps); + EXPECT_NEAR(decode_bboxes[i-1].ymin(), 0.2 + (i - 1) * 0.1, eps); + EXPECT_NEAR(decode_bboxes[i-1].xmax(), 0.4 + (i - 1) * 0.1, eps); + EXPECT_NEAR(decode_bboxes[i-1].ymax(), 0.5 + (i - 1) * 0.1, eps); + } +} + +TEST_F(CPUBBoxUtilTest, TestMatchBBoxLableOneBipartite) { + vector gt_bboxes; + vector pred_bboxes; + + FillBBoxes(>_bboxes, &pred_bboxes); + + int label = 1; + MatchType match_type = MultiBoxLossParameter_MatchType_BIPARTITE; + float overlap = -1; + + vector match_indices; + vector match_overlaps; + + MatchBBox(gt_bboxes, pred_bboxes, label, match_type, overlap, true, + &match_indices, &match_overlaps); + + EXPECT_EQ(match_indices.size(), 6); + EXPECT_EQ(match_overlaps.size(), 6); + + EXPECT_EQ(match_indices[0], 0); + EXPECT_EQ(match_indices[1], -1); + EXPECT_EQ(match_indices[2], -1); + EXPECT_NEAR(match_overlaps[0], 4./9, eps); + EXPECT_NEAR(match_overlaps[1], 2./6, eps); + EXPECT_NEAR(match_overlaps[2], 2./8, eps); + for (int i = 3; i < 6; ++i) { + EXPECT_EQ(match_indices[i], -1); + EXPECT_NEAR(match_overlaps[i], 0, eps); + } +} + +TEST_F(CPUBBoxUtilTest, 
TestMatchBBoxLableAllBipartite) { + vector gt_bboxes; + vector pred_bboxes; + + FillBBoxes(>_bboxes, &pred_bboxes); + + int label = -1; + MatchType match_type = MultiBoxLossParameter_MatchType_BIPARTITE; + float overlap = -1; + + vector match_indices; + vector match_overlaps; + + MatchBBox(gt_bboxes, pred_bboxes, label, match_type, overlap, true, + &match_indices, &match_overlaps); + + EXPECT_EQ(match_indices.size(), 6); + EXPECT_EQ(match_overlaps.size(), 6); + + EXPECT_EQ(match_indices[0], 0); + EXPECT_EQ(match_indices[3], 1); + EXPECT_NEAR(match_overlaps[0], 4./9, eps); + EXPECT_NEAR(match_overlaps[1], 2./6, eps); + EXPECT_NEAR(match_overlaps[2], 2./8, eps); + EXPECT_NEAR(match_overlaps[3], 4./8, eps); + EXPECT_NEAR(match_overlaps[4], 1./11, eps); + EXPECT_NEAR(match_overlaps[5], 0., eps); + for (int i = 0; i < 6; ++i) { + if (i == 0 || i == 3) { + continue; + } + EXPECT_EQ(match_indices[i], -1); + } +} + +TEST_F(CPUBBoxUtilTest, TestMatchBBoxLableOnePerPrediction) { + vector gt_bboxes; + vector pred_bboxes; + + FillBBoxes(>_bboxes, &pred_bboxes); + + int label = 1; + MatchType match_type = MultiBoxLossParameter_MatchType_PER_PREDICTION; + float overlap = 0.3; + + vector match_indices; + vector match_overlaps; + + MatchBBox(gt_bboxes, pred_bboxes, label, match_type, overlap, true, + &match_indices, &match_overlaps); + + EXPECT_EQ(match_indices.size(), 6); + EXPECT_EQ(match_overlaps.size(), 6); + + EXPECT_EQ(match_indices[0], 0); + EXPECT_EQ(match_indices[1], 0); + EXPECT_EQ(match_indices[2], -1); + EXPECT_NEAR(match_overlaps[0], 4./9, eps); + EXPECT_NEAR(match_overlaps[1], 2./6, eps); + EXPECT_NEAR(match_overlaps[2], 2./8, eps); + for (int i = 3; i < 6; ++i) { + EXPECT_EQ(match_indices[i], -1); + EXPECT_NEAR(match_overlaps[i], 0, eps); + } +} + +TEST_F(CPUBBoxUtilTest, TestMatchBBoxLableAllPerPrediction) { + vector gt_bboxes; + vector pred_bboxes; + + FillBBoxes(>_bboxes, &pred_bboxes); + + int label = -1; + MatchType match_type = 
MultiBoxLossParameter_MatchType_PER_PREDICTION; + float overlap = 0.3; + + vector match_indices; + vector match_overlaps; + + MatchBBox(gt_bboxes, pred_bboxes, label, match_type, overlap, true, + &match_indices, &match_overlaps); + + EXPECT_EQ(match_indices.size(), 6); + EXPECT_EQ(match_overlaps.size(), 6); + + EXPECT_EQ(match_indices[0], 0); + EXPECT_EQ(match_indices[1], 0); + EXPECT_EQ(match_indices[2], -1); + EXPECT_EQ(match_indices[3], 1); + EXPECT_EQ(match_indices[4], -1); + EXPECT_EQ(match_indices[5], -1); + EXPECT_NEAR(match_overlaps[0], 4./9, eps); + EXPECT_NEAR(match_overlaps[1], 2./6, eps); + EXPECT_NEAR(match_overlaps[2], 2./8, eps); + EXPECT_NEAR(match_overlaps[3], 4./8, eps); + EXPECT_NEAR(match_overlaps[4], 1./11, eps); + EXPECT_NEAR(match_overlaps[5], 0, eps); +} + +TEST_F(CPUBBoxUtilTest, TestMatchBBoxLableAllPerPredictionEx) { + vector gt_bboxes; + vector pred_bboxes; + + FillBBoxes(>_bboxes, &pred_bboxes); + + int label = -1; + MatchType match_type = MultiBoxLossParameter_MatchType_PER_PREDICTION; + float overlap = 0.001; + + vector match_indices; + vector match_overlaps; + + MatchBBox(gt_bboxes, pred_bboxes, label, match_type, overlap, true, + &match_indices, &match_overlaps); + + EXPECT_EQ(match_indices.size(), 6); + EXPECT_EQ(match_overlaps.size(), 6); + + EXPECT_EQ(match_indices[0], 0); + EXPECT_EQ(match_indices[1], 0); + EXPECT_EQ(match_indices[2], 0); + EXPECT_EQ(match_indices[3], 1); + EXPECT_EQ(match_indices[4], 1); + EXPECT_EQ(match_indices[5], -1); + EXPECT_NEAR(match_overlaps[0], 4./9, eps); + EXPECT_NEAR(match_overlaps[1], 2./6, eps); + EXPECT_NEAR(match_overlaps[2], 2./8, eps); + EXPECT_NEAR(match_overlaps[3], 4./8, eps); + EXPECT_NEAR(match_overlaps[4], 1./11, eps); + EXPECT_NEAR(match_overlaps[5], 0., eps); +} + +TEST_F(CPUBBoxUtilTest, TestGetGroundTruth) { + const int num_gt = 4; + Blob gt_blob(1, 1, num_gt, 8); + float* gt_data = gt_blob.mutable_cpu_data(); + for (int i = 0; i < 4; ++i) { + int image_id = ceil(i / 2.); + 
gt_data[i * 8] = image_id; + gt_data[i * 8 + 1] = i; + gt_data[i * 8 + 2] = 0; + gt_data[i * 8 + 3] = 0.1; + gt_data[i * 8 + 4] = 0.1; + gt_data[i * 8 + 5] = 0.3; + gt_data[i * 8 + 6] = 0.3; + gt_data[i * 8 + 7] = i % 2; + } + + map > all_gt_bboxes; + GetGroundTruth(gt_data, num_gt, -1, true, &all_gt_bboxes); + + EXPECT_EQ(all_gt_bboxes.size(), 3); + + EXPECT_EQ(all_gt_bboxes[0].size(), 1); + EXPECT_EQ(all_gt_bboxes[0][0].label(), 0); + EXPECT_NEAR(all_gt_bboxes[0][0].xmin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[0][0].ymin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[0][0].xmax(), 0.3, eps); + EXPECT_NEAR(all_gt_bboxes[0][0].ymax(), 0.3, eps); + EXPECT_EQ(all_gt_bboxes[0][0].difficult(), false); + EXPECT_NEAR(all_gt_bboxes[0][0].size(), 0.04, eps); + + EXPECT_EQ(all_gt_bboxes[1].size(), 2); + for (int i = 1; i < 3; ++i) { + EXPECT_EQ(all_gt_bboxes[1][i-1].label(), i); + EXPECT_NEAR(all_gt_bboxes[1][i-1].xmin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[1][i-1].ymin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[1][i-1].xmax(), 0.3, eps); + EXPECT_NEAR(all_gt_bboxes[1][i-1].ymax(), 0.3, eps); + EXPECT_EQ(all_gt_bboxes[1][i-1].difficult(), i % 2); + EXPECT_NEAR(all_gt_bboxes[1][i-1].size(), 0.04, eps); + } + + EXPECT_EQ(all_gt_bboxes[2].size(), 1); + EXPECT_EQ(all_gt_bboxes[2][0].label(), 3); + EXPECT_NEAR(all_gt_bboxes[2][0].xmin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[2][0].ymin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[2][0].xmax(), 0.3, eps); + EXPECT_NEAR(all_gt_bboxes[2][0].ymax(), 0.3, eps); + EXPECT_EQ(all_gt_bboxes[2][0].difficult(), true); + EXPECT_NEAR(all_gt_bboxes[2][0].size(), 0.04, eps); + + // Skip difficult ground truth. 
+ GetGroundTruth(gt_data, num_gt, -1, false, &all_gt_bboxes); + + EXPECT_EQ(all_gt_bboxes.size(), 2); + + EXPECT_EQ(all_gt_bboxes[0].size(), 1); + EXPECT_EQ(all_gt_bboxes[0][0].label(), 0); + EXPECT_NEAR(all_gt_bboxes[0][0].xmin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[0][0].ymin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[0][0].xmax(), 0.3, eps); + EXPECT_NEAR(all_gt_bboxes[0][0].ymax(), 0.3, eps); + EXPECT_EQ(all_gt_bboxes[0][0].difficult(), false); + EXPECT_NEAR(all_gt_bboxes[0][0].size(), 0.04, eps); + + EXPECT_EQ(all_gt_bboxes[1].size(), 1); + EXPECT_EQ(all_gt_bboxes[1][0].label(), 2); + EXPECT_NEAR(all_gt_bboxes[1][0].xmin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[1][0].ymin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[1][0].xmax(), 0.3, eps); + EXPECT_NEAR(all_gt_bboxes[1][0].ymax(), 0.3, eps); + EXPECT_EQ(all_gt_bboxes[1][0].difficult(), false); + EXPECT_NEAR(all_gt_bboxes[1][0].size(), 0.04, eps); +} + +TEST_F(CPUBBoxUtilTest, TestGetGroundTruthLabelBBox) { + const int num_gt = 4; + Blob gt_blob(1, 1, num_gt, 8); + float* gt_data = gt_blob.mutable_cpu_data(); + for (int i = 0; i < 4; ++i) { + int image_id = ceil(i / 2.); + gt_data[i * 8] = image_id; + gt_data[i * 8 + 1] = i; + gt_data[i * 8 + 2] = 0; + gt_data[i * 8 + 3] = 0.1; + gt_data[i * 8 + 4] = 0.1; + gt_data[i * 8 + 5] = 0.3; + gt_data[i * 8 + 6] = 0.3; + gt_data[i * 8 + 7] = i % 2; + } + + map all_gt_bboxes; + GetGroundTruth(gt_data, num_gt, -1, true, &all_gt_bboxes); + + EXPECT_EQ(all_gt_bboxes.size(), 3); + + EXPECT_EQ(all_gt_bboxes[0].size(), 1); + EXPECT_EQ(all_gt_bboxes[0].find(0)->first, 0); + EXPECT_NEAR(all_gt_bboxes[0].find(0)->second[0].xmin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[0].find(0)->second[0].ymin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[0].find(0)->second[0].xmax(), 0.3, eps); + EXPECT_NEAR(all_gt_bboxes[0].find(0)->second[0].ymax(), 0.3, eps); + EXPECT_EQ(all_gt_bboxes[0].find(0)->second[0].difficult(), false); + EXPECT_NEAR(all_gt_bboxes[0].find(0)->second[0].size(), 0.04, 
eps); + + EXPECT_EQ(all_gt_bboxes[1].size(), 2); + for (int i = 1; i < 3; ++i) { + EXPECT_EQ(all_gt_bboxes[1].find(i)->first, i); + EXPECT_NEAR(all_gt_bboxes[1].find(i)->second[0].xmin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[1].find(i)->second[0].ymin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[1].find(i)->second[0].xmax(), 0.3, eps); + EXPECT_NEAR(all_gt_bboxes[1].find(i)->second[0].ymax(), 0.3, eps); + EXPECT_EQ(all_gt_bboxes[1].find(i)->second[0].difficult(), i % 2); + EXPECT_NEAR(all_gt_bboxes[1].find(i)->second[0].size(), 0.04, eps); + } + + EXPECT_EQ(all_gt_bboxes[2].size(), 1); + EXPECT_EQ(all_gt_bboxes[2].find(3)->first, 3); + EXPECT_NEAR(all_gt_bboxes[2].find(3)->second[0].xmin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[2].find(3)->second[0].ymin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[2].find(3)->second[0].xmax(), 0.3, eps); + EXPECT_NEAR(all_gt_bboxes[2].find(3)->second[0].ymax(), 0.3, eps); + EXPECT_EQ(all_gt_bboxes[2].find(3)->second[0].difficult(), true); + EXPECT_NEAR(all_gt_bboxes[2].find(3)->second[0].size(), 0.04, eps); + + // Skip difficult ground truth. 
+ GetGroundTruth(gt_data, num_gt, -1, false, &all_gt_bboxes); + + EXPECT_EQ(all_gt_bboxes.size(), 2); + + EXPECT_EQ(all_gt_bboxes[0].size(), 1); + EXPECT_EQ(all_gt_bboxes[0].find(0)->first, 0); + EXPECT_NEAR(all_gt_bboxes[0].find(0)->second[0].xmin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[0].find(0)->second[0].ymin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[0].find(0)->second[0].xmax(), 0.3, eps); + EXPECT_NEAR(all_gt_bboxes[0].find(0)->second[0].ymax(), 0.3, eps); + EXPECT_EQ(all_gt_bboxes[0].find(0)->second[0].difficult(), false); + EXPECT_NEAR(all_gt_bboxes[0].find(0)->second[0].size(), 0.04, eps); + + EXPECT_EQ(all_gt_bboxes[1].size(), 1); + EXPECT_EQ(all_gt_bboxes[1].find(2)->first, 2); + EXPECT_NEAR(all_gt_bboxes[1].find(2)->second[0].xmin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[1].find(2)->second[0].ymin(), 0.1, eps); + EXPECT_NEAR(all_gt_bboxes[1].find(2)->second[0].xmax(), 0.3, eps); + EXPECT_NEAR(all_gt_bboxes[1].find(2)->second[0].ymax(), 0.3, eps); + EXPECT_EQ(all_gt_bboxes[1].find(2)->second[0].difficult(), false); + EXPECT_NEAR(all_gt_bboxes[1].find(2)->second[0].size(), 0.04, eps); +} + +TEST_F(CPUBBoxUtilTest, TestGetLocPredictionsShared) { + const int num = 2; + const int num_preds_per_class = 2; + const int num_loc_classes = 1; + const bool share_location = true; + const int dim = num_preds_per_class * num_loc_classes * 4; + Blob loc_blob(num, dim, 1, 1); + float* loc_data = loc_blob.mutable_cpu_data(); + for (int i = 0; i < num; ++i) { + for (int j = 0; j < num_preds_per_class; ++j) { + int start_idx = i * dim + j * 4; + loc_data[start_idx] = i * num_preds_per_class * 0.1 + j * 0.1; + loc_data[start_idx + 1] = i * num_preds_per_class * 0.1 + j * 0.1; + loc_data[start_idx + 2] = i * num_preds_per_class * 0.1 + j * 0.1 + 0.2; + loc_data[start_idx + 3] = i * num_preds_per_class * 0.1 + j * 0.1 + 0.2; + } + } + + vector all_loc_bboxes; + GetLocPredictions(loc_data, num, num_preds_per_class, num_loc_classes, + share_location, &all_loc_bboxes); + + 
EXPECT_EQ(all_loc_bboxes.size(), num); + + for (int i = 0; i < num; ++i) { + EXPECT_EQ(all_loc_bboxes[i].size(), 1); + LabelBBox::iterator it = all_loc_bboxes[i].begin(); + EXPECT_EQ(it->first, -1); + const vector& bboxes = it->second; + EXPECT_EQ(bboxes.size(), num_preds_per_class); + float start_value = i * num_preds_per_class * 0.1; + for (int j = 0; j < num_preds_per_class; ++j) { + EXPECT_EQ(bboxes[j].has_label(), false); + EXPECT_NEAR(bboxes[j].xmin(), start_value + j * 0.1, eps); + EXPECT_NEAR(bboxes[j].ymin(), start_value + j * 0.1, eps); + EXPECT_NEAR(bboxes[j].xmax(), start_value + j * 0.1 + 0.2, eps); + EXPECT_NEAR(bboxes[j].ymax(), start_value + j * 0.1 + 0.2, eps); + EXPECT_EQ(bboxes[j].has_size(), false); + } + } +} + +TEST_F(CPUBBoxUtilTest, TestGetLocPredictionsUnShared) { + const int num = 2; + const int num_preds_per_class = 2; + const int num_loc_classes = 2; + const bool share_location = false; + const int dim = num_preds_per_class * num_loc_classes * 4; + Blob loc_blob(num, dim, 1, 1); + float* loc_data = loc_blob.mutable_cpu_data(); + for (int i = 0; i < num; ++i) { + for (int j = 0; j < num_preds_per_class; ++j) { + float start_value = (i * num_preds_per_class + j) * num_loc_classes * 0.1; + for (int c = 0; c < num_loc_classes; ++c) { + int idx = ((i * num_preds_per_class + j) * num_loc_classes + c) * 4; + loc_data[idx] = start_value + c * 0.1; + loc_data[idx + 1] = start_value + c * 0.1; + loc_data[idx + 2] = start_value + c * 0.1 + 0.2; + loc_data[idx + 3] = start_value + c * 0.1 + 0.2; + } + } + } + + vector all_loc_bboxes; + GetLocPredictions(loc_data, num, num_preds_per_class, num_loc_classes, + share_location, &all_loc_bboxes); + + EXPECT_EQ(all_loc_bboxes.size(), num); + + for (int i = 0; i < num; ++i) { + EXPECT_EQ(all_loc_bboxes[i].size(), num_loc_classes); + for (int c = 0; c < num_loc_classes; ++c) { + LabelBBox::iterator it = all_loc_bboxes[i].find(c); + EXPECT_EQ(it->first, c); + const vector& bboxes = it->second; + 
EXPECT_EQ(bboxes.size(), num_preds_per_class); + for (int j = 0; j < num_preds_per_class; ++j) { + float start_value = + (i * num_preds_per_class + j) * num_loc_classes * 0.1; + EXPECT_EQ(bboxes[j].has_label(), false); + EXPECT_NEAR(bboxes[j].xmin(), start_value + c * 0.1, eps); + EXPECT_NEAR(bboxes[j].ymin(), start_value + c * 0.1, eps); + EXPECT_NEAR(bboxes[j].xmax(), start_value + c * 0.1 + 0.2, eps); + EXPECT_NEAR(bboxes[j].ymax(), start_value + c * 0.1 + 0.2, eps); + EXPECT_EQ(bboxes[j].has_size(), false); + } + } + } +} + +TEST_F(CPUBBoxUtilTest, TestGetConfidenceScores) { + const int num = 2; + const int num_preds_per_class = 2; + const int num_classes = 2; + const int dim = num_preds_per_class * num_classes; + Blob conf_blob(num, dim, 1, 1); + float* conf_data = conf_blob.mutable_cpu_data(); + for (int i = 0; i < num; ++i) { + for (int j = 0; j < num_preds_per_class; ++j) { + for (int c = 0; c < num_classes; ++c) { + int idx = (i * num_preds_per_class + j) * num_classes + c; + conf_data[idx] = idx * 0.1; + } + } + } + + vector > > all_conf_preds; + GetConfidenceScores(conf_data, num, num_preds_per_class, num_classes, + &all_conf_preds); + + EXPECT_EQ(all_conf_preds.size(), num); + + for (int i = 0; i < num; ++i) { + EXPECT_EQ(all_conf_preds[i].size(), num_classes); + for (int c = 0; c < num_classes; ++c) { + map >::iterator it = all_conf_preds[i].find(c); + EXPECT_EQ(it->first, c); + const vector& confidences = it->second; + EXPECT_EQ(confidences.size(), num_preds_per_class); + for (int j = 0; j < num_preds_per_class; ++j) { + int idx = (i * num_preds_per_class + j) * num_classes + c; + EXPECT_NEAR(confidences[j], idx * 0.1, eps); + } + } + } +} + +TEST_F(CPUBBoxUtilTest, TestComputeConfLoss) { + const int num = 2; + const int num_preds_per_class = 2; + const int num_classes = 2; + const int dim = num_preds_per_class * num_classes; + Blob conf_blob(num, dim, 1, 1); + float* conf_data = conf_blob.mutable_cpu_data(); + for (int i = 0; i < num; ++i) { + int 
sign = i % 2 ? 1 : -1; + for (int j = 0; j < num_preds_per_class; ++j) { + for (int c = 0; c < num_classes; ++c) { + int idx = (i * num_preds_per_class + j) * num_classes + c; + conf_data[idx] = sign * idx * 0.1; + } + } + } + + vector > all_conf_loss; + ConfLossType loss_type = MultiBoxLossParameter_ConfLossType_LOGISTIC; + ComputeConfLoss(conf_data, num, num_preds_per_class, num_classes, + -1, loss_type, &all_conf_loss); + + EXPECT_EQ(all_conf_loss.size(), num); + EXPECT_EQ(all_conf_loss[0].size(), num_preds_per_class); + EXPECT_NEAR(all_conf_loss[0][0], + -(log(exp(0.)/(1.+exp(0.))) + log(exp(0.1)/(1+exp(0.1)))), eps); + EXPECT_NEAR(all_conf_loss[0][1], + -(log(exp(0.2)/(1.+exp(0.2))) + log(exp(0.3)/(1+exp(0.3)))), eps); + EXPECT_EQ(all_conf_loss[1].size(), num_preds_per_class); + EXPECT_NEAR(all_conf_loss[1][0], + -(log(exp(-0.4)/(1.+exp(-0.4))) + log(exp(-0.5)/(1+exp(-0.5)))), + eps); + EXPECT_NEAR(all_conf_loss[1][1], + -(log(exp(-0.6)/(1.+exp(-0.6))) + log(exp(-0.7)/(1+exp(-0.7)))), + eps); + + ComputeConfLoss(conf_data, num, num_preds_per_class, num_classes, + 0, loss_type, &all_conf_loss); + + EXPECT_EQ(all_conf_loss.size(), num); + EXPECT_EQ(all_conf_loss[0].size(), num_preds_per_class); + EXPECT_NEAR(all_conf_loss[0][0], + -(log(1./(1.+exp(0.))) + log(exp(0.1)/(1+exp(0.1)))), eps); + EXPECT_NEAR(all_conf_loss[0][1], + -(log(1./(1.+exp(0.2))) + log(exp(0.3)/(1+exp(0.3)))), eps); + EXPECT_EQ(all_conf_loss[1].size(), num_preds_per_class); + EXPECT_NEAR(all_conf_loss[1][0], + -(log(1./(1.+exp(-0.4))) + log(exp(-0.5)/(1+exp(-0.5)))), eps); + EXPECT_NEAR(all_conf_loss[1][1], + -(log(1./(1.+exp(-0.6))) + log(exp(-0.7)/(1+exp(-0.7)))), eps); + + loss_type = MultiBoxLossParameter_ConfLossType_SOFTMAX; + ComputeConfLoss(conf_data, num, num_preds_per_class, num_classes, + 0, loss_type, &all_conf_loss); + + EXPECT_EQ(all_conf_loss.size(), num); + for (int i = 0; i < num; ++i) { + EXPECT_EQ(all_conf_loss[i].size(), num_preds_per_class); + int sign = i % 2 ? 
1 : -1; + for (int j = 0; j < num_preds_per_class; ++j) { + if (sign == 1) { + EXPECT_NEAR(all_conf_loss[i][j], -log(exp(-0.1)/(1+exp(-0.1))), eps); + } else { + EXPECT_NEAR(all_conf_loss[i][j], -log(1./(1+exp(-0.1))), eps); + } + } + } +} + +TEST_F(CPUBBoxUtilTest, TestComputeConfLossMatch) { + const int num = 2; + const int num_preds_per_class = 2; + const int num_classes = 2; + const int dim = num_preds_per_class * num_classes; + Blob conf_blob(num, dim, 1, 1); + float* conf_data = conf_blob.mutable_cpu_data(); + vector > > all_match_indices; + map > all_gt_bboxes; + for (int i = 0; i < num; ++i) { + int sign = i % 2 ? 1 : -1; + for (int j = 0; j < num_preds_per_class; ++j) { + for (int c = 0; c < num_classes; ++c) { + int idx = (i * num_preds_per_class + j) * num_classes + c; + conf_data[idx] = sign * idx * 0.1; + } + } + map > match_indices; + vector indices(num_preds_per_class, -1); + match_indices[-1] = indices; + if (i == 1) { + NormalizedBBox gt_bbox; + gt_bbox.set_label(1); + all_gt_bboxes[i].push_back(gt_bbox); + // The first prior in second image is matched to a gt bbox of label 1. 
+ match_indices[-1][0] = 0; + } + all_match_indices.push_back(match_indices); + } + + vector > all_conf_loss; + ConfLossType loss_type = MultiBoxLossParameter_ConfLossType_LOGISTIC; + ComputeConfLoss(conf_data, num, num_preds_per_class, num_classes, + -1, loss_type, all_match_indices, all_gt_bboxes, + &all_conf_loss); + + EXPECT_EQ(all_conf_loss.size(), num); + EXPECT_EQ(all_conf_loss[0].size(), num_preds_per_class); + EXPECT_NEAR(all_conf_loss[0][0], + -(log(exp(0.)/(1.+exp(0.))) + log(exp(0.1)/(1+exp(0.1)))), eps); + EXPECT_NEAR(all_conf_loss[0][1], + -(log(exp(0.2)/(1.+exp(0.2))) + log(exp(0.3)/(1+exp(0.3)))), eps); + EXPECT_EQ(all_conf_loss[1].size(), num_preds_per_class); + EXPECT_NEAR(all_conf_loss[1][0], + -(log(exp(-0.4)/(1.+exp(-0.4))) + log(1./(1+exp(-0.5)))), + eps); + EXPECT_NEAR(all_conf_loss[1][1], + -(log(exp(-0.6)/(1.+exp(-0.6))) + log(exp(-0.7)/(1+exp(-0.7)))), + eps); + + ComputeConfLoss(conf_data, num, num_preds_per_class, num_classes, + 0, loss_type, all_match_indices, all_gt_bboxes, + &all_conf_loss); + + EXPECT_EQ(all_conf_loss.size(), num); + EXPECT_EQ(all_conf_loss[0].size(), num_preds_per_class); + EXPECT_NEAR(all_conf_loss[0][0], + -(log(1./(1.+exp(0.))) + log(exp(0.1)/(1+exp(0.1)))), eps); + EXPECT_NEAR(all_conf_loss[0][1], + -(log(1./(1.+exp(0.2))) + log(exp(0.3)/(1+exp(0.3)))), eps); + EXPECT_EQ(all_conf_loss[1].size(), num_preds_per_class); + EXPECT_NEAR(all_conf_loss[1][0], + -(log(exp(-0.4)/(1.+exp(-0.4))) + log(1./(1+exp(-0.5)))), eps); + EXPECT_NEAR(all_conf_loss[1][1], + -(log(1./(1.+exp(-0.6))) + log(exp(-0.7)/(1+exp(-0.7)))), eps); + + loss_type = MultiBoxLossParameter_ConfLossType_SOFTMAX; + ComputeConfLoss(conf_data, num, num_preds_per_class, num_classes, + 0, loss_type, all_match_indices, all_gt_bboxes, + &all_conf_loss); + + EXPECT_EQ(all_conf_loss.size(), num); + for (int i = 0; i < num; ++i) { + EXPECT_EQ(all_conf_loss[i].size(), num_preds_per_class); + int sign = i % 2 ? 
1 : -1; + for (int j = 0; j < num_preds_per_class; ++j) { + if (sign == 1) { + if (j == 0) { + EXPECT_NEAR(all_conf_loss[i][j], -log(1./(1+exp(-0.1))), eps); + } else { + EXPECT_NEAR(all_conf_loss[i][j], -log(exp(-0.1)/(1+exp(-0.1))), eps); + } + } else { + EXPECT_NEAR(all_conf_loss[i][j], -log(1./(1+exp(-0.1))), eps); + } + } + } +} + +TEST_F(CPUBBoxUtilTest, TestGetPriorBBoxes) { + const int num_channels = 2; + const int num_priors = 2; + const int dim = num_priors * 4; + Blob prior_blob(1, num_channels, dim, 1); + float* prior_data = prior_blob.mutable_cpu_data(); + for (int i = 0; i < num_priors; ++i) { + prior_data[i * 4] = i * 0.1; + prior_data[i * 4 + 1] = i * 0.1; + prior_data[i * 4 + 2] = i * 0.1 + 0.2; + prior_data[i * 4 + 3] = i * 0.1 + 0.1; + for (int j = 0; j < 4; ++j) { + prior_data[dim + i * 4 + j] = 0.1; + } + } + + vector prior_bboxes; + vector > prior_variances; + GetPriorBBoxes(prior_data, num_priors, &prior_bboxes, &prior_variances); + + EXPECT_EQ(prior_bboxes.size(), num_priors); + EXPECT_EQ(prior_variances.size(), num_priors); + + for (int i = 0; i < num_priors; ++i) { + EXPECT_NEAR(prior_bboxes[i].xmin(), i * 0.1, eps); + EXPECT_NEAR(prior_bboxes[i].ymin(), i * 0.1, eps); + EXPECT_NEAR(prior_bboxes[i].xmax(), i * 0.1 + 0.2, eps); + EXPECT_NEAR(prior_bboxes[i].ymax(), i * 0.1 + 0.1, eps); + EXPECT_EQ(prior_variances[i].size(), 4); + for (int j = 0; j < 4; ++j) { + EXPECT_NEAR(prior_variances[i][j], 0.1, eps); + } + } +} + +TEST_F(CPUBBoxUtilTest, TestGetDetectionResults) { + const int num = 4; + const int num_det = (1 + num) * num / 2; + Blob det_blob(1, 1, num_det, 7); + float* det_data = det_blob.mutable_cpu_data(); + int idx = 0; + for (int i = 0; i < num; ++i) { + int image_id = ceil(i / 2.); + for (int j = 0; j <= i; ++j) { + det_data[idx * 7] = image_id; + det_data[idx * 7 + 1] = i; + det_data[idx * 7 + 2] = 0; + det_data[idx * 7 + 3] = 0.1 + j * 0.1; + det_data[idx * 7 + 4] = 0.1 + j * 0.1; + det_data[idx * 7 + 5] = 0.3 + j * 0.1; + 
det_data[idx * 7 + 6] = 0.3 + j * 0.1; + ++idx; + } + } + CHECK_EQ(idx, num_det); + + map all_detections; + GetDetectionResults(det_data, num_det, -1, &all_detections); + + EXPECT_EQ(all_detections.size(), 3); + + EXPECT_EQ(all_detections[0].size(), 1); + EXPECT_EQ(all_detections[0].find(0)->first, 0); + EXPECT_EQ(all_detections[0].find(0)->second.size(), 1); + EXPECT_NEAR(all_detections[0].find(0)->second[0].xmin(), 0.1, eps); + EXPECT_NEAR(all_detections[0].find(0)->second[0].ymin(), 0.1, eps); + EXPECT_NEAR(all_detections[0].find(0)->second[0].xmax(), 0.3, eps); + EXPECT_NEAR(all_detections[0].find(0)->second[0].ymax(), 0.3, eps); + EXPECT_NEAR(all_detections[0].find(0)->second[0].size(), 0.04, eps); + + EXPECT_EQ(all_detections[1].size(), 2); + for (int i = 1; i < 3; ++i) { + EXPECT_EQ(all_detections[1].find(i)->first, i); + EXPECT_EQ(all_detections[1].find(i)->second.size(), i + 1); + for (int j = 0; j <= i; ++j) { + EXPECT_NEAR(all_detections[1].find(i)->second[j].xmin(), + 0.1 + j * 0.1, eps); + EXPECT_NEAR(all_detections[1].find(i)->second[j].ymin(), + 0.1 + j * 0.1, eps); + EXPECT_NEAR(all_detections[1].find(i)->second[j].xmax(), + 0.3 + j * 0.1, eps); + EXPECT_NEAR(all_detections[1].find(i)->second[j].ymax(), + 0.3 + j * 0.1, eps); + EXPECT_NEAR(all_detections[1].find(i)->second[j].size(), 0.04, eps); + } + } + + EXPECT_EQ(all_detections[2].size(), 1); + EXPECT_EQ(all_detections[2].find(3)->first, 3); + EXPECT_EQ(all_detections[2].find(3)->second.size(), 4); + for (int j = 0; j <= 3; ++j) { + EXPECT_NEAR(all_detections[2].find(3)->second[j].xmin(), + 0.1 + j * 0.1, eps); + EXPECT_NEAR(all_detections[2].find(3)->second[j].ymin(), + 0.1 + j * 0.1, eps); + EXPECT_NEAR(all_detections[2].find(3)->second[j].xmax(), + 0.3 + j * 0.1, eps); + EXPECT_NEAR(all_detections[2].find(3)->second[j].ymax(), + 0.3 + j * 0.1, eps); + EXPECT_NEAR(all_detections[2].find(3)->second[j].size(), 0.04, eps); + } +} + +TEST_F(CPUBBoxUtilTest, TestApplyNMS) { + vector bboxes; + 
vector scores; + float nms_threshold = 0.3; + int top_k = -1; + bool reuse_overlaps = false; + map > overlaps; + vector indices; + + // Fill in bboxes and confidences. + NormalizedBBox bbox; + bbox.set_xmin(0.1); + bbox.set_ymin(0.1); + bbox.set_xmax(0.3); + bbox.set_ymax(0.3); + bboxes.push_back(bbox); + scores.push_back(0.8); + + bbox.set_xmin(0.2); + bbox.set_ymin(0.1); + bbox.set_xmax(0.4); + bbox.set_ymax(0.3); + bboxes.push_back(bbox); + scores.push_back(0.7); + + bbox.set_xmin(0.2); + bbox.set_ymin(0.0); + bbox.set_xmax(0.4); + bbox.set_ymax(0.2); + bboxes.push_back(bbox); + scores.push_back(0.4); + + bbox.set_xmin(0.1); + bbox.set_ymin(0.2); + bbox.set_xmax(0.4); + bbox.set_ymax(0.4); + bboxes.push_back(bbox); + scores.push_back(0.5); + + ApplyNMS(bboxes, scores, nms_threshold, top_k, reuse_overlaps, &overlaps, + &indices); + + EXPECT_EQ(overlaps.size(), 0); // reuse_overlaps is false. + EXPECT_EQ(indices.size(), 3); + EXPECT_EQ(indices[0], 0); + EXPECT_EQ(indices[1], 3); + EXPECT_EQ(indices[2], 2); + + top_k = 2; + ApplyNMS(bboxes, scores, nms_threshold, top_k, reuse_overlaps, &overlaps, + &indices); + EXPECT_EQ(indices.size(), 1); + EXPECT_EQ(indices[0], 0); + + top_k = 3; + nms_threshold = 0.2; + ApplyNMS(bboxes, scores, nms_threshold, top_k, reuse_overlaps, &overlaps, + &indices); + EXPECT_EQ(indices.size(), 1); + EXPECT_EQ(indices[0], 0); + + reuse_overlaps = true; + ApplyNMS(bboxes, scores, nms_threshold, top_k, reuse_overlaps, &overlaps, + &indices); + EXPECT_EQ(overlaps.size(), 1); + EXPECT_NEAR(overlaps[0][1], 1./3, eps); + EXPECT_NEAR(overlaps[0][2], 0.0, eps); + EXPECT_NEAR(overlaps[0][3], 2./8, eps); + + map > old_overlaps = overlaps; + ApplyNMS(bboxes, scores, nms_threshold, top_k, reuse_overlaps, &overlaps, + &indices); + EXPECT_EQ(old_overlaps.size(), overlaps.size()); + for (int i = 1; i <= 3; ++i) { + EXPECT_NEAR(old_overlaps[0][i], overlaps[0][i], eps); + } +} + +TEST_F(CPUBBoxUtilTest, TestApplyNMSFast) { + vector bboxes; + vector scores; 
+ float score_threshold = 0.; + float nms_threshold = 0.3; + float eta = 1.; + int top_k = -1; + vector indices; + + // Fill in bboxes and confidences. + NormalizedBBox bbox; + bbox.set_xmin(0.1); + bbox.set_ymin(0.1); + bbox.set_xmax(0.3); + bbox.set_ymax(0.3); + bboxes.push_back(bbox); + scores.push_back(0.8); + + bbox.set_xmin(0.2); + bbox.set_ymin(0.1); + bbox.set_xmax(0.4); + bbox.set_ymax(0.3); + bboxes.push_back(bbox); + scores.push_back(0.7); + + bbox.set_xmin(0.2); + bbox.set_ymin(0.0); + bbox.set_xmax(0.4); + bbox.set_ymax(0.2); + bboxes.push_back(bbox); + scores.push_back(0.4); + + bbox.set_xmin(0.1); + bbox.set_ymin(0.2); + bbox.set_xmax(0.4); + bbox.set_ymax(0.4); + bboxes.push_back(bbox); + scores.push_back(0.5); + + ApplyNMSFast(bboxes, scores, score_threshold, nms_threshold, eta, top_k, + &indices); + + EXPECT_EQ(indices.size(), 3); + EXPECT_EQ(indices[0], 0); + EXPECT_EQ(indices[1], 3); + EXPECT_EQ(indices[2], 2); + + top_k = 2; + ApplyNMSFast(bboxes, scores, score_threshold, nms_threshold, eta, top_k, + &indices); + EXPECT_EQ(indices.size(), 1); + EXPECT_EQ(indices[0], 0); + + top_k = 3; + nms_threshold = 0.2; + ApplyNMSFast(bboxes, scores, score_threshold, nms_threshold, eta, top_k, + &indices); + EXPECT_EQ(indices.size(), 1); + EXPECT_EQ(indices[0], 0); + + top_k = -1; + score_threshold = 0.5; + ApplyNMSFast(bboxes, scores, score_threshold, nms_threshold, eta, top_k, + &indices); + EXPECT_EQ(indices.size(), 1); + EXPECT_EQ(indices[0], 0); +} + +TEST_F(CPUBBoxUtilTest, TestCumSum) { + vector > pairs; + vector cumsum; + + pairs.push_back(std::make_pair(0.1, 0)); + pairs.push_back(std::make_pair(0.2, 1)); + pairs.push_back(std::make_pair(0.3, 0)); + + CumSum(pairs, &cumsum); + + EXPECT_EQ(cumsum.size(), 3); + EXPECT_EQ(cumsum[0], 0); + EXPECT_EQ(cumsum[1], 1); + EXPECT_EQ(cumsum[2], 1); +} + +TEST_F(CPUBBoxUtilTest, TestComputeAP) { + vector > tp; + vector > fp; + + tp.push_back(std::make_pair(1.0, 0)); + tp.push_back(std::make_pair(1.0, 1)); + 
tp.push_back(std::make_pair(0.9, 1)); + tp.push_back(std::make_pair(0.9, 0)); + tp.push_back(std::make_pair(0.8, 1)); + tp.push_back(std::make_pair(0.7, 0)); + tp.push_back(std::make_pair(0.7, 1)); + tp.push_back(std::make_pair(0.6, 0)); + tp.push_back(std::make_pair(0.5, 0)); + tp.push_back(std::make_pair(0.4, 0)); + tp.push_back(std::make_pair(0.4, 1)); + + fp.push_back(std::make_pair(1.0, 1)); + fp.push_back(std::make_pair(1.0, 0)); + fp.push_back(std::make_pair(0.9, 0)); + fp.push_back(std::make_pair(0.9, 1)); + fp.push_back(std::make_pair(0.8, 0)); + fp.push_back(std::make_pair(0.7, 1)); + fp.push_back(std::make_pair(0.7, 0)); + fp.push_back(std::make_pair(0.6, 1)); + fp.push_back(std::make_pair(0.5, 1)); + fp.push_back(std::make_pair(0.4, 1)); + fp.push_back(std::make_pair(0.4, 0)); + + float eps = 1e-5; + vector prec, rec; + float ap; + + ComputeAP(tp, 5, fp, "Integral", &prec, &rec, &ap); + + EXPECT_NEAR(ap, 0.558528, eps); + + EXPECT_EQ(prec.size(), 11); + EXPECT_NEAR(prec[0], 0.0/1.0, eps); + EXPECT_NEAR(prec[1], 1.0/2.0, eps); + EXPECT_NEAR(prec[2], 2.0/3.0, eps); + EXPECT_NEAR(prec[3], 2.0/4.0, eps); + EXPECT_NEAR(prec[4], 3.0/5.0, eps); + EXPECT_NEAR(prec[5], 3.0/6.0, eps); + EXPECT_NEAR(prec[6], 4.0/7.0, eps); + EXPECT_NEAR(prec[7], 4.0/8.0, eps); + EXPECT_NEAR(prec[8], 4.0/9.0, eps); + EXPECT_NEAR(prec[9], 4.0/10.0, eps); + EXPECT_NEAR(prec[10], 5.0/11.0, eps); + + EXPECT_EQ(rec.size(), 11); + EXPECT_NEAR(rec[0], 0.0, eps); + EXPECT_NEAR(rec[1], 0.2, eps); + EXPECT_NEAR(rec[2], 0.4, eps); + EXPECT_NEAR(rec[3], 0.4, eps); + EXPECT_NEAR(rec[4], 0.6, eps); + EXPECT_NEAR(rec[5], 0.6, eps); + EXPECT_NEAR(rec[6], 0.8, eps); + EXPECT_NEAR(rec[7], 0.8, eps); + EXPECT_NEAR(rec[8], 0.8, eps); + EXPECT_NEAR(rec[9], 0.8, eps); + EXPECT_NEAR(rec[10], 1.0, eps); + + vector prec_old = prec; + vector rec_old = rec; + ComputeAP(tp, 5, fp, "MaxIntegral", &prec, &rec, &ap); + + EXPECT_NEAR(ap, 0.591861, eps); + EXPECT_EQ(prec.size(), 11); + EXPECT_EQ(rec.size(), 11); + 
for (int i = 0; i < 11; ++i) { + EXPECT_NEAR(prec_old[i], prec[i], eps); + EXPECT_NEAR(rec_old[i], rec[i], eps); + } + + ComputeAP(tp, 5, fp, "11point", &prec, &rec, &ap); + + EXPECT_NEAR(ap, 0.598662, eps); + EXPECT_EQ(prec.size(), 11); + EXPECT_EQ(rec.size(), 11); + for (int i = 0; i < 11; ++i) { + EXPECT_NEAR(prec_old[i], prec[i], eps); + EXPECT_NEAR(rec_old[i], rec[i], eps); + } + + // Cut the last 4 predictions. + tp.resize(7); + fp.resize(7); + + ComputeAP(tp, 5, fp, "Integral", &prec, &rec, &ap); + + EXPECT_NEAR(ap, 0.558528 - prec_old.back() * 0.2, eps); + EXPECT_EQ(prec.size(), 7); + EXPECT_EQ(rec.size(), 7); + for (int i = 0; i < 7; ++i) { + EXPECT_NEAR(prec_old[i], prec[i], eps); + EXPECT_NEAR(rec_old[i], rec[i], eps); + } + + ComputeAP(tp, 5, fp, "MaxIntegral", &prec, &rec, &ap); + + EXPECT_NEAR(ap, 0.591861 - prec_old.back() * 0.2, eps); + EXPECT_EQ(prec.size(), 7); + EXPECT_EQ(rec.size(), 7); + for (int i = 0; i < 7; ++i) { + EXPECT_NEAR(prec_old[i], prec[i], eps); + EXPECT_NEAR(rec_old[i], rec[i], eps); + } + + ComputeAP(tp, 5, fp, "11point", &prec, &rec, &ap); + + EXPECT_NEAR(ap, 0.598662 - prec_old.back() * 2 / 11., eps); + EXPECT_EQ(prec.size(), 7); + EXPECT_EQ(rec.size(), 7); + for (int i = 0; i < 7; ++i) { + EXPECT_NEAR(prec_old[i], prec[i], eps); + EXPECT_NEAR(rec_old[i], rec[i], eps); + } +} + +#ifndef CPU_ONLY +template +void FillBBoxes(Dtype* gt_bboxes, Dtype* pred_bboxes) { +} + +template +class GPUBBoxUtilTest : public BBoxUtilTest > { +}; + +TYPED_TEST_CASE(GPUBBoxUtilTest, TestDtypes); + +TYPED_TEST(GPUBBoxUtilTest, TestBBoxSize) { + float size; + Blob bbox(1, 1, 1, 4); + TypeParam* bbox_data = bbox.mutable_cpu_data(); + + // Valid box. + bbox_data[0] = 0.2; + bbox_data[1] = 0.3; + bbox_data[2] = 0.3; + bbox_data[3] = 0.5; + size = BBoxSizeGPU(bbox_data); + EXPECT_NEAR(size, 0.02, eps); + + // A line. + bbox_data[2] = 0.2; + size = BBoxSizeGPU(bbox_data); + EXPECT_NEAR(size, 0., eps); + + // Invalid box. 
+ bbox_data[2] = 0.1; + size = BBoxSizeGPU(bbox_data); + EXPECT_NEAR(size, 0., eps); +} + +TYPED_TEST(GPUBBoxUtilTest, TestJaccardOverlap) { + float overlap; + Blob bbox1(1, 1, 1, 4); + TypeParam* bbox1_data = bbox1.mutable_cpu_data(); + bbox1_data[0] = 0.2; + bbox1_data[1] = 0.3; + bbox1_data[2] = 0.3; + bbox1_data[3] = 0.5; + + Blob bbox2(1, 1, 1, 4); + TypeParam* bbox2_data = bbox2.mutable_cpu_data(); + + // Partially overlapped. + bbox2_data[0] = 0.1; + bbox2_data[1] = 0.1; + bbox2_data[2] = 0.3; + bbox2_data[3] = 0.4; + overlap = JaccardOverlapGPU(bbox1_data, bbox2_data); + EXPECT_NEAR(overlap, 1./7, eps); + + // Fully contain. + bbox2_data[0] = 0.1; + bbox2_data[1] = 0.1; + bbox2_data[2] = 0.4; + bbox2_data[3] = 0.6; + overlap = JaccardOverlapGPU(bbox1_data, bbox2_data); + EXPECT_NEAR(overlap, 2./15, eps); + + // Outside. + bbox2_data[0] = 0.; + bbox2_data[1] = 0.; + bbox2_data[2] = 0.1; + bbox2_data[3] = 0.1; + overlap = JaccardOverlapGPU(bbox1_data, bbox2_data); + EXPECT_NEAR(overlap, 0., eps); +} + +TYPED_TEST(GPUBBoxUtilTest, TestDecodeBBoxesCorner) { + int num = 4; + Blob prior_bboxes(1, 2, num * 4, 1); + TypeParam* prior_data = prior_bboxes.mutable_cpu_data(); + Blob loc_preds(1, num * 4, 1, 1); + TypeParam* loc_data = loc_preds.mutable_cpu_data(); + for (int i = 1; i <= num; ++i) { + prior_data[(i - 1) * 4] = 0.1 * i; + prior_data[(i - 1) * 4 + 1] = 0.1 * i; + prior_data[(i - 1) * 4 + 2] = 0.1 * i + 0.2; + prior_data[(i - 1) * 4 + 3] = 0.1 * i + 0.2; + for (int j = 0; j < 4; ++j) { + prior_data[num * 4 + (i - 1) * 4 + j] = 0.1; + } + + loc_data[(i - 1) * 4] = -1 * (i % 2); + loc_data[(i - 1) * 4 + 1] = ((i + 1) % 2); + loc_data[(i - 1) * 4 + 2] = ((i + 1) % 2); + loc_data[(i - 1) * 4 + 3] = i % 2; + } + + CodeType code_type = PriorBoxParameter_CodeType_CORNER; + Blob bboxes(1, num * 4, 1, 1); + TypeParam* bbox_data = bboxes.mutable_gpu_data(); + + bool variance_encoded_in_target = false; + DecodeBBoxesGPU(num * 4, loc_data, prior_data, code_type, + 
variance_encoded_in_target, num, false, 1, -1, false, + bbox_data); + TypeParam* bbox_cpu_data = bboxes.mutable_cpu_data(); + for (int i = 1; i <= num; ++i) { + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4], 0.1*i + i%2 * -0.1, eps); + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4 + 1], 0.1*i + (i+1)%2 * 0.1, eps); + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4 + 2], + 0.1*i + 0.2 + (i+1)%2 * 0.1, eps); + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4 + 3], 0.1*i + 0.2 + i%2 * 0.1, eps); + } + + variance_encoded_in_target = true; + bbox_data = bboxes.mutable_gpu_data(); + DecodeBBoxesGPU(num * 4, loc_data, prior_data, code_type, + variance_encoded_in_target, num, false, 1, -1, false, + bbox_data); + bbox_cpu_data = bboxes.mutable_cpu_data(); + for (int i = 1; i <= num; ++i) { + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4], 0.1*i + i%2 * -1, eps); + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4 + 1], 0.1*i + (i+1)%2, eps); + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4 + 2], 0.1*i + 0.2 + (i+1)%2, eps); + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4 + 3], 0.1*i + 0.2 + i%2, eps); + } +} + +TYPED_TEST(GPUBBoxUtilTest, TestDecodeBBoxesCornerTwoClasses) { + int num = 4; + int num_loc_classes = 2; + Blob prior_bboxes(1, 2, num * 4, 1); + TypeParam* prior_data = prior_bboxes.mutable_cpu_data(); + Blob loc_preds(1, num * num_loc_classes * 4, 1, 1); + TypeParam* loc_data = loc_preds.mutable_cpu_data(); + for (int i = 1; i <= num; ++i) { + prior_data[(i - 1) * 4] = 0.1 * i; + prior_data[(i - 1) * 4 + 1] = 0.1 * i; + prior_data[(i - 1) * 4 + 2] = 0.1 * i + 0.2; + prior_data[(i - 1) * 4 + 3] = 0.1 * i + 0.2; + for (int j = 0; j < 4; ++j) { + prior_data[num * 4 + (i - 1) * 4 + j] = 0.1; + } + + for (int j = 0; j < num_loc_classes; ++j) { + loc_data[((i - 1) * 2 + j) * 4] = -1 * (i % 2) * (2 - j); + loc_data[((i - 1) * 2 + j) * 4 + 1] = ((i + 1) % 2) * (2 - j); + loc_data[((i - 1) * 2 + j) * 4 + 2] = ((i + 1) % 2) * (2 - j); + loc_data[((i - 1) * 2 + j) * 4 + 3] = i % 2 * (2 - j); + } + } + + CodeType code_type = 
PriorBoxParameter_CodeType_CORNER; + Blob bboxes(1, num * num_loc_classes * 4, 1, 1); + TypeParam* bbox_data = bboxes.mutable_gpu_data(); + + bool variance_encoded_in_target = false; + DecodeBBoxesGPU(num * num_loc_classes * 4, loc_data, prior_data, code_type, + variance_encoded_in_target, num, false, num_loc_classes, -1, + false, bbox_data); + TypeParam* bbox_cpu_data = bboxes.mutable_cpu_data(); + for (int i = 1; i <= num; ++i) { + for (int j = 0; j < num_loc_classes; ++j) { + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4], + 0.1*i + i%2 * (2-j) * -0.1, eps); + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4 + 1], + 0.1*i + (i+1)%2 * (2-j) * 0.1, eps); + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4 + 2], + 0.1*i + 0.2 + (i+1)%2 * (2-j) * 0.1, eps); + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4 + 3], + 0.1*i + 0.2 + i%2 * (2-j) * 0.1, eps); + } + } + + variance_encoded_in_target = true; + bbox_data = bboxes.mutable_gpu_data(); + DecodeBBoxesGPU(num * num_loc_classes * 4, loc_data, prior_data, code_type, + variance_encoded_in_target, num, false, num_loc_classes, -1, + false, bbox_data); + bbox_cpu_data = bboxes.mutable_cpu_data(); + for (int i = 1; i <= num; ++i) { + for (int j = 0; j < num_loc_classes; ++j) { + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4], + 0.1*i + i%2 * (2-j) * -1, eps); + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4 + 1], + 0.1*i + (i+1)%2 * (2-j), eps); + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4 + 2], + 0.1*i + 0.2 + (i+1)%2 * (2-j), eps); + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4 + 3], + 0.1*i + 0.2 + i%2 * (2-j), eps); + } + } +} + +TYPED_TEST(GPUBBoxUtilTest, TestDecodeBBoxesCornerTwoClassesNegClass0) { + int num = 4; + int num_loc_classes = 2; + Blob prior_bboxes(1, 2, num * 4, 1); + TypeParam* prior_data = prior_bboxes.mutable_cpu_data(); + Blob loc_preds(1, num * num_loc_classes * 4, 1, 1); + TypeParam* loc_data = loc_preds.mutable_cpu_data(); + for (int i = 1; i <= num; ++i) { + prior_data[(i - 1) * 4] = 
0.1 * i; + prior_data[(i - 1) * 4 + 1] = 0.1 * i; + prior_data[(i - 1) * 4 + 2] = 0.1 * i + 0.2; + prior_data[(i - 1) * 4 + 3] = 0.1 * i + 0.2; + for (int j = 0; j < 4; ++j) { + prior_data[num * 4 + (i - 1) * 4 + j] = 0.1; + } + + for (int j = 0; j < num_loc_classes; ++j) { + loc_data[((i - 1) * 2 + j) * 4] = -1 * (i % 2) * (2 - j); + loc_data[((i - 1) * 2 + j) * 4 + 1] = ((i + 1) % 2) * (2 - j); + loc_data[((i - 1) * 2 + j) * 4 + 2] = ((i + 1) % 2) * (2 - j); + loc_data[((i - 1) * 2 + j) * 4 + 3] = i % 2 * (2 - j); + } + } + + CodeType code_type = PriorBoxParameter_CodeType_CORNER; + Blob bboxes(1, num * num_loc_classes * 4, 1, 1); + TypeParam* bbox_data = bboxes.mutable_gpu_data(); + + bool variance_encoded_in_target = false; + DecodeBBoxesGPU(num * num_loc_classes * 4, loc_data, prior_data, code_type, + variance_encoded_in_target, num, false, num_loc_classes, 0, + false, bbox_data); + TypeParam* bbox_cpu_data = bboxes.mutable_cpu_data(); + for (int i = 1; i <= num; ++i) { + for (int j = 0; j < num_loc_classes; ++j) { + if (j == 0) { + for (int k = 0; k < 4; ++k) { + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 2 * 4 + k], 0., eps); + } + } else { + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4], + 0.1*i + i%2 * -0.1, eps); + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4 + 1], + 0.1*i + (i+1)%2 * 0.1, eps); + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4 + 2], + 0.1*i + 0.2 + (i+1)%2 * 0.1, eps); + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4 + 3], + 0.1*i + 0.2 + i%2 * 0.1, eps); + } + } + } + + variance_encoded_in_target = true; + bbox_data = bboxes.mutable_gpu_data(); + DecodeBBoxesGPU(num * num_loc_classes * 4, loc_data, prior_data, code_type, + variance_encoded_in_target, num, false, num_loc_classes, 0, + false, bbox_data); + bbox_cpu_data = bboxes.mutable_cpu_data(); + for (int i = 1; i <= num; ++i) { + for (int j = 0; j < num_loc_classes; ++j) { + if (j == 0) { + for (int k = 0; k < 4; ++k) { + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 2 * 4 + k], 0., eps); + } 
+ } else { + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4], + 0.1*i + i%2 * -1, eps); + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4 + 1], + 0.1*i + (i+1)%2, eps); + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4 + 2], + 0.1*i + 0.2 + (i+1)%2, eps); + EXPECT_NEAR(bbox_cpu_data[((i - 1) * 2 + j) * 4 + 3], + 0.1*i + 0.2 + i%2, eps); + } + } + } +} + +TYPED_TEST(GPUBBoxUtilTest, TestDecodeBBoxesCenterSize) { + int num = 2; + Blob prior_bboxes(1, 2, num * 4, 1); + TypeParam* prior_data = prior_bboxes.mutable_cpu_data(); + Blob loc_preds(1, num * 4, 1, 1); + TypeParam* loc_data = loc_preds.mutable_cpu_data(); + for (int i = 1; i <= num; ++i) { + prior_data[(i - 1) * 4] = 0.1 * i; + prior_data[(i - 1) * 4 + 1] = 0.1 * i; + prior_data[(i - 1) * 4 + 2] = 0.1 * i + 0.2; + prior_data[(i - 1) * 4 + 3] = 0.1 * i + 0.2; + prior_data[num * 4 + (i - 1) * 4] = 0.1; + prior_data[num * 4 + (i - 1) * 4 + 1] = 0.1; + prior_data[num * 4 + (i - 1) * 4 + 2] = 0.2; + prior_data[num * 4 + (i - 1) * 4 + 3] = 0.2; + + loc_data[(i - 1) * 4] = 0; + loc_data[(i - 1) * 4 + 1] = 0.75; + loc_data[(i - 1) * 4 + 2] = log(2.); + loc_data[(i - 1) * 4 + 3] = log(3./2); + } + + CodeType code_type = PriorBoxParameter_CodeType_CENTER_SIZE; + Blob bboxes(1, num * 4, 1, 1); + TypeParam* bbox_data = bboxes.mutable_gpu_data(); + + bool variance_encoded_in_target = true; + DecodeBBoxesGPU(num * 4, loc_data, prior_data, code_type, + variance_encoded_in_target, num, false, 1, -1, false, + bbox_data); + TypeParam* bbox_cpu_data = bboxes.mutable_cpu_data(); + for (int i = 1; i <= num; ++i) { + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4], 0 + (i-1) * 0.1, eps); + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4 + 1], 0.2 + (i-1) * 0.1, eps); + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4 + 2], 0.4 + (i-1) * 0.1, eps); + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4 + 3], 0.5 + (i-1) * 0.1, eps); + } + + variance_encoded_in_target = false; + for (int i = 1; i <= num; ++i) { + loc_data[(i - 1) * 4] = 0; + loc_data[(i - 1) * 4 + 1] = 7.5; + 
loc_data[(i - 1) * 4 + 2] = log(2.) * 5; + loc_data[(i - 1) * 4 + 3] = log(3./2) * 5; + } + bbox_data = bboxes.mutable_gpu_data(); + DecodeBBoxesGPU(num * 4, loc_data, prior_data, code_type, + variance_encoded_in_target, num, false, 1, -1, false, + bbox_data); + bbox_cpu_data = bboxes.mutable_cpu_data(); + for (int i = 1; i <= num; ++i) { + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4], 0 + (i-1) * 0.1, eps); + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4 + 1], 0.2 + (i-1) * 0.1, eps); + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4 + 2], 0.4 + (i-1) * 0.1, eps); + EXPECT_NEAR(bbox_cpu_data[(i - 1) * 4 + 3], 0.5 + (i-1) * 0.1, eps); + } +} + +TYPED_TEST(GPUBBoxUtilTest, TestComputeOverlapped) { + const int num = 2; + const int num_bboxes = 2; + const int num_loc_classes = 1; + const TypeParam overlap_threshold = 0.3; + + // Fill bboxes. + Blob bboxes(num, num_bboxes * num_loc_classes * 4, 1, 1); + TypeParam* bbox_data = bboxes.mutable_cpu_data(); + // image1 + // bbox1 + bbox_data[0] = 0.1; + bbox_data[1] = 0.1; + bbox_data[2] = 0.3; + bbox_data[3] = 0.3; + // bbox2 + bbox_data[4] = 0.2; + bbox_data[5] = 0.1; + bbox_data[6] = 0.4; + bbox_data[7] = 0.3; + // image2 + // bbox1 + bbox_data[8] = 0.2; + bbox_data[9] = 0.0; + bbox_data[10] = 0.4; + bbox_data[11] = 0.2; + // bbox2 + bbox_data[12] = 0.2; + bbox_data[13] = 0.1; + bbox_data[14] = 0.4; + bbox_data[15] = 0.3; + + Blob overlapped(num, num_loc_classes, num_bboxes, num_bboxes); + const int total_bboxes = overlapped.count(); + bool* overlapped_data = overlapped.mutable_gpu_data(); + ComputeOverlappedGPU(total_bboxes, bbox_data, num_bboxes, num_loc_classes, + overlap_threshold, overlapped_data); + const bool* overlapped_cpu_data = overlapped.cpu_data(); + // image1 + // bbox1 with all other bboxes + EXPECT_EQ(overlapped_cpu_data[0], 0); + EXPECT_EQ(overlapped_cpu_data[1], 1); + // bbox2 with all other bboxes + EXPECT_EQ(overlapped_cpu_data[2], 1); + EXPECT_EQ(overlapped_cpu_data[3], 0); + // image2 + // bbox1 with all other bboxes + 
EXPECT_EQ(overlapped_cpu_data[4], 0); + EXPECT_EQ(overlapped_cpu_data[5], 1); + // bbox2 with all other bboxes + EXPECT_EQ(overlapped_cpu_data[6], 1); + EXPECT_EQ(overlapped_cpu_data[7], 0); +} + +TYPED_TEST(GPUBBoxUtilTest, TestComputeOverlappedMultiClass) { + const int num = 2; + const int num_bboxes = 2; + const int num_loc_classes = 2; + const TypeParam overlap_threshold = 0.3; + + // Fill bboxes. + Blob bboxes(num, num_bboxes * num_loc_classes * 4, 1, 1); + TypeParam* bbox_data = bboxes.mutable_cpu_data(); + // image1 + // bbox1 + // class1 + bbox_data[0] = 0.1; + bbox_data[1] = 0.1; + bbox_data[2] = 0.3; + bbox_data[3] = 0.3; + // class2 + bbox_data[4] = 0.0; + bbox_data[5] = 0.1; + bbox_data[6] = 0.2; + bbox_data[7] = 0.3; + // bbox2 + // class1 + bbox_data[8] = 0.2; + bbox_data[9] = 0.1; + bbox_data[10] = 0.4; + bbox_data[11] = 0.3; + // class2 + bbox_data[12] = 0.2; + bbox_data[13] = 0.1; + bbox_data[14] = 0.4; + bbox_data[15] = 0.3; + // image2 + // bbox1 + // class1 + bbox_data[16] = 0.2; + bbox_data[17] = 0.0; + bbox_data[18] = 0.4; + bbox_data[19] = 0.2; + // class2 + bbox_data[20] = 0.2; + bbox_data[21] = 0.1; + bbox_data[22] = 0.4; + bbox_data[23] = 0.3; + // bbox2 + // class1 + bbox_data[24] = 0.1; + bbox_data[25] = 0.1; + bbox_data[26] = 0.3; + bbox_data[27] = 0.3; + // class2 + bbox_data[28] = 0.1; + bbox_data[29] = 0.1; + bbox_data[30] = 0.3; + bbox_data[31] = 0.3; + + Blob overlapped(num, num_loc_classes, num_bboxes, num_bboxes); + const int total_bboxes = overlapped.count(); + bool* overlapped_data = overlapped.mutable_gpu_data(); + ComputeOverlappedGPU(total_bboxes, bbox_data, num_bboxes, num_loc_classes, + overlap_threshold, overlapped_data); + const bool* overlapped_cpu_data = overlapped.cpu_data(); + // image1 + // class1 + // bbox1 with all other bboxes + EXPECT_EQ(overlapped_cpu_data[0], 0); + EXPECT_EQ(overlapped_cpu_data[1], 1); + // bbox2 with all other bboxes + EXPECT_EQ(overlapped_cpu_data[2], 1); + EXPECT_EQ(overlapped_cpu_data[3], 
0); + // class2 + // bbox1 with all other bboxes + EXPECT_EQ(overlapped_cpu_data[4], 0); + EXPECT_EQ(overlapped_cpu_data[5], 0); + // bbox2 with all other bboxes + EXPECT_EQ(overlapped_cpu_data[6], 0); + EXPECT_EQ(overlapped_cpu_data[7], 0); + // image2 + // class1 + // bbox1 with all other bboxes + EXPECT_EQ(overlapped_cpu_data[8], 0); + EXPECT_EQ(overlapped_cpu_data[9], 0); + // bbox2 with all other bboxes + EXPECT_EQ(overlapped_cpu_data[10], 0); + EXPECT_EQ(overlapped_cpu_data[11], 0); + // class2 + // bbox1 with all other bboxes + EXPECT_EQ(overlapped_cpu_data[12], 0); + EXPECT_EQ(overlapped_cpu_data[13], 1); + // bbox2 with all other bboxes + EXPECT_EQ(overlapped_cpu_data[14], 1); + EXPECT_EQ(overlapped_cpu_data[15], 0); +} + +TYPED_TEST(GPUBBoxUtilTest, TestSoftMaxGPU) { + const int num = 2; + const int num_preds = 2; + const int num_classes = 2; + Blob data_blob(num, num_preds * num_classes, 1, 1); + Blob prob_blob(num, num_preds * num_classes, 1, 1); + TypeParam* cpu_data = data_blob.mutable_cpu_data(); + cpu_data[0] = 0.1; + cpu_data[1] = 0.9; + cpu_data[2] = 0.9; + cpu_data[3] = 0.1; + cpu_data[4] = 0.3; + cpu_data[5] = 0.7; + cpu_data[6] = 0.7; + cpu_data[7] = 0.3; + + const TypeParam* gpu_data = data_blob.gpu_data(); + TypeParam* gpu_prob = prob_blob.mutable_gpu_data(); + SoftMaxGPU(gpu_data, num * num_preds, num_classes, 1, gpu_prob); + + const TypeParam* cpu_prob = prob_blob.cpu_data(); + EXPECT_NEAR(cpu_prob[0], exp(-0.8) / (exp(-0.8) + 1), eps); + EXPECT_NEAR(cpu_prob[1], 1 / (exp(-0.8) + 1), eps); + EXPECT_NEAR(cpu_prob[2], 1 / (exp(-0.8) + 1), eps); + EXPECT_NEAR(cpu_prob[3], exp(-0.8) / (exp(-0.8) + 1), eps); + EXPECT_NEAR(cpu_prob[4], exp(-0.4) / (exp(-0.4) + 1), eps); + EXPECT_NEAR(cpu_prob[5], 1 / (exp(-0.4) + 1), eps); + EXPECT_NEAR(cpu_prob[6], 1 / (exp(-0.4) + 1), eps); + EXPECT_NEAR(cpu_prob[7], exp(-0.4) / (exp(-0.4) + 1), eps); +} + +TYPED_TEST(GPUBBoxUtilTest, TestComputeConfLossMatchGPU) { + const int num = 2; + const int 
num_preds_per_class = 2; + const int num_classes = 2; + const int dim = num_preds_per_class * num_classes; + Blob conf_blob(num, dim, 1, 1); + TypeParam* conf_data = conf_blob.mutable_cpu_data(); + vector > > all_match_indices; + map > all_gt_bboxes; + for (int i = 0; i < num; ++i) { + int sign = i % 2 ? 1 : -1; + for (int j = 0; j < num_preds_per_class; ++j) { + for (int c = 0; c < num_classes; ++c) { + int idx = (i * num_preds_per_class + j) * num_classes + c; + conf_data[idx] = sign * idx * 0.1; + } + } + map > match_indices; + vector indices(num_preds_per_class, -1); + match_indices[-1] = indices; + if (i == 1) { + NormalizedBBox gt_bbox; + gt_bbox.set_label(1); + all_gt_bboxes[i].push_back(gt_bbox); + // The first prior in second image is matched to a gt bbox of label 1. + match_indices[-1][0] = 0; + } + all_match_indices.push_back(match_indices); + } + + vector > all_conf_loss; + ConfLossType loss_type = MultiBoxLossParameter_ConfLossType_LOGISTIC; + ComputeConfLossGPU(conf_blob, num, num_preds_per_class, num_classes, + -1, loss_type, all_match_indices, all_gt_bboxes, &all_conf_loss); + + EXPECT_EQ(all_conf_loss.size(), num); + EXPECT_EQ(all_conf_loss[0].size(), num_preds_per_class); + EXPECT_NEAR(all_conf_loss[0][0], + -(log(exp(0.)/(1.+exp(0.))) + log(exp(0.1)/(1+exp(0.1)))), eps); + EXPECT_NEAR(all_conf_loss[0][1], + -(log(exp(0.2)/(1.+exp(0.2))) + log(exp(0.3)/(1+exp(0.3)))), eps); + EXPECT_EQ(all_conf_loss[1].size(), num_preds_per_class); + EXPECT_NEAR(all_conf_loss[1][0], + -(log(exp(-0.4)/(1.+exp(-0.4))) + log(1./(1+exp(-0.5)))), + eps); + EXPECT_NEAR(all_conf_loss[1][1], + -(log(exp(-0.6)/(1.+exp(-0.6))) + log(exp(-0.7)/(1+exp(-0.7)))), + eps); + + ComputeConfLossGPU(conf_blob, num, num_preds_per_class, num_classes, + 0, loss_type, all_match_indices, all_gt_bboxes, &all_conf_loss); + + EXPECT_EQ(all_conf_loss.size(), num); + EXPECT_EQ(all_conf_loss[0].size(), num_preds_per_class); + EXPECT_NEAR(all_conf_loss[0][0], + -(log(1./(1.+exp(0.))) + 
log(exp(0.1)/(1+exp(0.1)))), eps); + EXPECT_NEAR(all_conf_loss[0][1], + -(log(1./(1.+exp(0.2))) + log(exp(0.3)/(1+exp(0.3)))), eps); + EXPECT_EQ(all_conf_loss[1].size(), num_preds_per_class); + EXPECT_NEAR(all_conf_loss[1][0], + -(log(exp(-0.4)/(1.+exp(-0.4))) + log(1./(1+exp(-0.5)))), eps); + EXPECT_NEAR(all_conf_loss[1][1], + -(log(1./(1.+exp(-0.6))) + log(exp(-0.7)/(1+exp(-0.7)))), eps); + + loss_type = MultiBoxLossParameter_ConfLossType_SOFTMAX; + ComputeConfLossGPU(conf_blob, num, num_preds_per_class, num_classes, + 0, loss_type, all_match_indices, all_gt_bboxes, &all_conf_loss); + + EXPECT_EQ(all_conf_loss.size(), num); + for (int i = 0; i < num; ++i) { + EXPECT_EQ(all_conf_loss[i].size(), num_preds_per_class); + int sign = i % 2 ? 1 : -1; + for (int j = 0; j < num_preds_per_class; ++j) { + if (sign == 1) { + if (j == 0) { + EXPECT_NEAR(all_conf_loss[i][j], -log(1./(1+exp(-0.1))), eps); + } else { + EXPECT_NEAR(all_conf_loss[i][j], -log(exp(-0.1)/(1+exp(-0.1))), eps); + } + } else { + EXPECT_NEAR(all_conf_loss[i][j], -log(1./(1+exp(-0.1))), eps); + } + } + } +} + +#endif + +} // namespace caffe diff --git a/src/caffe/test/test_benchmark.cpp b/src/caffe/test/test_benchmark.cpp index b03fdf69a8a..2827c3a4df7 100644 --- a/src/caffe/test/test_benchmark.cpp +++ b/src/caffe/test/test_benchmark.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_bias_layer.cpp b/src/caffe/test/test_bias_layer.cpp index 3862e763e28..e7e9ab53687 100644 --- a/src/caffe/test/test_bias_layer.cpp +++ b/src/caffe/test/test_bias_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_blob.cpp b/src/caffe/test/test_blob.cpp index b88562223d0..1f2105bfb6b 100644 --- a/src/caffe/test/test_blob.cpp +++ b/src/caffe/test/test_blob.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_caffe_main.cpp b/src/caffe/test/test_caffe_main.cpp index 6473b74d0a6..40895e0bdeb 100644 --- a/src/caffe/test/test_caffe_main.cpp +++ b/src/caffe/test/test_caffe_main.cpp @@ -1,3 +1,44 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +// The main caffe test code. Your test cpp code should include this hpp +// to allow a main function to be compiled into the binary. + +#include #include "caffe/caffe.hpp" #include "caffe/test/test_caffe_main.hpp" @@ -12,7 +53,7 @@ using caffe::CAFFE_TEST_CUDA_PROP; #endif int main(int argc, char** argv) { - ::testing::InitGoogleTest(&argc, argv); + ::testing::InitGoogleMock(&argc, argv); caffe::GlobalInit(&argc, &argv); #ifndef CPU_ONLY // Before starting testing, let's first print out a few cuda defice info. diff --git a/src/caffe/test/test_common.cpp b/src/caffe/test/test_common.cpp index 58ae5c60a4f..87a8bba79e4 100644 --- a/src/caffe/test/test_common.cpp +++ b/src/caffe/test/test_common.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. 
+ +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include "gtest/gtest.h" #include "caffe/common.hpp" diff --git a/src/caffe/test/test_concat_layer.cpp b/src/caffe/test/test_concat_layer.cpp index 23c1e8c1d29..65b8084de7d 100644 --- a/src/caffe/test/test_concat_layer.cpp +++ b/src/caffe/test/test_concat_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_contrastive_loss_layer.cpp b/src/caffe/test/test_contrastive_loss_layer.cpp index 2fa055ee0de..c4371efcc8c 100644 --- a/src/caffe/test/test_contrastive_loss_layer.cpp +++ b/src/caffe/test/test_contrastive_loss_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include #include diff --git a/src/caffe/test/test_convolution_layer.cpp b/src/caffe/test/test_convolution_layer.cpp index 9bb19d13592..4d1caf7c963 100644 --- a/src/caffe/test/test_convolution_layer.cpp +++ b/src/caffe/test/test_convolution_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" @@ -136,6 +173,25 @@ void caffe_conv(const Blob* in, ConvolutionParameter* conv_param, } } } + //relu + if (conv_param->relu()){ + for (int n = 0; n < out->shape(0); n++) { + for (int o = 0; o < out->shape(1); o++) { + for (int z = 0; z < (has_depth ? 
out->shape(2) : 1); z++) { + for (int y = 0; y < out->shape(2 + has_depth); y++) { + for (int x = 0; x < out->shape(3 + has_depth); x++) { + out_offset[0] = n; + out_offset[1] = o; + if (has_depth) { out_offset[2] = z; } + out_offset[2 + has_depth] = y; + out_offset[3 + has_depth] = x; + if(out_data[out->offset(out_offset)] < 0) out_data[out->offset(out_offset)] = 0; + } + } + } + } + } + } } template void caffe_conv(const Blob* in, diff --git a/src/caffe/test/test_cpu_info.cpp b/src/caffe/test/test_cpu_info.cpp new file mode 100644 index 00000000000..b8723f51de4 --- /dev/null +++ b/src/caffe/test/test_cpu_info.cpp @@ -0,0 +1,271 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include "gtest/gtest.h" + +#include "caffe/util/cpu_info.hpp" + +namespace caffe { +namespace cpu { + +class CpuInfoContent { + public: + CpuInfoContent(const char *modelName, + int numberOfSockets, int coresPerSocket, int threadsPerCore) { + + const int contentLength = 1 * 1024 * 1024; + content = new char[contentLength]; + + char *contentPosition = content; + char *contentEnd = &content[contentLength]; + + int processorId = 0; + for (int socketId = 0; socketId < numberOfSockets; socketId++) { + for (int threadId = 0; threadId < threadsPerCore; threadId++) { + for (int coreId = 0; coreId < coresPerSocket; coreId++) { + contentPosition += snprintf( + contentPosition, + contentEnd - contentPosition, + "processor : %i\n" + "model name : %s\n" + "physical id : %i\n" + "siblings : %i\n" + "core id : %i\n" + "cpu cores : %i\n" + "\n", + processorId++, + modelName, + socketId, + coresPerSocket * threadsPerCore, + coreId, + coresPerSocket); + } + } + } + } + + ~CpuInfoContent() { + delete [] content; + } + + const char *getContent() const { + return content; + } + + private: + char *content; +}; + +TEST(CpuInfo, isProcessorStructureInitialized) { + Processor processor; + EXPECT_EQ(processor.processor, 0); + 
EXPECT_EQ(processor.physicalId, 0); + EXPECT_EQ(processor.siblings, 0); + EXPECT_EQ(processor.coreId, 0); + EXPECT_EQ(processor.cpuCores, 0); + EXPECT_EQ(processor.speedMHz, 0); +} + +TEST(CpuInfo, testCpuInfoForEmptyInput) { + CpuInfo cpuInfo(""); + EXPECT_STREQ(cpuInfo.getFirstLine(), NULL); + EXPECT_STREQ(cpuInfo.getNextLine(), NULL); + EXPECT_STREQ(cpuInfo.getNextLine(), NULL); +} + +TEST(CpuInfo, testCpuInfoForSingleCharacterInput) { + CpuInfo cpuInfo("c"); + EXPECT_STREQ(cpuInfo.getFirstLine(), "c"); + EXPECT_STREQ(cpuInfo.getNextLine(), NULL); + EXPECT_STREQ(cpuInfo.getNextLine(), NULL); +} + +TEST(CpuInfo, testCpuInfoForSingleLineInput) { + CpuInfo cpuInfo("First line"); + EXPECT_STREQ(cpuInfo.getFirstLine(), "First line"); + EXPECT_STREQ(cpuInfo.getNextLine(), NULL); + EXPECT_STREQ(cpuInfo.getNextLine(), NULL); +} + +TEST(CpuInfo, testCpuInfoForMultiLineInput) { + CpuInfo cpuInfo("First line\nSecond line\nThird line"); + EXPECT_STREQ(cpuInfo.getFirstLine(), "First line"); + EXPECT_STREQ(cpuInfo.getNextLine(), "Second line"); + EXPECT_STREQ(cpuInfo.getNextLine(), "Third line"); + EXPECT_STREQ(cpuInfo.getNextLine(), NULL); + EXPECT_STREQ(cpuInfo.getNextLine(), NULL); +} + +TEST(CpuInfo, testCpuInfoForEmptyLinesInput) { + CpuInfo cpuInfo("\nSecond line\nThird line\n\nFifth line\n\n"); + EXPECT_STREQ(cpuInfo.getFirstLine(), ""); + EXPECT_STREQ(cpuInfo.getNextLine(), "Second line"); + EXPECT_STREQ(cpuInfo.getNextLine(), "Third line"); + EXPECT_STREQ(cpuInfo.getNextLine(), ""); + EXPECT_STREQ(cpuInfo.getNextLine(), "Fifth line"); + EXPECT_STREQ(cpuInfo.getNextLine(), ""); + EXPECT_STREQ(cpuInfo.getNextLine(), NULL); + EXPECT_STREQ(cpuInfo.getNextLine(), NULL); +} + +TEST(CpuInfo, testCollectionForEmptyInput) { + CpuInfo cpuInfo(""); + Collection collection(&cpuInfo); + EXPECT_EQ(collection.getProcessorSpeedMHz(), 0); + EXPECT_EQ(collection.getTotalNumberOfSockets(), 0); + EXPECT_EQ(collection.getTotalNumberOfCpuCores(), 0); + 
EXPECT_EQ(collection.getNumberOfProcessors(), 0); +} + +TEST(CpuInfo, testCollectionForSingleSocketSingleCoreSingleThread) { + CpuInfoContent cpuInfoContent("xxx", 1, 1, 1); + CpuInfo cpuInfo(cpuInfoContent.getContent()); + Collection collection(&cpuInfo); + EXPECT_EQ(collection.getProcessorSpeedMHz(), 0); + EXPECT_EQ(collection.getTotalNumberOfSockets(), 1); + EXPECT_EQ(collection.getTotalNumberOfCpuCores(), 1); + EXPECT_EQ(collection.getNumberOfProcessors(), 1); +} + +TEST(CpuInfo, testCollectionForMultipleSockets) { + CpuInfoContent cpuInfoContent("xxx", 4, 1, 1); + CpuInfo cpuInfo(cpuInfoContent.getContent()); + Collection collection(&cpuInfo); + EXPECT_EQ(collection.getProcessorSpeedMHz(), 0); + EXPECT_EQ(collection.getTotalNumberOfSockets(), 4); + EXPECT_EQ(collection.getTotalNumberOfCpuCores(), 4); + EXPECT_EQ(collection.getNumberOfProcessors(), 4); +} + +TEST(CpuInfo, testCollectionForMultipleCores) { + CpuInfoContent cpuInfoContent("xxx", 1, 8, 1); + CpuInfo cpuInfo(cpuInfoContent.getContent()); + Collection collection(&cpuInfo); + EXPECT_EQ(collection.getProcessorSpeedMHz(), 0); + EXPECT_EQ(collection.getTotalNumberOfSockets(), 1); + EXPECT_EQ(collection.getTotalNumberOfCpuCores(), 8); + EXPECT_EQ(collection.getNumberOfProcessors(), 8); +} + +TEST(CpuInfo, testCollectionForMultithreading) { + CpuInfoContent cpuInfoContent("xxx", 1, 1, 2); + CpuInfo cpuInfo(cpuInfoContent.getContent()); + Collection collection(&cpuInfo); + EXPECT_EQ(collection.getProcessorSpeedMHz(), 0); + EXPECT_EQ(collection.getTotalNumberOfSockets(), 1); + EXPECT_EQ(collection.getTotalNumberOfCpuCores(), 1); + EXPECT_EQ(collection.getNumberOfProcessors(), 2); +} + +TEST(CpuInfo, testCollectionForMultipleCoresWithMultithreading) { + CpuInfoContent cpuInfoContent("xxx", 1, 4, 2); + CpuInfo cpuInfo(cpuInfoContent.getContent()); + Collection collection(&cpuInfo); + EXPECT_EQ(collection.getProcessorSpeedMHz(), 0); + EXPECT_EQ(collection.getTotalNumberOfSockets(), 1); + 
EXPECT_EQ(collection.getTotalNumberOfCpuCores(), 4); + EXPECT_EQ(collection.getNumberOfProcessors(), 8); +} + +TEST(CpuInfo, testCollectionForMultipleSocketsMultipleCoresWithMultithreading) { + CpuInfoContent cpuInfoContent("xxx", 2, 18, 2); + CpuInfo cpuInfo(cpuInfoContent.getContent()); + Collection collection(&cpuInfo); + EXPECT_EQ(collection.getProcessorSpeedMHz(), 0); + EXPECT_EQ(collection.getTotalNumberOfSockets(), 2); + EXPECT_EQ(collection.getTotalNumberOfCpuCores(), 36); + EXPECT_EQ(collection.getNumberOfProcessors(), 72); +} + +TEST(CpuInfo, testCollectionForSpeed) { + CpuInfoContent cpuInfoContent("Intel(R) Xeon(R) CPU E5-2699 v4 @ 2.20GHz", + 2, + 22, + 2); + CpuInfo cpuInfo(cpuInfoContent.getContent()); + Collection collection(&cpuInfo); + EXPECT_EQ(collection.getProcessorSpeedMHz(), 2200); + EXPECT_EQ(collection.getTotalNumberOfSockets(), 2); + EXPECT_EQ(collection.getTotalNumberOfCpuCores(), 44); + EXPECT_EQ(collection.getNumberOfProcessors(), 88); +} + +TEST(CpuInfo, testCollectionForSpeedInGhz1) { + CpuInfoContent cpuInfoContent("xxx @ 4.80GHz", 1, 1, 1); + CpuInfo cpuInfo(cpuInfoContent.getContent()); + Collection collection(&cpuInfo); + EXPECT_EQ(collection.getProcessorSpeedMHz(), 4800); +} + +TEST(CpuInfo, testCollectionForSpeedInGhz2) { + CpuInfoContent cpuInfoContent("xxx @ 400 GHz", 1, 1, 1); + CpuInfo cpuInfo(cpuInfoContent.getContent()); + Collection collection(&cpuInfo); + EXPECT_EQ(collection.getProcessorSpeedMHz(), 400000); +} + +TEST(CpuInfo, testCollectionForSpeedInMhz1) { + CpuInfoContent cpuInfoContent("xxx @ 400 MHz", 1, 1, 1); + CpuInfo cpuInfo(cpuInfoContent.getContent()); + Collection collection(&cpuInfo); + EXPECT_EQ(collection.getProcessorSpeedMHz(), 400); +} + +TEST(CpuInfo, testCollectionForSpeedInMhz2) { + CpuInfoContent cpuInfoContent("xxx @ 2400 MHz", 1, 1, 1); + CpuInfo cpuInfo(cpuInfoContent.getContent()); + Collection collection(&cpuInfo); + EXPECT_EQ(collection.getProcessorSpeedMHz(), 2400); +} + +TEST(CpuInfo, 
testCollectionForSpeedRecognitionGhz) { + CpuInfoContent cpuInfoContent("xxx @ 2.4", 1, 1, 1); + CpuInfo cpuInfo(cpuInfoContent.getContent()); + Collection collection(&cpuInfo); + EXPECT_EQ(collection.getProcessorSpeedMHz(), 2400); +} + +TEST(CpuInfo, testCollectionForSpeedRecognitionMhz) { + CpuInfoContent cpuInfoContent("xxx @ 2400", 1, 1, 1); + CpuInfo cpuInfo(cpuInfoContent.getContent()); + Collection collection(&cpuInfo); + EXPECT_EQ(collection.getProcessorSpeedMHz(), 2400); +} + +} // namespace cpu +} // namespace caffe + diff --git a/src/caffe/test/test_crop_layer.cpp b/src/caffe/test/test_crop_layer.cpp index ce2c736f644..65d3271bc11 100644 --- a/src/caffe/test/test_crop_layer.cpp +++ b/src/caffe/test/test_crop_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_data/generate_sample_data.py b/src/caffe/test/test_data/generate_sample_data.py old mode 100644 new mode 100755 index 2645073575f..151c8e9db5d --- a/src/caffe/test/test_data/generate_sample_data.py +++ b/src/caffe/test/test_data/generate_sample_data.py @@ -1,3 +1,39 @@ +# +# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation +# +# All contributions by the University of California: +# Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +# All rights reserved. +# +# All other contributions: +# Copyright (c) 2014, 2015, the respective contributors +# All rights reserved. +# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md +# +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# * Neither the name of Intel Corporation nor the names of its contributors +# may be used to endorse or promote products derived from this software +# without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# """ Generate data used in the HDF5DataLayer and GradientBasedSolver tests. 
""" diff --git a/src/caffe/test/test_data/test_topology.prototxt b/src/caffe/test/test_data/test_topology.prototxt new file mode 100644 index 00000000000..4aaedc29942 --- /dev/null +++ b/src/caffe/test/test_data/test_topology.prototxt @@ -0,0 +1,43 @@ +name: "testNet" +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TRAIN + } + transform_param { + mirror: true + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + data_param { + source: "examples/imagenet/ilsvrc12_train_lmdb" + batch_size: 32 + backend: LMDB + } +} +layer { + name: "data" + type: "Data" + top: "data" + top: "label" + include { + phase: TEST + } + transform_param { + mirror: false + crop_size: 224 + mean_value: 104 + mean_value: 117 + mean_value: 123 + } + data_param { + source: "examples/imagenet/ilsvrc12_val_lmdb" + batch_size: 50 + backend: LMDB + } +} \ No newline at end of file diff --git a/src/caffe/test/test_data_layer.cpp b/src/caffe/test/test_data_layer.cpp index 3e8d113d918..e2705de765d 100644 --- a/src/caffe/test/test_data_layer.cpp +++ b/src/caffe/test/test_data_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifdef USE_OPENCV #include #include diff --git a/src/caffe/test/test_data_transformer.cpp b/src/caffe/test/test_data_transformer.cpp index 31bf1c1fb14..5909a6aed48 100644 --- a/src/caffe/test/test_data_transformer.cpp +++ b/src/caffe/test/test_data_transformer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifdef USE_OPENCV #include #include diff --git a/src/caffe/test/test_db.cpp b/src/caffe/test/test_db.cpp index 1b487b14c58..22953b2b354 100644 --- a/src/caffe/test/test_db.cpp +++ b/src/caffe/test/test_db.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #if defined(USE_LEVELDB) && defined(USE_LMDB) && defined(USE_OPENCV) #include diff --git a/src/caffe/test/test_deconvolution_layer.cpp b/src/caffe/test/test_deconvolution_layer.cpp index c4b09ad555a..0003cd6a172 100644 --- a/src/caffe/test/test_deconvolution_layer.cpp +++ b/src/caffe/test/test_deconvolution_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_detection_evaluate_layer.cpp b/src/caffe/test/test_detection_evaluate_layer.cpp new file mode 100644 index 00000000000..ba0c619a765 --- /dev/null +++ b/src/caffe/test/test_detection_evaluate_layer.cpp @@ -0,0 +1,240 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/detection_evaluate_layer.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +static const float eps = 1e-6; + +template +class DetectionEvaluateLayerTest : public CPUDeviceTest { + protected: + DetectionEvaluateLayerTest() + : num_classes_(3), + background_label_id_(0), + overlap_threshold_(0.3), + blob_bottom_det_(new Blob(1, 1, 8, 7)), + blob_bottom_gt_(new Blob(1, 1, 4, 8)), + blob_top_(new Blob()) { + this->FillData(); + blob_bottom_vec_.push_back(blob_bottom_det_); + blob_bottom_vec_.push_back(blob_bottom_gt_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~DetectionEvaluateLayerTest() { + delete blob_bottom_det_; + delete blob_bottom_gt_; + delete blob_top_; + } + + void FillData() { + // Fill ground truth. 
+ bool is_gt = true; + FillItem(blob_bottom_gt_, 0, "0 1 0 0.1 0.1 0.3 0.3 0", is_gt); + FillItem(blob_bottom_gt_, 1, "0 1 0 0.6 0.6 0.8 0.8 1", is_gt); + FillItem(blob_bottom_gt_, 2, "1 2 0 0.3 0.3 0.6 0.5 0", is_gt); + FillItem(blob_bottom_gt_, 3, "1 1 0 0.7 0.1 0.9 0.3 0", is_gt); + + // Fill detections. + is_gt = false; + FillItem(blob_bottom_det_, 0, "0 1 0.3 0.1 0.0 0.4 0.3", is_gt); + FillItem(blob_bottom_det_, 1, "0 1 0.7 0.0 0.1 0.2 0.3", is_gt); + FillItem(blob_bottom_det_, 2, "0 1 0.9 0.7 0.6 0.8 0.8", is_gt); + FillItem(blob_bottom_det_, 3, "1 2 0.8 0.2 0.1 0.4 0.4", is_gt); + FillItem(blob_bottom_det_, 4, "1 2 0.1 0.4 0.3 0.7 0.5", is_gt); + FillItem(blob_bottom_det_, 5, "1 1 0.2 0.8 0.1 1.0 0.3", is_gt); + FillItem(blob_bottom_det_, 6, "1 3 0.2 0.8 0.1 1.0 0.3", is_gt); + FillItem(blob_bottom_det_, 7, "2 1 0.2 0.8 0.1 1.0 0.3", is_gt); + } + + void FillItem(Blob* blob, const int item, const string values, + const bool is_gt) { + CHECK_LT(item, blob->height()); + + // Split values to vector of items. + vector items; + std::istringstream iss(values); + std::copy(std::istream_iterator(iss), + std::istream_iterator(), back_inserter(items)); + if (is_gt) { + EXPECT_EQ(items.size(), 8); + } else { + EXPECT_EQ(items.size(), 7); + } + int num_items = items.size(); + + // Fill item. + Dtype* blob_data = blob->mutable_cpu_data(); + for (int i = 0; i < 2; ++i) { + blob_data[item * num_items + i] = atoi(items[i].c_str()); + } + for (int i = 2; i < 7; ++i) { + blob_data[item * num_items + i] = atof(items[i].c_str()); + } + if (is_gt) { + blob_data[item * num_items + 7] = atoi(items[7].c_str()); + } + } + + void CheckEqual(const Blob& blob, const int num, const string values) { + CHECK_LT(num, blob.height()); + + // Split values to vector of items. + vector items; + std::istringstream iss(values); + std::copy(std::istream_iterator(iss), + std::istream_iterator(), back_inserter(items)); + EXPECT_EQ(items.size(), 5); + + // Check data. 
+ const Dtype* blob_data = blob.cpu_data(); + for (int i = 0; i < 5; ++i) { + if (i == 2) { + EXPECT_NEAR(blob_data[num * blob.width() + i], + atof(items[i].c_str()), eps); + } else { + EXPECT_EQ(static_cast(blob_data[num * blob.width() + i]), + atoi(items[i].c_str())); + } + } + } + + int num_classes_; + int background_label_id_; + float overlap_threshold_; + + Blob* const blob_bottom_det_; + Blob* const blob_bottom_gt_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(DetectionEvaluateLayerTest, TestDtypes); + +TYPED_TEST(DetectionEvaluateLayerTest, TestSetup) { + LayerParameter layer_param; + DetectionEvaluateParameter* detection_evaluate_param = + layer_param.mutable_detection_evaluate_param(); + detection_evaluate_param->set_num_classes(this->num_classes_); + detection_evaluate_param->set_background_label_id(this->background_label_id_); + detection_evaluate_param->set_overlap_threshold(this->overlap_threshold_); + DetectionEvaluateLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_det_->height() + 2); + EXPECT_EQ(this->blob_top_->width(), 5); +} + +TYPED_TEST(DetectionEvaluateLayerTest, TestForward) { + LayerParameter layer_param; + DetectionEvaluateParameter* detection_evaluate_param = + layer_param.mutable_detection_evaluate_param(); + detection_evaluate_param->set_num_classes(this->num_classes_); + detection_evaluate_param->set_background_label_id(this->background_label_id_); + detection_evaluate_param->set_overlap_threshold(this->overlap_threshold_); + DetectionEvaluateLayer layer(layer_param); + + this->FillData(); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + 
EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_det_->height() + 2); + EXPECT_EQ(this->blob_top_->width(), 5); + + this->CheckEqual(*(this->blob_top_), 0, "-1 1 3 -1 -1"); + this->CheckEqual(*(this->blob_top_), 1, "-1 2 1 -1 -1"); + this->CheckEqual(*(this->blob_top_), 2, "0 1 0.9 1 0"); + this->CheckEqual(*(this->blob_top_), 3, "0 1 0.7 1 0"); + this->CheckEqual(*(this->blob_top_), 4, "0 1 0.3 0 1"); + this->CheckEqual(*(this->blob_top_), 5, "1 1 0.2 1 0"); + this->CheckEqual(*(this->blob_top_), 6, "1 2 0.8 0 1"); + this->CheckEqual(*(this->blob_top_), 7, "1 2 0.1 1 0"); + this->CheckEqual(*(this->blob_top_), 8, "1 3 0.2 0 1"); + this->CheckEqual(*(this->blob_top_), 9, "2 1 0.2 0 1"); +} + +TYPED_TEST(DetectionEvaluateLayerTest, TestForwardSkipDifficult) { + LayerParameter layer_param; + DetectionEvaluateParameter* detection_evaluate_param = + layer_param.mutable_detection_evaluate_param(); + detection_evaluate_param->set_num_classes(this->num_classes_); + detection_evaluate_param->set_background_label_id(this->background_label_id_); + detection_evaluate_param->set_overlap_threshold(this->overlap_threshold_); + detection_evaluate_param->set_evaluate_difficult_gt(false); + DetectionEvaluateLayer layer(layer_param); + + this->FillData(); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_det_->height() + 2); + EXPECT_EQ(this->blob_top_->width(), 5); + + this->CheckEqual(*(this->blob_top_), 0, "-1 1 2 -1 -1"); + this->CheckEqual(*(this->blob_top_), 1, "-1 2 1 -1 -1"); + this->CheckEqual(*(this->blob_top_), 2, "0 1 0.9 0 0"); + this->CheckEqual(*(this->blob_top_), 3, "0 1 0.7 1 0"); + this->CheckEqual(*(this->blob_top_), 4, "0 1 0.3 0 1"); + this->CheckEqual(*(this->blob_top_), 5, "1 1 0.2 1 0"); + this->CheckEqual(*(this->blob_top_), 6, "1 2 0.8 0 
1"); + this->CheckEqual(*(this->blob_top_), 7, "1 2 0.1 1 0"); + this->CheckEqual(*(this->blob_top_), 8, "1 3 0.2 0 1"); + this->CheckEqual(*(this->blob_top_), 9, "2 1 0.2 0 1"); +} + +} // namespace caffe diff --git a/src/caffe/test/test_detection_output_layer.cpp b/src/caffe/test/test_detection_output_layer.cpp new file mode 100644 index 00000000000..df597dd7cef --- /dev/null +++ b/src/caffe/test/test_detection_output_layer.cpp @@ -0,0 +1,376 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/detection_output_layer.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +static const float eps = 1e-6; + +template +class DetectionOutputLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + DetectionOutputLayerTest() + : num_(2), + num_priors_(4), + num_classes_(2), + share_location_(true), + num_loc_classes_(share_location_ ? 1 : num_classes_), + background_label_id_(0), + nms_threshold_(0.1), + top_k_(2), + blob_bottom_loc_( + new Blob(num_, num_priors_ * num_loc_classes_ * 4, 1, 1)), + blob_bottom_conf_( + new Blob(num_, num_priors_ * num_classes_, 1, 1)), + blob_bottom_prior_(new Blob(num_, 2, num_priors_ * 4, 1)), + blob_top_(new Blob()) { + // Fill prior data first. 
+ Dtype* prior_data = blob_bottom_prior_->mutable_cpu_data(); + const float step = 0.5; + const float box_size = 0.3; + int idx = 0; + for (int h = 0; h < 2; ++h) { + float center_y = (h + 0.5) * step; + for (int w = 0; w < 2; ++w) { + float center_x = (w + 0.5) * step; + prior_data[idx++] = (center_x - box_size / 2); + prior_data[idx++] = (center_y - box_size / 2); + prior_data[idx++] = (center_x + box_size / 2); + prior_data[idx++] = (center_y + box_size / 2); + } + } + for (int i = 0; i < idx; ++i) { + prior_data[idx + i] = 0.1; + } + + // Fill confidences. + Dtype* conf_data = blob_bottom_conf_->mutable_cpu_data(); + idx = 0; + for (int i = 0; i < this->num_; ++i) { + for (int j = 0; j < this->num_priors_; ++j) { + for (int c = 0; c < this->num_classes_; ++c) { + if (i % 2 == c % 2) { + conf_data[idx++] = j * 0.2; + } else { + conf_data[idx++] = 1 - j * 0.2; + } + } + } + } + + blob_bottom_vec_.push_back(blob_bottom_loc_); + blob_bottom_vec_.push_back(blob_bottom_conf_); + blob_bottom_vec_.push_back(blob_bottom_prior_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~DetectionOutputLayerTest() { + delete blob_bottom_loc_; + delete blob_bottom_conf_; + delete blob_bottom_prior_; + delete blob_top_; + } + + void FillLocData(const bool share_location = true) { + // Fill location offsets. + int num_loc_classes = share_location ? 1 : this->num_classes_; + blob_bottom_loc_->Reshape( + this->num_, this->num_priors_ * num_loc_classes * 4, 1, 1); + Dtype* loc_data = blob_bottom_loc_->mutable_cpu_data(); + int idx = 0; + for (int i = 0; i < this->num_; ++i) { + for (int h = 0; h < 2; ++h) { + for (int w = 0; w < 2; ++w) { + for (int c = 0; c < num_loc_classes; ++c) { + loc_data[idx++] = (w % 2 ? -1 : 1) * (i * 1 + c / 2. + 0.5); + loc_data[idx++] = (h % 2 ? -1 : 1) * (i * 1 + c / 2. + 0.5); + loc_data[idx++] = (w % 2 ? -1 : 1) * (i * 1 + c / 2. + 0.5); + loc_data[idx++] = (h % 2 ? -1 : 1) * (i * 1 + c / 2. 
+ 0.5); + } + } + } + } + } + + void CheckEqual(const Blob& blob, const int num, const string values) { + CHECK_LT(num, blob.height()); + + // Split values to vector of items. + vector items; + std::istringstream iss(values); + std::copy(std::istream_iterator(iss), + std::istream_iterator(), back_inserter(items)); + EXPECT_EQ(items.size(), 7); + + // Check data. + const Dtype* blob_data = blob.cpu_data(); + for (int i = 0; i < 2; ++i) { + EXPECT_EQ(static_cast(blob_data[num * blob.width() + i]), + atoi(items[i].c_str())); + } + for (int i = 2; i < 7; ++i) { + EXPECT_NEAR(blob_data[num * blob.width() + i], + atof(items[i].c_str()), eps); + } + } + + int num_; + int num_priors_; + int num_classes_; + bool share_location_; + int num_loc_classes_; + int background_label_id_; + float nms_threshold_; + int top_k_; + + Blob* const blob_bottom_loc_; + Blob* const blob_bottom_conf_; + Blob* const blob_bottom_prior_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(DetectionOutputLayerTest, TestDtypesAndDevices); + +TYPED_TEST(DetectionOutputLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + DetectionOutputParameter* detection_output_param = + layer_param.mutable_detection_output_param(); + detection_output_param->set_num_classes(this->num_classes_); + DetectionOutputLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 7); +} + +TYPED_TEST(DetectionOutputLayerTest, TestForwardShareLocation) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + DetectionOutputParameter* detection_output_param = + layer_param.mutable_detection_output_param(); + detection_output_param->set_num_classes(this->num_classes_); + detection_output_param->set_share_location(true); + 
detection_output_param->set_background_label_id(0); + detection_output_param->mutable_nms_param()->set_nms_threshold( + this->nms_threshold_); + DetectionOutputLayer layer(layer_param); + + this->FillLocData(true); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 6); + EXPECT_EQ(this->blob_top_->width(), 7); + + this->CheckEqual(*(this->blob_top_), 0, "0 1 1.0 0.15 0.15 0.45 0.45"); + this->CheckEqual(*(this->blob_top_), 1, "0 1 0.8 0.55 0.15 0.85 0.45"); + this->CheckEqual(*(this->blob_top_), 2, "0 1 0.6 0.15 0.55 0.45 0.85"); + this->CheckEqual(*(this->blob_top_), 3, "0 1 0.4 0.55 0.55 0.85 0.85"); + this->CheckEqual(*(this->blob_top_), 4, "1 1 0.6 0.45 0.45 0.75 0.75"); + this->CheckEqual(*(this->blob_top_), 5, "1 1 0.0 0.25 0.25 0.55 0.55"); +} + +TYPED_TEST(DetectionOutputLayerTest, TestForwardShareLocationTopK) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + DetectionOutputParameter* detection_output_param = + layer_param.mutable_detection_output_param(); + detection_output_param->set_num_classes(this->num_classes_); + detection_output_param->set_share_location(true); + detection_output_param->set_background_label_id(0); + detection_output_param->mutable_nms_param()->set_nms_threshold( + this->nms_threshold_); + detection_output_param->mutable_nms_param()->set_top_k(this->top_k_); + DetectionOutputLayer layer(layer_param); + + this->FillLocData(true); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 7); + + this->CheckEqual(*(this->blob_top_), 0, "0 1 1.0 0.15 0.15 0.45 0.45"); + 
this->CheckEqual(*(this->blob_top_), 1, "0 1 0.8 0.55 0.15 0.85 0.45"); + this->CheckEqual(*(this->blob_top_), 2, "1 1 0.6 0.45 0.45 0.75 0.75"); +} + +TYPED_TEST(DetectionOutputLayerTest, TestForwardNoShareLocation) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + DetectionOutputParameter* detection_output_param = + layer_param.mutable_detection_output_param(); + detection_output_param->set_num_classes(this->num_classes_); + detection_output_param->set_share_location(false); + detection_output_param->set_background_label_id(-1); + detection_output_param->mutable_nms_param()->set_nms_threshold( + this->nms_threshold_); + DetectionOutputLayer layer(layer_param); + + this->FillLocData(false); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 11); + EXPECT_EQ(this->blob_top_->width(), 7); + + this->CheckEqual(*(this->blob_top_), 0, "0 0 0.6 0.55 0.55 0.85 0.85"); + this->CheckEqual(*(this->blob_top_), 1, "0 0 0.4 0.15 0.55 0.45 0.85"); + this->CheckEqual(*(this->blob_top_), 2, "0 0 0.2 0.55 0.15 0.85 0.45"); + this->CheckEqual(*(this->blob_top_), 3, "0 0 0.0 0.15 0.15 0.45 0.45"); + this->CheckEqual(*(this->blob_top_), 4, "0 1 1.0 0.20 0.20 0.50 0.50"); + this->CheckEqual(*(this->blob_top_), 5, "0 1 0.8 0.50 0.20 0.80 0.50"); + this->CheckEqual(*(this->blob_top_), 6, "0 1 0.6 0.20 0.50 0.50 0.80"); + this->CheckEqual(*(this->blob_top_), 7, "0 1 0.4 0.50 0.50 0.80 0.80"); + this->CheckEqual(*(this->blob_top_), 8, "1 0 1.0 0.25 0.25 0.55 0.55"); + this->CheckEqual(*(this->blob_top_), 9, "1 0 0.4 0.45 0.45 0.75 0.75"); + this->CheckEqual(*(this->blob_top_), 10, "1 1 0.6 0.40 0.40 0.70 0.70"); +} + +TYPED_TEST(DetectionOutputLayerTest, TestForwardNoShareLocationTopK) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + 
DetectionOutputParameter* detection_output_param = + layer_param.mutable_detection_output_param(); + detection_output_param->set_num_classes(this->num_classes_); + detection_output_param->set_share_location(false); + detection_output_param->set_background_label_id(-1); + detection_output_param->mutable_nms_param()->set_nms_threshold( + this->nms_threshold_); + detection_output_param->mutable_nms_param()->set_top_k(this->top_k_); + DetectionOutputLayer layer(layer_param); + + this->FillLocData(false); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 6); + EXPECT_EQ(this->blob_top_->width(), 7); + + this->CheckEqual(*(this->blob_top_), 0, "0 0 0.6 0.55 0.55 0.85 0.85"); + this->CheckEqual(*(this->blob_top_), 1, "0 0 0.4 0.15 0.55 0.45 0.85"); + this->CheckEqual(*(this->blob_top_), 2, "0 1 1.0 0.20 0.20 0.50 0.50"); + this->CheckEqual(*(this->blob_top_), 3, "0 1 0.8 0.50 0.20 0.80 0.50"); + this->CheckEqual(*(this->blob_top_), 4, "1 0 1.0 0.25 0.25 0.55 0.55"); + this->CheckEqual(*(this->blob_top_), 5, "1 1 0.6 0.40 0.40 0.70 0.70"); +} + +TYPED_TEST(DetectionOutputLayerTest, TestForwardNoShareLocationNeg0) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + DetectionOutputParameter* detection_output_param = + layer_param.mutable_detection_output_param(); + detection_output_param->set_num_classes(this->num_classes_); + detection_output_param->set_share_location(false); + detection_output_param->set_background_label_id(0); + detection_output_param->mutable_nms_param()->set_nms_threshold( + this->nms_threshold_); + DetectionOutputLayer layer(layer_param); + + this->FillLocData(false); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + EXPECT_EQ(this->blob_top_->num(), 1); + 
EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 5); + EXPECT_EQ(this->blob_top_->width(), 7); + + this->CheckEqual(*(this->blob_top_), 0, "0 1 1.0 0.20 0.20 0.50 0.50"); + this->CheckEqual(*(this->blob_top_), 1, "0 1 0.8 0.50 0.20 0.80 0.50"); + this->CheckEqual(*(this->blob_top_), 2, "0 1 0.6 0.20 0.50 0.50 0.80"); + this->CheckEqual(*(this->blob_top_), 3, "0 1 0.4 0.50 0.50 0.80 0.80"); + this->CheckEqual(*(this->blob_top_), 4, "1 1 0.6 0.40 0.40 0.70 0.70"); +} + +TYPED_TEST(DetectionOutputLayerTest, TestForwardNoShareLocationNeg0TopK) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + DetectionOutputParameter* detection_output_param = + layer_param.mutable_detection_output_param(); + detection_output_param->set_num_classes(this->num_classes_); + detection_output_param->set_share_location(false); + detection_output_param->set_background_label_id(0); + detection_output_param->mutable_nms_param()->set_nms_threshold( + this->nms_threshold_); + detection_output_param->mutable_nms_param()->set_top_k(this->top_k_); + DetectionOutputLayer layer(layer_param); + + this->FillLocData(false); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 7); + + this->CheckEqual(*(this->blob_top_), 0, "0 1 1.0 0.20 0.20 0.50 0.50"); + this->CheckEqual(*(this->blob_top_), 1, "0 1 0.8 0.50 0.20 0.80 0.50"); + this->CheckEqual(*(this->blob_top_), 2, "1 1 0.6 0.40 0.40 0.70 0.70"); +} + +} // namespace caffe diff --git a/src/caffe/test/test_dummy_data_layer.cpp b/src/caffe/test/test_dummy_data_layer.cpp index 1a01ca85f89..bb971bea008 100644 --- a/src/caffe/test/test_dummy_data_layer.cpp +++ b/src/caffe/test/test_dummy_data_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel 
Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include diff --git a/src/caffe/test/test_eltwise_layer.cpp b/src/caffe/test/test_eltwise_layer.cpp index c06e3baab15..ab9ef7c01f8 100644 --- a/src/caffe/test/test_eltwise_layer.cpp +++ b/src/caffe/test/test_eltwise_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_embed_layer.cpp b/src/caffe/test/test_embed_layer.cpp index 13f13a878d3..ba6fa3da922 100644 --- a/src/caffe/test/test_embed_layer.cpp +++ b/src/caffe/test/test_embed_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_engine_selection.cpp b/src/caffe/test/test_engine_selection.cpp new file mode 100644 index 00000000000..3156efc89b8 --- /dev/null +++ b/src/caffe/test/test_engine_selection.cpp @@ -0,0 +1,489 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include + +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/engine_parser.hpp" +#include "caffe/filler.hpp" + +#include "caffe/layer.hpp" +#include "caffe/layer_factory.hpp" +#include "caffe/layers/batch_norm_layer.hpp" +#include "caffe/layers/concat_layer.hpp" +#include "caffe/layers/conv_layer.hpp" +#include "caffe/layers/inner_product_layer.hpp" +#include "caffe/layers/lrn_layer.hpp" +#include "caffe/layers/pooling_layer.hpp" +#include "caffe/layers/relu_layer.hpp" +#include "caffe/layers/sigmoid_layer.hpp" +#include "caffe/layers/softmax_layer.hpp" +#include "caffe/layers/tanh_layer.hpp" +#ifdef MKL2017_SUPPORTED +#include "caffe/layers/mkl_layers.hpp" +#endif +#ifdef MKLDNN_SUPPORTED +#include "caffe/layers/mkldnn_layers.hpp" +#endif + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class TestEngineSelection : public 
MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + TestEngineSelection() {} + virtual ~TestEngineSelection() {} + + virtual void InitNetFromProtoString(const string& proto) { + NetParameter param; + CHECK(google::protobuf::TextFormat::ParseFromString(proto, ¶m)); + net_.reset(new Net(param)); + } + + virtual void InitNet(const string& net_engine) { + string proto = + "engine: '" + net_engine + "' " + "layer { " + " name: 'data' " + " type: 'Input' " + " top: 'data' " + " input_param { " + " shape: { dim: 1 dim: 3 dim: 100 dim: 100 } " + " } " + "} " + "layer { " + " name: 'conv1' " + " type: 'Convolution' " + " bottom: 'data' " + " top: 'conv1' " + " convolution_param { " + " num_output: 5 " + " kernel_size: 3 " + " stride: 2 " + " } " + "} " + "layer { " + " name: 'relu1' " + " type: 'ReLU' " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layer { " + " name: 'pool1' " + " type: 'Pooling' " + " bottom: 'conv1' " + " top: 'pool1' " + " pooling_param { " + " pool: MAX " + " kernel_size: 2 " + " stride: 2 " + " } " + "} " + "layer { " + " name: 'norm1' " + " type: 'LRN' " + " bottom: 'pool1' " + " top: 'norm1' " + " lrn_param { " + " local_size: 3 " + " } " + "} " + "layer { " + " name: 'ip1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1 " + " } " + " bottom: 'norm1' " + " top: 'ip1' " + "} " + "layer { " + " name: 'relu2' " + " type: 'ReLU' " + " bottom: 'ip1' " + " top: 'ip1' " + "} " + " layer {" + " bottom: 'ip1'" + " top: 'bn1'" + " name: 'bn1'" + " type: 'BatchNorm'" + " }" + " layer {" + " bottom: 'pool1'" + " bottom: 'norm1'" + " top: 'concat1'" + " name: 'concat1'" + " type: 'Concat'" + " } " + " layer { " + " bottom: 'concat1' " + " bottom: 'concat1' " + " top: 'eltw1' " + " name: 'eltw1' " + " type: 'Eltwise' " + "}" + " layer { " + " bottom: 'eltw1' " + " top: 'split1' " + " name: 'split1' " + " type: 'Split' " + "}"; + + InitNetFromProtoString(proto); + } + + shared_ptr > net_; +}; + 
+TYPED_TEST_CASE(TestEngineSelection, TestDtypesAndDevices); + +TYPED_TEST(TestEngineSelection, TestEngineParser) { + EngineParser ep1("CAFFE"); + EXPECT_TRUE(ep1.isEngine("CAFFE")); + EXPECT_FALSE(ep1.isEngine("MKLDNN")); + EXPECT_FALSE(ep1.isEngine("MKL2017")); + EXPECT_FALSE(ep1.isEngine("CUDNN")); + +#ifdef MKL2017_SUPPORTED + EngineParser ep2("MKL2017"); + EXPECT_FALSE(ep2.isEngine("CAFFE")); + EXPECT_FALSE(ep2.isEngine("MKLDNN")); + EXPECT_TRUE(ep2.isEngine("MKL2017")); + EXPECT_FALSE(ep2.isEngine("CUDNN")); +#endif + +#ifdef MKLDNN_SUPPORTED + EngineParser ep3("MKLDNN:CPU,FPGA,DLA"); + EXPECT_FALSE(ep3.isEngine("CAFFE")); + EXPECT_TRUE(ep3.isEngine("MKLDNN")); + EXPECT_FALSE(ep3.isEngine("MKL2017")); + EXPECT_FALSE(ep3.isEngine("CUDNN")); + + EXPECT_EQ(3, ep3.getNumberOfSubEngines()); + + EXPECT_EQ(&ep3.getMKLDNNSubEngine(0), &CpuEngine::Instance().get_engine()); +#ifdef FPGA_ENABLED + EXPECT_EQ(&ep3.getMKLDNNSubEngine(1), &FPGAEngine::Instance().get_engine()); +#endif +#ifdef DLA_ENABLED + EXPECT_EQ(&ep3.getMKLDNNSubEngine(2), &DLAEngine::Instance().get_engine()); +#endif + EngineParser ep4("MKLDNN:FPGA,CPU,FPGA"); + EXPECT_FALSE(ep4.isEngine("CAFFE")); + EXPECT_TRUE(ep4.isEngine("MKLDNN")); + EXPECT_FALSE(ep4.isEngine("MKL2017")); + EXPECT_FALSE(ep4.isEngine("CUDNN")); + + EXPECT_EQ(3, ep4.getNumberOfSubEngines()); + + EXPECT_EQ(&ep4.getMKLDNNSubEngine(1), &CpuEngine::Instance().get_engine()); + +#ifdef FPGA_ENABLED + EXPECT_EQ(&ep4.getMKLDNNSubEngine(0), &FPGAEngine::Instance().get_engine()); + EXPECT_EQ(&ep4.getMKLDNNSubEngine(2), &FPGAEngine::Instance().get_engine()); +#endif + +#endif // #ifdef MKLDNN_SUPPORTED + +#ifdef USE_CUDNN + EngineParser ep5("CUDNN"); + EXPECT_FALSE(ep5.isEngine("CAFFE")); + EXPECT_FALSE(ep5.isEngine("MKLDNN")); + EXPECT_FALSE(ep5.isEngine("MKL2017")); + EXPECT_TRUE(ep5.isEngine("CUDNN")); +#endif +} + +TYPED_TEST(TestEngineSelection, TestEngineParserNetCAFFE) { + typedef typename TypeParam::Dtype Dtype; + + void* null_ptr = 
NULL; + this->InitNet("CAFFE"); + Net* net = this->net_.get(); + + // conv1 verification + Layer* conv1_layer = net->layer_by_name("conv1").get(); + ConvolutionLayer* conv1_caffe = + dynamic_cast* >(conv1_layer); + EXPECT_NE(null_ptr, conv1_caffe); + +#ifdef MKL2017_SUPPORTED + MKLConvolutionLayer* conv1_mkl = + dynamic_cast* >(conv1_layer); + EXPECT_EQ(null_ptr, conv1_mkl); +#endif +#ifdef MKLDNN_SUPPORTED + MKLDNNConvolutionLayer* conv1_mkldnn = + dynamic_cast* >(conv1_layer); + EXPECT_EQ(null_ptr, conv1_mkldnn); +#endif + // relu1 verification + Layer* relu1_layer = net->layer_by_name("relu1").get(); + ReLULayer* relu1_caffe = + dynamic_cast* >(relu1_layer); + EXPECT_NE(null_ptr, relu1_caffe); + + // relu2 verification + Layer* relu2_layer = net->layer_by_name("relu2").get(); + ReLULayer* relu2_caffe = + dynamic_cast* >(relu2_layer); + EXPECT_NE(null_ptr, relu2_caffe); + + // pool1 verification + Layer* pool1_layer = net->layer_by_name("pool1").get(); + PoolingLayer* pool1_caffe = + dynamic_cast* >(pool1_layer); + EXPECT_NE(null_ptr, pool1_caffe); + + // norm1 verification + Layer* norm1_layer = net->layer_by_name("norm1").get(); + LRNLayer* norm1_caffe = + dynamic_cast* >(norm1_layer); + EXPECT_NE(null_ptr, norm1_caffe); + + // ip1 verification + Layer* ip1_layer = net->layer_by_name("ip1").get(); + InnerProductLayer* ip1_caffe = + dynamic_cast* >(ip1_layer); + EXPECT_NE(null_ptr, ip1_caffe); + + // bn1 verification + Layer* bn1_layer = net->layer_by_name("bn1").get(); + BatchNormLayer* bn1_caffe = + dynamic_cast* >(bn1_layer); + EXPECT_NE(null_ptr, bn1_caffe); + + // concat1 verification + Layer* concat1_layer = net->layer_by_name("concat1").get(); + ConcatLayer* concat1_caffe = + dynamic_cast* >(concat1_layer); + EXPECT_NE(null_ptr, concat1_caffe); + + // eltw1 verification + Layer* eltw1_layer = net->layer_by_name("eltw1").get(); + EltwiseLayer* eltw1_caffe = + dynamic_cast* >(eltw1_layer); + EXPECT_NE(null_ptr, eltw1_caffe); + + // Do all the automatically 
inserted splits have correct engine? + const vector > >& layers = net->layers(); + for (int i = 0; i < layers.size(); i++) { + if (layers[i]->layer_param().type() == "Split") { + string name = layers[i]->layer_param().name(); + Layer* split_layer = net->layer_by_name(name).get(); + SplitLayer* split_caffe = + dynamic_cast* >(split_layer); + EXPECT_NE(null_ptr, split_caffe); + } + } +} + +#ifdef MKL2017_SUPPORTED +TYPED_TEST(TestEngineSelection, TestEngineParserNetMKL2017) { + typedef typename TypeParam::Dtype Dtype; + + void* null_ptr = NULL; + this->InitNet("MKL2017"); + Net* net = this->net_.get(); + + // conv1 verification + Layer* conv1_layer = net->layer_by_name("conv1").get(); + MKLConvolutionLayer* conv1_mkl = + dynamic_cast* >(conv1_layer); + EXPECT_NE(null_ptr, conv1_mkl); + + // ConvolutionLayer is a base for MKLConvolutionLayer, so this is not nullptr + ConvolutionLayer* conv1_caffe = + dynamic_cast* >(conv1_layer); + EXPECT_NE(null_ptr, conv1_caffe); + + // relu1 verification + Layer* relu1_layer = net->layer_by_name("relu1").get(); + MKLReLULayer* relu1_mkl = + dynamic_cast* >(relu1_layer); + EXPECT_NE(null_ptr, relu1_mkl); + + // relu2 verification + Layer* relu2_layer = net->layer_by_name("relu2").get(); + MKLReLULayer* relu2_mkl = + dynamic_cast* >(relu2_layer); + EXPECT_NE(null_ptr, relu2_mkl); + + // pool1 verification + Layer* pool1_layer = net->layer_by_name("pool1").get(); + MKLPoolingLayer* pool1_mkl = + dynamic_cast* >(pool1_layer); + EXPECT_NE(null_ptr, pool1_mkl); + + // norm1 verification + Layer* norm1_layer = net->layer_by_name("norm1").get(); + MKLLRNLayer* norm1_mkl = + dynamic_cast* >(norm1_layer); + EXPECT_NE(null_ptr, norm1_mkl); + + // ip1 verification + Layer* ip1_layer = net->layer_by_name("ip1").get(); + InnerProductLayer* ip1_caffe = + dynamic_cast* >(ip1_layer); + EXPECT_NE(null_ptr, ip1_caffe); + + // bn1 verification + Layer* bn1_layer = net->layer_by_name("bn1").get(); + MKLBatchNormLayer* bn1_mkl = + dynamic_cast* 
>(bn1_layer); + EXPECT_NE(null_ptr, bn1_mkl); + + // concat1 verification + Layer* concat1_layer = net->layer_by_name("concat1").get(); + MKLConcatLayer* concat1_mkl = + dynamic_cast* >(concat1_layer); + EXPECT_NE(null_ptr, concat1_mkl); + + // eltw1 verification + Layer* eltw1_layer = net->layer_by_name("eltw1").get(); + MKLEltwiseLayer* eltw1_mkl = + dynamic_cast* >(eltw1_layer); + EXPECT_NE(null_ptr, eltw1_mkl); + + // Do all the automatically inserted splits have correct engine? + const vector > >& layers = net->layers(); + for (int i = 0; i < layers.size(); i++) { + if (layers[i]->layer_param().type() == "Split") { + string name = layers[i]->layer_param().name(); + Layer* split_layer = net->layer_by_name(name).get(); + MKLSplitLayer* split_mkl = + dynamic_cast* >(split_layer); + EXPECT_NE(null_ptr, split_mkl); + } + } +} +#endif + +#ifdef MKLDNN_SUPPORTED +TYPED_TEST(TestEngineSelection, TestEngineParserNetMKLDNN) { + typedef typename TypeParam::Dtype Dtype; + + void* null_ptr = NULL; + this->InitNet("MKLDNN:CPU"); + Net* net = this->net_.get(); + + // conv1 verification + Layer* conv1_layer = net->layer_by_name("conv1").get(); + MKLDNNConvolutionLayer* conv1_mkldnn = + dynamic_cast* >(conv1_layer); + EXPECT_NE(null_ptr, conv1_mkldnn); + + // MKLDNNConvolutionLayer is derived from ConvolutionLayer, so this is OK + ConvolutionLayer* conv1_caffe = + dynamic_cast* >(conv1_layer); + EXPECT_NE(null_ptr, conv1_caffe); + + // relu1 verification + Layer* relu1_layer = net->layer_by_name("relu1").get(); + MKLDNNReLULayer* relu1_mkldnn = + dynamic_cast* >(relu1_layer); + EXPECT_EQ(null_ptr, relu1_mkldnn); + + // relu2 verification + Layer* relu2_layer = net->layer_by_name("relu2").get(); + MKLDNNReLULayer* relu2_mkldnn = + dynamic_cast* >(relu2_layer); + EXPECT_NE(null_ptr, relu2_mkldnn); + + // pool1 verification + Layer* pool1_layer = net->layer_by_name("pool1").get(); + MKLDNNPoolingLayer* pool1_mkldnn = + dynamic_cast* >(pool1_layer); + EXPECT_NE(null_ptr, 
pool1_mkldnn); + + // norm1 verification + Layer* norm1_layer = net->layer_by_name("norm1").get(); + MKLDNNLRNLayer* norm1_mkldnn = + dynamic_cast* >(norm1_layer); + EXPECT_NE(null_ptr, norm1_mkldnn); + + // ip1 verification + Layer* ip1_layer = net->layer_by_name("ip1").get(); + MKLDNNInnerProductLayer* ip1_mkldnn = + dynamic_cast* >(ip1_layer); + EXPECT_NE(null_ptr, ip1_mkldnn); + + // bn1 verification + Layer* bn1_layer = net->layer_by_name("bn1").get(); + MKLDNNBatchNormLayer* bn1_mkldnn = + dynamic_cast* >(bn1_layer); + EXPECT_NE(null_ptr, bn1_mkldnn); + + // concat1 verification + Layer* concat1_layer = net->layer_by_name("concat1").get(); + MKLDNNConcatLayer* concat1_mkldnn = + dynamic_cast* >(concat1_layer); + EXPECT_NE(null_ptr, concat1_mkldnn); + + // eltw1 verification + Layer* eltw1_layer = net->layer_by_name("eltw1").get(); + // TODO: Change to MKLDNNEltwiseLayer when eltwise layer support added + MKLDNNEltwiseLayer* eltw1_mkldnn = + dynamic_cast* >(eltw1_layer); + EXPECT_NE(null_ptr, eltw1_mkldnn); + + // Do all the automatically inserted splits have correct engine? + const vector > >& layers = net->layers(); + for (int i = 0; i < layers.size(); i++) { + if (layers[i]->layer_param().type() == "Split") { + string name = layers[i]->layer_param().name(); + Layer* split_layer = net->layer_by_name(name).get(); + MKLDNNSplitLayer* split_caffe = + dynamic_cast* >(split_layer); + EXPECT_NE(null_ptr, split_caffe); + } + } +} + +#endif +} // namespace caffe diff --git a/src/caffe/test/test_euclidean_loss_layer.cpp b/src/caffe/test/test_euclidean_loss_layer.cpp index f253f9fd393..ef45531fd0e 100644 --- a/src/caffe/test/test_euclidean_loss_layer.cpp +++ b/src/caffe/test/test_euclidean_loss_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. 
+ +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include diff --git a/src/caffe/test/test_filler.cpp b/src/caffe/test/test_filler.cpp index 26e9b217e35..88f16ade803 100644 --- a/src/caffe/test/test_filler.cpp +++ b/src/caffe/test/test_filler.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include "gtest/gtest.h" #include "caffe/filler.hpp" @@ -190,6 +227,33 @@ TYPED_TEST(XavierFillerTest, TestFillAverage) { } template +class GaborFillerTest : public ::testing::Test { + protected: + GaborFillerTest() + : blob_(new Blob(96, 3, 11, 11)), + filler_param_() { + } + virtual void test_no_crash() { + this->filler_.reset(new GaborFiller(this->filler_param_)); + this->filler_->Fill(blob_); + EXPECT_TRUE(this->blob_); + // const int count = this->blob_->count(); + // const Dtype* data = this->blob_->cpu_data(); + } + + virtual ~GaborFillerTest() { delete blob_; } + Blob* const blob_; + FillerParameter filler_param_; + shared_ptr > filler_; +}; + +TYPED_TEST_CASE(GaborFillerTest, TestDtypes); + +TYPED_TEST(GaborFillerTest, TestNoCrash) { + this->test_no_crash(); +} + +template class MSRAFillerTest : public ::testing::Test { protected: MSRAFillerTest() diff --git a/src/caffe/test/test_filter_layer.cpp b/src/caffe/test/test_filter_layer.cpp index 9ea2b8b2168..094a5844451 100644 --- a/src/caffe/test/test_filter_layer.cpp +++ b/src/caffe/test/test_filter_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_flatten_layer.cpp b/src/caffe/test/test_flatten_layer.cpp index d929ac7a720..2eaf865e137 100644 --- a/src/caffe/test/test_flatten_layer.cpp +++ b/src/caffe/test/test_flatten_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_float_compare.cpp b/src/caffe/test/test_float_compare.cpp new file mode 100644 index 00000000000..313b6c85850 --- /dev/null +++ b/src/caffe/test/test_float_compare.cpp @@ -0,0 +1,106 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include + +#include "gtest/gtest.h" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/util/float_compare.hpp" + +namespace caffe { + + class FloatCompareTest : public ::testing::Test {}; + + TEST_F(FloatCompareTest, TestCompareFloatsNans) { + float a = std::nanf(""), b = std::nanf(""); + float epsilon = 1.0e-3f; + float diff = caffe::floatDiff(a, b, epsilon); + EXPECT_TRUE(std::isnan(diff)); + } + + TEST_F(FloatCompareTest, TestCompareFloatsFiniteAndNan) { + float a = std::nanf(""), b = 1.12345f; + float epsilon = 1.0e-3f; + float diff = caffe::floatDiff(a, b, epsilon); + EXPECT_TRUE(std::isnan(diff)); + } + + TEST_F(FloatCompareTest, TestCompareFloatsInfinity) { + float a = std::numeric_limits::infinity(), + b = std::numeric_limits::infinity(); + float epsilon = 1.0e-3f; + float diff = caffe::floatDiff(a, b, epsilon); + EXPECT_TRUE(std::isnan(diff)); + } + + TEST_F(FloatCompareTest, TestCompareFloatsBigNegative) { + float a = 10000.f, epsilon = 1.0e-3f; + float b = boost::math::float_next(boost::math::float_next(a)); + float diff = caffe::floatDiff(a, b, epsilon); + EXPECT_NEAR(diff, 0.00195313f, 0.00000001f); + } + + TEST_F(FloatCompareTest, TestCompareFloatsBigPositive) { + float a = 
10000.f, epsilon = 1.0e-3f; + float b = boost::math::float_next(a); + EXPECT_EQ(caffe::floatDiff(a, b, epsilon), FP_ZERO); + } + + TEST_F(FloatCompareTest, TestCompareFloatsSmallPositive) { + float a = 0.2304f, b = 0.2306f, epsilon = 1.0e-3f; + EXPECT_EQ(caffe::floatDiff(a, b, epsilon), FP_ZERO); + } + + TEST_F(FloatCompareTest, TestCompareFloatsSmallNegative) { + float a = 0.12f, b = 0.121f, epsilon = 1.0e-3f; + float diff = caffe::floatDiff(a, b, epsilon); + EXPECT_NEAR(diff, 0.001f, 0.0001f); + } + + TEST_F(FloatCompareTest, TestCompareFloatsNearZeroDifferentSigns) { + float a = -0.2304f, b = 0.2314f, epsilon = 1.0e-3f; + float diff = caffe::floatDiff(a, b, epsilon); + EXPECT_NEAR(diff, 0.4618, 0.0001f); + } + + TEST_F(FloatCompareTest, TestCompareFloatsDifferentSigns) { + float a = -1.f, b = 1.f, epsilon = 1.0e-3f; + float diff = caffe::floatDiff(a, b, epsilon); + EXPECT_NEAR(diff, 2.f, epsilon); + } +} // namespace caffe diff --git a/src/caffe/test/test_gradient_based_solver.cpp b/src/caffe/test/test_gradient_based_solver.cpp index 975a8f0f88a..cc16ab1f380 100644 --- a/src/caffe/test/test_gradient_based_solver.cpp +++ b/src/caffe/test/test_gradient_based_solver.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include #include diff --git a/src/caffe/test/test_hdf5_output_layer.cpp b/src/caffe/test/test_hdf5_output_layer.cpp index 3833ebff78e..2e49dd86aff 100644 --- a/src/caffe/test/test_hdf5_output_layer.cpp +++ b/src/caffe/test/test_hdf5_output_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include @@ -103,10 +140,10 @@ TYPED_TEST(HDF5OutputLayerTest, TestForward) { file_id, 0)<< "Failed to open HDF5 file" << this->input_file_name_; - Blob* blob_data = new Blob(); + Blob blob_data; hdf5_load_nd_dataset(file_id, HDF5_DATA_DATASET_NAME, 0, 4, - blob_data); - this->CheckBlobEqual(*(this->blob_data_), *blob_data); + &blob_data); + this->CheckBlobEqual(*(this->blob_data_), blob_data); Blob* blob_label = new Blob(); hdf5_load_nd_dataset(file_id, HDF5_DATA_LABEL_NAME, 0, 4, @@ -116,6 +153,8 @@ TYPED_TEST(HDF5OutputLayerTest, TestForward) { status = H5Fclose(file_id); EXPECT_GE(status, 0) << "Failed to close HDF5 file " << this->output_file_name_; + + delete blob_label; } } // namespace caffe diff --git a/src/caffe/test/test_hdf5data_layer.cpp b/src/caffe/test/test_hdf5data_layer.cpp index 8884ce95a23..eb543199275 100644 --- a/src/caffe/test/test_hdf5data_layer.cpp +++ b/src/caffe/test/test_hdf5data_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_hinge_loss_layer.cpp b/src/caffe/test/test_hinge_loss_layer.cpp index 8bf89fa6387..d716ee6e206 100644 --- a/src/caffe/test/test_hinge_loss_layer.cpp +++ b/src/caffe/test/test_hinge_loss_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_im2col_layer.cpp b/src/caffe/test/test_im2col_layer.cpp index a7faf18f972..58bfd653c0c 100644 --- a/src/caffe/test/test_im2col_layer.cpp +++ b/src/caffe/test/test_im2col_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_im_transforms.cpp b/src/caffe/test/test_im_transforms.cpp new file mode 100644 index 00000000000..0d303acca88 --- /dev/null +++ b/src/caffe/test/test_im_transforms.cpp @@ -0,0 +1,134 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include "gtest/gtest.h" + +#include "caffe/common.hpp" +#include "caffe/util/im_transforms.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +static const float eps = 1e-6; + +class ImTransformsTest : public CPUDeviceTest { +}; + +TEST_F(ImTransformsTest, TestUpdateBBoxByResizePolicy) { + NormalizedBBox bbox; + bbox.set_xmin(0.1); + bbox.set_ymin(0.3); + bbox.set_xmax(0.3); + bbox.set_ymax(0.6); + int img_height = 600; + int img_width = 1000; + ResizeParameter resize_param; + resize_param.set_height(300); + resize_param.set_width(300); + NormalizedBBox out_bbox; + + // Test warp. + out_bbox = bbox; + resize_param.set_resize_mode(ResizeParameter_Resize_mode_WARP); + UpdateBBoxByResizePolicy(resize_param, img_width, img_height, &out_bbox); + EXPECT_NEAR(out_bbox.xmin(), 0.1, eps); + EXPECT_NEAR(out_bbox.ymin(), 0.3, eps); + EXPECT_NEAR(out_bbox.xmax(), 0.3, eps); + EXPECT_NEAR(out_bbox.ymax(), 0.6, eps); + + // Test fit small size. + out_bbox = bbox; + resize_param.set_resize_mode(ResizeParameter_Resize_mode_FIT_SMALL_SIZE); + UpdateBBoxByResizePolicy(resize_param, img_width, img_height, &out_bbox); + EXPECT_NEAR(out_bbox.xmin(), 0.1, eps); + EXPECT_NEAR(out_bbox.ymin(), 0.3, eps); + EXPECT_NEAR(out_bbox.xmax(), 0.3, eps); + EXPECT_NEAR(out_bbox.ymax(), 0.6, eps); + + // Test fit large size and pad. 
+ out_bbox = bbox; + resize_param.set_resize_mode( + ResizeParameter_Resize_mode_FIT_LARGE_SIZE_AND_PAD); + UpdateBBoxByResizePolicy(resize_param, img_width, img_height, &out_bbox); + EXPECT_NEAR(out_bbox.xmin(), 0.1, eps); + EXPECT_NEAR(out_bbox.ymin(), (180 * 0.3 + 60) / 300, eps); + EXPECT_NEAR(out_bbox.xmax(), 0.3, eps); + EXPECT_NEAR(out_bbox.ymax(), (180 * 0.6 + 60) / 300, eps); + + /*** Reverse the image size. ***/ + img_height = 1000; + img_width = 600; + + // Test fit large size and pad. + out_bbox = bbox; + resize_param.set_resize_mode( + ResizeParameter_Resize_mode_FIT_LARGE_SIZE_AND_PAD); + UpdateBBoxByResizePolicy(resize_param, img_width, img_height, &out_bbox); + EXPECT_NEAR(out_bbox.xmin(), (180 * 0.1 + 60) / 300, eps); + EXPECT_NEAR(out_bbox.ymin(), 0.3, eps); + EXPECT_NEAR(out_bbox.xmax(), (180 * 0.3 + 60) / 300, eps); + EXPECT_NEAR(out_bbox.ymax(), 0.6, eps); +} + +#ifdef USE_OPENCV +TEST_F(ImTransformsTest, TestApplyResize) { + cv::Mat in_img(60, 100, CV_8UC3); + cv::Mat out_img; + ResizeParameter resize_param; + resize_param.set_height(30); + resize_param.set_width(30); + + resize_param.set_resize_mode(ResizeParameter_Resize_mode_WARP); + out_img = ApplyResize(in_img, resize_param); + CHECK_EQ(out_img.cols, 30); + CHECK_EQ(out_img.rows, 30); + + resize_param.set_resize_mode(ResizeParameter_Resize_mode_FIT_SMALL_SIZE); + out_img = ApplyResize(in_img, resize_param); + CHECK_EQ(out_img.cols, 50); + CHECK_EQ(out_img.rows, 30); + + resize_param.set_resize_mode( + ResizeParameter_Resize_mode_FIT_LARGE_SIZE_AND_PAD); + out_img = ApplyResize(in_img, resize_param); + CHECK_EQ(out_img.cols, 30); + CHECK_EQ(out_img.rows, 30); +} +#endif // USE_OPENCV + +} // namespace caffe diff --git a/src/caffe/test/test_image_data_layer.cpp b/src/caffe/test/test_image_data_layer.cpp index ce5e0bc62d6..a6400e5dbcc 100644 --- a/src/caffe/test/test_image_data_layer.cpp +++ b/src/caffe/test/test_image_data_layer.cpp @@ -1,4 +1,42 @@ +/* +All modification made by Intel 
Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifdef USE_OPENCV +#include #include #include #include @@ -25,6 +63,7 @@ class ImageDataLayerTest : public MultiDeviceTest { : seed_(1701), blob_top_data_(new Blob()), blob_top_label_(new Blob()) {} + virtual void SetUp() { blob_top_vec_.push_back(blob_top_data_); blob_top_vec_.push_back(blob_top_label_); @@ -71,8 +110,47 @@ class ImageDataLayerTest : public MultiDeviceTest { TYPED_TEST_CASE(ImageDataLayerTest, TestDtypesAndDevices); + +template +static void write_blob_to_file(const std::string& file_name, + const Blob& blob) { + std::ofstream file(file_name.c_str(), std::ios::out | std::ios::binary); + if (file.fail()) { + ASSERT_FALSE(true); + return; + } + file.write(reinterpret_cast(&blob.shape()[0]), 4 * sizeof(int)); + ASSERT_FALSE(file.fail()); + file.write(reinterpret_cast(blob.cpu_data()), + blob.count() * sizeof(Dtype)); + ASSERT_FALSE(file.fail()); + file.close(); +} +template +static void read_blob_from_file(const std::string& file_name, + Blob& blob) { + std::ifstream file(file_name.c_str(), std::ifstream::binary); + if (file.fail()) { + ASSERT_FALSE(true); + return; + } + vector shape(4, 0); + file.read(reinterpret_cast(&shape[0]), 4 * sizeof(int)); + ASSERT_FALSE(file.fail()); + blob.Reshape(shape); + file.read(reinterpret_cast(blob.mutable_cpu_data()), + blob.count() * sizeof(Dtype)); + ASSERT_FALSE(file.fail()); +} + +// #define GENERATE_IDL_TEST_DATA + TYPED_TEST(ImageDataLayerTest, TestRead) { typedef typename TypeParam::Dtype Dtype; + std::string file_name = std::string(EXAMPLES_SOURCE_DIR)+ + std::string("test_blobs/ImageDataLayerTest_TestRead_")+ + std::string(typeid(Dtype).name())+std::string(".blob"); + LayerParameter param; ImageDataParameter* image_data_param = param.mutable_image_data_param(); image_data_param->set_batch_size(5); @@ -89,16 +167,28 @@ TYPED_TEST(ImageDataLayerTest, TestRead) { EXPECT_EQ(this->blob_top_label_->height(), 1); EXPECT_EQ(this->blob_top_label_->width(), 1); // Go through the data twice - for (int iter = 0; 
iter < 2; ++iter) { + for (int iter = 0; iter < 2; ++iter) { layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); for (int i = 0; i < 5; ++i) { EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); } + +#ifdef GENERATE_IDL_TEST_DATA + write_blob_to_file(file_name, *this->blob_top_data_); +#endif + Blob tmp_blob; + read_blob_from_file(file_name, tmp_blob); + EXPECT_EQ(0, memcmp(this->blob_top_data_->cpu_data(), tmp_blob.cpu_data(), + sizeof(Dtype)*this->blob_top_data_->count())); } } TYPED_TEST(ImageDataLayerTest, TestResize) { typedef typename TypeParam::Dtype Dtype; + std::string file_name = std::string(EXAMPLES_SOURCE_DIR) + + std::string("test_blobs/ImageDataLayerTest_TestResize_") + +std::string(typeid(Dtype).name())+std::string(".blob"); + LayerParameter param; ImageDataParameter* image_data_param = param.mutable_image_data_param(); image_data_param->set_batch_size(5); @@ -122,6 +212,13 @@ TYPED_TEST(ImageDataLayerTest, TestResize) { for (int i = 0; i < 5; ++i) { EXPECT_EQ(i, this->blob_top_label_->cpu_data()[i]); } +#ifdef GENERATE_IDL_TEST_DATA + write_blob_to_file(file_name, *this->blob_top_data_); +#endif + Blob tmp_blob; + read_blob_from_file(file_name, tmp_blob); + EXPECT_EQ(0, memcmp(this->blob_top_data_->cpu_data(), tmp_blob.cpu_data(), + sizeof(Dtype)*this->blob_top_data_->count())); } } @@ -154,6 +251,10 @@ TYPED_TEST(ImageDataLayerTest, TestReshape) { TYPED_TEST(ImageDataLayerTest, TestShuffle) { typedef typename TypeParam::Dtype Dtype; + std::string file_name = std::string(EXAMPLES_SOURCE_DIR) + + std::string("test_blobs/ImageDataLayerTest_TestShuffle_")+ + std::string(typeid(Dtype).name())+std::string(".blob"); + LayerParameter param; ImageDataParameter* image_data_param = param.mutable_image_data_param(); image_data_param->set_batch_size(5); @@ -183,6 +284,14 @@ TYPED_TEST(ImageDataLayerTest, TestShuffle) { } EXPECT_EQ(5, values_to_indices.size()); EXPECT_GT(5, num_in_order); + +#ifdef GENERATE_IDL_TEST_DATA + write_blob_to_file(file_name, 
*this->blob_top_data_); +#endif + Blob tmp_blob; + read_blob_from_file(file_name, tmp_blob); + EXPECT_EQ(0, memcmp(this->blob_top_data_->cpu_data(), tmp_blob.cpu_data(), + sizeof(Dtype)*this->blob_top_data_->count())); } } diff --git a/src/caffe/test/test_infogain_loss_layer.cpp b/src/caffe/test/test_infogain_loss_layer.cpp index a24ac683dc5..8bcca95a742 100644 --- a/src/caffe/test/test_infogain_loss_layer.cpp +++ b/src/caffe/test/test_infogain_loss_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_inner_product_layer.cpp b/src/caffe/test/test_inner_product_layer.cpp index f1ec2333fae..278b1308bb7 100644 --- a/src/caffe/test/test_inner_product_layer.cpp +++ b/src/caffe/test/test_inner_product_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" @@ -33,7 +70,8 @@ class InnerProductLayerTest : public MultiDeviceTest { virtual ~InnerProductLayerTest() { delete blob_bottom_; delete blob_bottom_nobatch_; - delete blob_top_; + std::for_each(this->blob_top_vec_.begin(), this->blob_top_vec_.end(), + [](Blob* pPtr) {delete pPtr; }); } Blob* const blob_bottom_; Blob* const blob_bottom_nobatch_; @@ -168,6 +206,8 @@ TYPED_TEST(InnerProductLayerTest, TestForwardTranspose) { Blob* const top = new Blob(); top->ReshapeLike(*this->blob_top_); caffe_copy(count, this->blob_top_->cpu_data(), top->mutable_cpu_data()); + std::for_each(this->blob_top_vec_.begin(), this->blob_top_vec_.end(), + [](Blob* pPtr) {delete pPtr; }); this->blob_top_vec_.clear(); this->blob_top_vec_.push_back(new Blob()); inner_product_param->set_transpose(true); @@ -191,7 +231,7 @@ TYPED_TEST(InnerProductLayerTest, TestForwardTranspose) { caffe_copy(layer->blobs()[1]->count(), layer->blobs()[1]->cpu_data(), ip_t->blobs()[1]->mutable_cpu_data()); ip_t->Forward(this->blob_bottom_vec_, this->blob_top_vec_); - EXPECT_EQ(count, this->blob_top_->count()) + EXPECT_EQ(count, this->blob_top_vec_[0]->count()) << "Invalid count for top blob for IP with transpose."; Blob* 
const top_t = new Blob();\ top_t->ReshapeLike(*this->blob_top_vec_[0]); @@ -203,6 +243,8 @@ TYPED_TEST(InnerProductLayerTest, TestForwardTranspose) { for (int i = 0; i < count; ++i) { EXPECT_FLOAT_EQ(data[i], data_t[i]); } + delete top; + delete top_t; } else { LOG(ERROR) << "Skipping test due to old architecture."; } @@ -340,6 +382,8 @@ TYPED_TEST(InnerProductLayerTest, TestBackwardTranspose) { Blob* const bottom_diff = new Blob(); bottom_diff->CopyFrom(*this->blob_bottom_vec_[0], true, true); // repeat original top with tranposed ip + std::for_each(this->blob_top_vec_.begin(), this->blob_top_vec_.end(), + [](Blob* pPtr) {delete pPtr; }); this->blob_top_vec_.clear(); this->blob_top_vec_.push_back(new Blob()); inner_product_param->set_transpose(true); @@ -383,6 +427,10 @@ TYPED_TEST(InnerProductLayerTest, TestBackwardTranspose) { EXPECT_NE(Dtype(0.), data[i]); EXPECT_FLOAT_EQ(data[i], data_t[i]); } + delete bottom_diff; + delete diff; + delete w; + delete top; } else { LOG(ERROR) << "Skipping test due to old architecture."; } diff --git a/src/caffe/test/test_internal_thread.cpp b/src/caffe/test/test_internal_thread.cpp index 93f1cc541cd..f034e886444 100644 --- a/src/caffe/test/test_internal_thread.cpp +++ b/src/caffe/test/test_internal_thread.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include "glog/logging.h" #include "gtest/gtest.h" diff --git a/src/caffe/test/test_io.cpp b/src/caffe/test/test_io.cpp index c2c919e90dc..70413310adb 100644 --- a/src/caffe/test/test_io.cpp +++ b/src/caffe/test/test_io.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifdef USE_OPENCV #include #include diff --git a/src/caffe/test/test_layer_factory.cpp b/src/caffe/test/test_layer_factory.cpp index 7d5d39d8b91..edd22549f71 100644 --- a/src/caffe/test/test_layer_factory.cpp +++ b/src/caffe/test/test_layer_factory.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include @@ -30,7 +67,7 @@ TYPED_TEST(LayerFactoryTest, TestCreateLayer) { if (iter->first == "Python") { continue; } LayerParameter layer_param; // Data layers expect a DB - if (iter->first == "Data") { + if (iter->first == "Data" || iter->first == "AnnotatedData") { #ifdef USE_LEVELDB string tmp; MakeTempDir(&tmp); diff --git a/src/caffe/test/test_lrn_layer.cpp b/src/caffe/test/test_lrn_layer.cpp index 4c97b1ae07b..ec2587b3d06 100644 --- a/src/caffe/test/test_lrn_layer.cpp +++ b/src/caffe/test/test_lrn_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_lstm_layer.cpp b/src/caffe/test/test_lstm_layer.cpp index 51905baafac..d51e81cf2d2 100644 --- a/src/caffe/test/test_lstm_layer.cpp +++ b/src/caffe/test/test_lstm_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_math_functions.cpp b/src/caffe/test/test_math_functions.cpp index efc5a2784eb..2bd8a476c69 100644 --- a/src/caffe/test/test_math_functions.cpp +++ b/src/caffe/test/test_math_functions.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include // for uint32_t & uint64_t #include #include // for std::fabs diff --git a/src/caffe/test/test_maxpool_dropout_layers.cpp b/src/caffe/test/test_maxpool_dropout_layers.cpp index 4f0e20ac3a7..3d9b07a80fd 100644 --- a/src/caffe/test/test_maxpool_dropout_layers.cpp +++ b/src/caffe/test/test_maxpool_dropout_layers.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_memory_data_layer.cpp b/src/caffe/test/test_memory_data_layer.cpp index 7998bc18262..e446e6d12e2 100644 --- a/src/caffe/test/test_memory_data_layer.cpp +++ b/src/caffe/test/test_memory_data_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifdef USE_OPENCV #include #endif // USE_OPENCV diff --git a/src/caffe/test/test_mkl_batch_norm_layer.cpp b/src/caffe/test/test_mkl_batch_norm_layer.cpp new file mode 100644 index 00000000000..11df2b3e9c5 --- /dev/null +++ b/src/caffe/test/test_mkl_batch_norm_layer.cpp @@ -0,0 +1,177 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#if defined(MKL2017_SUPPORTED) +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/batch_norm_layer.hpp" +#include "caffe/layers/mkl_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#define BATCH_SIZE 2 +#define INPUT_DATA_SIZE 3 + +namespace caffe { + + template + class MKLBatchNormLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: + MKLBatchNormLayerTest() + : blob_bottom_(new Blob(5, 2, 3, 4)), + blob_top_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~MKLBatchNormLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> 
blob_top_vec_; + }; + + typedef ::testing::Types, + CPUDevice > TestDtypesCPU; + TYPED_TEST_CASE(MKLBatchNormLayerTest, TestDtypesCPU); + + TYPED_TEST(MKLBatchNormLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + + MKLBatchNormLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + // Test mean + int num = this->blob_bottom_->num(); + int channels = this->blob_bottom_->channels(); + int height = this->blob_bottom_->height(); + int width = this->blob_bottom_->width(); + + for (int j = 0; j < channels; ++j) { + Dtype sum = 0, var = 0; + for (int i = 0; i < num; ++i) { + for ( int k = 0; k < height; ++k ) { + for ( int l = 0; l < width; ++l ) { + Dtype data = this->blob_top_->data_at(i, j, k, l); + sum += data; + var += data * data; + } + } + } + sum /= height * width * num; + var /= height * width * num; + + const Dtype kErrorBound = 0.001; + // expect zero mean + EXPECT_NEAR(0, sum, kErrorBound); + // expect unit variance + EXPECT_NEAR(1, var, kErrorBound); + } + } + + TYPED_TEST(MKLBatchNormLayerTest, TestForwardInplace) { + typedef typename TypeParam::Dtype Dtype; + Blob blob_inplace(5, 2, 3, 4); + vector*> blob_bottom_vec; + vector*> blob_top_vec; + LayerParameter layer_param; + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(&blob_inplace); + blob_bottom_vec.push_back(&blob_inplace); + blob_top_vec.push_back(&blob_inplace); + + MKLBatchNormLayer layer(layer_param); + layer.SetUp(blob_bottom_vec, blob_top_vec); + layer.Forward(blob_bottom_vec, blob_top_vec); + + // Test mean + int num = blob_inplace.num(); + int channels = blob_inplace.channels(); + int height = blob_inplace.height(); + int width = blob_inplace.width(); + + for (int j = 0; j < channels; ++j) { + Dtype sum = 0, var = 0; + for (int i = 0; i < num; ++i) { + for ( int k = 0; k < height; ++k ) { + for ( int l = 0; l < 
width; ++l ) { + Dtype data = blob_inplace.data_at(i, j, k, l); + sum += data; + var += data * data; + } + } + } + sum /= height * width * num; + var /= height * width * num; + + const Dtype kErrorBound = 0.001; + // expect zero mean + EXPECT_NEAR(0, sum, kErrorBound); + // expect unit variance + EXPECT_NEAR(1, var, kErrorBound); + } + } + + TYPED_TEST(MKLBatchNormLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + + MKLBatchNormLayer layer(layer_param); + //Threshold for the test was changed from 1e-4 to 2e-4 + //due to FMA rounding error in MKL batch normalization. + GradientChecker checker(1e-2, 2e-4); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + +} // namespace caffe +#endif // #if defined(MKL2017_SUPPORTED) diff --git a/src/caffe/test/test_mkl_concat_layer.cpp b/src/caffe/test/test_mkl_concat_layer.cpp new file mode 100644 index 00000000000..df15f7ec251 --- /dev/null +++ b/src/caffe/test/test_mkl_concat_layer.cpp @@ -0,0 +1,195 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#if defined(MKL2017_SUPPORTED) +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/mkl_layers.hpp" +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class MKLConcatLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MKLConcatLayerTest() + : blob_bottom_0_(new Blob(2, 3, 6, 5)), + blob_bottom_1_(new Blob(2, 5, 6, 5)), + blob_bottom_2_(new Blob(2, 7, 6, 5)), + blob_top_(new Blob()) {} + virtual void SetUp() { + // fill the values + shared_ptr > filler; + FillerParameter filler_param; + filler_param.set_value(1.); + filler.reset(new ConstantFiller(filler_param)); + filler->Fill(this->blob_bottom_0_); + filler_param.set_value(2.); + filler.reset(new ConstantFiller(filler_param)); + filler->Fill(this->blob_bottom_1_); + filler_param.set_value(3.); + filler.reset(new ConstantFiller(filler_param)); + 
filler->Fill(this->blob_bottom_2_); + blob_bottom_vec_0_.push_back(blob_bottom_0_); + blob_bottom_vec_0_.push_back(blob_bottom_1_); + blob_bottom_vec_1_.push_back(blob_bottom_0_); + blob_bottom_vec_1_.push_back(blob_bottom_2_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~MKLConcatLayerTest() { + delete blob_bottom_0_; delete blob_bottom_1_; + delete blob_bottom_2_; delete blob_top_; + } + + Blob* const blob_bottom_0_; + Blob* const blob_bottom_1_; + Blob* const blob_bottom_2_; + Blob* const blob_top_; + vector*> blob_bottom_vec_0_, blob_bottom_vec_1_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types, + CPUDevice > TestDtypesCPU; +TYPED_TEST_CASE(MKLConcatLayerTest, TestDtypesCPU); + +TYPED_TEST(MKLConcatLayerTest, TestSetupChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLConcatLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0_->num()); + EXPECT_EQ(this->blob_top_->channels(), + this->blob_bottom_0_->channels() + this->blob_bottom_1_->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0_->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); +} + +TYPED_TEST(MKLConcatLayerTest, TestSetupChannelsNegativeIndexing) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLConcatLayer layer(layer_param); + // "channels" index is the third one from the end -- test negative indexing + // by setting axis to -3 and checking that we get the same results as above in + // TestSetupChannels. 
+ layer_param.mutable_concat_param()->set_axis(-3); + layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0_->num()); + EXPECT_EQ(this->blob_top_->channels(), + this->blob_bottom_0_->channels() + this->blob_bottom_1_->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0_->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); +} + +TYPED_TEST(MKLConcatLayerTest, TestForwardTrivial) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLConcatLayer layer(layer_param); + this->blob_bottom_vec_0_.resize(1); + layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_0_, this->blob_top_vec_); + for (int i = 0; i < this->blob_bottom_0_->count(); ++i) { + EXPECT_EQ(this->blob_bottom_0_->cpu_data()[i], + this->blob_top_->cpu_data()[i]); + } +} + +TYPED_TEST(MKLConcatLayerTest, TestForwardChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLConcatLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_0_, this->blob_top_vec_); + for (int n = 0; n < this->blob_top_->num(); ++n) { + for (int c = 0; c < this->blob_bottom_0_->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + EXPECT_EQ(this->blob_top_->data_at(n, c, h, w), + this->blob_bottom_vec_0_[0]->data_at(n, c, h, w)); + } + } + } + for (int c = 0; c < this->blob_bottom_1_->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + EXPECT_EQ(this->blob_top_->data_at(n, c + 3, h, w), + this->blob_bottom_vec_0_[1]->data_at(n, c, h, w)); + } + } + } + } +} + +TYPED_TEST(MKLConcatLayerTest, TestGradientTrivial) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + 
MKLConcatLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + this->blob_bottom_vec_0_.resize(1); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_0_, + this->blob_top_vec_); +} + +TYPED_TEST(MKLConcatLayerTest, TestGradientChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLConcatLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradient(&layer, this->blob_bottom_vec_0_, + this->blob_top_vec_); +} + +TYPED_TEST(MKLConcatLayerTest, TestGradientChannelsBottomOneOnly) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLConcatLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradient(&layer, this->blob_bottom_vec_0_, + this->blob_top_vec_, 1); +} + +} // namespace caffe +#endif // #if defined(MKL2017_SUPPORTED) diff --git a/src/caffe/test/test_mkl_convolution_layer.cpp b/src/caffe/test/test_mkl_convolution_layer.cpp new file mode 100644 index 00000000000..42dd123e739 --- /dev/null +++ b/src/caffe/test/test_mkl_convolution_layer.cpp @@ -0,0 +1,897 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef MKL2017_SUPPORTED +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/mkl_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +// Reference convolution for checking results: +// accumulate through explicit loops over input, output, and filters. 
+template +void caffe_conv(const Blob* in, ConvolutionParameter* conv_param, + const vector > >& weights, + Blob* out) { + const bool has_depth = (out->num_axes() == 5); + if (!has_depth) { CHECK_EQ(4, out->num_axes()); } + // Kernel size, stride, and pad + int kernel_h, kernel_w; + if (conv_param->has_kernel_h() || conv_param->has_kernel_w()) { + kernel_h = conv_param->kernel_h(); + kernel_w = conv_param->kernel_w(); + } else { + kernel_h = kernel_w = conv_param->kernel_size(0); + } + int pad_h, pad_w; + if (conv_param->has_pad_h() || conv_param->has_pad_w()) { + pad_h = conv_param->pad_h(); + pad_w = conv_param->pad_w(); + } else { + pad_h = pad_w = conv_param->pad_size() ? conv_param->pad(0) : 0; + } + int stride_h, stride_w; + if (conv_param->has_stride_h() || conv_param->has_stride_w()) { + stride_h = conv_param->stride_h(); + stride_w = conv_param->stride_w(); + } else { + stride_h = stride_w = conv_param->stride_size() ? conv_param->stride(0) : 1; + } + int dilation_h, dilation_w; + dilation_h = dilation_w = conv_param->dilation_size() ? + conv_param->dilation(0) : 1; + int kernel_d, pad_d, stride_d, dilation_d; + if (has_depth) { + kernel_d = kernel_h; + stride_d = stride_h; + pad_d = pad_h; + dilation_d = dilation_h; + } else { + kernel_d = stride_d = dilation_d = 1; + pad_d = 0; + } + // Groups + int groups = conv_param->group(); + int o_g = out->shape(1) / groups; + int k_g = in->shape(1) / groups; + int o_head, k_head; + // Convolution + vector weight_offset(4 + has_depth); + vector in_offset(4 + has_depth); + vector out_offset(4 + has_depth); + Dtype* out_data = out->mutable_cpu_data(); + for (int n = 0; n < out->shape(0); n++) { + for (int g = 0; g < groups; g++) { + o_head = o_g * g; + k_head = k_g * g; + for (int o = 0; o < o_g; o++) { + for (int k = 0; k < k_g; k++) { + for (int z = 0; z < (has_depth ? 
out->shape(2) : 1); z++) { + for (int y = 0; y < out->shape(2 + has_depth); y++) { + for (int x = 0; x < out->shape(3 + has_depth); x++) { + for (int r = 0; r < kernel_d; r++) { + for (int p = 0; p < kernel_h; p++) { + for (int q = 0; q < kernel_w; q++) { + int in_z = z * stride_d - pad_d + r * dilation_d; + int in_y = y * stride_h - pad_h + p * dilation_h; + int in_x = x * stride_w - pad_w + q * dilation_w; + if (in_z >= 0 && in_z < (has_depth ? in->shape(2) : 1) + && in_y >= 0 && in_y < in->shape(2 + has_depth) + && in_x >= 0 && in_x < in->shape(3 + has_depth)) { + weight_offset[0] = o + o_head; + weight_offset[1] = k; + if (has_depth) { weight_offset[2] = r; } + weight_offset[2 + has_depth] = p; + weight_offset[3 + has_depth] = q; + in_offset[0] = n; + in_offset[1] = k + k_head; + if (has_depth) { in_offset[2] = in_z; } + in_offset[2 + has_depth] = in_y; + in_offset[3 + has_depth] = in_x; + out_offset[0] = n; + out_offset[1] = o + o_head; + if (has_depth) { out_offset[2] = z; } + out_offset[2 + has_depth] = y; + out_offset[3 + has_depth] = x; + out_data[out->offset(out_offset)] += + in->data_at(in_offset) + * weights[0]->data_at(weight_offset); + } + } + } + } + } + } + } + } + } + } + } + // Bias + if (conv_param->bias_term()) { + const Dtype* bias_data = weights[1]->cpu_data(); + for (int n = 0; n < out->shape(0); n++) { + for (int o = 0; o < out->shape(1); o++) { + for (int z = 0; z < (has_depth ? out->shape(2) : 1); z++) { + for (int y = 0; y < out->shape(2 + has_depth); y++) { + for (int x = 0; x < out->shape(3 + has_depth); x++) { + out_offset[0] = n; + out_offset[1] = o; + if (has_depth) { out_offset[2] = z; } + out_offset[2 + has_depth] = y; + out_offset[3 + has_depth] = x; + out_data[out->offset(out_offset)] += bias_data[o]; + } + } + } + } + } + } + //relu + if (conv_param->relu()) { + for (int n = 0; n < out->shape(0); n++) { + for (int o = 0; o < out->shape(1); o++) { + for (int z = 0; z < (has_depth ? 
out->shape(2) : 1); z++) { + for (int y = 0; y < out->shape(2 + has_depth); y++) { + for (int x = 0; x < out->shape(3 + has_depth); x++) { + out_offset[0] = n; + out_offset[1] = o; + if (has_depth) { out_offset[2] = z; } + out_offset[2 + has_depth] = y; + out_offset[3 + has_depth] = x; + if(out_data[out->offset(out_offset)] < 0) out_data[out->offset(out_offset)] = 0; + } + } + } + } + } + } +} + +template void caffe_conv(const Blob* in, + ConvolutionParameter* conv_param, + const vector > >& weights, + Blob* out); +template void caffe_conv(const Blob* in, + ConvolutionParameter* conv_param, + const vector > >& weights, + Blob* out); + +template +class MKLConvolutionLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MKLConvolutionLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 4)), + blob_bottom_2_(new Blob(2, 3, 6, 4)), + blob_top_(new Blob()), + blob_top_2_(new Blob()) {} + virtual void SetUp() { + // fill the values + FillerParameter filler_param; + filler_param.set_value(1.); + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + filler.Fill(this->blob_bottom_2_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~MKLConvolutionLayerTest() { + delete blob_bottom_; + delete blob_bottom_2_; + delete blob_top_; + delete blob_top_2_; + } + + virtual Blob* MakeReferenceTop(Blob* top) { + this->ref_blob_top_.reset(new Blob()); + this->ref_blob_top_->ReshapeLike(*top); + return this->ref_blob_top_.get(); + } + + Blob* const blob_bottom_; + Blob* const blob_bottom_2_; + Blob* const blob_top_; + Blob* const blob_top_2_; + shared_ptr > ref_blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types, + CPUDevice > TestDtypesCPU; + +TYPED_TEST_CASE(MKLConvolutionLayerTest, TestDtypesCPU); + +TYPED_TEST(MKLConvolutionLayerTest, TestSetupMKL) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + 
ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(4); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + shared_ptr > layer( + new MKLConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 4); + EXPECT_EQ(this->blob_top_->height(), 2); + EXPECT_EQ(this->blob_top_->width(), 1); + EXPECT_EQ(this->blob_top_2_->num(), 2); + EXPECT_EQ(this->blob_top_2_->channels(), 4); + EXPECT_EQ(this->blob_top_2_->height(), 2); + EXPECT_EQ(this->blob_top_2_->width(), 1); + // setting group should not change the shape + convolution_param->set_num_output(3); + convolution_param->set_group(3); + layer.reset(new MKLConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 2); + EXPECT_EQ(this->blob_top_->width(), 1); + EXPECT_EQ(this->blob_top_2_->num(), 2); + EXPECT_EQ(this->blob_top_2_->channels(), 3); + EXPECT_EQ(this->blob_top_2_->height(), 2); + EXPECT_EQ(this->blob_top_2_->width(), 1); +} + +TYPED_TEST(MKLConvolutionLayerTest, TestSimpleConvolutionMKL) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + 
convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new MKLConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. + const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } + +#if 0 // TODO: improve conv so that it runs on all buffers in bottom vector + caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_2_)); + top_data = this->blob_top_2_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +#endif +} + +#if 0 +TYPED_TEST(MKLConvolutionLayerTest, TestDilatedConvolutionMKL) { + typedef typename TypeParam::Dtype Dtype; + vector bottom_shape; + bottom_shape.push_back(2); + bottom_shape.push_back(3); + bottom_shape.push_back(8); + bottom_shape.push_back(7); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + this->blob_bottom_vec_[i]->Reshape(bottom_shape); + } + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_dilation(2); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( 
+ new MKLConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. + const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +#if 0 // TODO: improve conv so that it runs on all buffers in bottom vector + caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_2_)); + top_data = this->blob_top_2_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +#endif +} +#endif + +#if 0 +TYPED_TEST(MKLConvolutionLayerTest, Test0DConvolutionMKL) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + const int kNumOutput = 3; + convolution_param->set_num_output(kNumOutput); + convolution_param->set_axis(3); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + shared_ptr > layer( + new MKLConvolutionLayer(layer_param)); + vector top_shape = this->blob_bottom_->shape(); + top_shape[3] = kNumOutput; + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(top_shape, this->blob_top_->shape()); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. 
+ vector weight_offset(2); + const Blob* weight = layer->blobs()[0].get(); + const Blob* bias = layer->blobs()[1].get(); + const int num = this->blob_top_->count(3); + const int dim = this->blob_top_->shape(3); + const int bottom_dim = this->blob_bottom_->shape(3); + for (int n = 0; n < num; ++n) { + for (int d = 0; d < dim; ++d) { + weight_offset[0] = d; + Dtype value = bias->cpu_data()[d]; + for (int bottom_d = 0; bottom_d < bottom_dim; ++bottom_d) { + weight_offset[1] = bottom_d; + value += weight->data_at(weight_offset) * + this->blob_bottom_->cpu_data()[n * bottom_dim + bottom_d]; + } + EXPECT_NEAR(value, this->blob_top_->cpu_data()[n * dim + d], 1e-4); + } + } +} +#endif + +#if 0 +TYPED_TEST(MKLConvolutionLayerTest, TestSimple3DConvolution) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + vector bottom_shape(5); + bottom_shape[0] = this->blob_bottom_vec_[0]->shape(0); + bottom_shape[1] = this->blob_bottom_vec_[0]->shape(1); + bottom_shape[2] = 5; + bottom_shape[3] = this->blob_bottom_vec_[0]->shape(2); + bottom_shape[4] = this->blob_bottom_vec_[0]->shape(3); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + this->blob_bottom_vec_[i]->Reshape(bottom_shape); + filler.Fill(this->blob_bottom_vec_[i]); + } + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + shared_ptr > layer( + new MKLConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against 
reference convolution. + const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } + +#if 0 // TODO: improve conv so that it runs on all buffers in bottom vector + caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_2_)); + top_data = this->blob_top_2_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +#endif +} +#endif + +#if 0 +TYPED_TEST(MKLConvolutionLayerTest, TestDilated3DConvolution) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + vector bottom_shape(5); + bottom_shape[0] = this->blob_bottom_vec_[0]->shape(0); + bottom_shape[1] = this->blob_bottom_vec_[0]->shape(1); + bottom_shape[2] = 6; + bottom_shape[3] = 7; + bottom_shape[4] = 8; + FillerParameter filler_param; + GaussianFiller filler(filler_param); + for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + this->blob_bottom_vec_[i]->Reshape(bottom_shape); + filler.Fill(this->blob_bottom_vec_[i]); + } + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_dilation(2); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + shared_ptr > layer( + new MKLConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + 
layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. + const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } + caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_2_)); + top_data = this->blob_top_2_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +} +#endif + +TYPED_TEST(MKLConvolutionLayerTest, Test1x1Convolution) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(1); + convolution_param->add_stride(1); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new MKLConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. 
+ const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +} + +TYPED_TEST(MKLConvolutionLayerTest, TestSimpleConvolutionGroup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(3); + convolution_param->set_group(3); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new MKLConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. + const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +} + +#if 0 +TYPED_TEST(MKLConvolutionLayerTest, TestSobelConvolution) { + // Test separable convolution by computing the Sobel operator + // as a single filter then comparing the result + // as the convolution of two rectangular filters. + typedef typename TypeParam::Dtype Dtype; + // Fill bottoms with identical Gaussian noise. 
+ shared_ptr > filler; + FillerParameter filler_param; + filler_param.set_value(1.); + filler.reset(new GaussianFiller(filler_param)); + filler->Fill(this->blob_bottom_); + this->blob_bottom_2_->CopyFrom(*this->blob_bottom_); + // Compute Sobel G_x operator as 3 x 3 convolution. + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(1); + convolution_param->set_bias_term(false); + shared_ptr > layer( + new MKLConvolutionLayer(layer_param)); + layer->blobs().resize(1); + layer->blobs()[0].reset(new Blob(1, 3, 3, 3)); + Dtype* weights = layer->blobs()[0]->mutable_cpu_data(); + for (int c = 0; c < 3; ++c) { + int i = c * 9; // 3 x 3 filter + weights[i + 0] = -1; + weights[i + 1] = 0; + weights[i + 2] = 1; + weights[i + 3] = -2; + weights[i + 4] = 0; + weights[i + 5] = 2; + weights[i + 6] = -1; + weights[i + 7] = 0; + weights[i + 8] = 1; + } + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Compute Sobel G_x operator as separable 3 x 1 and 1 x 3 convolutions. 
+ // (1) the [1 2 1] column filter + vector*> sep_blob_bottom_vec; + vector*> sep_blob_top_vec; + shared_ptr > blob_sep(new Blob()); + sep_blob_bottom_vec.push_back(this->blob_bottom_2_); + sep_blob_top_vec.push_back(this->blob_top_2_); + convolution_param->clear_kernel_size(); + convolution_param->clear_stride(); + convolution_param->set_kernel_h(3); + convolution_param->set_kernel_w(1); + convolution_param->set_stride_h(2); + convolution_param->set_stride_w(1); + convolution_param->set_num_output(1); + convolution_param->set_bias_term(false); + layer.reset(new MKLConvolutionLayer(layer_param)); + layer->blobs().resize(1); + layer->blobs()[0].reset(new Blob(1, 3, 3, 1)); + Dtype* weights_1 = layer->blobs()[0]->mutable_cpu_data(); + for (int c = 0; c < 3; ++c) { + int i = c * 3; // 3 x 1 filter + weights_1[i + 0] = 1; + weights_1[i + 1] = 2; + weights_1[i + 2] = 1; + } + layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); + layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); + // (2) the [-1 0 1] row filter + blob_sep->CopyFrom(*this->blob_top_2_, false, true); + sep_blob_bottom_vec.clear(); + sep_blob_bottom_vec.push_back(blob_sep.get()); + convolution_param->set_kernel_h(1); + convolution_param->set_kernel_w(3); + convolution_param->set_stride_h(1); + convolution_param->set_stride_w(2); + convolution_param->set_num_output(1); + convolution_param->set_bias_term(false); + layer.reset(new MKLConvolutionLayer(layer_param)); + layer->blobs().resize(1); + layer->blobs()[0].reset(new Blob(1, 1, 1, 3)); + Dtype* weights_2 = layer->blobs()[0]->mutable_cpu_data(); + weights_2[0] = -1; + weights_2[1] = 0; + weights_2[2] = 1; + layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); + layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); + // Test equivalence of full and separable filters. 
+ const Dtype* top_data = this->blob_top_->cpu_data(); + const Dtype* sep_top_data = this->blob_top_2_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], sep_top_data[i], 1e-4); + } +} +#endif + +#if 0 +TYPED_TEST(MKLConvolutionLayerTest, TestNDAgainst2D) { + typedef typename TypeParam::Dtype Dtype; + const int kernel_h = 11; + const int kernel_w = 13; + vector bottom_shape(4); + bottom_shape[0] = 15; + bottom_shape[1] = 18; + bottom_shape[2] = kernel_h * 2; + bottom_shape[3] = kernel_w * 2; + FillerParameter filler_param; + GaussianFiller filler(filler_param); + for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + this->blob_bottom_vec_[i]->Reshape(bottom_shape); + filler.Fill(this->blob_bottom_vec_[i]); + } + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_num_output(12); + convolution_param->set_bias_term(false); + convolution_param->set_group(6); + convolution_param->set_kernel_h(kernel_h); + convolution_param->set_kernel_w(kernel_w); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + Blob weights; + Blob top_diff; + // Shape and fill weights and top_diff. 
+ bool copy_diff; + bool reshape; + { + MKLConvolutionLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + top_diff.ReshapeLike(*this->blob_top_); + filler.Fill(&top_diff); + ASSERT_EQ(1, layer.blobs().size()); + copy_diff = false; reshape = true; + weights.CopyFrom(*layer.blobs()[0], copy_diff, reshape); + } + vector propagate_down(1, true); + Blob result_2d; + Blob backward_result_2d; + Blob backward_weight_result_2d; + // Test with 2D im2col + { + caffe_set(this->blob_top_->count(), Dtype(0), + this->blob_top_->mutable_cpu_data()); + caffe_set(this->blob_bottom_->count(), Dtype(0), + this->blob_bottom_->mutable_cpu_diff()); + caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff()); + // Do SetUp and Forward; save Forward result in result_2d. + convolution_param->set_force_nd_im2col(false); + MKLConvolutionLayer layer_2d(layer_param); + layer_2d.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(1, layer_2d.blobs().size()); + copy_diff = false; reshape = false; + layer_2d.blobs()[0]->CopyFrom(weights, copy_diff, reshape); + layer_2d.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + copy_diff = false; reshape = true; + result_2d.CopyFrom(*this->blob_top_, copy_diff, reshape); + // Copy pre-generated top diff into actual top diff; + // do Backward and save result in backward_result_2d. 
+ ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); + caffe_copy(top_diff.count(), top_diff.cpu_data(), + this->blob_top_->mutable_cpu_diff()); + layer_2d.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + copy_diff = true; reshape = true; + backward_result_2d.CopyFrom(*this->blob_bottom_, copy_diff, reshape); + backward_weight_result_2d.CopyFrom(weights, copy_diff, reshape); + } + Blob result_nd; + Blob backward_result_nd; + Blob backward_weight_result_nd; + // Test with ND im2col + { + caffe_set(this->blob_top_->count(), Dtype(0), + this->blob_top_->mutable_cpu_data()); + caffe_set(this->blob_bottom_->count(), Dtype(0), + this->blob_bottom_->mutable_cpu_diff()); + caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff()); + // Do SetUp and Forward; save Forward result in result_nd. + convolution_param->set_force_nd_im2col(true); + MKLConvolutionLayer layer_nd(layer_param); + layer_nd.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(1, layer_nd.blobs().size()); + copy_diff = false; reshape = false; + layer_nd.blobs()[0]->CopyFrom(weights, copy_diff, reshape); + layer_nd.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + copy_diff = false; reshape = true; + result_nd.CopyFrom(*this->blob_top_, copy_diff, reshape); + // Copy pre-generated top diff into actual top diff; + // do Backward and save result in backward_result_nd. 
+ ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); + caffe_copy(top_diff.count(), top_diff.cpu_data(), + this->blob_top_->mutable_cpu_diff()); + layer_nd.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + copy_diff = true; reshape = true; + backward_result_nd.CopyFrom(*this->blob_bottom_, copy_diff, reshape); + backward_weight_result_nd.CopyFrom(weights, copy_diff, reshape); + } + ASSERT_EQ(result_nd.count(), result_2d.count()); + for (int i = 0; i < result_2d.count(); ++i) { + EXPECT_EQ(result_2d.cpu_data()[i], result_nd.cpu_data()[i]); + } + ASSERT_EQ(backward_result_nd.count(), backward_result_2d.count()); + for (int i = 0; i < backward_result_2d.count(); ++i) { + EXPECT_EQ(backward_result_2d.cpu_diff()[i], + backward_result_nd.cpu_diff()[i]); + } + ASSERT_EQ(backward_weight_result_nd.count(), + backward_weight_result_2d.count()); + for (int i = 0; i < backward_weight_result_2d.count(); ++i) { + EXPECT_EQ(backward_weight_result_2d.cpu_diff()[i], + backward_weight_result_nd.cpu_diff()[i]); + } +} +#endif + +TYPED_TEST(MKLConvolutionLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + +// TODO: improve conv so that it runs on all buffers in bottom vector + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(2); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + MKLConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +#if 0 +TYPED_TEST(MKLConvolutionLayerTest, TestDilatedGradient) { + typedef typename TypeParam::Dtype Dtype; + 
LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + vector bottom_shape; + bottom_shape.push_back(2); + bottom_shape.push_back(3); + bottom_shape.push_back(5); + bottom_shape.push_back(6); + for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + this->blob_bottom_vec_[i]->Reshape(bottom_shape); + } + convolution_param->add_kernel_size(3); + convolution_param->add_dilation(2); + convolution_param->set_num_output(2); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + MKLConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} +#endif + +#if 0 +TYPED_TEST(MKLConvolutionLayerTest, TestGradient3D) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + vector bottom_shape(5); + bottom_shape[0] = this->blob_bottom_vec_[0]->shape(0); + bottom_shape[1] = this->blob_bottom_vec_[0]->shape(1); + bottom_shape[2] = 5; + bottom_shape[3] = this->blob_bottom_vec_[0]->shape(2); + bottom_shape[4] = this->blob_bottom_vec_[0]->shape(3); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + this->blob_bottom_vec_[i]->Reshape(bottom_shape); + filler.Fill(this->blob_bottom_vec_[i]); + } + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(2); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + MKLConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} +#endif + 
+TYPED_TEST(MKLConvolutionLayerTest, Test1x1Gradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + convolution_param->add_kernel_size(1); + convolution_param->add_stride(1); + convolution_param->set_num_output(2); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + MKLConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(MKLConvolutionLayerTest, TestGradientGroup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(3); + convolution_param->set_group(3); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + MKLConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +} // namespace caffe +#endif // #ifdef MKL2017_SUPPORTED diff --git a/src/caffe/test/test_mkl_deconvolution_layer.cpp b/src/caffe/test/test_mkl_deconvolution_layer.cpp new file mode 100644 index 00000000000..9ea1214d74c --- /dev/null +++ b/src/caffe/test/test_mkl_deconvolution_layer.cpp @@ -0,0 +1,419 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. 
+ +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef MKL2017_SUPPORTED +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/mkl_layers.hpp" +#include "caffe/layers/deconv_layer.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +// Since ConvolutionLayerTest checks the shared conv/deconv code in detail, +// we'll just do a simple forward test and a gradient check. +template +class MKLDeconvolutionLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MKLDeconvolutionLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 4)), + ref_blob_bottom_(new Blob(2, 3, 6, 4)), + blob_bottom_2_(new Blob(2, 3, 6, 4)), + blob_top_(new Blob()), + ref_blob_top_(new Blob()), + blob_top_2_(new Blob()) {} + virtual void SetUp() { + // fill the values + FillerParameter filler_param; + filler_param.set_value(1.); + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + filler.Fill(this->ref_blob_bottom_); + filler.Fill(this->blob_bottom_2_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + ref_blob_bottom_vec_.push_back(ref_blob_bottom_); + ref_blob_top_vec_.push_back(ref_blob_top_); + + } + + virtual ~MKLDeconvolutionLayerTest() { + delete blob_bottom_; + delete ref_blob_bottom_; + delete blob_bottom_2_; + delete blob_top_; + delete ref_blob_top_; + delete blob_top_2_; + } + + Blob* const blob_bottom_; + Blob* const ref_blob_bottom_; + Blob* const blob_bottom_2_; + Blob* const blob_top_; + Blob* const ref_blob_top_; + Blob* const blob_top_2_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + vector*> ref_blob_bottom_vec_; + vector*> ref_blob_top_vec_; + +}; + +TYPED_TEST_CASE(MKLDeconvolutionLayerTest, TestDtypesAndDevices); + +TYPED_TEST(MKLDeconvolutionLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + 
ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(4); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + shared_ptr > layer( + new MKLDeconvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 4); + EXPECT_EQ(this->blob_top_->height(), 13); + EXPECT_EQ(this->blob_top_->width(), 9); + EXPECT_EQ(this->blob_top_2_->num(), 2); + EXPECT_EQ(this->blob_top_2_->channels(), 4); + EXPECT_EQ(this->blob_top_2_->height(), 13); + EXPECT_EQ(this->blob_top_2_->width(), 9); + // setting group should not change the shape + convolution_param->set_num_output(3); + convolution_param->set_group(3); + layer.reset(new MKLDeconvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 13); + EXPECT_EQ(this->blob_top_->width(), 9); + EXPECT_EQ(this->blob_top_2_->num(), 2); + EXPECT_EQ(this->blob_top_2_->channels(), 3); + EXPECT_EQ(this->blob_top_2_->height(), 13); + EXPECT_EQ(this->blob_top_2_->width(), 9); +} + +TYPED_TEST(MKLDeconvolutionLayerTest, TestSimpleMKLDeconvolution) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("constant"); + convolution_param->mutable_weight_filler()->set_value(1); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > 
layer( + new MKLDeconvolutionLayer(layer_param)); + shared_ptr > ref_layer( + new DeconvolutionLayer(layer_param)); + + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ref_layer->SetUp(this->ref_blob_bottom_vec_, this->ref_blob_top_vec_); + // constant-fill the bottom blobs + FillerParameter filler_param; + filler_param.set_value(1.); + + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + filler.Fill(this->ref_blob_bottom_); + + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + ref_layer->Forward(this->ref_blob_bottom_vec_, this->ref_blob_top_vec_); + // simply check that accumulation works with overlapping filters + const Dtype* top_data = this->blob_top_->cpu_data(); + const Dtype* ref_top_data = this->ref_blob_top_->cpu_data(); + for (int n = 0; n < this->blob_top_->num(); ++n) { + for (int c = 0; c < this->blob_top_->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + Dtype expected = 3.1; + bool h_overlap = h % 2 == 0 && h > 0 + && h < this->blob_top_->height() - 1; + bool w_overlap = w % 2 == 0 && w > 0 + && w < this->blob_top_->width() - 1; + if (h_overlap && w_overlap) { + expected += 9; + } else if (h_overlap || w_overlap) { + expected += 3; + } + + EXPECT_NEAR(top_data[this->blob_top_->offset(n, c, h, w)], + expected, 1e-4); + EXPECT_NEAR(ref_top_data[this->blob_top_->offset(n, c, h, w)], + expected, 1e-4); + } + } + } + } + + // set top_diff + Dtype* top_diff = this->blob_top_->mutable_cpu_diff(); + Dtype* ref_top_diff = this->ref_blob_top_->mutable_cpu_diff(); + for( int n = 0; n < this->blob_top_->num(); ++n) { + for( int c = 0; c < this->blob_top_->channels(); ++c) { + for( int h=0; h < this->blob_top_->height(); ++h) { + for(int w = 0; w < this->blob_top_->width(); ++w) { + top_diff[this->blob_top_->offset(n, c, h, w)] = ref_top_data[this->blob_top_->offset(n, c, h, w)]; + ref_top_diff[this->blob_top_->offset(n, c, h, w)] = 
ref_top_data[this->blob_top_->offset(n, c, h, w)];
+ }
+ }
+ }
+ }
+
+
+ vector<bool> need_backward({true});
+ layer->Backward(this->blob_top_vec_, need_backward, this->blob_bottom_vec_);
+ ref_layer->Backward(this->ref_blob_top_vec_, need_backward, this->ref_blob_bottom_vec_);
+
+ // check backward data
+ const Dtype* bottom_diff = this->blob_bottom_->cpu_diff();
+ const Dtype* ref_bottom_diff = this->ref_blob_bottom_->cpu_diff();
+ for( int n = 0; n < this->blob_bottom_->num(); ++n) {
+ for( int c = 0; c < this->blob_bottom_->channels(); ++c) {
+ for( int h=0; h < this->blob_bottom_->height(); ++h) {
+ for(int w = 0; w < this->blob_bottom_->width(); ++w) {
+ EXPECT_NEAR(bottom_diff[this->blob_bottom_->offset(n, c, h, w)],
+ ref_bottom_diff[this->blob_bottom_->offset(n, c, h, w)],
+ 1e-4);
+ }
+ }
+ }
+ }
+ // check backward weights
+ for (int i = 0; i < layer->blobs().size(); ++i) {
+ Blob<Dtype>* blob = layer->blobs()[i].get();
+ Blob<Dtype>* ref_blob = ref_layer->blobs()[i].get();
+ const Dtype* weights_diff = blob->cpu_diff();
+ const Dtype* ref_weights_diff = ref_blob->cpu_diff();
+ for( int n = 0; n < blob->num(); ++n) {
+ for( int c = 0; c < blob->channels(); ++c) {
+ for( int h = 0; h < blob->height(); ++h) {
+ for( int w =0; w < blob->width(); ++w) {
+ //printf("%.4f ", weights_diff[blob->offset(n, c, h, w)] - ref_weights_diff[ref_blob->offset(n, c, h, w)]);
+ EXPECT_NEAR(weights_diff[blob->offset(n, c, h, w)],
+ ref_weights_diff[ref_blob->offset(n, c, h, w)],
+ 1e-3);
+ }
+ }
+ }
+ }
+ }
+}
+TYPED_TEST(MKLDeconvolutionLayerTest, TestGradient) {
+ typedef typename TypeParam::Dtype Dtype;
+ LayerParameter layer_param;
+ ConvolutionParameter* convolution_param =
+ layer_param.mutable_convolution_param();
+ this->blob_bottom_vec_.push_back(this->blob_bottom_2_);
+ this->blob_top_vec_.push_back(this->blob_top_2_);
+ convolution_param->add_kernel_size(2);
+ convolution_param->add_stride(1);
+ convolution_param->set_num_output(1);
+ 
convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + MKLDeconvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} +TYPED_TEST(MKLDeconvolutionLayerTest, TestNDAgainst2D) { + typedef typename TypeParam::Dtype Dtype; + const int kernel_h = 11; + const int kernel_w = 13; + vector bottom_shape(4); + bottom_shape[0] = 15; + bottom_shape[1] = 12; + bottom_shape[2] = kernel_h * 2; + bottom_shape[3] = kernel_w * 2; + FillerParameter filler_param; + GaussianFiller filler(filler_param); + for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + this->blob_bottom_vec_[i]->Reshape(bottom_shape); + filler.Fill(this->blob_bottom_vec_[i]); + } + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_num_output(18); + convolution_param->set_bias_term(false); + convolution_param->set_group(6); + convolution_param->set_kernel_h(kernel_h); + convolution_param->set_kernel_w(kernel_w); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + Blob weights; + Blob top_diff; + // Shape and fill weights and top_diff. 
+ bool copy_diff; + bool reshape; + { + MKLDeconvolutionLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + top_diff.ReshapeLike(*this->blob_top_); + filler.Fill(&top_diff); + ASSERT_EQ(1, layer.blobs().size()); + copy_diff = false; reshape = true; + weights.CopyFrom(*layer.blobs()[0], copy_diff, reshape); + } + vector propagate_down(1, true); + Blob result_2d; + Blob backward_result_2d; + Blob backward_weight_result_2d; + // Test with 2D im2col + { + caffe_set(this->blob_top_->count(), Dtype(0), + this->blob_top_->mutable_cpu_data()); + caffe_set(this->blob_bottom_->count(), Dtype(0), + this->blob_bottom_->mutable_cpu_diff()); + caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff()); + // Do SetUp and Forward; save Forward result in result_2d. + convolution_param->set_force_nd_im2col(false); + MKLDeconvolutionLayer layer_2d(layer_param); + layer_2d.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(1, layer_2d.blobs().size()); + copy_diff = false; reshape = false; + layer_2d.blobs()[0]->CopyFrom(weights, copy_diff, reshape); + layer_2d.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + copy_diff = false; reshape = true; + result_2d.CopyFrom(*this->blob_top_, copy_diff, reshape); + // Copy pre-generated top diff into actual top diff; + // do Backward and save result in backward_result_2d. 
+ ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); + caffe_copy(top_diff.count(), top_diff.cpu_data(), + this->blob_top_->mutable_cpu_diff()); + layer_2d.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + copy_diff = true; reshape = true; + backward_result_2d.CopyFrom(*this->blob_bottom_, copy_diff, reshape); + backward_weight_result_2d.CopyFrom(weights, copy_diff, reshape); + } + Blob result_nd; + Blob backward_result_nd; + Blob backward_weight_result_nd; + // Test with ND im2col + { + caffe_set(this->blob_top_->count(), Dtype(0), + this->blob_top_->mutable_cpu_data()); + caffe_set(this->blob_bottom_->count(), Dtype(0), + this->blob_bottom_->mutable_cpu_diff()); + caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff()); + // Do SetUp and Forward; save Forward result in result_nd. + convolution_param->set_force_nd_im2col(true); + MKLDeconvolutionLayer layer_nd(layer_param); + layer_nd.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(1, layer_nd.blobs().size()); + copy_diff = false; reshape = false; + layer_nd.blobs()[0]->CopyFrom(weights, copy_diff, reshape); + layer_nd.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + copy_diff = false; reshape = true; + result_nd.CopyFrom(*this->blob_top_, copy_diff, reshape); + // Copy pre-generated top diff into actual top diff; + // do Backward and save result in backward_result_nd. 
+ ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); + caffe_copy(top_diff.count(), top_diff.cpu_data(), + this->blob_top_->mutable_cpu_diff()); + layer_nd.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + copy_diff = true; reshape = true; + backward_result_nd.CopyFrom(*this->blob_bottom_, copy_diff, reshape); + backward_weight_result_nd.CopyFrom(weights, copy_diff, reshape); + } + ASSERT_EQ(result_nd.count(), result_2d.count()); + for (int i = 0; i < result_2d.count(); ++i) { + EXPECT_EQ(result_2d.cpu_data()[i], result_nd.cpu_data()[i]); + } + ASSERT_EQ(backward_result_nd.count(), backward_result_2d.count()); + for (int i = 0; i < backward_result_2d.count(); ++i) { + EXPECT_EQ(backward_result_2d.cpu_diff()[i], + backward_result_nd.cpu_diff()[i]); + } + ASSERT_EQ(backward_weight_result_nd.count(), + backward_weight_result_2d.count()); + for (int i = 0; i < backward_weight_result_2d.count(); ++i) { + EXPECT_EQ(backward_weight_result_2d.cpu_diff()[i], + backward_weight_result_nd.cpu_diff()[i]); + } +} + +#if 0 +TYPED_TEST(MKLDeconvolutionLayerTest, TestGradient3D) { + typedef typename TypeParam::Dtype Dtype; + vector bottom_shape(5); + bottom_shape[0] = this->blob_bottom_vec_[0]->shape(0); + bottom_shape[1] = this->blob_bottom_vec_[0]->shape(1); + bottom_shape[2] = 2; + bottom_shape[3] = 3; + bottom_shape[4] = 2; + FillerParameter filler_param; + GaussianFiller filler(filler_param); + for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + this->blob_bottom_vec_[i]->Reshape(bottom_shape); + filler.Fill(this->blob_bottom_vec_[i]); + } + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(2); + convolution_param->add_stride(2); + convolution_param->add_pad(1); + convolution_param->set_num_output(2); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + 
MKLDeconvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} +#endif +} // namespace caffe + +#endif diff --git a/src/caffe/test/test_mkl_eltwise_layer.cpp b/src/caffe/test/test_mkl_eltwise_layer.cpp new file mode 100644 index 00000000000..55a82e670c9 --- /dev/null +++ b/src/caffe/test/test_mkl_eltwise_layer.cpp @@ -0,0 +1,252 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#if defined(MKL2017_SUPPORTED) +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/mkl_layers.hpp" +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class MKLEltwiseLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MKLEltwiseLayerTest() + : blob_bottom_a_(new Blob(2, 3, 4, 5)), + blob_bottom_b_(new Blob(2, 3, 4, 5)), + blob_bottom_c_(new Blob(2, 3, 4, 5)), + blob_top_(new Blob()) { + // fill the values + Caffe::set_random_seed(1701); + FillerParameter filler_param; + UniformFiller filler(filler_param); + filler.Fill(this->blob_bottom_a_); + filler.Fill(this->blob_bottom_b_); + filler.Fill(this->blob_bottom_c_); + blob_bottom_vec_.push_back(blob_bottom_a_); + blob_bottom_vec_.push_back(blob_bottom_b_); + blob_bottom_vec_.push_back(blob_bottom_c_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~MKLEltwiseLayerTest() { + delete blob_bottom_a_; + delete blob_bottom_b_; + delete blob_bottom_c_; + delete blob_top_; + } + Blob* const blob_bottom_a_; + Blob* const blob_bottom_b_; + Blob* const blob_bottom_c_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types, + CPUDevice > TestDtypesCPU; +TYPED_TEST_CASE(MKLEltwiseLayerTest, TestDtypesCPU); + 
+TYPED_TEST(MKLEltwiseLayerTest, TestSetUp) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + shared_ptr > layer( + new MKLEltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 4); + EXPECT_EQ(this->blob_top_->width(), 5); +} + +/* +TYPED_TEST(MKLEltwiseLayerTest, TestProd) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + shared_ptr > layer( + new MKLEltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); + const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); + const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_NEAR(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i], 1e-4); + } +} +*/ + +TYPED_TEST(MKLEltwiseLayerTest, TestSum) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + shared_ptr > layer( + new MKLEltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); + const 
Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); + const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_NEAR(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i], 1e-4); + } +} + +/* +TYPED_TEST(MKLEltwiseLayerTest, TestSumCoeff) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + eltwise_param->add_coeff(1); + eltwise_param->add_coeff(-0.5); + eltwise_param->add_coeff(2); + shared_ptr > layer( + new MKLEltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); + const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); + const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_NEAR(data[i], in_data_a[i] - 0.5*in_data_b[i] + 2*in_data_c[i], + 1e-4); + } +} + +TYPED_TEST(MKLEltwiseLayerTest, TestStableProdGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + eltwise_param->set_stable_prod_grad(true); + MKLEltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(MKLEltwiseLayerTest, TestUnstableProdGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + eltwise_param->set_stable_prod_grad(false); + 
MKLEltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} +*/ +TYPED_TEST(MKLEltwiseLayerTest, TestSumGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + MKLEltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} +/* +TYPED_TEST(MKLEltwiseLayerTest, TestSumCoeffGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + eltwise_param->add_coeff(1); + eltwise_param->add_coeff(-0.5); + eltwise_param->add_coeff(2); + MKLEltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(MKLEltwiseLayerTest, TestMax) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_MAX); + shared_ptr > layer( + new MKLEltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); + const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); + const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_EQ(data[i], + std::max(in_data_a[i], std::max(in_data_b[i], in_data_c[i]))); + } +} + 
+TYPED_TEST(MKLEltwiseLayerTest, TestMaxGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_MAX); + MKLEltwiseLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} +*/ +} // namespace caffe +#endif // #if defined(MKL2017_SUPPORTED) diff --git a/src/caffe/test/test_mkl_lrn_layer.cpp b/src/caffe/test/test_mkl_lrn_layer.cpp new file mode 100644 index 00000000000..23ba6af7850 --- /dev/null +++ b/src/caffe/test/test_mkl_lrn_layer.cpp @@ -0,0 +1,292 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef MKL2017_SUPPORTED +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/mkl_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +using std::min; +using std::max; + +namespace caffe { + +template +class MKLLRNLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MKLLRNLayerTest() + : epsilon_(Dtype(1e-5)), + blob_bottom_(new Blob()), + blob_top_(new Blob()) {} + virtual void SetUp() { + Caffe::set_random_seed(1701); + blob_bottom_->Reshape(2, 7, 3, 3); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~MKLLRNLayerTest() { delete blob_bottom_; delete blob_top_; } + void ReferenceLRNForward(const Blob& blob_bottom, + const LayerParameter& layer_param, Blob* blob_top); + + Dtype epsilon_; + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +template +void 
MKLLRNLayerTest::ReferenceLRNForward( + const Blob& blob_bottom, const LayerParameter& layer_param, + Blob* blob_top) { + typedef typename TypeParam::Dtype Dtype; + blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(), + blob_bottom.height(), blob_bottom.width()); + Dtype* top_data = blob_top->mutable_cpu_data(); + LRNParameter lrn_param = layer_param.lrn_param(); + Dtype alpha = lrn_param.alpha(); + Dtype beta = lrn_param.beta(); + int size = lrn_param.local_size(); + switch (lrn_param.norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + for (int n = 0; n < blob_bottom.num(); ++n) { + for (int c = 0; c < blob_bottom.channels(); ++c) { + for (int h = 0; h < blob_bottom.height(); ++h) { + for (int w = 0; w < blob_bottom.width(); ++w) { + int c_start = c - (size - 1) / 2; + int c_end = min(c_start + size, blob_bottom.channels()); + c_start = max(c_start, 0); + Dtype scale = 1.; + for (int i = c_start; i < c_end; ++i) { + Dtype value = blob_bottom.data_at(n, i, h, w); + scale += value * value * alpha / size; + } + *(top_data + blob_top->offset(n, c, h, w)) = + blob_bottom.data_at(n, c, h, w) / pow(scale, beta); + } + } + } + } + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + for (int n = 0; n < blob_bottom.num(); ++n) { + for (int c = 0; c < blob_bottom.channels(); ++c) { + for (int h = 0; h < blob_bottom.height(); ++h) { + int h_start = h - (size - 1) / 2; + int h_end = min(h_start + size, blob_bottom.height()); + h_start = max(h_start, 0); + for (int w = 0; w < blob_bottom.width(); ++w) { + Dtype scale = 1.; + int w_start = w - (size - 1) / 2; + int w_end = min(w_start + size, blob_bottom.width()); + w_start = max(w_start, 0); + for (int nh = h_start; nh < h_end; ++nh) { + for (int nw = w_start; nw < w_end; ++nw) { + Dtype value = blob_bottom.data_at(n, c, nh, nw); + scale += value * value * alpha / (size * size); + } + } + *(top_data + blob_top->offset(n, c, h, w)) = + blob_bottom.data_at(n, c, h, w) / pow(scale, beta); + } + } + } + } 
+ break; + default: + LOG(FATAL) << "Unknown normalization region."; + } +} + +typedef ::testing::Types, + CPUDevice > TestDtypesCPU; +TYPED_TEST_CASE(MKLLRNLayerTest, TestDtypesCPU); + + +TYPED_TEST(MKLLRNLayerTest, TestSetupAcrossChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLLRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 7); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); +} +#if 0 +TYPED_TEST(MKLLRNLayerTest, TestForwardAcrossChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLLRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } +} +#endif +TYPED_TEST(MKLLRNLayerTest, TestForwardAcrossChannelsLargeRegion) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_local_size(15); + MKLLRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } +} +#if 0 +TYPED_TEST(MKLLRNLayerTest, TestGradientAcrossChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLLRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + 
layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + vector propagate_down(this->blob_bottom_vec_.size(), true); + layer.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + // for (int i = 0; i < this->blob_bottom_->count(); ++i) { + // std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i] + // << std::endl; + // } + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} +#endif +TYPED_TEST(MKLLRNLayerTest, TestGradientAcrossChannelsLargeRegion) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_local_size(15); + MKLLRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + vector propagate_down(this->blob_bottom_vec_.size(), true); + layer.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + // for (int i = 0; i < this->blob_bottom_->count(); ++i) { + // std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i] + // << std::endl; + // } + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + + +#if 0 +TYPED_TEST(MKLLRNLayerTest, TestSetupWithinChannel) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + MKLLRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 7); + 
EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); +} + +TYPED_TEST(MKLLRNLayerTest, TestForwardWithinChannel) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + MKLLRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } +} + +TYPED_TEST(MKLLRNLayerTest, TestGradientWithinChannel) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + MKLLRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} +#endif +} // namespace caffe +#endif // #ifdef MKL2017_SUPPORTED diff --git a/src/caffe/test/test_mkl_neuron_layers.cpp b/src/caffe/test/test_mkl_neuron_layers.cpp new file mode 100644 index 00000000000..edc7b73c174 --- /dev/null +++ b/src/caffe/test/test_mkl_neuron_layers.cpp @@ -0,0 +1,108 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. 
+ +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef MKL2017_SUPPORTED +#include +#include + +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" + +#include "caffe/layers/mkl_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class MKLNeuronLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MKLNeuronLayerTest() + : blob_bottom_(new Blob(2, 3, 4, 5)), + blob_top_(new Blob()) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~MKLNeuronLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types, + CPUDevice > TestDtypesCPU; +TYPED_TEST_CASE(MKLNeuronLayerTest, TestDtypesCPU); + +TYPED_TEST(MKLNeuronLayerTest, TestReLU) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]); + } +} + +TYPED_TEST(MKLNeuronLayerTest, TestReLUGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLReLULayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + 
this->blob_top_vec_); +} + +} // namespace caffe +#endif // #ifdef MKL2017_SUPPORTED diff --git a/src/caffe/test/test_mkl_pooling_layer.cpp b/src/caffe/test/test_mkl_pooling_layer.cpp new file mode 100644 index 00000000000..3df1590f1c3 --- /dev/null +++ b/src/caffe/test/test_mkl_pooling_layer.cpp @@ -0,0 +1,655 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef MKL2017_SUPPORTED +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/mkl_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class MKLPoolingLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MKLPoolingLayerTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()), + blob_top_mask_(new Blob()) {} + virtual void SetUp() { + Caffe::set_random_seed(1701); + blob_bottom_->Reshape(2, 3, 6, 5); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~MKLPoolingLayerTest() { + delete blob_bottom_; + delete blob_top_; + delete blob_top_mask_; + } + Blob* const blob_bottom_; + Blob* const blob_top_; + Blob* const blob_top_mask_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + // Test for 2x 2 square pooling layer + void TestForwardSquare() { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + const int num = 2; + const int channels = 2; + blob_bottom_->Reshape(num, channels, 3, 5); + // Input: 2x 
2 channels of: + // [1 2 5 2 3] + // [9 4 1 4 8] + // [1 2 5 2 3] + for (int i = 0; i < 15 * num * channels; i += 15) { + blob_bottom_->mutable_cpu_data()[i + 0] = 1; + blob_bottom_->mutable_cpu_data()[i + 1] = 2; + blob_bottom_->mutable_cpu_data()[i + 2] = 5; + blob_bottom_->mutable_cpu_data()[i + 3] = 2; + blob_bottom_->mutable_cpu_data()[i + 4] = 3; + blob_bottom_->mutable_cpu_data()[i + 5] = 9; + blob_bottom_->mutable_cpu_data()[i + 6] = 4; + blob_bottom_->mutable_cpu_data()[i + 7] = 1; + blob_bottom_->mutable_cpu_data()[i + 8] = 4; + blob_bottom_->mutable_cpu_data()[i + 9] = 8; + blob_bottom_->mutable_cpu_data()[i + 10] = 1; + blob_bottom_->mutable_cpu_data()[i + 11] = 2; + blob_bottom_->mutable_cpu_data()[i + 12] = 5; + blob_bottom_->mutable_cpu_data()[i + 13] = 2; + blob_bottom_->mutable_cpu_data()[i + 14] = 3; + } + MKLPoolingLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_->num(), num); + EXPECT_EQ(blob_top_->channels(), channels); + EXPECT_EQ(blob_top_->height(), 2); + EXPECT_EQ(blob_top_->width(), 4); + if (blob_top_vec_.size() > 1) { + EXPECT_EQ(blob_top_mask_->num(), num); + EXPECT_EQ(blob_top_mask_->channels(), channels); + EXPECT_EQ(blob_top_mask_->height(), 2); + EXPECT_EQ(blob_top_mask_->width(), 4); + } + layer.Forward(blob_bottom_vec_, blob_top_vec_); + // Expected output: 2x 2 channels of: + // [9 5 5 8] + // [9 5 5 8] + for (int i = 0; i < 8 * num * channels; i += 8) { + EXPECT_EQ(blob_top_->cpu_data()[i + 0], 9); + EXPECT_EQ(blob_top_->cpu_data()[i + 1], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 2], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 3], 8); + EXPECT_EQ(blob_top_->cpu_data()[i + 4], 9); + EXPECT_EQ(blob_top_->cpu_data()[i + 5], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 6], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 7], 8); + } + if (blob_top_vec_.size() > 1) { + // Expected mask output: 2x 2 channels of: + // [5 2 2 9] + // [5 12 12 9] + for (int i = 0; i < 8 * num * channels; i += 8) { + 
EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 5); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 2); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 2); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 3], 9); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 4], 5); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 5], 12); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 6], 12); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 7], 9); + } + } + } + // Test for 3x 2 rectangular pooling layer with kernel_h > kernel_w + void TestForwardRectHigh() { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(3); + pooling_param->set_kernel_w(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + const int num = 2; + const int channels = 2; + blob_bottom_->Reshape(num, channels, 6, 6); + // Input: 2x 2 channels of: + // [35 1 6 26 19 24] + // [ 3 32 7 21 23 25] + // [31 9 2 22 27 20] + // [ 8 28 33 17 10 15] + // [30 5 34 12 14 16] + // [ 4 36 29 13 18 11] + // (this is generated by magic(6) in MATLAB) + for (int i = 0; i < 36 * num * channels; i += 36) { + blob_bottom_->mutable_cpu_data()[i + 0] = 35; + blob_bottom_->mutable_cpu_data()[i + 1] = 1; + blob_bottom_->mutable_cpu_data()[i + 2] = 6; + blob_bottom_->mutable_cpu_data()[i + 3] = 26; + blob_bottom_->mutable_cpu_data()[i + 4] = 19; + blob_bottom_->mutable_cpu_data()[i + 5] = 24; + blob_bottom_->mutable_cpu_data()[i + 6] = 3; + blob_bottom_->mutable_cpu_data()[i + 7] = 32; + blob_bottom_->mutable_cpu_data()[i + 8] = 7; + blob_bottom_->mutable_cpu_data()[i + 9] = 21; + blob_bottom_->mutable_cpu_data()[i + 10] = 23; + blob_bottom_->mutable_cpu_data()[i + 11] = 25; + blob_bottom_->mutable_cpu_data()[i + 12] = 31; + blob_bottom_->mutable_cpu_data()[i + 13] = 9; + blob_bottom_->mutable_cpu_data()[i + 14] = 2; + blob_bottom_->mutable_cpu_data()[i + 15] = 22; + blob_bottom_->mutable_cpu_data()[i + 16] = 27; + blob_bottom_->mutable_cpu_data()[i + 17] = 20; + 
blob_bottom_->mutable_cpu_data()[i + 18] = 8; + blob_bottom_->mutable_cpu_data()[i + 19] = 28; + blob_bottom_->mutable_cpu_data()[i + 20] = 33; + blob_bottom_->mutable_cpu_data()[i + 21] = 17; + blob_bottom_->mutable_cpu_data()[i + 22] = 10; + blob_bottom_->mutable_cpu_data()[i + 23] = 15; + blob_bottom_->mutable_cpu_data()[i + 24] = 30; + blob_bottom_->mutable_cpu_data()[i + 25] = 5; + blob_bottom_->mutable_cpu_data()[i + 26] = 34; + blob_bottom_->mutable_cpu_data()[i + 27] = 12; + blob_bottom_->mutable_cpu_data()[i + 28] = 14; + blob_bottom_->mutable_cpu_data()[i + 29] = 16; + blob_bottom_->mutable_cpu_data()[i + 30] = 4; + blob_bottom_->mutable_cpu_data()[i + 31] = 36; + blob_bottom_->mutable_cpu_data()[i + 32] = 29; + blob_bottom_->mutable_cpu_data()[i + 33] = 13; + blob_bottom_->mutable_cpu_data()[i + 34] = 18; + blob_bottom_->mutable_cpu_data()[i + 35] = 11; + } + MKLPoolingLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_->num(), num); + EXPECT_EQ(blob_top_->channels(), channels); + EXPECT_EQ(blob_top_->height(), 4); + EXPECT_EQ(blob_top_->width(), 5); + if (blob_top_vec_.size() > 1) { + EXPECT_EQ(blob_top_mask_->num(), num); + EXPECT_EQ(blob_top_mask_->channels(), channels); + EXPECT_EQ(blob_top_mask_->height(), 4); + EXPECT_EQ(blob_top_mask_->width(), 5); + } + layer.Forward(blob_bottom_vec_, blob_top_vec_); + // Expected output: 2x 2 channels of: + // [35 32 26 27 27] + // [32 33 33 27 27] + // [31 34 34 27 27] + // [36 36 34 18 18] + for (int i = 0; i < 20 * num * channels; i += 20) { + EXPECT_EQ(blob_top_->cpu_data()[i + 0], 35); + EXPECT_EQ(blob_top_->cpu_data()[i + 1], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 2], 26); + EXPECT_EQ(blob_top_->cpu_data()[i + 3], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 4], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 5], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 6], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 7], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 8], 27); + 
EXPECT_EQ(blob_top_->cpu_data()[i + 9], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 10], 31); + EXPECT_EQ(blob_top_->cpu_data()[i + 11], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 12], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 13], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 14], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 15], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 16], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 17], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 18], 18); + EXPECT_EQ(blob_top_->cpu_data()[i + 19], 18); + } + if (blob_top_vec_.size() > 1) { + // [ 1 8 4 17 17] + // [ 8 21 21 17 17] + // [13 27 27 17 17] + // [32 32 27 35 35] + for (int i = 0; i < 20 * num * channels; i += 20) { + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 0); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 7); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 3); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 3], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 4], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 5], 7); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 6], 20); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 7], 20); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 8], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 9], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 10], 12); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 11], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 12], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 13], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 14], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 15], 31); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 16], 31); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 17], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 18], 34); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 19], 34); + } + } + } + // Test for rectangular pooling layer with kernel_w > kernel_h + void TestForwardRectWide() { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(2); + 
pooling_param->set_kernel_w(3); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + const int num = 2; + const int channels = 2; + blob_bottom_->Reshape(num, channels, 6, 6); + // Input: 2x 2 channels of: + // [35 1 6 26 19 24] + // [ 3 32 7 21 23 25] + // [31 9 2 22 27 20] + // [ 8 28 33 17 10 15] + // [30 5 34 12 14 16] + // [ 4 36 29 13 18 11] + // (this is generated by magic(6) in MATLAB) + for (int i = 0; i < 36 * num * channels; i += 36) { + blob_bottom_->mutable_cpu_data()[i + 0] = 35; + blob_bottom_->mutable_cpu_data()[i + 1] = 1; + blob_bottom_->mutable_cpu_data()[i + 2] = 6; + blob_bottom_->mutable_cpu_data()[i + 3] = 26; + blob_bottom_->mutable_cpu_data()[i + 4] = 19; + blob_bottom_->mutable_cpu_data()[i + 5] = 24; + blob_bottom_->mutable_cpu_data()[i + 6] = 3; + blob_bottom_->mutable_cpu_data()[i + 7] = 32; + blob_bottom_->mutable_cpu_data()[i + 8] = 7; + blob_bottom_->mutable_cpu_data()[i + 9] = 21; + blob_bottom_->mutable_cpu_data()[i + 10] = 23; + blob_bottom_->mutable_cpu_data()[i + 11] = 25; + blob_bottom_->mutable_cpu_data()[i + 12] = 31; + blob_bottom_->mutable_cpu_data()[i + 13] = 9; + blob_bottom_->mutable_cpu_data()[i + 14] = 2; + blob_bottom_->mutable_cpu_data()[i + 15] = 22; + blob_bottom_->mutable_cpu_data()[i + 16] = 27; + blob_bottom_->mutable_cpu_data()[i + 17] = 20; + blob_bottom_->mutable_cpu_data()[i + 18] = 8; + blob_bottom_->mutable_cpu_data()[i + 19] = 28; + blob_bottom_->mutable_cpu_data()[i + 20] = 33; + blob_bottom_->mutable_cpu_data()[i + 21] = 17; + blob_bottom_->mutable_cpu_data()[i + 22] = 10; + blob_bottom_->mutable_cpu_data()[i + 23] = 15; + blob_bottom_->mutable_cpu_data()[i + 24] = 30; + blob_bottom_->mutable_cpu_data()[i + 25] = 5; + blob_bottom_->mutable_cpu_data()[i + 26] = 34; + blob_bottom_->mutable_cpu_data()[i + 27] = 12; + blob_bottom_->mutable_cpu_data()[i + 28] = 14; + blob_bottom_->mutable_cpu_data()[i + 29] = 16; + blob_bottom_->mutable_cpu_data()[i + 30] = 4; + blob_bottom_->mutable_cpu_data()[i + 
31] = 36; + blob_bottom_->mutable_cpu_data()[i + 32] = 29; + blob_bottom_->mutable_cpu_data()[i + 33] = 13; + blob_bottom_->mutable_cpu_data()[i + 34] = 18; + blob_bottom_->mutable_cpu_data()[i + 35] = 11; + } + MKLPoolingLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_->num(), num); + EXPECT_EQ(blob_top_->channels(), channels); + EXPECT_EQ(blob_top_->height(), 5); + EXPECT_EQ(blob_top_->width(), 4); + if (blob_top_vec_.size() > 1) { + EXPECT_EQ(blob_top_mask_->num(), num); + EXPECT_EQ(blob_top_mask_->channels(), channels); + EXPECT_EQ(blob_top_mask_->height(), 5); + EXPECT_EQ(blob_top_mask_->width(), 4); + } + layer.Forward(blob_bottom_vec_, blob_top_vec_); + // Expected output: 2x 2 channels of: + // [35 32 26 26] + // [32 32 27 27] + // [33 33 33 27] + // [34 34 34 17] + // [36 36 34 18] + for (int i = 0; i < 20 * num * channels; i += 20) { + EXPECT_EQ(blob_top_->cpu_data()[i + 0], 35); + EXPECT_EQ(blob_top_->cpu_data()[i + 1], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 2], 26); + EXPECT_EQ(blob_top_->cpu_data()[i + 3], 26); + EXPECT_EQ(blob_top_->cpu_data()[i + 4], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 5], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 6], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 7], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 8], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 9], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 10], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 11], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 12], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 13], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 14], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 15], 17); + EXPECT_EQ(blob_top_->cpu_data()[i + 16], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 17], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 18], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 19], 18); + } + if (blob_top_vec_.size() > 1) { + // [ 1 8 4 4] + // [ 8 8 17 17] + // [21 21 21 17] + // [27 27 27 22] + // [32 32 27 35] + for (int i = 0; 
i < 20 * num * channels; i += 20) { + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 0], 0); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 1], 7); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 2], 3); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 3], 3); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 4], 7); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 5], 7); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 6], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 7], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 8], 20); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 9], 20); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 10], 20); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 11], 16); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 12], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 13], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 14], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 15], 21); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 16], 31); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 17], 31); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 18], 26); + EXPECT_EQ(blob_top_mask_->cpu_data()[i + 19], 34); + } + } + } +}; + +typedef ::testing::Types, + CPUDevice > TestDtypesCPU; +TYPED_TEST_CASE(MKLPoolingLayerTest, TestDtypesCPU); + +TYPED_TEST(MKLPoolingLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + MKLPoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 2); +} + +TYPED_TEST(MKLPoolingLayerTest, TestSetupPadded) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + 
pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + MKLPoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 4); + EXPECT_EQ(this->blob_top_->width(), 3); +} + +TYPED_TEST(MKLPoolingLayerTest, TestSetupGlobalPooling) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_global_pooling(true); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + MKLPoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +/* +TYPED_TEST(MKLPoolingLayerTest, PrintBackward) { + LayerParameter layer_param; + layer_param.set_kernelsize(3); + layer_param.set_stride(2); + layer_param.set_pool(LayerParameter_PoolMethod_MAX); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + cout << "bottom data " << i << " " << this->blob_bottom_->cpu_data()[i] << endl; + } + for (int i = 0; i < this->blob_top_->count(); ++i) { + cout << "top data " << i << " " << this->blob_top_->cpu_data()[i] << endl; + } + + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = i; + } + layer.Backward(this->blob_top_vec_, true, this->blob_bottom_vec_); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + cout << 
"bottom diff " << i << " " << this->blob_bottom_->cpu_diff()[i] << endl; + } +} +*/ + +TYPED_TEST(MKLPoolingLayerTest, TestForwardMax) { + this->TestForwardSquare(); + this->TestForwardRectHigh(); + this->TestForwardRectWide(); +} + +TYPED_TEST(MKLPoolingLayerTest, TestForwardMaxTopMask) { + typedef typename TypeParam::Dtype Dtype; + this->blob_top_vec_.push_back(reinterpret_cast* > + (this->blob_top_mask_)); + this->TestForwardSquare(); + this->TestForwardRectHigh(); + this->TestForwardRectWide(); +} + +TYPED_TEST(MKLPoolingLayerTest, TestGradientMax) { + typedef typename TypeParam::Dtype Dtype; + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + MKLPoolingLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + } +} + +TYPED_TEST(MKLPoolingLayerTest, TestForwardMaxPadded) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + this->blob_bottom_->Reshape(1, 1, 3, 3); + // Input: + // [ 1 2 4 ] + // [ 2 3 2 ] + // [ 4 2 1 ] + this->blob_bottom_->mutable_cpu_data()[0] = 1; + this->blob_bottom_->mutable_cpu_data()[1] = 2; + this->blob_bottom_->mutable_cpu_data()[2] = 4; + this->blob_bottom_->mutable_cpu_data()[3] = 2; + this->blob_bottom_->mutable_cpu_data()[4] = 3; + this->blob_bottom_->mutable_cpu_data()[5] = 2; + this->blob_bottom_->mutable_cpu_data()[6] = 
4; + this->blob_bottom_->mutable_cpu_data()[7] = 2; + this->blob_bottom_->mutable_cpu_data()[8] = 1; + MKLPoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Dtype epsilon = 1e-8; + // Output: + // [ 1 4 4 ] + // [ 4 4 4 ] + // [ 4 4 1 ] + EXPECT_NEAR(this->blob_top_->cpu_data()[0], 1, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[2], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[4], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[6], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[8], 1, epsilon); +} + +#if 0 +TYPED_TEST(MKLPoolingLayerTest, TestGradientMaxTopMask) { + typedef typename TypeParam::Dtype Dtype; + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + this->blob_top_vec_.push_back(reinterpret_cast* > + (this->blob_top_mask_)); + MKLPoolingLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + this->blob_top_vec_.pop_back(); + } + } +} +#endif + +#if 0 // Average Pooling +TYPED_TEST(MKLPoolingLayerTest, TestForwardAve) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + 
PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(1); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + this->blob_bottom_->Reshape(1, 1, 3, 3); + FillerParameter filler_param; + filler_param.set_value(Dtype(2)); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + MKLPoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Dtype epsilon = 1e-5; + EXPECT_NEAR(this->blob_top_->cpu_data()[0], 8.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[2], 8.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[4], 2.0 , epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[6], 8.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4.0 / 3, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[8], 8.0 / 9, epsilon); +} + +TYPED_TEST(MKLPoolingLayerTest, TestGradientAve) { + typedef typename TypeParam::Dtype Dtype; + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + MKLPoolingLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + 
this->blob_top_vec_); + } + } +} + +TYPED_TEST(MKLPoolingLayerTest, TestGradientAvePadded) { + typedef typename TypeParam::Dtype Dtype; + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pad(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + MKLPoolingLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + } +} +#endif +} // namespace caffe +#endif // #ifdef MKL2017_SUPPORTED diff --git a/src/caffe/test/test_mkl_split_layer.cpp b/src/caffe/test/test_mkl_split_layer.cpp new file mode 100644 index 00000000000..0dd6a5c5411 --- /dev/null +++ b/src/caffe/test/test_mkl_split_layer.cpp @@ -0,0 +1,1024 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#if defined(MKL2017_SUPPORTED) +#include +#include + +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/mkl_layers.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/insert_splits.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class MKLSplitLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MKLSplitLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 5)), + blob_top_a_(new Blob()), + blob_top_b_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_a_); + blob_top_vec_.push_back(blob_top_b_); + } + virtual ~MKLSplitLayerTest() { + delete blob_bottom_; + delete blob_top_a_; + delete 
blob_top_b_; + } + Blob* const blob_bottom_; + Blob* const blob_top_a_; + Blob* const blob_top_b_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types, + CPUDevice > TestDtypesCPU; +TYPED_TEST_CASE(MKLSplitLayerTest, TestDtypesCPU); + +TYPED_TEST(MKLSplitLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLSplitLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_a_->num(), 2); + EXPECT_EQ(this->blob_top_a_->channels(), 3); + EXPECT_EQ(this->blob_top_a_->height(), 6); + EXPECT_EQ(this->blob_top_a_->width(), 5); + EXPECT_EQ(this->blob_top_b_->num(), 2); + EXPECT_EQ(this->blob_top_b_->channels(), 3); + EXPECT_EQ(this->blob_top_b_->height(), 6); + EXPECT_EQ(this->blob_top_b_->width(), 5); +} + +TYPED_TEST(MKLSplitLayerTest, Test) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLSplitLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + Dtype bottom_value = this->blob_bottom_->cpu_data()[i]; + EXPECT_EQ(bottom_value, this->blob_top_a_->cpu_data()[i]); + EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]); + } +} + +TYPED_TEST(MKLSplitLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLSplitLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + + +class MKLSplitLayerInsertionTest : public ::testing::Test { + protected: + void RunInsertionTest( + const string& input_param_string, const string& output_param_string) { + // Test that InsertSplits called on the proto specified by + // input_param_string results in the proto specified by + // output_param_string. 
+ NetParameter input_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + input_param_string, &input_param)); + NetParameter expected_output_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + output_param_string, &expected_output_param)); + NetParameter actual_output_param; + InsertSplits(input_param, &actual_output_param); + EXPECT_EQ(expected_output_param.DebugString(), + actual_output_param.DebugString()); + // Also test idempotence. + NetParameter double_split_insert_param; + InsertSplits(actual_output_param, &double_split_insert_param); + EXPECT_EQ(actual_output_param.DebugString(), + double_split_insert_param.DebugString()); + } +}; + +TEST_F(MKLSplitLayerInsertionTest, TestNoInsertion1) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunInsertionTest(input_proto, input_proto); +} + +TEST_F(MKLSplitLayerInsertionTest, TestNoInsertion2) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'data_split' " + " type: 'Split' " + " bottom: 'data' " + " top: 'data_split_0' " + " top: 'data_split_1' " + "} " + "layer { " + " name: 'innerprod1' " + " type: 'InnerProduct' " + " bottom: 'data_split_0' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'data_split_1' " + " top: 'innerprod2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1' " + " bottom: 'innerprod2' " + "} "; + this->RunInsertionTest(input_proto, input_proto); +} + +TEST_F(MKLSplitLayerInsertionTest, 
TestNoInsertionImageNet) { + const string& input_proto = + "name: 'CaffeNet' " + "layer { " + " name: 'data' " + " type: 'Data' " + " data_param { " + " source: '/home/jiayq/Data/ILSVRC12/train-leveldb' " + " batch_size: 256 " + " } " + " transform_param { " + " crop_size: 227 " + " mirror: true " + " mean_file: '/home/jiayq/Data/ILSVRC12/image_mean.binaryproto' " + " } " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'conv1' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 96 " + " kernel_size: 11 " + " stride: 4 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'data' " + " top: 'conv1' " + "} " + "layer { " + " name: 'relu1' " + " type: 'ReLU' " + " bottom: 'conv1' " + " top: 'conv1' " + "} " + "layer { " + " name: 'pool1' " + " type: 'Pooling' " + " pooling_param { " + " pool: MAX " + " kernel_size: 3 " + " stride: 2 " + " } " + " bottom: 'conv1' " + " top: 'pool1' " + "} " + "layer { " + " name: 'norm1' " + " type: 'LRN' " + " lrn_param { " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool1' " + " top: 'norm1' " + "} " + "layer { " + " name: 'conv2' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 256 " + " group: 2 " + " kernel_size: 5 " + " pad: 2 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. 
" + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'norm1' " + " top: 'conv2' " + "} " + "layer { " + " name: 'relu2' " + " type: 'ReLU' " + " bottom: 'conv2' " + " top: 'conv2' " + "} " + "layer { " + " name: 'pool2' " + " type: 'Pooling' " + " pooling_param { " + " pool: MAX " + " kernel_size: 3 " + " stride: 2 " + " } " + " bottom: 'conv2' " + " top: 'pool2' " + "} " + "layer { " + " name: 'norm2' " + " type: 'LRN' " + " lrn_param { " + " local_size: 5 " + " alpha: 0.0001 " + " beta: 0.75 " + " } " + " bottom: 'pool2' " + " top: 'norm2' " + "} " + "layer { " + " name: 'conv3' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 384 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0. " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'norm2' " + " top: 'conv3' " + "} " + "layer { " + " name: 'relu3' " + " type: 'ReLU' " + " bottom: 'conv3' " + " top: 'conv3' " + "} " + "layer { " + " name: 'conv4' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 384 " + " group: 2 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. 
" + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'conv3' " + " top: 'conv4' " + "} " + "layer { " + " name: 'relu4' " + " type: 'ReLU' " + " bottom: 'conv4' " + " top: 'conv4' " + "} " + "layer { " + " name: 'conv5' " + " type: 'Convolution' " + " convolution_param { " + " num_output: 256 " + " group: 2 " + " kernel_size: 3 " + " pad: 1 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'conv4' " + " top: 'conv5' " + "} " + "layer { " + " name: 'relu5' " + " type: 'ReLU' " + " bottom: 'conv5' " + " top: 'conv5' " + "} " + "layer { " + " name: 'pool5' " + " type: 'Pooling' " + " pooling_param { " + " kernel_size: 3 " + " pool: MAX " + " stride: 2 " + " } " + " bottom: 'conv5' " + " top: 'pool5' " + "} " + "layer { " + " name: 'fc6' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'pool5' " + " top: 'fc6' " + "} " + "layer { " + " name: 'relu6' " + " type: 'ReLU' " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layer { " + " name: 'drop6' " + " type: 'Dropout' " + " dropout_param { " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc6' " + " top: 'fc6' " + "} " + "layer { " + " name: 'fc7' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 4096 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.005 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 1. 
" + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'fc6' " + " top: 'fc7' " + "} " + "layer { " + " name: 'relu7' " + " type: 'ReLU' " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layer { " + " name: 'drop7' " + " type: 'Dropout' " + " dropout_param { " + " dropout_ratio: 0.5 " + " } " + " bottom: 'fc7' " + " top: 'fc7' " + "} " + "layer { " + " name: 'fc8' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 1000 " + " weight_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " bias_filler { " + " type: 'constant' " + " value: 0 " + " } " + " } " + " param { " + " lr_mult: 1 " + " decay_mult: 1 " + " } " + " param { " + " lr_mult: 2 " + " decay_mult: 0 " + " } " + " bottom: 'fc7' " + " top: 'fc8' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'fc8' " + " bottom: 'label' " + "} "; + this->RunInsertionTest(input_proto, input_proto); +} + +TEST_F(MKLSplitLayerInsertionTest, TestNoInsertionWithInPlace) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod' " + "} " + "layer { " + " name: 'relu' " + " type: 'ReLU' " + " bottom: 'innerprod' " + " top: 'innerprod' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'innerprod' " + " bottom: 'label' " + "} "; + this->RunInsertionTest(input_proto, input_proto); +} + +TEST_F(MKLSplitLayerInsertionTest, TestLossInsertion) { + const string& input_proto = + "name: 'UnsharedWeightsNetwork' " + "force_backward: true " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " 
top: 'data' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { name: 'unsharedweights1' } " + " bottom: 'data' " + " top: 'innerproduct1' " + " loss_weight: 2.5 " + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { name: 'unsharedweights2' } " + " bottom: 'data' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerproduct1' " + " bottom: 'innerproduct2' " + "} "; + const string& expected_output_proto = + "name: 'UnsharedWeightsNetwork' " + "force_backward: true " + "layer { " + " name: 'data' " + " type: 'DummyData' " + " dummy_data_param { " + " num: 5 " + " channels: 2 " + " height: 3 " + " width: 4 " + " data_filler { " + " type: 'gaussian' " + " std: 0.01 " + " } " + " } " + " top: 'data' " + "} " + "layer { " + " name: 'data_data_0_split' " + " type: 'Split' " + " bottom: 'data' " + " top: 'data_data_0_split_0' " + " top: 'data_data_0_split_1' " + "} " + "layer { " + " name: 'innerproduct1' " + " type: 'InnerProduct' " + " inner_product_param { " + " num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { name: 'unsharedweights1' } " + " bottom: 'data_data_0_split_0' " + " top: 'innerproduct1' " + "} " + "layer { " + " name: 'innerproduct1_innerproduct1_0_split' " + " type: 'Split' " + " bottom: 'innerproduct1' " + " top: 'innerproduct1_innerproduct1_0_split_0' " + " top: 'innerproduct1_innerproduct1_0_split_1' " + " loss_weight: 2.5 " + " loss_weight: 0 " + "} " + "layer { " + " name: 'innerproduct2' " + " type: 'InnerProduct' " + " inner_product_param { " + " 
num_output: 10 " + " bias_term: false " + " weight_filler { " + " type: 'gaussian' " + " std: 10 " + " } " + " } " + " param { name: 'unsharedweights2' } " + " bottom: 'data_data_0_split_1' " + " top: 'innerproduct2' " + "} " + "layer { " + " name: 'loss' " + " type: 'EuclideanLoss' " + " bottom: 'innerproduct1_innerproduct1_0_split_1' " + " bottom: 'innerproduct2' " + "} "; + this->RunInsertionTest(input_proto, expected_output_proto); +} + +TEST_F(MKLSplitLayerInsertionTest, TestInsertion) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod1' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod2' " + "} " + "layer { " + " name: 'innerprod3' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod3' " + "} " + "layer { " + " name: 'loss1' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1' " + " bottom: 'innerprod2' " + "} " + "layer { " + " name: 'loss2' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod2' " + " bottom: 'innerprod3' " + "} "; + const string& expected_output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'data_data_0_split' " + " type: 'Split' " + " bottom: 'data' " + " top: 'data_data_0_split_0' " + " top: 'data_data_0_split_1' " + " top: 'data_data_0_split_2' " + "} " + "layer { " + " name: 'innerprod1' " + " type: 'InnerProduct' " + " bottom: 'data_data_0_split_0' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'data_data_0_split_1' " + " top: 'innerprod2' " + "} " + "layer { " + " name: 'innerprod2_innerprod2_0_split' " + " type: 'Split' " + " bottom: 'innerprod2' " + " top: 
'innerprod2_innerprod2_0_split_0' " + " top: 'innerprod2_innerprod2_0_split_1' " + "} " + "layer { " + " name: 'innerprod3' " + " type: 'InnerProduct' " + " bottom: 'data_data_0_split_2' " + " top: 'innerprod3' " + "} " + "layer { " + " name: 'loss1' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1' " + " bottom: 'innerprod2_innerprod2_0_split_0' " + "} " + "layer { " + " name: 'loss2' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod2_innerprod2_0_split_1' " + " bottom: 'innerprod3' " + "} "; + this->RunInsertionTest(input_proto, expected_output_proto); +} + +TEST_F(MKLSplitLayerInsertionTest, TestInsertionTwoTop) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod1' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'label' " + " top: 'innerprod2' " + "} " + "layer { " + " name: 'innerprod3' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod3' " + "} " + "layer { " + " name: 'innerprod4' " + " type: 'InnerProduct' " + " bottom: 'label' " + " top: 'innerprod4' " + "} " + "layer { " + " name: 'loss1' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1' " + " bottom: 'innerprod3' " + "} " + "layer { " + " name: 'loss2' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod2' " + " bottom: 'innerprod4' " + "} "; + const string& expected_output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'data_data_0_split' " + " type: 'Split' " + " bottom: 'data' " + " top: 'data_data_0_split_0' " + " top: 'data_data_0_split_1' " + "} " + "layer { " + " name: 'label_data_1_split' " + " type: 'Split' " + " bottom: 'label' " + " top: 'label_data_1_split_0' " + " top: 'label_data_1_split_1' " + "} " + "layer { " + " 
name: 'innerprod1' " + " type: 'InnerProduct' " + " bottom: 'data_data_0_split_0' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'label_data_1_split_0' " + " top: 'innerprod2' " + "} " + "layer { " + " name: 'innerprod3' " + " type: 'InnerProduct' " + " bottom: 'data_data_0_split_1' " + " top: 'innerprod3' " + "} " + "layer { " + " name: 'innerprod4' " + " type: 'InnerProduct' " + " bottom: 'label_data_1_split_1' " + " top: 'innerprod4' " + "} " + "layer { " + " name: 'loss1' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1' " + " bottom: 'innerprod3' " + "} " + "layer { " + " name: 'loss2' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod2' " + " bottom: 'innerprod4' " + "} "; + this->RunInsertionTest(input_proto, expected_output_proto); +} + +TEST_F(MKLSplitLayerInsertionTest, TestWithInPlace) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'innerprod1' " + " type: 'InnerProduct' " + " bottom: 'data' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'relu1' " + " type: 'ReLU' " + " bottom: 'innerprod1' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'innerprod1' " + " top: 'innerprod2' " + "} " + "layer { " + " name: 'loss1' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1' " + " bottom: 'label' " + "} " + "layer { " + " name: 'loss2' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod2' " + " bottom: 'data' " + "} "; + const string& expected_output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " name: 'data_data_0_split' " + " type: 'Split' " + " bottom: 'data' " + " top: 'data_data_0_split_0' " + " top: 'data_data_0_split_1' " + "} " + "layer { " + " name: 'innerprod1' " + " type: 'InnerProduct' " + 
" bottom: 'data_data_0_split_0' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'relu1' " + " type: 'ReLU' " + " bottom: 'innerprod1' " + " top: 'innerprod1' " + "} " + "layer { " + " name: 'innerprod1_relu1_0_split' " + " type: 'Split' " + " bottom: 'innerprod1' " + " top: 'innerprod1_relu1_0_split_0' " + " top: 'innerprod1_relu1_0_split_1' " + "} " + "layer { " + " name: 'innerprod2' " + " type: 'InnerProduct' " + " bottom: 'innerprod1_relu1_0_split_0' " + " top: 'innerprod2' " + "} " + "layer { " + " name: 'loss1' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod1_relu1_0_split_1' " + " bottom: 'label' " + "} " + "layer { " + " name: 'loss2' " + " type: 'EuclideanLoss' " + " bottom: 'innerprod2' " + " bottom: 'data_data_0_split_1' " + "} "; + this->RunInsertionTest(input_proto, expected_output_proto); +} + +} // namespace caffe +#endif // #if defined(MKL2017_SUPPORTED) diff --git a/src/caffe/test/test_mkldnn_batch_norm_layer.cpp b/src/caffe/test/test_mkldnn_batch_norm_layer.cpp new file mode 100644 index 00000000000..5caf1d9bd4d --- /dev/null +++ b/src/caffe/test/test_mkldnn_batch_norm_layer.cpp @@ -0,0 +1,180 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#if defined(MKLDNN_SUPPORTED) +#include +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/batch_norm_layer.hpp" +#include "caffe/layers/mkldnn_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +#define MB 2 +#define IC 4 +#define IH 5 +#define IW 5 +#define LS 3 + + +namespace caffe { + + template + class MKLDNNBatchNormLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: + MKLDNNBatchNormLayerTest() + : blob_bottom_(new Blob(MB, IC, IH, IW)), + blob_top_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~MKLDNNBatchNormLayerTest() {delete blob_bottom_; delete blob_top_;} + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + }; + + typedef ::testing::Types/*, + CPUDevice*/ > TestDtypesCPU; + TYPED_TEST_CASE(MKLDNNBatchNormLayerTest, TestDtypesCPU); + + TYPED_TEST(MKLDNNBatchNormLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + + MKLDNNBatchNormLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + // Test mean + int num = this->blob_bottom_->num(); + int channels = this->blob_bottom_->channels(); + int height = this->blob_bottom_->height(); + int width = this->blob_bottom_->width(); + + for (int j = 0; j < channels; ++j) { + Dtype sum = 0, var = 0; + for (int i = 0; i < num; ++i) { + for ( int k = 0; k < height; ++k ) { + for ( int l = 0; l < width; ++l ) { + Dtype data = this->blob_top_->data_at(i, j, k, l); + sum += data; + var += data * data; + } + } + } + sum /= 
height * width * num; + var /= height * width * num; + + const Dtype kErrorBound = 0.001; + // expect zero mean + EXPECT_NEAR(0, sum, kErrorBound); + // expect unit variance + EXPECT_NEAR(1, var, kErrorBound); + } + } + + TYPED_TEST(MKLDNNBatchNormLayerTest, TestForwardInplace) { + typedef typename TypeParam::Dtype Dtype; + Blob blob_inplace(MB, IC, IH, IW); + vector*> blob_bottom_vec; + vector*> blob_top_vec; + LayerParameter layer_param; + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(&blob_inplace); + blob_bottom_vec.push_back(&blob_inplace); + blob_top_vec.push_back(&blob_inplace); + + MKLDNNBatchNormLayer layer(layer_param); + layer.SetUp(blob_bottom_vec, blob_top_vec); + layer.Forward(blob_bottom_vec, blob_top_vec); + + // Test mean + int num = blob_inplace.num(); + int channels = blob_inplace.channels(); + int height = blob_inplace.height(); + int width = blob_inplace.width(); + + for (int j = 0; j < channels; ++j) { + Dtype sum = 0, var = 0; + for (int i = 0; i < num; ++i) { + for ( int k = 0; k < height; ++k ) { + for ( int l = 0; l < width; ++l ) { + Dtype data = blob_inplace.data_at(i, j, k, l); + sum += data; + var += data * data; + } + } + } + sum /= height * width * num; + var /= height * width * num; + + const Dtype kErrorBound = 0.001; + // expect zero mean + EXPECT_NEAR(0, sum, kErrorBound); + // expect unit variance + EXPECT_NEAR(1, var, kErrorBound); + } + } + + TYPED_TEST(MKLDNNBatchNormLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + + MKLDNNBatchNormLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + + +} // namespace caffe +#endif // #if defined(MKLDNN_SUPPORTED) diff --git a/src/caffe/test/test_mkldnn_concat_layer.cpp b/src/caffe/test/test_mkldnn_concat_layer.cpp new file mode 100644 index 00000000000..64af0207ee6 --- /dev/null +++ 
b/src/caffe/test/test_mkldnn_concat_layer.cpp @@ -0,0 +1,194 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#if defined(MKLDNN_SUPPORTED) +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/mkldnn_layers.hpp" +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class MKLDNNConcatLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MKLDNNConcatLayerTest() + : blob_bottom_0_(new Blob(2, 3, 6, 5)), + blob_bottom_1_(new Blob(2, 5, 6, 5)), + blob_bottom_2_(new Blob(2, 7, 6, 5)), + blob_top_(new Blob()) {} + virtual void SetUp() { + // fill the values + shared_ptr > filler; + FillerParameter filler_param; + filler_param.set_value(1.); + filler.reset(new ConstantFiller(filler_param)); + filler->Fill(this->blob_bottom_0_); + filler_param.set_value(2.); + filler.reset(new ConstantFiller(filler_param)); + filler->Fill(this->blob_bottom_1_); + filler_param.set_value(3.); + filler.reset(new ConstantFiller(filler_param)); + filler->Fill(this->blob_bottom_2_); + blob_bottom_vec_0_.push_back(blob_bottom_0_); + blob_bottom_vec_0_.push_back(blob_bottom_1_); + blob_bottom_vec_1_.push_back(blob_bottom_0_); + blob_bottom_vec_1_.push_back(blob_bottom_2_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~MKLDNNConcatLayerTest() { + delete blob_bottom_0_; delete blob_bottom_1_; + delete blob_bottom_2_; delete blob_top_; + } + + Blob* const blob_bottom_0_; + Blob* const blob_bottom_1_; + Blob* const blob_bottom_2_; + Blob* const blob_top_; + vector*> blob_bottom_vec_0_, blob_bottom_vec_1_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types > TestDtypesCPU; +TYPED_TEST_CASE(MKLDNNConcatLayerTest, TestDtypesCPU); + +TYPED_TEST(MKLDNNConcatLayerTest, TestSetupChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLDNNConcatLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); + 
EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0_->num()); + EXPECT_EQ(this->blob_top_->channels(), + this->blob_bottom_0_->channels() + this->blob_bottom_1_->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0_->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); +} + +TYPED_TEST(MKLDNNConcatLayerTest, TestSetupChannelsNegativeIndexing) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLDNNConcatLayer layer(layer_param); + // "channels" index is the third one from the end -- test negative indexing + // by setting axis to -3 and checking that we get the same results as above in + // TestSetupChannels. + layer_param.mutable_concat_param()->set_axis(-3); + layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_0_->num()); + EXPECT_EQ(this->blob_top_->channels(), + this->blob_bottom_0_->channels() + this->blob_bottom_1_->channels()); + EXPECT_EQ(this->blob_top_->height(), this->blob_bottom_0_->height()); + EXPECT_EQ(this->blob_top_->width(), this->blob_bottom_0_->width()); +} + +TYPED_TEST(MKLDNNConcatLayerTest, TestForwardTrivial) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLDNNConcatLayer layer(layer_param); + this->blob_bottom_vec_0_.resize(1); + layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_0_, this->blob_top_vec_); + for (int i = 0; i < this->blob_bottom_0_->count(); ++i) { + EXPECT_EQ(this->blob_bottom_0_->cpu_data()[i], + this->blob_top_->cpu_data()[i]); + } +} + +TYPED_TEST(MKLDNNConcatLayerTest, TestForwardChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLDNNConcatLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_0_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_0_, this->blob_top_vec_); + for (int n = 0; n < this->blob_top_->num(); ++n) { + for (int c = 0; c < 
this->blob_bottom_0_->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + EXPECT_EQ(this->blob_top_->data_at(n, c, h, w), + this->blob_bottom_vec_0_[0]->data_at(n, c, h, w)); + } + } + } + for (int c = 0; c < this->blob_bottom_1_->channels(); ++c) { + for (int h = 0; h < this->blob_top_->height(); ++h) { + for (int w = 0; w < this->blob_top_->width(); ++w) { + EXPECT_EQ(this->blob_top_->data_at(n, c + 3, h, w), + this->blob_bottom_vec_0_[1]->data_at(n, c, h, w)); + } + } + } + } +} + +TYPED_TEST(MKLDNNConcatLayerTest, TestGradientTrivial) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLDNNConcatLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + this->blob_bottom_vec_0_.resize(1); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_0_, + this->blob_top_vec_); +} + +TYPED_TEST(MKLDNNConcatLayerTest, TestGradientChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLDNNConcatLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradient(&layer, this->blob_bottom_vec_0_, + this->blob_top_vec_); +} + +TYPED_TEST(MKLDNNConcatLayerTest, TestGradientChannelsBottomOneOnly) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLDNNConcatLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradient(&layer, this->blob_bottom_vec_0_, + this->blob_top_vec_, 1); +} + +} // namespace caffe +#endif // #if defined(MKL2017_SUPPORTED) diff --git a/src/caffe/test/test_mkldnn_convolution_layer.cpp b/src/caffe/test/test_mkldnn_convolution_layer.cpp new file mode 100644 index 00000000000..551ab7bd270 --- /dev/null +++ b/src/caffe/test/test_mkldnn_convolution_layer.cpp @@ -0,0 +1,1051 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The 
Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef MKLDNN_SUPPORTED +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/mkldnn_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +// Reference convolution for checking results: +// accumulate through explicit loops over input, output, and filters. +template +void caffe_conv(const Blob* in, ConvolutionParameter* conv_param, + const vector > >& weights, + Blob* out) { + const bool has_depth = (out->num_axes() == 5); + if (!has_depth) { CHECK_EQ(4, out->num_axes()); } + // Kernel size, stride, and pad + int kernel_h, kernel_w; + if (conv_param->has_kernel_h() || conv_param->has_kernel_w()) { + kernel_h = conv_param->kernel_h(); + kernel_w = conv_param->kernel_w(); + } else { + kernel_h = kernel_w = conv_param->kernel_size(0); + } + int pad_h, pad_w; + if (conv_param->has_pad_h() || conv_param->has_pad_w()) { + pad_h = conv_param->pad_h(); + pad_w = conv_param->pad_w(); + } else { + pad_h = pad_w = conv_param->pad_size() ? conv_param->pad(0) : 0; + } + int stride_h, stride_w; + if (conv_param->has_stride_h() || conv_param->has_stride_w()) { + stride_h = conv_param->stride_h(); + stride_w = conv_param->stride_w(); + } else { + stride_h = stride_w = conv_param->stride_size() ? conv_param->stride(0) : 1; + } + int dilation_h, dilation_w; + dilation_h = dilation_w = conv_param->dilation_size() ? 
+ conv_param->dilation(0) : 1; + int kernel_d, pad_d, stride_d, dilation_d; + if (has_depth) { + kernel_d = kernel_h; + stride_d = stride_h; + pad_d = pad_h; + dilation_d = dilation_h; + } else { + kernel_d = stride_d = dilation_d = 1; + pad_d = 0; + } + // Groups + int groups = conv_param->group(); + int o_g = out->shape(1) / groups; + int k_g = in->shape(1) / groups; + int o_head, k_head; + // Convolution + vector weight_offset(4 + has_depth); + vector in_offset(4 + has_depth); + vector out_offset(4 + has_depth); + Dtype* out_data = out->mutable_cpu_data(); + for (int n = 0; n < out->shape(0); n++) { + for (int g = 0; g < groups; g++) { + o_head = o_g * g; + k_head = k_g * g; + for (int o = 0; o < o_g; o++) { + for (int k = 0; k < k_g; k++) { + for (int z = 0; z < (has_depth ? out->shape(2) : 1); z++) { + for (int y = 0; y < out->shape(2 + has_depth); y++) { + for (int x = 0; x < out->shape(3 + has_depth); x++) { + for (int r = 0; r < kernel_d; r++) { + for (int p = 0; p < kernel_h; p++) { + for (int q = 0; q < kernel_w; q++) { + int in_z = z * stride_d - pad_d + r * dilation_d; + int in_y = y * stride_h - pad_h + p * dilation_h; + int in_x = x * stride_w - pad_w + q * dilation_w; + if (in_z >= 0 && in_z < (has_depth ? 
in->shape(2) : 1) + && in_y >= 0 && in_y < in->shape(2 + has_depth) + && in_x >= 0 && in_x < in->shape(3 + has_depth)) { + weight_offset[0] = o + o_head; + weight_offset[1] = k; + if (has_depth) { weight_offset[2] = r; } + weight_offset[2 + has_depth] = p; + weight_offset[3 + has_depth] = q; + in_offset[0] = n; + in_offset[1] = k + k_head; + if (has_depth) { in_offset[2] = in_z; } + in_offset[2 + has_depth] = in_y; + in_offset[3 + has_depth] = in_x; + out_offset[0] = n; + out_offset[1] = o + o_head; + if (has_depth) { out_offset[2] = z; } + out_offset[2 + has_depth] = y; + out_offset[3 + has_depth] = x; + out_data[out->offset(out_offset)] += + in->data_at(in_offset) + * weights[0]->data_at(weight_offset); + } + } + } + } + } + } + } + } + } + } + } + // Bias + if (conv_param->bias_term()) { + const Dtype* bias_data = weights[1]->cpu_data(); + for (int n = 0; n < out->shape(0); n++) { + for (int o = 0; o < out->shape(1); o++) { + for (int z = 0; z < (has_depth ? out->shape(2) : 1); z++) { + for (int y = 0; y < out->shape(2 + has_depth); y++) { + for (int x = 0; x < out->shape(3 + has_depth); x++) { + out_offset[0] = n; + out_offset[1] = o; + if (has_depth) { out_offset[2] = z; } + out_offset[2 + has_depth] = y; + out_offset[3 + has_depth] = x; + out_data[out->offset(out_offset)] += bias_data[o]; + } + } + } + } + } + } + //relu + if (conv_param->relu()){ + for (int n = 0; n < out->shape(0); n++) { + for (int o = 0; o < out->shape(1); o++) { + for (int z = 0; z < (has_depth ? 
out->shape(2) : 1); z++) { + for (int y = 0; y < out->shape(2 + has_depth); y++) { + for (int x = 0; x < out->shape(3 + has_depth); x++) { + out_offset[0] = n; + out_offset[1] = o; + if (has_depth) { out_offset[2] = z; } + out_offset[2 + has_depth] = y; + out_offset[3 + has_depth] = x; + if(out_data[out->offset(out_offset)] < 0) out_data[out->offset(out_offset)] = 0; + } + } + } + } + } + } +} + +template void caffe_conv(const Blob* in, + ConvolutionParameter* conv_param, + const vector > >& weights, + Blob* out); +template void caffe_conv(const Blob* in, + ConvolutionParameter* conv_param, + const vector > >& weights, + Blob* out); + +template +class MKLDNNConvolutionLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + +#define MB 2 +#define IC 8 +#define OC 8 +#define IH 5 +#define IW 5 +#define OH 5 +#define OW 5 +#define KH 3 +#define KW 3 +#define CS 1 +#define GR 2 +#define PD 1 + + protected: + MKLDNNConvolutionLayerTest() + : blob_bottom_(new Blob(MB, IC, IH, IW)), + blob_bottom_2_(new Blob(MB, IC, IH, IW)), + blob_top_(new Blob()), + blob_top_2_(new Blob()) {} + virtual void SetUp() { + // fill the values + FillerParameter filler_param; + filler_param.set_value(1.); + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + filler.Fill(this->blob_bottom_2_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~MKLDNNConvolutionLayerTest() { + delete blob_bottom_; + delete blob_bottom_2_; + delete blob_top_; + delete blob_top_2_; + } + + virtual Blob* MakeReferenceTop(Blob* top) { + this->ref_blob_top_.reset(new Blob()); + this->ref_blob_top_->ReshapeLike(*top); + return this->ref_blob_top_.get(); + } + + Blob* const blob_bottom_; + Blob* const blob_bottom_2_; + Blob* const blob_top_; + Blob* const blob_top_2_; + shared_ptr > ref_blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types +// ,CPUDevice + > TestDtypesCPU; + 
+TYPED_TEST_CASE(MKLDNNConvolutionLayerTest, TestDtypesCPU); + +TYPED_TEST(MKLDNNConvolutionLayerTest, TestSetupMKLDNN) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(KH); + convolution_param->add_stride(CS); + convolution_param->set_num_output(OC); + convolution_param->add_pad(PD); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + shared_ptr > layer( + new MKLDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), MB); + EXPECT_EQ(this->blob_top_->channels(), OC); + EXPECT_EQ(this->blob_top_->height(), OH); + EXPECT_EQ(this->blob_top_->width(), OW); + EXPECT_EQ(this->blob_top_2_->num(), MB); + EXPECT_EQ(this->blob_top_2_->channels(), OC ); + EXPECT_EQ(this->blob_top_2_->height(), OH); + EXPECT_EQ(this->blob_top_2_->width(), OW); + // setting group should not change the shape + convolution_param->set_num_output(OC); + convolution_param->set_group(GR); + layer.reset(new MKLDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), MB); + EXPECT_EQ(this->blob_top_->channels(), OC); + EXPECT_EQ(this->blob_top_->height(), OH); + EXPECT_EQ(this->blob_top_->width(), OW); + EXPECT_EQ(this->blob_top_2_->num(), MB); + EXPECT_EQ(this->blob_top_2_->channels(), OC); + EXPECT_EQ(this->blob_top_2_->height(), OH); + EXPECT_EQ(this->blob_top_2_->width(), OW); +} + +TYPED_TEST(MKLDNNConvolutionLayerTest, TestSetupMKLDNNWithRectangeKernelStridePad) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_kernel_h(4); + convolution_param->set_kernel_w(1); + convolution_param->set_stride_h(3); + 
convolution_param->set_stride_w(1); + convolution_param->set_num_output(OC); + convolution_param->set_pad_h(2); + convolution_param->set_pad_w(1); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + shared_ptr > layer( + new MKLDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(convolution_param->kernel_h(), 4); + EXPECT_EQ(layer->GetKernelHeight(), 4); + EXPECT_EQ(convolution_param->kernel_w(), 1); + EXPECT_EQ(layer->GetKernelWidth(), 1); + EXPECT_EQ(convolution_param->stride_h(), 3); + EXPECT_EQ(layer->GetStrideHeight(), 3); + EXPECT_EQ(convolution_param->stride_w(), 1); + EXPECT_EQ(layer->GetStrideWidth(), 1); + EXPECT_EQ(convolution_param->pad_h(), 2); + EXPECT_EQ(layer->GetPadHeight(), 2); + EXPECT_EQ(convolution_param->pad_w(), 1); + EXPECT_EQ(layer->GetPadWidth(), 1); + // setting group should not change the shape + convolution_param->set_num_output(OC); + convolution_param->set_group(GR); + layer.reset(new MKLDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(convolution_param->kernel_h(), 4); + EXPECT_EQ(layer->GetKernelHeight(), 4); + EXPECT_EQ(convolution_param->kernel_w(), 1); + EXPECT_EQ(layer->GetKernelWidth(), 1); + EXPECT_EQ(convolution_param->stride_h(), 3); + EXPECT_EQ(layer->GetStrideHeight(), 3); + EXPECT_EQ(convolution_param->stride_w(), 1); + EXPECT_EQ(layer->GetStrideWidth(), 1); + EXPECT_EQ(convolution_param->pad_h(), 2); + EXPECT_EQ(layer->GetPadHeight(), 2); + EXPECT_EQ(convolution_param->pad_w(), 1); + EXPECT_EQ(layer->GetPadWidth(), 1); +} + +TYPED_TEST(MKLDNNConvolutionLayerTest, TestSimpleConvolutionMKLDNN) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + 
layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(KH); + convolution_param->add_stride(CS); + convolution_param->set_num_output(OC); + convolution_param->add_pad(PD); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new MKLDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. + const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } + +#if 0 // TODO: improve conv so that it runs on all buffers in bottom vector + caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_2_)); + top_data = this->blob_top_2_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +#endif +} + +TYPED_TEST(MKLDNNConvolutionLayerTest, TestSimpleConvolutionReLUMKLDNN) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(OC); + convolution_param->set_relu(true); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + 
convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new MKLDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. + const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +} + +#if 0 +TYPED_TEST(MKLDNNConvolutionLayerTest, TestDilatedConvolutionMKLDNN) { + typedef typename TypeParam::Dtype Dtype; + vector bottom_shape; + bottom_shape.push_back(2); + bottom_shape.push_back(3); + bottom_shape.push_back(8); + bottom_shape.push_back(7); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + this->blob_bottom_vec_[i]->Reshape(bottom_shape); + } + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_dilation(2); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new MKLDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. 
+ const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +#if 0 // TODO: improve conv so that it runs on all buffers in bottom vector + caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_2_)); + top_data = this->blob_top_2_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +#endif +} +#endif + +#if 0 +TYPED_TEST(MKLDNNConvolutionLayerTest, Test0DConvolutionMKLDNN) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + const int kNumOutput = 3; + convolution_param->set_num_output(kNumOutput); + convolution_param->set_axis(3); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + shared_ptr > layer( + new MKLDNNConvolutionLayer(layer_param)); + vector top_shape = this->blob_bottom_->shape(); + top_shape[3] = kNumOutput; + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(top_shape, this->blob_top_->shape()); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. 
+ vector weight_offset(2); + const Blob* weight = layer->blobs()[0].get(); + const Blob* bias = layer->blobs()[1].get(); + const int num = this->blob_top_->count(3); + const int dim = this->blob_top_->shape(3); + const int bottom_dim = this->blob_bottom_->shape(3); + for (int n = 0; n < num; ++n) { + for (int d = 0; d < dim; ++d) { + weight_offset[0] = d; + Dtype value = bias->cpu_data()[d]; + for (int bottom_d = 0; bottom_d < bottom_dim; ++bottom_d) { + weight_offset[1] = bottom_d; + value += weight->data_at(weight_offset) * + this->blob_bottom_->cpu_data()[n * bottom_dim + bottom_d]; + } + EXPECT_NEAR(value, this->blob_top_->cpu_data()[n * dim + d], 1e-4); + } + } +} +#endif + +#if 0 +TYPED_TEST(MKLDNNConvolutionLayerTest, TestSimple3DConvolution) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + vector bottom_shape(5); + bottom_shape[0] = this->blob_bottom_vec_[0]->shape(0); + bottom_shape[1] = this->blob_bottom_vec_[0]->shape(1); + bottom_shape[2] = 5; + bottom_shape[3] = this->blob_bottom_vec_[0]->shape(2); + bottom_shape[4] = this->blob_bottom_vec_[0]->shape(3); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + this->blob_bottom_vec_[i]->Reshape(bottom_shape); + filler.Fill(this->blob_bottom_vec_[i]); + } + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + shared_ptr > layer( + new MKLDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against 
reference convolution. + const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } + +#if 0 // TODO: improve conv so that it runs on all buffers in bottom vector + caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_2_)); + top_data = this->blob_top_2_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +#endif +} +#endif + +#if 0 +TYPED_TEST(MKLDNNConvolutionLayerTest, TestDilated3DConvolution) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + vector bottom_shape(5); + bottom_shape[0] = this->blob_bottom_vec_[0]->shape(0); + bottom_shape[1] = this->blob_bottom_vec_[0]->shape(1); + bottom_shape[2] = 6; + bottom_shape[3] = 7; + bottom_shape[4] = 8; + FillerParameter filler_param; + GaussianFiller filler(filler_param); + for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + this->blob_bottom_vec_[i]->Reshape(bottom_shape); + filler.Fill(this->blob_bottom_vec_[i]); + } + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_dilation(2); + convolution_param->set_num_output(4); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + shared_ptr > layer( + new MKLDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + 
layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. + const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } + caffe_conv(this->blob_bottom_2_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_2_)); + top_data = this->blob_top_2_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +} +#endif + +TYPED_TEST(MKLDNNConvolutionLayerTest, Test1x1Convolution) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(1); + convolution_param->add_stride(1); + convolution_param->set_num_output(OC); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new MKLDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. 
+ const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +} + +TYPED_TEST(MKLDNNConvolutionLayerTest, Test1x1ConvolutionReLU) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(1); + convolution_param->add_stride(1); + convolution_param->set_num_output(OC); + convolution_param->set_relu(true); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new MKLDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. 
+ const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +} + +TYPED_TEST(MKLDNNConvolutionLayerTest, TestSimpleConvolutionGroup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(KH); + convolution_param->add_stride(CS); + convolution_param->set_num_output(OC); + convolution_param->set_group(GR); + convolution_param->add_pad(PD); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new MKLDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. 
+ const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +} + +TYPED_TEST(MKLDNNConvolutionLayerTest, TestSimpleConvolutionReLUGroup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(OC); + convolution_param->set_relu(true); + convolution_param->set_group(GR); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + shared_ptr > layer( + new MKLDNNConvolutionLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Check against reference convolution. + const Dtype* top_data; + const Dtype* ref_top_data; + caffe_conv(this->blob_bottom_, convolution_param, layer->blobs(), + this->MakeReferenceTop(this->blob_top_)); + top_data = this->blob_top_->cpu_data(); + ref_top_data = this->ref_blob_top_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], ref_top_data[i], 1e-4); + } +} + +#if 0 +TYPED_TEST(MKLDNNConvolutionLayerTest, TestSobelConvolution) { + // Test separable convolution by computing the Sobel operator + // as a single filter then comparing the result + // as the convolution of two rectangular filters. + typedef typename TypeParam::Dtype Dtype; + // Fill bottoms with identical Gaussian noise. 
+ shared_ptr > filler; + FillerParameter filler_param; + filler_param.set_value(1.); + filler.reset(new GaussianFiller(filler_param)); + filler->Fill(this->blob_bottom_); + this->blob_bottom_2_->CopyFrom(*this->blob_bottom_); + // Compute Sobel G_x operator as 3 x 3 convolution. + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(1); + convolution_param->set_bias_term(false); + shared_ptr > layer( + new MKLDNNConvolutionLayer(layer_param)); + layer->blobs().resize(1); + layer->blobs()[0].reset(new Blob(1, 3, 3, 3)); + Dtype* weights = layer->blobs()[0]->mutable_cpu_data(); + for (int c = 0; c < 3; ++c) { + int i = c * 9; // 3 x 3 filter + weights[i + 0] = -1; + weights[i + 1] = 0; + weights[i + 2] = 1; + weights[i + 3] = -2; + weights[i + 4] = 0; + weights[i + 5] = 2; + weights[i + 6] = -1; + weights[i + 7] = 0; + weights[i + 8] = 1; + } + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Compute Sobel G_x operator as separable 3 x 1 and 1 x 3 convolutions. 
+ // (1) the [1 2 1] column filter + vector*> sep_blob_bottom_vec; + vector*> sep_blob_top_vec; + shared_ptr > blob_sep(new Blob()); + sep_blob_bottom_vec.push_back(this->blob_bottom_2_); + sep_blob_top_vec.push_back(this->blob_top_2_); + convolution_param->clear_kernel_size(); + convolution_param->clear_stride(); + convolution_param->set_kernel_h(3); + convolution_param->set_kernel_w(1); + convolution_param->set_stride_h(2); + convolution_param->set_stride_w(1); + convolution_param->set_num_output(1); + convolution_param->set_bias_term(false); + layer.reset(new MKLDNNConvolutionLayer(layer_param)); + layer->blobs().resize(1); + layer->blobs()[0].reset(new Blob(1, 3, 3, 1)); + Dtype* weights_1 = layer->blobs()[0]->mutable_cpu_data(); + for (int c = 0; c < 3; ++c) { + int i = c * 3; // 3 x 1 filter + weights_1[i + 0] = 1; + weights_1[i + 1] = 2; + weights_1[i + 2] = 1; + } + layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); + layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); + // (2) the [-1 0 1] row filter + blob_sep->CopyFrom(*this->blob_top_2_, false, true); + sep_blob_bottom_vec.clear(); + sep_blob_bottom_vec.push_back(blob_sep.get()); + convolution_param->set_kernel_h(1); + convolution_param->set_kernel_w(3); + convolution_param->set_stride_h(1); + convolution_param->set_stride_w(2); + convolution_param->set_num_output(1); + convolution_param->set_bias_term(false); + layer.reset(new MKLDNNConvolutionLayer(layer_param)); + layer->blobs().resize(1); + layer->blobs()[0].reset(new Blob(1, 1, 1, 3)); + Dtype* weights_2 = layer->blobs()[0]->mutable_cpu_data(); + weights_2[0] = -1; + weights_2[1] = 0; + weights_2[2] = 1; + layer->SetUp(sep_blob_bottom_vec, sep_blob_top_vec); + layer->Forward(sep_blob_bottom_vec, sep_blob_top_vec); + // Test equivalence of full and separable filters. 
+ const Dtype* top_data = this->blob_top_->cpu_data(); + const Dtype* sep_top_data = this->blob_top_2_->cpu_data(); + for (int i = 0; i < this->blob_top_->count(); ++i) { + EXPECT_NEAR(top_data[i], sep_top_data[i], 1e-4); + } +} +#endif + +#if 0 +TYPED_TEST(MKLDNNConvolutionLayerTest, TestNDAgainst2D) { + typedef typename TypeParam::Dtype Dtype; + const int kernel_h = 11; + const int kernel_w = 13; + vector bottom_shape(4); + bottom_shape[0] = 15; + bottom_shape[1] = 18; + bottom_shape[2] = kernel_h * 2; + bottom_shape[3] = kernel_w * 2; + FillerParameter filler_param; + GaussianFiller filler(filler_param); + for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + this->blob_bottom_vec_[i]->Reshape(bottom_shape); + filler.Fill(this->blob_bottom_vec_[i]); + } + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->set_num_output(12); + convolution_param->set_bias_term(false); + convolution_param->set_group(6); + convolution_param->set_kernel_h(kernel_h); + convolution_param->set_kernel_w(kernel_w); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + Blob weights; + Blob top_diff; + // Shape and fill weights and top_diff. 
+ bool copy_diff; + bool reshape; + { + MKLDNNConvolutionLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + top_diff.ReshapeLike(*this->blob_top_); + filler.Fill(&top_diff); + ASSERT_EQ(1, layer.blobs().size()); + copy_diff = false; reshape = true; + weights.CopyFrom(*layer.blobs()[0], copy_diff, reshape); + } + vector propagate_down(1, true); + Blob result_2d; + Blob backward_result_2d; + Blob backward_weight_result_2d; + // Test with 2D im2col + { + caffe_set(this->blob_top_->count(), Dtype(0), + this->blob_top_->mutable_cpu_data()); + caffe_set(this->blob_bottom_->count(), Dtype(0), + this->blob_bottom_->mutable_cpu_diff()); + caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff()); + // Do SetUp and Forward; save Forward result in result_2d. + convolution_param->set_force_nd_im2col(false); + MKLDNNConvolutionLayer layer_2d(layer_param); + layer_2d.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(1, layer_2d.blobs().size()); + copy_diff = false; reshape = false; + layer_2d.blobs()[0]->CopyFrom(weights, copy_diff, reshape); + layer_2d.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + copy_diff = false; reshape = true; + result_2d.CopyFrom(*this->blob_top_, copy_diff, reshape); + // Copy pre-generated top diff into actual top diff; + // do Backward and save result in backward_result_2d. 
+ ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); + caffe_copy(top_diff.count(), top_diff.cpu_data(), + this->blob_top_->mutable_cpu_diff()); + layer_2d.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + copy_diff = true; reshape = true; + backward_result_2d.CopyFrom(*this->blob_bottom_, copy_diff, reshape); + backward_weight_result_2d.CopyFrom(weights, copy_diff, reshape); + } + Blob result_nd; + Blob backward_result_nd; + Blob backward_weight_result_nd; + // Test with ND im2col + { + caffe_set(this->blob_top_->count(), Dtype(0), + this->blob_top_->mutable_cpu_data()); + caffe_set(this->blob_bottom_->count(), Dtype(0), + this->blob_bottom_->mutable_cpu_diff()); + caffe_set(weights.count(), Dtype(0), weights.mutable_cpu_diff()); + // Do SetUp and Forward; save Forward result in result_nd. + convolution_param->set_force_nd_im2col(true); + MKLDNNConvolutionLayer layer_nd(layer_param); + layer_nd.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + ASSERT_EQ(1, layer_nd.blobs().size()); + copy_diff = false; reshape = false; + layer_nd.blobs()[0]->CopyFrom(weights, copy_diff, reshape); + layer_nd.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + copy_diff = false; reshape = true; + result_nd.CopyFrom(*this->blob_top_, copy_diff, reshape); + // Copy pre-generated top diff into actual top diff; + // do Backward and save result in backward_result_nd. 
+ ASSERT_EQ(this->blob_top_->shape(), top_diff.shape()); + caffe_copy(top_diff.count(), top_diff.cpu_data(), + this->blob_top_->mutable_cpu_diff()); + layer_nd.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + copy_diff = true; reshape = true; + backward_result_nd.CopyFrom(*this->blob_bottom_, copy_diff, reshape); + backward_weight_result_nd.CopyFrom(weights, copy_diff, reshape); + } + ASSERT_EQ(result_nd.count(), result_2d.count()); + for (int i = 0; i < result_2d.count(); ++i) { + EXPECT_EQ(result_2d.cpu_data()[i], result_nd.cpu_data()[i]); + } + ASSERT_EQ(backward_result_nd.count(), backward_result_2d.count()); + for (int i = 0; i < backward_result_2d.count(); ++i) { + EXPECT_EQ(backward_result_2d.cpu_diff()[i], + backward_result_nd.cpu_diff()[i]); + } + ASSERT_EQ(backward_weight_result_nd.count(), + backward_weight_result_2d.count()); + for (int i = 0; i < backward_weight_result_2d.count(); ++i) { + EXPECT_EQ(backward_weight_result_2d.cpu_diff()[i], + backward_weight_result_nd.cpu_diff()[i]); + } +} +#endif + +TYPED_TEST(MKLDNNConvolutionLayerTest, DISABLED_TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + +// TODO: improve conv so that it runs on all buffers in bottom vector + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + convolution_param->add_kernel_size(KH); + convolution_param->add_stride(CS); + convolution_param->set_num_output(OC); + convolution_param->add_pad(PD); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + MKLDNNConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +#if 0 +TYPED_TEST(MKLDNNConvolutionLayerTest, TestDilatedGradient) { 
+ typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + vector bottom_shape; + bottom_shape.push_back(2); + bottom_shape.push_back(3); + bottom_shape.push_back(5); + bottom_shape.push_back(6); + for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + this->blob_bottom_vec_[i]->Reshape(bottom_shape); + } + convolution_param->add_kernel_size(3); + convolution_param->add_dilation(2); + convolution_param->set_num_output(2); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + MKLDNNConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} +#endif + +#if 0 +TYPED_TEST(MKLDNNConvolutionLayerTest, TestGradient3D) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + vector bottom_shape(5); + bottom_shape[0] = this->blob_bottom_vec_[0]->shape(0); + bottom_shape[1] = this->blob_bottom_vec_[0]->shape(1); + bottom_shape[2] = 5; + bottom_shape[3] = this->blob_bottom_vec_[0]->shape(2); + bottom_shape[4] = this->blob_bottom_vec_[0]->shape(3); + FillerParameter filler_param; + GaussianFiller filler(filler_param); + for (int i = 0; i < this->blob_bottom_vec_.size(); ++i) { + this->blob_bottom_vec_[i]->Reshape(bottom_shape); + filler.Fill(this->blob_bottom_vec_[i]); + } + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(2); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + MKLDNNConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + 
this->blob_top_vec_); +} +#endif + +TYPED_TEST(MKLDNNConvolutionLayerTest, Test1x1Gradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + this->blob_bottom_vec_.push_back(this->blob_bottom_2_); + this->blob_top_vec_.push_back(this->blob_top_2_); + convolution_param->add_kernel_size(1); + convolution_param->add_stride(1); + convolution_param->set_num_output(2); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + MKLDNNConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(MKLDNNConvolutionLayerTest, TestGradientGroup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_kernel_size(3); + convolution_param->add_stride(2); + convolution_param->set_num_output(2); + convolution_param->set_group(GR); + convolution_param->mutable_weight_filler()->set_type("gaussian"); + convolution_param->mutable_bias_filler()->set_type("gaussian"); + MKLDNNConvolutionLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +} // namespace caffe +#endif // #ifdef MKLDNN_SUPPORTED diff --git a/src/caffe/test/test_mkldnn_eltwise_layer.cpp b/src/caffe/test/test_mkldnn_eltwise_layer.cpp new file mode 100644 index 00000000000..863a696971a --- /dev/null +++ b/src/caffe/test/test_mkldnn_eltwise_layer.cpp @@ -0,0 +1,260 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights 
reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef MKLDNN_SUPPORTED +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" + +#include "caffe/layers/mkldnn_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class MKLDNNEltwiseLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MKLDNNEltwiseLayerTest() + : blob_bottom_a_(new Blob(2, 3, 4, 5)), + blob_bottom_b_(new Blob(2, 3, 4, 5)), + blob_bottom_c_(new Blob(2, 3, 4, 5)), + blob_top_(new Blob()) { + // fill the values + Caffe::set_random_seed(1701); + FillerParameter filler_param; + UniformFiller filler(filler_param); + filler.Fill(this->blob_bottom_a_); + filler.Fill(this->blob_bottom_b_); + filler.Fill(this->blob_bottom_c_); + blob_bottom_vec_.push_back(blob_bottom_a_); + blob_bottom_vec_.push_back(blob_bottom_b_); + blob_bottom_vec_.push_back(blob_bottom_c_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~MKLDNNEltwiseLayerTest() { + delete blob_bottom_a_; + delete blob_bottom_b_; + delete blob_bottom_c_; + delete blob_top_; + } + Blob* const blob_bottom_a_; + Blob* const blob_bottom_b_; + Blob* const blob_bottom_c_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types > TestDtypesCPU; +TYPED_TEST_CASE(MKLDNNEltwiseLayerTest, TestDtypesCPU); + +TYPED_TEST(MKLDNNEltwiseLayerTest, TestSetUp) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + shared_ptr > layer( + new MKLDNNEltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 4); + 
EXPECT_EQ(this->blob_top_->width(), 5); +} + +/* +TYPED_TEST(MKLDNNEltwiseLayerTest, TestProd) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + shared_ptr > layer( + new MKLDNNEltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); + const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); + const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_NEAR(data[i], in_data_a[i] * in_data_b[i] * in_data_c[i], 1e-4); + } +} +*/ + +TYPED_TEST(MKLDNNEltwiseLayerTest, TestSum) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + shared_ptr > layer( + new MKLDNNEltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + //cout << "Debug info: " << "count of top blob: " << count << endl; + const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); + const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); + const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + //cout << "Debug info: " << "data[" << i << "]: " << data[i] << endl; + //cout << "Debug info: " << "in_data_a[" << i << "]: " << in_data_a[i] << endl; + //cout << "Debug info: " << "in_data_b[" << i << "]: " << in_data_b[i] << endl; + //cout << "Debug info: " << 
"in_data_c[" << i << "]: " << in_data_c[i] << endl; + //cout << "Debug info: " << "sum result: " << in_data_a[i] + in_data_b[i] + in_data_c[i] << endl; + //cout << "Debug info: " << "End of sum execution of data[" << i << "]" << endl; + EXPECT_NEAR(data[i], in_data_a[i] + in_data_b[i] + in_data_c[i], 1e-4); + } +} + +TYPED_TEST(MKLDNNEltwiseLayerTest, TestSumCoeff) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + eltwise_param->add_coeff(1); + eltwise_param->add_coeff(-0.5); + eltwise_param->add_coeff(2); + shared_ptr > layer( + new MKLDNNEltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); + const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); + const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_NEAR(data[i], in_data_a[i] - 0.5*in_data_b[i] + 2*in_data_c[i], + 1e-4); + } +} + +/* +TYPED_TEST(MKLDNNEltwiseLayerTest, TestStableProdGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + eltwise_param->set_stable_prod_grad(true); + MKLDNNEltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(MKLDNNEltwiseLayerTest, TestUnstableProdGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + 
eltwise_param->set_operation(EltwiseParameter_EltwiseOp_PROD); + eltwise_param->set_stable_prod_grad(false); + MKLDNNEltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} +*/ +TYPED_TEST(MKLDNNEltwiseLayerTest, TestSumGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + MKLDNNEltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} +/* +TYPED_TEST(MKLDNNEltwiseLayerTest, TestSumCoeffGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_SUM); + eltwise_param->add_coeff(1); + eltwise_param->add_coeff(-0.5); + eltwise_param->add_coeff(2); + MKLDNNEltwiseLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(MKLDNNEltwiseLayerTest, TestMax) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_MAX); + shared_ptr > layer( + new MKLDNNEltwiseLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + const Dtype* in_data_a = this->blob_bottom_a_->cpu_data(); + const Dtype* in_data_b = this->blob_bottom_b_->cpu_data(); + const Dtype* in_data_c = this->blob_bottom_c_->cpu_data(); + for (int i = 
0; i < count; ++i) { + EXPECT_EQ(data[i], + std::max(in_data_a[i], std::max(in_data_b[i], in_data_c[i]))); + } +} + +TYPED_TEST(MKLDNNEltwiseLayerTest, TestMaxGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + EltwiseParameter* eltwise_param = layer_param.mutable_eltwise_param(); + eltwise_param->set_operation(EltwiseParameter_EltwiseOp_MAX); + MKLDNNEltwiseLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-3); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} +*/ +} // namespace caffe +#endif // #ifdef MKLDNN_SUPPORTED diff --git a/src/caffe/test/test_mkldnn_inner_product_layer.cpp b/src/caffe/test/test_mkldnn_inner_product_layer.cpp new file mode 100644 index 00000000000..a45d94b32ab --- /dev/null +++ b/src/caffe/test/test_mkldnn_inner_product_layer.cpp @@ -0,0 +1,332 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef MKLDNN_SUPPORTED +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/mkldnn_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class MKLDNNInnerProductLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MKLDNNInnerProductLayerTest() + : blob_bottom_(new Blob(2, 3, 4, 5)), + blob_bottom_nobatch_(new Blob(1, 2, 3, 4)), + blob_top_(new Blob()) { + // fill the values + FillerParameter filler_param; + UniformFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~MKLDNNInnerProductLayerTest() { + delete blob_bottom_; + delete blob_bottom_nobatch_; + delete blob_top_; + } + Blob* const blob_bottom_; + Blob* const blob_bottom_nobatch_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types +// ,CPUDevice + > TestDtypesCPU; + +TYPED_TEST_CASE(MKLDNNInnerProductLayerTest, TestDtypesCPU); + +TYPED_TEST(MKLDNNInnerProductLayerTest, TestSetupMKLDNN) { + typedef 
typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_); + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + shared_ptr > layer( + new MKLDNNInnerProductLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); + EXPECT_EQ(this->blob_top_->channels(), 10); +} + +/** @brief TestSetUp while toggling tranpose flag + */ +TYPED_TEST(MKLDNNInnerProductLayerTest, TestSetUpTranposeFalse) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_); + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + inner_product_param->set_transpose(false); + shared_ptr > layer( + new MKLDNNInnerProductLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(2, this->blob_top_->num()); + EXPECT_EQ(1, this->blob_top_->height()); + EXPECT_EQ(1, this->blob_top_->width()); + EXPECT_EQ(10, this->blob_top_->channels()); + EXPECT_EQ(2, layer->blobs()[0]->num_axes()); + EXPECT_EQ(10, layer->blobs()[0]->shape(0)); + EXPECT_EQ(60, layer->blobs()[0]->shape(1)); +} + +/** @brief TestSetUp while toggling tranpose flag + */ +TYPED_TEST(MKLDNNInnerProductLayerTest, TestSetUpTranposeTrue) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_); + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + inner_product_param->set_transpose(true); + shared_ptr > layer( + new MKLDNNInnerProductLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + 
EXPECT_EQ(2, this->blob_top_->num()); + EXPECT_EQ(1, this->blob_top_->height()); + EXPECT_EQ(1, this->blob_top_->width()); + EXPECT_EQ(10, this->blob_top_->channels()); + EXPECT_EQ(2, layer->blobs()[0]->num_axes()); + EXPECT_EQ(60, layer->blobs()[0]->shape(0)); + EXPECT_EQ(10, layer->blobs()[0]->shape(1)); +} + +TYPED_TEST(MKLDNNInnerProductLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_); + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + inner_product_param->mutable_weight_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_min(1); + inner_product_param->mutable_bias_filler()->set_max(2); + shared_ptr > layer( + new MKLDNNInnerProductLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + for (int i = 0; i < count; ++i) { + EXPECT_GE(data[i], 1.); + } +} + +TYPED_TEST(MKLDNNInnerProductLayerTest, TestForwardNoBias) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_); + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + inner_product_param->mutable_weight_filler()->set_type("uniform"); + inner_product_param->set_bias_term(false); + shared_ptr > layer( + new MKLDNNInnerProductLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + for (int i = 0; i < 
count; ++i) { + EXPECT_GE(data[i], 1.); + } +} + +// TODO: add support for transposed weights in MKLDNNInnerProduct +// layer and then enable following test (check if it was ported properly) +#if 0 +/** + * @brief Init. an IP layer without transpose + random weights, + * run Forward, save the result. + * Init. another IP layer with transpose. + * manually copy and transpose the weights from the first IP layer, + * then run Forward on the same input and check that the result is the same + */ +TYPED_TEST(MKLDNNInnerProductLayerTest, TestForwardTranspose) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_); + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + inner_product_param->mutable_weight_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_min(1); + inner_product_param->mutable_bias_filler()->set_max(2); + inner_product_param->set_transpose(false); + shared_ptr > layer( + new MKLDNNInnerProductLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const int count = this->blob_top_->count(); + Blob* const top = new Blob(); + top->ReshapeLike(*this->blob_top_); + caffe_copy(count, this->blob_top_->cpu_data(), top->mutable_cpu_data()); + this->blob_top_vec_.clear(); + this->blob_top_vec_.push_back(new Blob()); + inner_product_param->set_transpose(true); + shared_ptr > ip_t( + new MKLDNNInnerProductLayer(layer_param)); + ip_t->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + const int count_w = layer->blobs()[0]->count(); + EXPECT_EQ(count_w, ip_t->blobs()[0]->count()); + // manually copy and transpose the weights from 1st IP layer into 2nd + const Dtype* w = layer->blobs()[0]->cpu_data(); + Dtype* w_t = 
ip_t->blobs()[0]->mutable_cpu_data(); + const int width = layer->blobs()[0]->shape(1); + const int width_t = ip_t->blobs()[0]->shape(1); + for (int i = 0; i < count_w; ++i) { + int r = i / width; + int c = i % width; + w_t[c*width_t+r] = w[r*width+c]; // copy while transposing + } + // copy bias from 1st IP layer to 2nd IP layer + ASSERT_EQ(layer->blobs()[1]->count(), ip_t->blobs()[1]->count()); + caffe_copy(layer->blobs()[1]->count(), layer->blobs()[1]->cpu_data(), + ip_t->blobs()[1]->mutable_cpu_data()); + ip_t->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(count, this->blob_top_->count()) + << "Invalid count for top blob for IP with transpose."; + Blob* const top_t = new Blob();\ + top_t->ReshapeLike(*this->blob_top_vec_[0]); + caffe_copy(count, + this->blob_top_vec_[0]->cpu_data(), + top_t->mutable_cpu_data()); + const Dtype* data = top->cpu_data(); + const Dtype* data_t = top_t->cpu_data(); + for (int i = 0; i < count; ++i) { + EXPECT_FLOAT_EQ(data[i], data_t[i]); + } +} +#endif + +TYPED_TEST(MKLDNNInnerProductLayerTest, TestForwardNoBatch) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_nobatch_); + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + inner_product_param->mutable_weight_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_type("uniform"); + inner_product_param->mutable_bias_filler()->set_min(1); + inner_product_param->mutable_bias_filler()->set_max(2); + shared_ptr > layer( + new MKLDNNInnerProductLayer(layer_param)); + layer->SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer->Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype* data = this->blob_top_->cpu_data(); + const int count = this->blob_top_->count(); + for (int i = 0; i < count; ++i) { + EXPECT_GE(data[i], 1.); + } +} + 
+TYPED_TEST(MKLDNNInnerProductLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_); + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + inner_product_param->mutable_weight_filler()->set_type("gaussian"); + inner_product_param->mutable_bias_filler()->set_type("gaussian"); + inner_product_param->mutable_bias_filler()->set_min(1); + inner_product_param->mutable_bias_filler()->set_max(2); + shared_ptr > layer( + new MKLDNNInnerProductLayer(layer_param)); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(layer.get(), this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(MKLDNNInnerProductLayerTest, TestGradientNoBias) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_); + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(10); + inner_product_param->mutable_weight_filler()->set_type("gaussian"); + inner_product_param->set_bias_term(false); + shared_ptr > layer( + new MKLDNNInnerProductLayer(layer_param)); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(layer.get(), this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(MKLDNNInnerProductLayerTest, TestGradientTranspose) { + typedef typename TypeParam::Dtype Dtype; + this->blob_bottom_vec_.push_back(this->blob_bottom_); + LayerParameter layer_param; + InnerProductParameter* inner_product_param = + layer_param.mutable_inner_product_param(); + inner_product_param->set_num_output(11); + inner_product_param->mutable_weight_filler()->set_type("gaussian"); + inner_product_param->mutable_bias_filler()->set_type("gaussian"); + inner_product_param->mutable_bias_filler()->set_min(1); + 
inner_product_param->mutable_bias_filler()->set_max(2); + inner_product_param->set_transpose(true); + shared_ptr > layer( + new MKLDNNInnerProductLayer(layer_param)); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(layer.get(), this->blob_bottom_vec_, + this->blob_top_vec_); +} + +} // namespace caffe +#endif // #ifdef MKLDNN_SUPPORTED diff --git a/src/caffe/test/test_mkldnn_lrn_layer.cpp b/src/caffe/test/test_mkldnn_lrn_layer.cpp new file mode 100644 index 00000000000..e8dfaa90435 --- /dev/null +++ b/src/caffe/test/test_mkldnn_lrn_layer.cpp @@ -0,0 +1,300 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef MKLDNN_SUPPORTED +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/mkldnn_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +using std::min; +using std::max; + +namespace caffe { + +#define MB 2 +#define IC 4 +#define IH 5 +#define IW 5 +#define LS 3 + +template +class MKLDNNLRNLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MKLDNNLRNLayerTest() + : epsilon_(Dtype(1e-5)), + blob_bottom_(new Blob()), + blob_top_(new Blob()) {} + virtual void SetUp() { + Caffe::set_random_seed(1701); + blob_bottom_->Reshape(MB, IC, IH, IW); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~MKLDNNLRNLayerTest() { delete blob_bottom_; delete blob_top_; } + void ReferenceLRNForward(const Blob& blob_bottom, + const LayerParameter& layer_param, Blob* blob_top); + + Dtype epsilon_; + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +template +void MKLDNNLRNLayerTest::ReferenceLRNForward( + const Blob& blob_bottom, const LayerParameter& layer_param, + Blob* blob_top) { + typedef typename TypeParam::Dtype Dtype; + 
blob_top->Reshape(blob_bottom.num(), blob_bottom.channels(), + blob_bottom.height(), blob_bottom.width()); + Dtype* top_data = blob_top->mutable_cpu_data(); + LRNParameter lrn_param = layer_param.lrn_param(); + Dtype alpha = lrn_param.alpha(); + Dtype beta = lrn_param.beta(); + int size = lrn_param.local_size(); + switch (lrn_param.norm_region()) { + case LRNParameter_NormRegion_ACROSS_CHANNELS: + for (int n = 0; n < blob_bottom.num(); ++n) { + for (int c = 0; c < blob_bottom.channels(); ++c) { + for (int h = 0; h < blob_bottom.height(); ++h) { + for (int w = 0; w < blob_bottom.width(); ++w) { + int c_start = c - (size - 1) / 2; + int c_end = min(c_start + size, blob_bottom.channels()); + c_start = max(c_start, 0); + Dtype scale = 1.; + for (int i = c_start; i < c_end; ++i) { + Dtype value = blob_bottom.data_at(n, i, h, w); + scale += value * value * alpha / size; + } + *(top_data + blob_top->offset(n, c, h, w)) = + blob_bottom.data_at(n, c, h, w) / pow(scale, beta); + } + } + } + } + break; + case LRNParameter_NormRegion_WITHIN_CHANNEL: + for (int n = 0; n < blob_bottom.num(); ++n) { + for (int c = 0; c < blob_bottom.channels(); ++c) { + for (int h = 0; h < blob_bottom.height(); ++h) { + int h_start = h - (size - 1) / 2; + int h_end = min(h_start + size, blob_bottom.height()); + h_start = max(h_start, 0); + for (int w = 0; w < blob_bottom.width(); ++w) { + Dtype scale = 1.; + int w_start = w - (size - 1) / 2; + int w_end = min(w_start + size, blob_bottom.width()); + w_start = max(w_start, 0); + for (int nh = h_start; nh < h_end; ++nh) { + for (int nw = w_start; nw < w_end; ++nw) { + Dtype value = blob_bottom.data_at(n, c, nh, nw); + scale += value * value * alpha / (size * size); + } + } + *(top_data + blob_top->offset(n, c, h, w)) = + blob_bottom.data_at(n, c, h, w) / pow(scale, beta); + } + } + } + } + break; + default: + LOG(FATAL) << "Unknown normalization region."; + } +} + +typedef ::testing::Types /*,CPUDevice*/ > TestDtypesCPU; 
+TYPED_TEST_CASE(MKLDNNLRNLayerTest, TestDtypesCPU); + + +TYPED_TEST(MKLDNNLRNLayerTest, TestSetupAcrossChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLDNNLRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), MB); + EXPECT_EQ(this->blob_top_->channels(), IC); + EXPECT_EQ(this->blob_top_->height(), IH); + EXPECT_EQ(this->blob_top_->width(), IW); +} + +TYPED_TEST(MKLDNNLRNLayerTest, TestForwardAcrossChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLDNNLRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } +} + +TYPED_TEST(MKLDNNLRNLayerTest, TestForwardAcrossChannelsLargeRegion) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_local_size(LS); + MKLDNNLRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } +} +TYPED_TEST(MKLDNNLRNLayerTest, TestGradientAcrossChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLDNNLRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i 
< this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + vector propagate_down(this->blob_bottom_vec_.size(), true); + layer.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + // for (int i = 0; i < this->blob_bottom_->count(); ++i) { + // std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i] + // << std::endl; + // } + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(MKLDNNLRNLayerTest, TestGradientAcrossChannelsLargeRegion) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_local_size(15); + MKLDNNLRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + vector propagate_down(this->blob_bottom_vec_.size(), true); + layer.Backward(this->blob_top_vec_, propagate_down, + this->blob_bottom_vec_); + // for (int i = 0; i < this->blob_bottom_->count(); ++i) { + // std::cout << "CPU diff " << this->blob_bottom_->cpu_diff()[i] + // << std::endl; + // } + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(MKLDNNLRNLayerTest, TestSetupWithinChannel) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + MKLDNNLRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), MB); + EXPECT_EQ(this->blob_top_->channels(), IC); + EXPECT_EQ(this->blob_top_->height(), IH); + EXPECT_EQ(this->blob_top_->width(), IW); +} + +TYPED_TEST(MKLDNNLRNLayerTest, TestForwardWithinChannel) { + 
typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + + // Note: MKLDNN supports only WITHIN channel LRN only in TEST mode + // TODO: following line can removed if MKLDNN within channel backward is supported + layer_param.set_phase(TEST); + + MKLDNNLRNLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Blob top_reference; + this->ReferenceLRNForward(*(this->blob_bottom_), layer_param, + &top_reference); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_top_->cpu_data()[i], top_reference.cpu_data()[i], + this->epsilon_); + } +} +// TODO: Enable this test when MKLDNN supports Backward for WINTHIN channel LRN +#if 0 +TYPED_TEST(MKLDNNLRNLayerTest, TestGradientWithinChannel) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.mutable_lrn_param()->set_norm_region( + LRNParameter_NormRegion_WITHIN_CHANNEL); + layer_param.mutable_lrn_param()->set_local_size(3); + MKLDNNLRNLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = 1.; + } + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} +#endif +} // namespace caffe +#endif // #ifdef MKLDNN_SUPPORTED diff --git a/src/caffe/test/test_mkldnn_neuron_layers.cpp b/src/caffe/test/test_mkldnn_neuron_layers.cpp new file mode 100644 index 00000000000..07033c1ecc7 --- /dev/null +++ b/src/caffe/test/test_mkldnn_neuron_layers.cpp @@ -0,0 +1,119 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions 
by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef MKLDNN_SUPPORTED +#include +#include + +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" + +#include "caffe/layers/mkldnn_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class MKLDNNNeuronLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MKLDNNNeuronLayerTest() + : blob_bottom_(new Blob(2, 4, 5, 5)), + blob_top_(new Blob()) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~MKLDNNNeuronLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types > TestDtypesCPU; +TYPED_TEST_CASE(MKLDNNNeuronLayerTest, TestDtypesCPU); + +TYPED_TEST(MKLDNNNeuronLayerTest, TestReLU) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLDNNReLULayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const Dtype* bottom_data = this->blob_bottom_->cpu_data(); + const Dtype* top_data = this->blob_top_->cpu_data(); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_GE(top_data[i], 0.); + EXPECT_TRUE(top_data[i] == 0 || top_data[i] == bottom_data[i]); + } +} + + +TYPED_TEST(MKLDNNNeuronLayerTest, TestReLUGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLDNNReLULayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + 
this->blob_top_vec_); +} + +TYPED_TEST(MKLDNNNeuronLayerTest, TestReLUGradientWithNegativeSlope) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + "relu_param { negative_slope: 0.01 }", &layer_param)); + MKLDNNReLULayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3, 1701, 0., 0.01); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +} // namespace caffe +#endif // #ifdef MKLDNN_SUPPORTED diff --git a/src/caffe/test/test_mkldnn_pooling_layer.cpp b/src/caffe/test/test_mkldnn_pooling_layer.cpp new file mode 100644 index 00000000000..a9d37010a45 --- /dev/null +++ b/src/caffe/test/test_mkldnn_pooling_layer.cpp @@ -0,0 +1,687 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifdef MKLDNN_SUPPORTED +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/mkldnn_layers.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class MKLDNNPoolingLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MKLDNNPoolingLayerTest() + : blob_bottom_(new Blob()), + blob_top_(new Blob()), + blob_top_mask_(new Blob()) {} + virtual void SetUp() { + Caffe::set_random_seed(1701); + blob_bottom_->Reshape(2, 3, 6, 5); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~MKLDNNPoolingLayerTest() { + delete blob_bottom_; + delete blob_top_; + delete blob_top_mask_; + } + Blob* const blob_bottom_; + Blob* const blob_top_; + Blob* const blob_top_mask_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; + // Test for 2x 2 square pooling layer + void TestForwardSquare() { + LayerParameter layer_param; + PoolingParameter* 
pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + const int num = 2; + const int channels = 2; + blob_bottom_->Reshape(num, channels, 3, 5); + // Input: 2x 2 channels of: + // [1 2 5 2 3] + // [9 4 1 4 8] + // [1 2 5 2 3] + for (int i = 0; i < 15 * num * channels; i += 15) { + blob_bottom_->mutable_cpu_data()[i + 0] = 1; + blob_bottom_->mutable_cpu_data()[i + 1] = 2; + blob_bottom_->mutable_cpu_data()[i + 2] = 5; + blob_bottom_->mutable_cpu_data()[i + 3] = 2; + blob_bottom_->mutable_cpu_data()[i + 4] = 3; + blob_bottom_->mutable_cpu_data()[i + 5] = 9; + blob_bottom_->mutable_cpu_data()[i + 6] = 4; + blob_bottom_->mutable_cpu_data()[i + 7] = 1; + blob_bottom_->mutable_cpu_data()[i + 8] = 4; + blob_bottom_->mutable_cpu_data()[i + 9] = 8; + blob_bottom_->mutable_cpu_data()[i + 10] = 1; + blob_bottom_->mutable_cpu_data()[i + 11] = 2; + blob_bottom_->mutable_cpu_data()[i + 12] = 5; + blob_bottom_->mutable_cpu_data()[i + 13] = 2; + blob_bottom_->mutable_cpu_data()[i + 14] = 3; + } + MKLDNNPoolingLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_->num(), num); + EXPECT_EQ(blob_top_->channels(), channels); + EXPECT_EQ(blob_top_->height(), 2); + EXPECT_EQ(blob_top_->width(), 4); + if (blob_top_vec_.size() > 1) { + EXPECT_EQ(blob_top_mask_->num(), num); + EXPECT_EQ(blob_top_mask_->channels(), channels); + EXPECT_EQ(blob_top_mask_->height(), 2); + EXPECT_EQ(blob_top_mask_->width(), 4); + } + layer.Forward(blob_bottom_vec_, blob_top_vec_); + // Expected output: 2x 2 channels of: + // [9 5 5 8] + // [9 5 5 8] + for (int i = 0; i < 8 * num * channels; i += 8) { + EXPECT_EQ(blob_top_->cpu_data()[i + 0], 9); + EXPECT_EQ(blob_top_->cpu_data()[i + 1], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 2], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 3], 8); + EXPECT_EQ(blob_top_->cpu_data()[i + 4], 9); + EXPECT_EQ(blob_top_->cpu_data()[i + 5], 5); + 
EXPECT_EQ(blob_top_->cpu_data()[i + 6], 5); + EXPECT_EQ(blob_top_->cpu_data()[i + 7], 8); + } + if (blob_top_vec_.size() > 1) { + // Expected mask output: 2x 2 channels of: + // [5 2 2 9] + // [5 12 12 9] + uint32_t *mask = reinterpret_cast(blob_top_mask_->mutable_cpu_data()); + for (int i = 0; i < 8 * num * channels; i += 8) { + EXPECT_EQ(mask[i + 0], 5); + EXPECT_EQ(mask[i + 1], 2); + EXPECT_EQ(mask[i + 2], 2); + EXPECT_EQ(mask[i + 3], 9); + EXPECT_EQ(mask[i + 4], 5); + EXPECT_EQ(mask[i + 5], 12); + EXPECT_EQ(mask[i + 6], 12); + EXPECT_EQ(mask[i + 7], 9); + } + } + } + // Test for 3x 2 rectangular pooling layer with kernel_h > kernel_w + void TestForwardRectHigh() { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(3); + pooling_param->set_kernel_w(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + const int num = 2; + const int channels = 2; + blob_bottom_->Reshape(num, channels, 6, 6); + // Input: 2x 2 channels of: + // [35 1 6 26 19 24] + // [ 3 32 7 21 23 25] + // [31 9 2 22 27 20] + // [ 8 28 33 17 10 15] + // [30 5 34 12 14 16] + // [ 4 36 29 13 18 11] + // (this is generated by magic(6) in MATLAB) + for (int i = 0; i < 36 * num * channels; i += 36) { + blob_bottom_->mutable_cpu_data()[i + 0] = 35; + blob_bottom_->mutable_cpu_data()[i + 1] = 1; + blob_bottom_->mutable_cpu_data()[i + 2] = 6; + blob_bottom_->mutable_cpu_data()[i + 3] = 26; + blob_bottom_->mutable_cpu_data()[i + 4] = 19; + blob_bottom_->mutable_cpu_data()[i + 5] = 24; + blob_bottom_->mutable_cpu_data()[i + 6] = 3; + blob_bottom_->mutable_cpu_data()[i + 7] = 32; + blob_bottom_->mutable_cpu_data()[i + 8] = 7; + blob_bottom_->mutable_cpu_data()[i + 9] = 21; + blob_bottom_->mutable_cpu_data()[i + 10] = 23; + blob_bottom_->mutable_cpu_data()[i + 11] = 25; + blob_bottom_->mutable_cpu_data()[i + 12] = 31; + blob_bottom_->mutable_cpu_data()[i + 13] = 9; + blob_bottom_->mutable_cpu_data()[i + 14] = 2; + 
blob_bottom_->mutable_cpu_data()[i + 15] = 22; + blob_bottom_->mutable_cpu_data()[i + 16] = 27; + blob_bottom_->mutable_cpu_data()[i + 17] = 20; + blob_bottom_->mutable_cpu_data()[i + 18] = 8; + blob_bottom_->mutable_cpu_data()[i + 19] = 28; + blob_bottom_->mutable_cpu_data()[i + 20] = 33; + blob_bottom_->mutable_cpu_data()[i + 21] = 17; + blob_bottom_->mutable_cpu_data()[i + 22] = 10; + blob_bottom_->mutable_cpu_data()[i + 23] = 15; + blob_bottom_->mutable_cpu_data()[i + 24] = 30; + blob_bottom_->mutable_cpu_data()[i + 25] = 5; + blob_bottom_->mutable_cpu_data()[i + 26] = 34; + blob_bottom_->mutable_cpu_data()[i + 27] = 12; + blob_bottom_->mutable_cpu_data()[i + 28] = 14; + blob_bottom_->mutable_cpu_data()[i + 29] = 16; + blob_bottom_->mutable_cpu_data()[i + 30] = 4; + blob_bottom_->mutable_cpu_data()[i + 31] = 36; + blob_bottom_->mutable_cpu_data()[i + 32] = 29; + blob_bottom_->mutable_cpu_data()[i + 33] = 13; + blob_bottom_->mutable_cpu_data()[i + 34] = 18; + blob_bottom_->mutable_cpu_data()[i + 35] = 11; + } + MKLDNNPoolingLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_->num(), num); + EXPECT_EQ(blob_top_->channels(), channels); + EXPECT_EQ(blob_top_->height(), 4); + EXPECT_EQ(blob_top_->width(), 5); + if (blob_top_vec_.size() > 1) { + EXPECT_EQ(blob_top_mask_->num(), num); + EXPECT_EQ(blob_top_mask_->channels(), channels); + EXPECT_EQ(blob_top_mask_->height(), 4); + EXPECT_EQ(blob_top_mask_->width(), 5); + } + layer.Forward(blob_bottom_vec_, blob_top_vec_); + // Expected output: 2x 2 channels of: + // [35 32 26 27 27] + // [32 33 33 27 27] + // [31 34 34 27 27] + // [36 36 34 18 18] + for (int i = 0; i < 20 * num * channels; i += 20) { + EXPECT_EQ(blob_top_->cpu_data()[i + 0], 35); + EXPECT_EQ(blob_top_->cpu_data()[i + 1], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 2], 26); + EXPECT_EQ(blob_top_->cpu_data()[i + 3], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 4], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 5], 32); 
+ EXPECT_EQ(blob_top_->cpu_data()[i + 6], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 7], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 8], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 9], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 10], 31); + EXPECT_EQ(blob_top_->cpu_data()[i + 11], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 12], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 13], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 14], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 15], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 16], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 17], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 18], 18); + EXPECT_EQ(blob_top_->cpu_data()[i + 19], 18); + } + if (blob_top_vec_.size() > 1) { + // [ 1 8 4 17 17] + // [ 8 21 21 17 17] + // [13 27 27 17 17] + // [32 32 27 35 35] + uint32_t *mask = reinterpret_cast(blob_top_mask_->mutable_cpu_data()); + for (int i = 0; i < 20 * num * channels; i += 20) { + EXPECT_EQ(mask[i + 0], 0); + EXPECT_EQ(mask[i + 1], 7); + EXPECT_EQ(mask[i + 2], 3); + EXPECT_EQ(mask[i + 3], 16); + EXPECT_EQ(mask[i + 4], 16); + EXPECT_EQ(mask[i + 5], 7); + EXPECT_EQ(mask[i + 6], 20); + EXPECT_EQ(mask[i + 7], 20); + EXPECT_EQ(mask[i + 8], 16); + EXPECT_EQ(mask[i + 9], 16); + EXPECT_EQ(mask[i + 10], 12); + EXPECT_EQ(mask[i + 11], 26); + EXPECT_EQ(mask[i + 12], 26); + EXPECT_EQ(mask[i + 13], 16); + EXPECT_EQ(mask[i + 14], 16); + EXPECT_EQ(mask[i + 15], 31); + EXPECT_EQ(mask[i + 16], 31); + EXPECT_EQ(mask[i + 17], 26); + EXPECT_EQ(mask[i + 18], 34); + EXPECT_EQ(mask[i + 19], 34); + } + } + } + // Test for rectangular pooling layer with kernel_w > kernel_h + void TestForwardRectWide() { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(2); + pooling_param->set_kernel_w(3); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + const int num = 2; + const int channels = 2; + blob_bottom_->Reshape(num, channels, 6, 6); + // Input: 2x 2 channels of: + // [35 1 6 
26 19 24] + // [ 3 32 7 21 23 25] + // [31 9 2 22 27 20] + // [ 8 28 33 17 10 15] + // [30 5 34 12 14 16] + // [ 4 36 29 13 18 11] + // (this is generated by magic(6) in MATLAB) + for (int i = 0; i < 36 * num * channels; i += 36) { + blob_bottom_->mutable_cpu_data()[i + 0] = 35; + blob_bottom_->mutable_cpu_data()[i + 1] = 1; + blob_bottom_->mutable_cpu_data()[i + 2] = 6; + blob_bottom_->mutable_cpu_data()[i + 3] = 26; + blob_bottom_->mutable_cpu_data()[i + 4] = 19; + blob_bottom_->mutable_cpu_data()[i + 5] = 24; + blob_bottom_->mutable_cpu_data()[i + 6] = 3; + blob_bottom_->mutable_cpu_data()[i + 7] = 32; + blob_bottom_->mutable_cpu_data()[i + 8] = 7; + blob_bottom_->mutable_cpu_data()[i + 9] = 21; + blob_bottom_->mutable_cpu_data()[i + 10] = 23; + blob_bottom_->mutable_cpu_data()[i + 11] = 25; + blob_bottom_->mutable_cpu_data()[i + 12] = 31; + blob_bottom_->mutable_cpu_data()[i + 13] = 9; + blob_bottom_->mutable_cpu_data()[i + 14] = 2; + blob_bottom_->mutable_cpu_data()[i + 15] = 22; + blob_bottom_->mutable_cpu_data()[i + 16] = 27; + blob_bottom_->mutable_cpu_data()[i + 17] = 20; + blob_bottom_->mutable_cpu_data()[i + 18] = 8; + blob_bottom_->mutable_cpu_data()[i + 19] = 28; + blob_bottom_->mutable_cpu_data()[i + 20] = 33; + blob_bottom_->mutable_cpu_data()[i + 21] = 17; + blob_bottom_->mutable_cpu_data()[i + 22] = 10; + blob_bottom_->mutable_cpu_data()[i + 23] = 15; + blob_bottom_->mutable_cpu_data()[i + 24] = 30; + blob_bottom_->mutable_cpu_data()[i + 25] = 5; + blob_bottom_->mutable_cpu_data()[i + 26] = 34; + blob_bottom_->mutable_cpu_data()[i + 27] = 12; + blob_bottom_->mutable_cpu_data()[i + 28] = 14; + blob_bottom_->mutable_cpu_data()[i + 29] = 16; + blob_bottom_->mutable_cpu_data()[i + 30] = 4; + blob_bottom_->mutable_cpu_data()[i + 31] = 36; + blob_bottom_->mutable_cpu_data()[i + 32] = 29; + blob_bottom_->mutable_cpu_data()[i + 33] = 13; + blob_bottom_->mutable_cpu_data()[i + 34] = 18; + blob_bottom_->mutable_cpu_data()[i + 35] = 11; + } + 
MKLDNNPoolingLayer layer(layer_param); + layer.SetUp(blob_bottom_vec_, blob_top_vec_); + EXPECT_EQ(blob_top_->num(), num); + EXPECT_EQ(blob_top_->channels(), channels); + EXPECT_EQ(blob_top_->height(), 5); + EXPECT_EQ(blob_top_->width(), 4); + if (blob_top_vec_.size() > 1) { + EXPECT_EQ(blob_top_mask_->num(), num); + EXPECT_EQ(blob_top_mask_->channels(), channels); + EXPECT_EQ(blob_top_mask_->height(), 5); + EXPECT_EQ(blob_top_mask_->width(), 4); + } + layer.Forward(blob_bottom_vec_, blob_top_vec_); + // Expected output: 2x 2 channels of: + // [35 32 26 26] + // [32 32 27 27] + // [33 33 33 27] + // [34 34 34 17] + // [36 36 34 18] + for (int i = 0; i < 20 * num * channels; i += 20) { + EXPECT_EQ(blob_top_->cpu_data()[i + 0], 35); + EXPECT_EQ(blob_top_->cpu_data()[i + 1], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 2], 26); + EXPECT_EQ(blob_top_->cpu_data()[i + 3], 26); + EXPECT_EQ(blob_top_->cpu_data()[i + 4], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 5], 32); + EXPECT_EQ(blob_top_->cpu_data()[i + 6], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 7], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 8], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 9], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 10], 33); + EXPECT_EQ(blob_top_->cpu_data()[i + 11], 27); + EXPECT_EQ(blob_top_->cpu_data()[i + 12], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 13], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 14], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 15], 17); + EXPECT_EQ(blob_top_->cpu_data()[i + 16], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 17], 36); + EXPECT_EQ(blob_top_->cpu_data()[i + 18], 34); + EXPECT_EQ(blob_top_->cpu_data()[i + 19], 18); + } + if (blob_top_vec_.size() > 1) { + // [ 1 8 4 4] + // [ 8 8 17 17] + // [21 21 21 17] + // [27 27 27 22] + // [32 32 27 35] + uint32_t *mask = reinterpret_cast(blob_top_mask_->mutable_cpu_data()); + for (int i = 0; i < 20 * num * channels; i += 20) { + EXPECT_EQ(mask[i + 0], 0); + EXPECT_EQ(mask[i + 1], 7); + EXPECT_EQ(mask[i + 2], 3); + 
EXPECT_EQ(mask[i + 3], 3); + EXPECT_EQ(mask[i + 4], 7); + EXPECT_EQ(mask[i + 5], 7); + EXPECT_EQ(mask[i + 6], 16); + EXPECT_EQ(mask[i + 7], 16); + EXPECT_EQ(mask[i + 8], 20); + EXPECT_EQ(mask[i + 9], 20); + EXPECT_EQ(mask[i + 10], 20); + EXPECT_EQ(mask[i + 11], 16); + EXPECT_EQ(mask[i + 12], 26); + EXPECT_EQ(mask[i + 13], 26); + EXPECT_EQ(mask[i + 14], 26); + EXPECT_EQ(mask[i + 15], 21); + EXPECT_EQ(mask[i + 16], 31); + EXPECT_EQ(mask[i + 17], 31); + EXPECT_EQ(mask[i + 18], 26); + EXPECT_EQ(mask[i + 19], 34); + } + } + } +}; + +typedef ::testing::Types > TestDtypesCPU; +TYPED_TEST_CASE(MKLDNNPoolingLayerTest, TestDtypesCPU); + +TYPED_TEST(MKLDNNPoolingLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + MKLDNNPoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 2); +} + +TYPED_TEST(MKLDNNPoolingLayerTest, TestSetupPadded) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + MKLDNNPoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 4); + EXPECT_EQ(this->blob_top_->width(), 3); +} + +TYPED_TEST(MKLDNNPoolingLayerTest, TestSetupGlobalPooling) { + typedef typename 
TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_global_pooling(true); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + MKLDNNPoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), this->blob_bottom_->num()); + EXPECT_EQ(this->blob_top_->channels(), this->blob_bottom_->channels()); + EXPECT_EQ(this->blob_top_->height(), 1); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +/* +TYPED_TEST(MKLDNNPoolingLayerTest, PrintBackward) { + LayerParameter layer_param; + layer_param.set_kernelsize(3); + layer_param.set_stride(2); + layer_param.set_pool(LayerParameter_PoolMethod_MAX); + PoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + cout << "bottom data " << i << " " << this->blob_bottom_->cpu_data()[i] << endl; + } + for (int i = 0; i < this->blob_top_->count(); ++i) { + cout << "top data " << i << " " << this->blob_top_->cpu_data()[i] << endl; + } + + for (int i = 0; i < this->blob_top_->count(); ++i) { + this->blob_top_->mutable_cpu_diff()[i] = i; + } + layer.Backward(this->blob_top_vec_, true, this->blob_bottom_vec_); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + cout << "bottom diff " << i << " " << this->blob_bottom_->cpu_diff()[i] << endl; + } +} +*/ + +TYPED_TEST(MKLDNNPoolingLayerTest, TestForwardMax) { + this->TestForwardSquare(); + this->TestForwardRectHigh(); + this->TestForwardRectWide(); +} + +#if 0 +TYPED_TEST(MKLDNNPoolingLayerTest, TestForwardMaxTopMask) { + typedef typename TypeParam::Dtype Dtype; + this->blob_top_vec_.push_back(reinterpret_cast* > + (this->blob_top_mask_)); + this->TestForwardSquare(); + this->TestForwardRectHigh(); + this->TestForwardRectWide(); +} +#endif + 
+TYPED_TEST(MKLDNNPoolingLayerTest, TestGradientMax) { + typedef typename TypeParam::Dtype Dtype; + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + MKLDNNPoolingLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + } +} + +TYPED_TEST(MKLDNNPoolingLayerTest, TestForwardMaxPadded) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(2); + pooling_param->set_pad(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + this->blob_bottom_->Reshape(1, 1, 3, 3); + // Input: + // [ 1 2 4 ] + // [ 2 3 2 ] + // [ 4 2 1 ] + this->blob_bottom_->mutable_cpu_data()[0] = 1; + this->blob_bottom_->mutable_cpu_data()[1] = 2; + this->blob_bottom_->mutable_cpu_data()[2] = 4; + this->blob_bottom_->mutable_cpu_data()[3] = 2; + this->blob_bottom_->mutable_cpu_data()[4] = 3; + this->blob_bottom_->mutable_cpu_data()[5] = 2; + this->blob_bottom_->mutable_cpu_data()[6] = 4; + this->blob_bottom_->mutable_cpu_data()[7] = 2; + this->blob_bottom_->mutable_cpu_data()[8] = 1; + MKLDNNPoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Dtype epsilon = 1e-8; + // Output: + // [ 1 4 4 ] + // [ 4 4 4 ] 
+ // [ 4 4 1 ] + EXPECT_NEAR(this->blob_top_->cpu_data()[0], 1, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[1], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[2], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[3], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[4], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[5], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[6], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[7], 4, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[8], 1, epsilon); +} + +#if 0 +TYPED_TEST(MKLDNNPoolingLayerTest, TestGradientMaxTopMask) { + typedef typename TypeParam::Dtype Dtype; + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_MAX); + this->blob_top_vec_.push_back(reinterpret_cast* > + (this->blob_top_mask_)); + MKLDNNPoolingLayer layer(layer_param); + GradientChecker checker(1e-4, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + this->blob_top_vec_.pop_back(); + } + } +} +#endif + +// Average Pooling +TYPED_TEST(MKLDNNPoolingLayerTest, TestForwardAve) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_size(3); + pooling_param->set_stride(1); + pooling_param->set_pad(1); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + this->blob_bottom_->Reshape(1, 1, 3, 3); + FillerParameter filler_param; + filler_param.set_value(Dtype(2)); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + MKLDNNPoolingLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, 
this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 1); + EXPECT_EQ(this->blob_top_->height(), 3); + EXPECT_EQ(this->blob_top_->width(), 3); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + Dtype epsilon = 1e-5; +#if 0 + //For debugging usage + cout << "bottom blob [0]: " << this->blob_bottom_->cpu_data()[0] << endl; + cout << "bottom blob [1]: " << this->blob_bottom_->cpu_data()[1] << endl; + cout << "bottom blob [2]: " << this->blob_bottom_->cpu_data()[2] << endl; + cout << "bottom blob [3]: " << this->blob_bottom_->cpu_data()[3] << endl; + cout << "bottom blob [4]: " << this->blob_bottom_->cpu_data()[4] << endl; + cout << "bottom blob [5]: " << this->blob_bottom_->cpu_data()[5] << endl; + cout << "bottom blob [6]: " << this->blob_bottom_->cpu_data()[6] << endl; + cout << "bottom blob [7]: " << this->blob_bottom_->cpu_data()[7] << endl; + cout << "bottom blob [8]: " << this->blob_bottom_->cpu_data()[8] << endl; + cout << "top blob [0]: " << this->blob_top_->cpu_data()[0] << endl; + cout << "top blob [1]: " << this->blob_top_->cpu_data()[1] << endl; + cout << "top blob [2]: " << this->blob_top_->cpu_data()[2] << endl; + cout << "top blob [3]: " << this->blob_top_->cpu_data()[3] << endl; + cout << "top blob [4]: " << this->blob_top_->cpu_data()[4] << endl; + cout << "top blob [5]: " << this->blob_top_->cpu_data()[5] << endl; + cout << "top blob [6]: " << this->blob_top_->cpu_data()[6] << endl; + cout << "top blob [7]: " << this->blob_top_->cpu_data()[7] << endl; + cout << "top blob [8]: " << this->blob_top_->cpu_data()[8] << endl; +#endif + EXPECT_NEAR(this->blob_top_->cpu_data()[0], 8.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[1], 12.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[2], 8.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[3], 12.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[4], 2.0 , epsilon); + 
EXPECT_NEAR(this->blob_top_->cpu_data()[5], 12.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[6], 8.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[7], 12.0 / 9, epsilon); + EXPECT_NEAR(this->blob_top_->cpu_data()[8], 8.0 / 9, epsilon); +} + +#if 0 +// This unit test is commented because when user do not define padding +// bottom[0]->height/width() + kernel_h/w_ cannot be exact division by stride_h/w_ +// use the exclude padding to align with the result of Caffe +// but when bottom[0]->height/width() + kernel_h/w_ can be exact division by stride_h/w_ +// use the include padding +TYPED_TEST(MKLDNNPoolingLayerTest, DISABLED_TestGradientAve) { + typedef typename TypeParam::Dtype Dtype; + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + MKLDNNPoolingLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + } +} +#endif + +TYPED_TEST(MKLDNNPoolingLayerTest, TestGradientAvePadded) { + typedef typename TypeParam::Dtype Dtype; + for (int kernel_h = 3; kernel_h <= 4; kernel_h++) { + for (int kernel_w = 3; kernel_w <= 4; kernel_w++) { + LayerParameter layer_param; + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_kernel_h(kernel_h); + pooling_param->set_kernel_w(kernel_w); + pooling_param->set_stride(2); + pooling_param->set_pad(2); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + MKLDNNPoolingLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); + } + } +} + +} // 
namespace caffe +#endif // #ifdef MKLDNN_SUPPORTED diff --git a/src/caffe/test/test_mkldnn_split_layer.cpp b/src/caffe/test/test_mkldnn_split_layer.cpp new file mode 100644 index 00000000000..04dc0b40c01 --- /dev/null +++ b/src/caffe/test/test_mkldnn_split_layer.cpp @@ -0,0 +1,120 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#if defined(MKLDNN_SUPPORTED) +#include + +#include "gtest/gtest.h" +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/mkldnn_layers.hpp" +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { +template +class MKLDNNSplitLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MKLDNNSplitLayerTest() + : blob_bottom_(new Blob(2, 3, 6, 5)), + blob_top_a_(new Blob()), + blob_top_b_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_a_); + blob_top_vec_.push_back(blob_top_b_); + } + virtual ~MKLDNNSplitLayerTest() { + delete blob_bottom_; + delete blob_top_a_; + delete blob_top_b_; + } + Blob* const blob_bottom_; + Blob* const blob_top_a_; + Blob* const blob_top_b_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +typedef ::testing::Types > TestDtypesCPU; +TYPED_TEST_CASE(MKLDNNSplitLayerTest, TestDtypesCPU); + +TYPED_TEST(MKLDNNSplitLayerTest, TestSetup) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLDNNSplitLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_a_->num(), 2); + EXPECT_EQ(this->blob_top_a_->channels(), 3); + 
EXPECT_EQ(this->blob_top_a_->height(), 6); + EXPECT_EQ(this->blob_top_a_->width(), 5); + EXPECT_EQ(this->blob_top_b_->num(), 2); + EXPECT_EQ(this->blob_top_b_->channels(), 3); + EXPECT_EQ(this->blob_top_b_->height(), 6); + EXPECT_EQ(this->blob_top_b_->width(), 5); +} + +TYPED_TEST(MKLDNNSplitLayerTest, Test) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLDNNSplitLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + Dtype bottom_value = this->blob_bottom_->cpu_data()[i]; + EXPECT_EQ(bottom_value, this->blob_top_a_->cpu_data()[i]); + EXPECT_EQ(bottom_value, this->blob_top_b_->cpu_data()[i]); + } +} + +TYPED_TEST(MKLDNNSplitLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MKLDNNSplitLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientEltwise(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +} +#endif // #if defined(MKLDNN_SUPPORTED) diff --git a/src/caffe/test/test_multibox_loss_layer.cpp b/src/caffe/test/test_multibox_loss_layer.cpp new file mode 100644 index 00000000000..96dd65f25fe --- /dev/null +++ b/src/caffe/test/test_multibox_loss_layer.cpp @@ -0,0 +1,494 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include +#include +#include + +#include "boost/scoped_ptr.hpp" +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/annotated_data_layer.hpp" +#include "caffe/layers/conv_layer.hpp" +#include "caffe/layers/flatten_layer.hpp" +#include "caffe/layers/multibox_loss_layer.hpp" +#include "caffe/layers/permute_layer.hpp" +#include "caffe/layers/pooling_layer.hpp" +#include "caffe/layers/prior_box_layer.hpp" +#include "caffe/proto/caffe.pb.h" +#include "caffe/util/db.hpp" +#include "caffe/util/io.hpp" + +#ifdef USE_CUDNN +#include "caffe/layers/cudnn_conv_layer.hpp" +#endif + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +using boost::scoped_ptr; + +namespace caffe { + +static bool kBoolChoices[] = {true, false}; +static MultiBoxLossParameter_LocLossType kLocLossTypes[] = { + MultiBoxLossParameter_LocLossType_L2, + MultiBoxLossParameter_LocLossType_SMOOTH_L1}; +static MultiBoxLossParameter_ConfLossType kConfLossTypes[] = { + MultiBoxLossParameter_ConfLossType_SOFTMAX, + MultiBoxLossParameter_ConfLossType_LOGISTIC}; +static MultiBoxLossParameter_MatchType kMatchTypes[] = { + MultiBoxLossParameter_MatchType_BIPARTITE, + MultiBoxLossParameter_MatchType_PER_PREDICTION}; +static LossParameter_NormalizationMode kNormalizationModes[] = { + LossParameter_NormalizationMode_BATCH_SIZE, + LossParameter_NormalizationMode_FULL, + LossParameter_NormalizationMode_VALID, + LossParameter_NormalizationMode_NONE}; +static MultiBoxLossParameter_MiningType kMiningType[] = { + MultiBoxLossParameter_MiningType_NONE, + MultiBoxLossParameter_MiningType_MAX_NEGATIVE, + MultiBoxLossParameter_MiningType_HARD_EXAMPLE}; + +template +class MultiBoxLossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + MultiBoxLossLayerTest() + : num_(3), + num_classes_(3), + width_(2), + height_(2), + 
num_priors_per_location_(4), + num_priors_(width_ * height_ * num_priors_per_location_), + blob_bottom_loc_(new Blob(num_, num_priors_ * 4, 1, 1)), + blob_bottom_conf_(new Blob( + num_, num_priors_ * num_classes_, 1, 1)), + blob_bottom_prior_(new Blob(num_, 2, num_priors_ * 4, 1)), + blob_bottom_gt_(new Blob(1, 1, 4, 7)), + blob_top_loss_(new Blob()) { + blob_bottom_vec_.push_back(blob_bottom_loc_); + blob_bottom_vec_.push_back(blob_bottom_conf_); + blob_bottom_vec_.push_back(blob_bottom_prior_); + blob_bottom_vec_.push_back(blob_bottom_gt_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~MultiBoxLossLayerTest() { + delete blob_bottom_prior_; + delete blob_bottom_loc_; + delete blob_bottom_conf_; + delete blob_bottom_gt_; + delete blob_top_loss_; + } + + void FillItem(Dtype* blob_data, const string values) { + // Split values to vector of items. + vector items; + std::istringstream iss(values); + std::copy(std::istream_iterator(iss), + std::istream_iterator(), back_inserter(items)); + int num_items = items.size(); + CHECK_EQ(num_items, 8); + + for (int i = 0; i < 8; ++i) { + if (i >= 3 && i <= 6) { + blob_data[i] = atof(items[i].c_str()); + } else { + blob_data[i] = atoi(items[i].c_str()); + } + } + } + + // Fill the bottom blobs. + void Fill(bool share_location) { + int loc_classes = share_location ? 1 : num_classes_; + // Create fake network which simulates a simple multi box network. + vector*> fake_bottom_vec; + vector*> fake_top_vec; + LayerParameter layer_param; + // Fake input (image) of size 20 x 20 + Blob* fake_input = new Blob(num_, 3, 20, 20); + + // 1) Fill ground truth. +#ifdef USE_LMDB + string filename; + GetTempDirname(&filename); + DataParameter_DB backend = DataParameter_DB_LMDB; + scoped_ptr db(db::GetDB(backend)); + db->Open(filename, db::NEW); + scoped_ptr txn(db->NewTransaction()); + for (int i = 0; i < num_; ++i) { + AnnotatedDatum anno_datum; + // Fill data. 
+ Datum* datum = anno_datum.mutable_datum(); + datum->set_channels(3); + datum->set_height(20); + datum->set_width(20); + std::string* data = datum->mutable_data(); + for (int j = 0; j < 3*20*20; ++j) { + data->push_back(static_cast(j/100.)); + } + anno_datum.set_type(AnnotatedDatum_AnnotationType_BBOX); + if (i == 0 || i == 2) { + AnnotationGroup* anno_group = anno_datum.add_annotation_group(); + anno_group->set_group_label(1); + Annotation* anno = anno_group->add_annotation(); + anno->set_instance_id(0); + NormalizedBBox* bbox = anno->mutable_bbox(); + bbox->set_xmin(0.1); + bbox->set_ymin(0.1); + bbox->set_xmax(0.3); + bbox->set_ymax(0.3); + bbox->set_difficult(i % 2); + } + if (i == 2) { + AnnotationGroup* anno_group = anno_datum.add_annotation_group(); + anno_group->set_group_label(2); + Annotation* anno = anno_group->add_annotation(); + anno->set_instance_id(0); + NormalizedBBox* bbox = anno->mutable_bbox(); + bbox->set_xmin(0.2); + bbox->set_ymin(0.2); + bbox->set_xmax(0.4); + bbox->set_ymax(0.4); + bbox->set_difficult(i % 2); + anno = anno_group->add_annotation(); + anno->set_instance_id(1); + bbox = anno->mutable_bbox(); + bbox->set_xmin(0.6); + bbox->set_ymin(0.6); + bbox->set_xmax(0.8); + bbox->set_ymax(0.9); + bbox->set_difficult((i + 1) % 2); + } + string key_str = caffe::format_int(i, 3); + string out; + CHECK(anno_datum.SerializeToString(&out)); + txn->Put(key_str, out); + } + txn->Commit(); + db->Close(); + DataParameter* data_param = layer_param.mutable_data_param(); + data_param->set_batch_size(num_); + data_param->set_source(filename.c_str()); + data_param->set_backend(backend); + AnnotatedDataLayer anno_data_layer(layer_param); + fake_top_vec.clear(); + fake_top_vec.push_back(fake_input); + fake_top_vec.push_back(blob_bottom_gt_); + anno_data_layer.SetUp(fake_bottom_vec, fake_top_vec); + anno_data_layer.Forward(fake_bottom_vec, fake_top_vec); +#else + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(fake_input); 
+ vector gt_shape(4, 1); + gt_shape[2] = 4; + gt_shape[3] = 8; + blob_bottom_gt_->Reshape(gt_shape); + Dtype* gt_data = blob_bottom_gt_->mutable_cpu_data(); + FillItem(gt_data, "0 1 0 0.1 0.1 0.3 0.3 0"); + FillItem(gt_data + 8, "2 1 0 0.1 0.1 0.3 0.3 0"); + FillItem(gt_data + 8 * 2, "2 2 0 0.2 0.2 0.4 0.4 0"); + FillItem(gt_data + 8 * 3, "2 2 1 0.6 0.6 0.8 0.9 1"); +#endif // USE_LMDB + + // Fake layer + PoolingParameter* pooling_param = layer_param.mutable_pooling_param(); + pooling_param->set_pool(PoolingParameter_PoolMethod_AVE); + pooling_param->set_kernel_size(10); + pooling_param->set_stride(10); + + PoolingLayer pooling_layer(layer_param); + Blob* fake_blob = new Blob(num_, 5, height_, width_); + fake_bottom_vec.clear(); + fake_bottom_vec.push_back(fake_input); + fake_top_vec.clear(); + fake_top_vec.push_back(fake_blob); + pooling_layer.SetUp(fake_bottom_vec, fake_top_vec); + pooling_layer.Forward(fake_bottom_vec, fake_top_vec); + + // 2) Fill bbox location predictions. + ConvolutionParameter* convolution_param = + layer_param.mutable_convolution_param(); + convolution_param->add_pad(0); + convolution_param->add_kernel_size(1); + convolution_param->add_stride(1); + int num_output = num_priors_per_location_ * loc_classes * 4; + convolution_param->set_num_output(num_output); + convolution_param->mutable_weight_filler()->set_type("xavier"); + convolution_param->mutable_bias_filler()->set_type("constant"); + convolution_param->mutable_bias_filler()->set_value(0.1); + ConvolutionLayer conv_layer_loc(layer_param); + fake_bottom_vec.clear(); + fake_bottom_vec.push_back(fake_blob); + Blob fake_output_loc; + fake_top_vec.clear(); + fake_top_vec.push_back(&fake_output_loc); + conv_layer_loc.SetUp(fake_bottom_vec, fake_top_vec); + conv_layer_loc.Forward(fake_bottom_vec, fake_top_vec); + + // Use Permute and Flatten layer to prepare for MultiBoxLoss layer. 
+ PermuteParameter* permute_param = layer_param.mutable_permute_param(); + permute_param->add_order(0); + permute_param->add_order(2); + permute_param->add_order(3); + permute_param->add_order(1); + PermuteLayer permute_layer(layer_param); + fake_bottom_vec.clear(); + fake_bottom_vec.push_back(&fake_output_loc); + fake_top_vec.clear(); + Blob fake_permute_loc; + fake_top_vec.push_back(&fake_permute_loc); + permute_layer.SetUp(fake_bottom_vec, fake_top_vec); + permute_layer.Forward(fake_bottom_vec, fake_top_vec); + + FlattenParameter* flatten_param = layer_param.mutable_flatten_param(); + flatten_param->set_axis(1); + FlattenLayer flatten_layer(layer_param); + vector loc_shape(4, 1); + loc_shape[0] = num_; + loc_shape[1] = num_output * height_ * width_; + blob_bottom_loc_->Reshape(loc_shape); + fake_bottom_vec.clear(); + fake_bottom_vec.push_back(&fake_permute_loc); + fake_top_vec.clear(); + fake_top_vec.push_back(blob_bottom_loc_); + flatten_layer.SetUp(fake_bottom_vec, fake_top_vec); + flatten_layer.Forward(fake_bottom_vec, fake_top_vec); + + // 3) Fill bbox confidence predictions. 
+ convolution_param->set_num_output(num_priors_per_location_ * num_classes_); + ConvolutionLayer conv_layer_conf(layer_param); + fake_bottom_vec.clear(); + fake_bottom_vec.push_back(fake_blob); + num_output = num_priors_per_location_ * num_classes_; + Blob fake_output_conf; + fake_top_vec.clear(); + fake_top_vec.push_back(&fake_output_conf); + conv_layer_conf.SetUp(fake_bottom_vec, fake_top_vec); + conv_layer_conf.Forward(fake_bottom_vec, fake_top_vec); + + fake_bottom_vec.clear(); + fake_bottom_vec.push_back(&fake_output_conf); + fake_top_vec.clear(); + Blob fake_permute_conf; + fake_top_vec.push_back(&fake_permute_conf); + permute_layer.SetUp(fake_bottom_vec, fake_top_vec); + permute_layer.Forward(fake_bottom_vec, fake_top_vec); + + vector conf_shape(4, 1); + conf_shape[0] = num_; + conf_shape[1] = num_output * height_ * width_; + blob_bottom_conf_->Reshape(conf_shape); + fake_bottom_vec.clear(); + fake_bottom_vec.push_back(&fake_permute_conf); + fake_top_vec.clear(); + fake_top_vec.push_back(blob_bottom_conf_); + flatten_layer.SetUp(fake_bottom_vec, fake_top_vec); + flatten_layer.Forward(fake_bottom_vec, fake_top_vec); + + // 4) Fill prior bboxes. 
+ PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(5); + prior_box_param->add_max_size(10); + prior_box_param->add_aspect_ratio(3.); + prior_box_param->set_flip(true); + + PriorBoxLayer prior_layer(layer_param); + fake_bottom_vec.clear(); + fake_bottom_vec.push_back(fake_blob); + fake_bottom_vec.push_back(fake_input); + fake_top_vec.clear(); + fake_top_vec.push_back(blob_bottom_prior_); + prior_layer.SetUp(fake_bottom_vec, fake_top_vec); + prior_layer.Forward(fake_bottom_vec, fake_top_vec); + + delete fake_blob; + delete fake_input; + } + int num_; + int num_classes_; + int width_; + int height_; + int num_priors_per_location_; + int num_priors_; + Blob* const blob_bottom_loc_; + Blob* const blob_bottom_conf_; + Blob* const blob_bottom_prior_; + Blob* const blob_bottom_gt_; + Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(MultiBoxLossLayerTest, TestDtypesAndDevices); + +TYPED_TEST(MultiBoxLossLayerTest, TestSetUp) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + MultiBoxLossParameter* multibox_loss_param = + layer_param.mutable_multibox_loss_param(); + multibox_loss_param->set_num_classes(3); + for (int i = 0; i < 2; ++i) { + bool share_location = kBoolChoices[i]; + this->Fill(share_location); + for (int j = 0; j < 2; ++j) { + MultiBoxLossParameter_MatchType match_type = kMatchTypes[j]; + for (int k = 0; k < 2; ++k) { + bool use_prior = kBoolChoices[k]; + for (int m = 0; m < 3; ++m) { + MiningType mining_type = kMiningType[m]; + if (!share_location && + mining_type != MultiBoxLossParameter_MiningType_NONE) { + continue; + } + multibox_loss_param->set_share_location(share_location); + multibox_loss_param->set_match_type(match_type); + multibox_loss_param->set_use_prior_for_matching(use_prior); + multibox_loss_param->set_mining_type(mining_type); + MultiBoxLossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, 
this->blob_top_vec_); + } + } + } + } +} + +TYPED_TEST(MultiBoxLossLayerTest, TestLocGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + layer_param.add_propagate_down(true); + layer_param.add_propagate_down(false); + LossParameter* loss_param = layer_param.mutable_loss_param(); + MultiBoxLossParameter* multibox_loss_param = + layer_param.mutable_multibox_loss_param(); + multibox_loss_param->set_num_classes(this->num_classes_); + for (int l = 0; l < 2; ++l) { + MultiBoxLossParameter_LocLossType loc_loss_type = kLocLossTypes[l]; + for (int i = 0; i < 2; ++i) { + bool share_location = kBoolChoices[i]; + this->Fill(share_location); + for (int j = 0; j < 2; ++j) { + MultiBoxLossParameter_MatchType match_type = kMatchTypes[j]; + for (int k = 0; k < 1; ++k) { + bool use_prior = kBoolChoices[k]; + for (int n = 0; n < 4; ++n) { + LossParameter_NormalizationMode normalize = kNormalizationModes[n]; + loss_param->set_normalization(normalize); + for (int u = 0; u < 2; ++u) { + bool use_difficult_gt = kBoolChoices[u]; + for (int m = 0; m < 3; ++m) { + MiningType mining_type = kMiningType[m]; + if (!share_location && + mining_type != MultiBoxLossParameter_MiningType_NONE) { + continue; + } + multibox_loss_param->set_loc_loss_type(loc_loss_type); + multibox_loss_param->set_share_location(share_location); + multibox_loss_param->set_match_type(match_type); + multibox_loss_param->set_use_prior_for_matching(use_prior); + multibox_loss_param->set_use_difficult_gt(use_difficult_gt); + multibox_loss_param->set_mining_type(mining_type); + MultiBoxLossLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2, 1701); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); + } + } + } + } + } + } + } +} + +TYPED_TEST(MultiBoxLossLayerTest, TestConfGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + LossParameter* loss_param = layer_param.mutable_loss_param(); + 
layer_param.add_propagate_down(false); + layer_param.add_propagate_down(true); + MultiBoxLossParameter* multibox_loss_param = + layer_param.mutable_multibox_loss_param(); + multibox_loss_param->set_num_classes(this->num_classes_); + for (int c = 0; c < 2; ++c) { + MultiBoxLossParameter_ConfLossType conf_loss_type = kConfLossTypes[c]; + for (int i = 0; i < 2; ++i) { + bool share_location = kBoolChoices[i]; + this->Fill(share_location); + for (int j = 0; j < 2; ++j) { + MultiBoxLossParameter_MatchType match_type = kMatchTypes[j]; + for (int k = 0; k < 1; ++k) { + bool use_prior = kBoolChoices[k]; + for (int n = 0; n < 4; ++n) { + LossParameter_NormalizationMode normalize = kNormalizationModes[n]; + loss_param->set_normalization(normalize); + for (int u = 0; u < 2; ++u) { + bool use_difficult_gt = kBoolChoices[u]; + for (int m = 0; m < 3; ++m) { + MiningType mining_type = kMiningType[m]; + if (!share_location && + mining_type != MultiBoxLossParameter_MiningType_NONE) { + continue; + } + multibox_loss_param->set_conf_loss_type(conf_loss_type); + multibox_loss_param->set_share_location(share_location); + multibox_loss_param->set_match_type(match_type); + multibox_loss_param->set_use_prior_for_matching(use_prior); + multibox_loss_param->set_use_difficult_gt(use_difficult_gt); + multibox_loss_param->set_background_label_id(0); + multibox_loss_param->set_mining_type(mining_type); + MultiBoxLossLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2, 1701); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 1); + } + } + } + } + } + } + } +} + +} // namespace caffe diff --git a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp index 8cc21022305..0a906e22056 100644 --- a/src/caffe/test/test_multinomial_logistic_loss_layer.cpp +++ b/src/caffe/test/test_multinomial_logistic_loss_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel 
Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_mvn_layer.cpp b/src/caffe/test/test_mvn_layer.cpp index 28a762d2741..81d626748d2 100644 --- a/src/caffe/test/test_mvn_layer.cpp +++ b/src/caffe/test/test_mvn_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/blob.hpp" diff --git a/src/caffe/test/test_net.cpp b/src/caffe/test/test_net.cpp index 24b957f2acc..5b97a8bfb29 100644 --- a/src/caffe/test/test_net.cpp +++ b/src/caffe/test/test_net.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include #include @@ -17,12 +54,13 @@ namespace caffe { -template -class NetTest : public MultiDeviceTest { - typedef typename TypeParam::Dtype Dtype; + +template +class ParentTest : public ParentType { + typedef typename ParentType::Dtype Dtype; protected: - NetTest() : seed_(1701) {} + ParentTest() : seed_(1701) {} virtual void InitNetFromProtoString(const string& proto) { NetParameter param; @@ -843,7 +881,17 @@ class NetTest : public MultiDeviceTest { shared_ptr > net_; }; +template +class NetTest : public ParentTest> { +}; + +template +class NetTestCPU : public ParentTest> { +}; + + TYPED_TEST_CASE(NetTest, TestDtypesAndDevices); +TYPED_TEST_CASE(NetTestCPU, TestDtypes); TYPED_TEST(NetTest, TestHasBlob) { this->InitTinyNet(); @@ -2446,6 +2494,315 @@ TYPED_TEST(NetTest, TestReshape) { EXPECT_FALSE(same_spatial_shape); } +// TODO: this test should work for Caffe Engine as well +// but there were problems visible on Intel OpenMP +// that need to be investigated +#ifdef MKL2017_SUPPORTED +// This test is just checking if this +// configuration does not explode +TYPED_TEST(NetTestCPU, TestForwardReshapeForward) { + typedef TypeParam Dtype; + const string& proto = + "name: 'TestNetwork' " + " layer 
{" + " top: 'data'" + " top: 'label'" + " name: 'data'" + " type: 'DummyData'" + " dummy_data_param {" + " shape: { dim: 32 dim: 3 dim: 227 dim: 227 }" + " data_filler {" + " type: 'constant'" + " value: 0.01" + " }" + " }" + " transform_param {" + " mirror: true" + " crop_size: 224" + " mean_value: 104" + " mean_value: 117" + " mean_value: 123" + " }" + " }" + " layer {" + " bottom: 'data'" + " top: 'conv'" + " name: 'conv1'" + " type: 'Convolution'" + " param {" + " lr_mult: 1" + " decay_mult: 1" + " }" + " convolution_param {" + " " + " num_output: 64" + " engine: MKL2017 " + " pad: 3" + " kernel_size: 7" + " stride: 2" + " weight_filler {" + " type: 'xavier'" + " }" + " bias_term: false" + " }" + " }" + " layer {" + " bottom: 'conv'" + " top: 'relu1'" + " name: 'relu1'" + " type: 'ReLU'" + " relu_param {" + " engine: MKL2017 " + " " + " }" + " }" + " layer {" + " bottom: 'conv'" + " top: 'relu2'" + " name: 'relu2'" + " type: 'ReLU'" + " relu_param {" + " engine: MKL2017 " + " " + " }" + " }" + " layer {" + " bottom: 'relu1'" + " bottom: 'relu2'" + " top: 'concat'" + " name: 'concat'" + " type: 'Concat'" + " concat_param {" + " engine: MKL2017 " + " " + " }" + " } " + " layer {" + " bottom: 'concat'" + " top: 'lrn'" + " name: 'LRN'" + " type: 'LRN'" + " lrn_param {" + " engine: MKL2017 " + " local_size: 5" + " alpha: 0.0001" + " beta: 0.75" + " }" + " }" + " layer {" + " bottom: 'lrn'" + " top: 'pooling'" + " name: 'Pooling'" + " type: 'Pooling'" + " pooling_param {" + " engine: MKL2017 " + " kernel_size: 5" + " stride: 2" + " pool: MAX" + " }" + " }" + " layer {" + " bottom: 'pooling'" + " top: 'bn'" + " name: 'BatchNorm'" + " type: 'BatchNorm'" + " batch_norm_param {" + " engine: MKL2017 " + " }" + " }"; + this->InitNetFromProtoString(proto); + this->net_->Forward(); + shared_ptr > input_blob = this->net_->blob_by_name("data"); + input_blob->Reshape(1, 3, 1280, 720); + this->net_->Forward(); +} +#if 0 +TYPED_TEST(NetTest, TestTotalForwardReshape) { + typedef 
typename TypeParam::Dtype Dtype; + // We set up bottom blobs of two different sizes, switch between + // them, check that forward and backward both run and the results + // are the same, and check that the output shapes change. + Caffe::set_random_seed(this->seed_); + Caffe::set_mode(Caffe::CPU); + FillerParameter filler_param; + filler_param.set_std(1); + GaussianFiller filler(filler_param); + // Check smaller shape first as larger first could hide realloc failures. + Blob blob1(2, 3, 12, 10); + Blob blob2(4, 3, 9, 11); + ASSERT_LT(blob1.count(), blob2.count()); + filler.Fill(&blob1); + filler.Fill(&blob2); + const string& proto = + "name: 'TestNetwork' " + " layer {" + " top: 'data'" + " top: 'label'" + " name: 'data'" + " type: 'DummyData'" + " dummy_data_param {" + " shape: { dim: 3 dim: 3 dim: 13 dim: 11 }" + " data_filler {" + " type: 'constant'" + " value: 0.01" + " }" + " }" + " transform_param {" + " mirror: true" + " crop_size: 224" + " mean_value: 104" + " mean_value: 117" + " mean_value: 123" + " }" + " }" + " layer {" + " bottom: 'data'" + " top: 'conv'" + " name: 'conv1'" + " type: 'Convolution'" + " param {" + " lr_mult: 1" + " decay_mult: 1" + " }" + " convolution_param {" + " " + " num_output: 64" + " engine: MKL2017 " + " pad: 3" + " kernel_size: 7" + " stride: 2" + " weight_filler {" + " type: 'xavier'" + " }" + " bias_term: false" + " }" + " }" + " layer {" + " bottom: 'conv'" + " top: 'relu1'" + " name: 'relu1'" + " type: 'ReLU'" + " relu_param {" + " engine: MKL2017 " + " " + " }" + " }" + " layer {" + " bottom: 'conv'" + " top: 'relu2'" + " name: 'relu2'" + " type: 'ReLU'" + " relu_param {" + " engine: MKL2017 " + " " + " }" + " }" + " layer {" + " bottom: 'relu1'" + " bottom: 'relu2'" + " top: 'concat'" + " name: 'concat'" + " type: 'Concat'" + " concat_param {" + " engine: MKL2017 " + " " + " }" + " } " + " layer {" + " bottom: 'concat'" + " top: 'lrn'" + " name: 'LRN'" + " type: 'LRN'" + " lrn_param {" + " engine: MKL2017 " + " local_size: 
5" + " alpha: 0.0001" + " beta: 0.75" + " }" + " }" + " layer {" + " bottom: 'lrn'" + " top: 'pooling'" + " name: 'Pooling'" + " type: 'Pooling'" + " pooling_param {" + " engine: MKL2017 " + " kernel_size: 5" + " stride: 2" + " pool: MAX" + " }" + " }" + " layer {" + " bottom: 'pooling'" + " top: 'bn'" + " name: 'BatchNorm'" + " type: 'BatchNorm'" + " batch_norm_param {" + " engine: MKL2017 " + " }" + " }"; + this->InitNetFromProtoString(proto); + shared_ptr > input_blob = this->net_->blob_by_name("data"); + Blob* output_blob = this->net_->output_blobs()[0]; + input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), + blob1.width()); + + caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->Forward(); + // call backward just to make sure it runs + this->net_->Backward(); + Blob output1(output_blob->num(), output_blob->channels(), + output_blob->height(), output_blob->width()); + caffe_copy(output1.count(), output_blob->cpu_data(), + output1.mutable_cpu_data()); + + input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), + blob2.width()); + + caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->Forward(); + this->net_->Backward(); + Blob output2(output_blob->num(), output_blob->channels(), + output_blob->height(), output_blob->width()); + caffe_copy(output2.count(), output_blob->cpu_data(), + output2.mutable_cpu_data()); + + input_blob->Reshape(blob1.num(), blob1.channels(), blob1.height(), + blob1.width()); + + caffe_copy(blob1.count(), blob1.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->Forward(); + this->net_->Backward(); + for (int i = 0; i < output1.count(); ++i) { + EXPECT_FLOAT_EQ(*(output1.cpu_data() + i), *(output_blob->cpu_data() + i)); + } + + input_blob->Reshape(blob2.num(), blob2.channels(), blob2.height(), + blob2.width()); + + caffe_copy(blob2.count(), blob2.cpu_data(), input_blob->mutable_cpu_data()); + this->net_->Forward(); + 
this->net_->Backward(); + for (int i = 0; i < output2.count(); ++i) { + EXPECT_FLOAT_EQ(*(output2.cpu_data() + i), *(output_blob->cpu_data() + i)); + } + + EXPECT_EQ(output1.num(), blob1.num()); + EXPECT_EQ(output2.num(), blob2.num()); + bool same_spatial_shape = true; + const int kFirstSpatialAxis = 2; + for (int i = kFirstSpatialAxis; i < output1.num_axes(); ++i) { + if (output1.shape(i) != output2.shape(i)) { + same_spatial_shape = false; + break; + } + } + EXPECT_FALSE(same_spatial_shape); +} +#endif +#endif + + TYPED_TEST(NetTest, TestSkipPropagateDown) { // check bottom_need_backward if propagate_down is true this->InitSkipPropNet(false); @@ -2601,4 +2958,767 @@ TYPED_TEST(NetTest, TestAllInOneNetDeploy) { ASSERT_TRUE(found_data); } + +class CompileNetTest : public ::testing::Test { + protected: + void RunCompilerNetTest( + const string& input_param_string, const string& compiled_param_string) { + NetParameter input_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + input_param_string, &input_param)); + NetParameter expected_compiled_param; + CHECK(google::protobuf::TextFormat::ParseFromString( + compiled_param_string, &expected_compiled_param)); + NetParameter actual_compiled_param; + Net::CompileNet(input_param, &actual_compiled_param); + actual_compiled_param.mutable_compile_net_state()->Clear(); + expected_compiled_param.mutable_compile_net_state()->Clear(); + string expect_net_string = expected_compiled_param.DebugString(); + string actual_net_string = actual_compiled_param.DebugString(); + EXPECT_EQ(expect_net_string, + actual_net_string); + // Also test idempotence. 
+ NetParameter double_compiled_param; + Net::CompileNet(actual_compiled_param, &double_compiled_param); + double_compiled_param.mutable_compile_net_state()->Clear(); + string double_net_string = double_compiled_param.DebugString(); + EXPECT_EQ(actual_net_string, + double_net_string); + } +}; + +TEST_F(CompileNetTest, TestRemoveBatchNorm1) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'conv' " + " top: 'conv' " + " type: 'Convolution' " + "} " + "layer { " + " bottom: 'conv' " + " name: 'bn' " + " top: 'conv' " + " type: 'BatchNorm' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'conv' " + " bottom: 'label' " + "} "; + + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'conv' " + " top: 'conv' " + " type: 'Convolution' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'conv' " + " bottom: 'label' " + "} "; + this->RunCompilerNetTest(input_proto, output_proto); +} + +TEST_F(CompileNetTest, TestRemoveBatchNorm2) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'fc1' " + " top: 'fc1' " + " type: 'InnerProduct' " + "} " + "layer { " + " bottom: 'fc1' " + " name: 'bn' " + " top: 'bn' " + " type: 'BatchNorm' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'bn' " + " bottom: 'label' " + "} "; + + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'fc1' " + " top: 'fc1' " + " type: 'InnerProduct' " + 
"} " + "layer { " + " bottom: 'fc1' " + " name: 'bn' " + " top: 'bn' " + " type: 'BatchNorm' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'bn' " + " bottom: 'label' " + "} "; + this->RunCompilerNetTest(input_proto, output_proto); +} + +TEST_F(CompileNetTest, TestRemoveBatchNorm3) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'conv' " + " top: 'conv' " + " type: 'Convolution' " + "} " + "layer { " + " bottom: 'conv' " + " name: 'bn' " + " top: 'conv' " + " type: 'BatchNorm' " + " batch_norm_param { " + " use_global_stats: false" + " }" + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'conv' " + " bottom: 'label' " + "} "; + + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'conv' " + " top: 'conv' " + " type: 'Convolution' " + "} " + "layer { " + " bottom: 'conv' " + " name: 'bn' " + " top: 'conv' " + " type: 'BatchNorm' " + " batch_norm_param { " + " use_global_stats: false" + " }" + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'conv' " + " bottom: 'label' " + "} "; + this->RunCompilerNetTest(input_proto, output_proto); +} + +TEST_F(CompileNetTest, TestRemoveBatchNorm4) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'conv' " + " top: 'conv' " + " type: 'Convolution' " + "} " + "layer { " + " bottom: 'conv' " + " name: 'bn' " + " top: 'conv' " + " type: 'BatchNorm' " + " batch_norm_param { " + " use_global_stats: true" + " }" + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'conv' " + " bottom: 
'label' " + "} "; + + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'conv' " + " top: 'conv' " + " type: 'Convolution' " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'conv' " + " bottom: 'label' " + "} "; + this->RunCompilerNetTest(input_proto, output_proto); +} +#ifdef MKL2017_SUPPORTED +// If BatchNorm of engine MKL2017 +// produce blob consumed by +// Scale Layer then Scale Layer can be dropped +TEST_F(CompileNetTest, TestCompileNetBatchNorm) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'bn' " + " top: 'bn' " + " type: 'BatchNorm' " + " batch_norm_param { " + " engine: MKL2017 " + " } " + "} " + "layer { " + " bottom: 'bn' " + " top: 'sc' " + " name: 'sc' " + " type: 'Scale' " + " scale_param { " + " bias_term: true " + " }" + "}" + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'sc' " + " bottom: 'label' " + "} "; + + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'bn' " + " top: 'sc' " + " type: 'BatchNorm' " + " batch_norm_param { " + " engine: MKL2017 " + " bias_term: true " + " } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'sc' " + " bottom: 'label' " + "} "; + this->RunCompilerNetTest(input_proto, output_proto); +} + +// Combined BatchNorm (inPlace) followed by scale Layer and InPlace Relu +TEST_F(CompileNetTest, TestCompileNetBatchNormInPlace) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 
'data' " + " name: 'bn' " + " top: 'data' " + " type: 'BatchNorm' " + " batch_norm_param { " + " engine: MKL2017 " + " } " + "} " + "layer { " + " bottom: 'data' " + " top: 'data' " + " name: 'sc' " + " type: 'Scale' " + " scale_param { " + " bias_term: true " + " }" + "}" + "layer { " + " bottom: 'data' " + " top: 'data' " + " name: 'relu' " + " type: 'ReLU' " + " relu_param { " + " engine: MKL2017 " + " } " + "}" + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'data' " + " bottom: 'label' " + "} "; + + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'bn' " + " top: 'data_x' " + " type: 'BatchNorm' " + " batch_norm_param { " + " engine: MKL2017 " + " bias_term: true " + " } " + "} " + "layer { " + " bottom: 'data_x' " + " top: 'data_x' " + " name: 'relu' " + " type: 'ReLU' " + " relu_param { " + " engine: MKL2017 " + " } " + "}" + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'data_x' " + " bottom: 'label' " + "} "; + this->RunCompilerNetTest(input_proto, output_proto); +} +#endif + +#if defined(MKL2017_SUPPORTED) && defined(MKLDNN_SUPPORTED) +// Combined Batch Norm and Conv ReLU +TEST_F(CompileNetTest, TestCompileNetBatchNormConvolution) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'bn' " + " top: 'bn' " + " type: 'BatchNorm' " + " batch_norm_param { " + " engine: MKL2017 " + " } " + "} " + "layer { " + " bottom: 'bn' " + " top: 'conv' " + " name: 'sc' " + " type: 'Scale' " + " scale_param { " + " bias_term: true " + " }" + "}" + "layer { " + " bottom: 'conv' " + " name: 'conv' " + " top: 'relu' " + " type: 'Convolution' " + " convolution_param { " + " engine: MKLDNN " + " } " + "} " + "layer { " + " bottom: 'relu' " + " 
top: 'relu' " + " name: 'relu' " + " type: 'ReLU' " + " relu_param { " + " engine: MKLDNN " + " } " + "}" + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'relu' " + " bottom: 'label' " + "} "; + + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'bn' " + " top: 'conv' " + " type: 'BatchNorm' " + " batch_norm_param { " + " engine: MKL2017 " + " bias_term: true " + " } " + "} " + "layer { " + " bottom: 'conv' " + " name: 'conv' " + " top: 'relu' " + " type: 'Convolution' " + " convolution_param { " + " engine: MKLDNN " + " relu: true " + "negative_slope: 0" + " } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'relu' " + " bottom: 'label' " + "} "; + this->RunCompilerNetTest(input_proto, output_proto); +} +#endif + +#ifdef MKLDNN_SUPPORTED +// If BatchNorm of engine MKLDNN +// produce blob consumed by +// Scale Layer then Scale Layer can be dropped +TEST_F(CompileNetTest, TestCompileNetBatchNormMKLDNN) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'bn' " + " top: 'bn' " + " type: 'BatchNorm' " + " batch_norm_param { " + " engine: MKLDNN " + " } " + "} " + "layer { " + " bottom: 'bn' " + " top: 'sc' " + " name: 'sc' " + " type: 'Scale' " + " scale_param { " + " bias_term: true " + " }" + "}" + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'sc' " + " bottom: 'label' " + "} "; + + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'bn' " + " top: 'sc' " + " type: 'BatchNorm' " + " batch_norm_param { " + " engine: MKLDNN " + " bias_term: true " + " } " + "} 
" + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'sc' " + " bottom: 'label' " + "} "; + this->RunCompilerNetTest(input_proto, output_proto); +} + +// If Convolution of engine MKLDNN +// is followed by ReLU of engine MKLDNN +TEST_F(CompileNetTest, TestCompileNetConvolution) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'conv' " + " top: 'relu' " + " type: 'Convolution' " + " convolution_param { " + " engine: MKLDNN " + " } " + "} " + "layer { " + " bottom: 'relu' " + " top: 'relu' " + " name: 'relu' " + " type: 'ReLU' " + " relu_param { " + " engine: MKLDNN " + " } " + "}" + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'relu' " + " bottom: 'label' " + "} "; + + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'conv' " + " top: 'relu' " + " type: 'Convolution' " + " convolution_param { " + " engine: MKLDNN " + " relu: true " + "negative_slope: 0" + " } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'relu' " + " bottom: 'label' " + "} "; + this->RunCompilerNetTest(input_proto, output_proto); +} + +TEST_F(CompileNetTest, TestCompileNetLayerParamEngineConvolution) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'conv' " + " top: 'relu' " + " type: 'Convolution' " + " engine: 'MKLDNN:CPU' " + " convolution_param { " + " } " + "} " + "layer { " + " bottom: 'relu' " + " top: 'relu' " + " name: 'relu' " + " type: 'ReLU' " + " engine: 'MKLDNN:CPU' " + " relu_param { " + " } " + "}" + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " 
bottom: 'relu' " + " bottom: 'label' " + "} "; + + const string& output_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'conv' " + " top: 'relu' " + " type: 'Convolution' " + " engine: 'MKLDNN:CPU' " + " convolution_param { " + " relu: true " + "negative_slope: 0" + " } " + "} " + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'relu' " + " bottom: 'label' " + "} "; + this->RunCompilerNetTest(input_proto, output_proto); +} +// If Convolution of engine MKLDNN +// is followed by ReLU of engine MKLDNN +// , but major subengine is MKLDNN::DLA then there +// no merging ov Convolution and Relu +TEST_F(CompileNetTest, TestNoCompileNetLayerParamEngineConvolution) { + const string& input_proto = + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'conv' " + " top: 'relu' " + " type: 'Convolution' " + " engine: 'MKLDNN:DLA,CPU' " + " convolution_param { " + " } " + "} " + "layer { " + " bottom: 'relu' " + " top: 'relu' " + " name: 'relu' " + " type: 'ReLU' " + " engine: 'MKLDNN:DLA,CPU' " + " relu_param { " + " } " + "}" + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'relu' " + " bottom: 'label' " + "} "; + + this->RunCompilerNetTest(input_proto, input_proto); +} + +#endif + +// If There is BatchNorm followed by Scale layer, but BatchNorm +// is of engine Caffe then no reduction takes place +TEST_F(CompileNetTest, TestNoCompileNet) { + const string& input_proto= + "name: 'TestNetwork' " + "layer { " + " name: 'data' " + " type: 'Data' " + " top: 'data' " + " top: 'label' " + "} " + "layer { " + " bottom: 'data' " + " name: 'bn' " + " top: 'bn' " + " type: 'BatchNorm' " + " batch_norm_param { " + " engine: CAFFE " + " } " + "} " + "layer { " + " bottom: 'bn' " + " top: 'sc' " + " name: 'sc' " + 
" type: 'Scale' " + " scale_param { " + " bias_term: true " + " }" + "}" + "layer { " + " name: 'loss' " + " type: 'SoftmaxWithLoss' " + " bottom: 'sc' " + " bottom: 'label' " + "} "; + this->RunCompilerNetTest(input_proto, input_proto); +} + } // namespace caffe diff --git a/src/caffe/test/test_neuron_layer.cpp b/src/caffe/test/test_neuron_layer.cpp index 342f825cec3..9be0a1abe25 100644 --- a/src/caffe/test/test_neuron_layer.cpp +++ b/src/caffe/test/test_neuron_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_normalize_layer.cpp b/src/caffe/test/test_normalize_layer.cpp new file mode 100644 index 00000000000..4685454ce5b --- /dev/null +++ b/src/caffe/test/test_normalize_layer.cpp @@ -0,0 +1,337 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/normalize_layer.hpp" +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class NormalizeLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: + NormalizeLayerTest() + : blob_bottom_(new Blob(2, 3, 2, 3)), + blob_top_(new Blob()) { + // fill the values + FillerParameter filler_param; + // GaussianFiller filler(filler_param); + filler_param.set_value(1); + ConstantFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~NormalizeLayerTest() { delete blob_bottom_; delete blob_top_; } + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(NormalizeLayerTest, TestDtypesAndDevices); + +TYPED_TEST(NormalizeLayerTest, TestForward) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + 
NormalizeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Test norm + int num = this->blob_bottom_->num(); + int channels = this->blob_bottom_->channels(); + int height = this->blob_bottom_->height(); + int width = this->blob_bottom_->width(); + + for (int i = 0; i < num; ++i) { + Dtype norm = 0; + for (int j = 0; j < channels; ++j) { + for (int k = 0; k < height; ++k) { + for (int l = 0; l < width; ++l) { + Dtype data = this->blob_top_->data_at(i, j, k, l); + norm += data * data; + } + } + } + const Dtype kErrorBound = 1e-5; + // expect unit norm + EXPECT_NEAR(1, sqrt(norm), kErrorBound); + } +} + +TYPED_TEST(NormalizeLayerTest, TestForwardScale) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + NormalizeParameter* norm_param = layer_param.mutable_norm_param(); + norm_param->mutable_scale_filler()->set_type("constant"); + norm_param->mutable_scale_filler()->set_value(10); + NormalizeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Test norm + int num = this->blob_bottom_->num(); + int channels = this->blob_bottom_->channels(); + int height = this->blob_bottom_->height(); + int width = this->blob_bottom_->width(); + + for (int i = 0; i < num; ++i) { + Dtype norm = 0; + for (int j = 0; j < channels; ++j) { + for (int k = 0; k < height; ++k) { + for (int l = 0; l < width; ++l) { + Dtype data = this->blob_top_->data_at(i, j, k, l); + norm += data * data; + } + } + } + const Dtype kErrorBound = 1e-5; + // expect unit norm + EXPECT_NEAR(10, sqrt(norm), kErrorBound); + } +} + +TYPED_TEST(NormalizeLayerTest, TestForwardScaleChannels) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + NormalizeParameter* norm_param = layer_param.mutable_norm_param(); + norm_param->set_channel_shared(false); + 
norm_param->mutable_scale_filler()->set_type("constant"); + norm_param->mutable_scale_filler()->set_value(10); + NormalizeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Test norm + int num = this->blob_bottom_->num(); + int channels = this->blob_bottom_->channels(); + int height = this->blob_bottom_->height(); + int width = this->blob_bottom_->width(); + + for (int i = 0; i < num; ++i) { + Dtype norm = 0; + for (int j = 0; j < channels; ++j) { + for (int k = 0; k < height; ++k) { + for (int l = 0; l < width; ++l) { + Dtype data = this->blob_top_->data_at(i, j, k, l); + norm += data * data; + } + } + } + const Dtype kErrorBound = 1e-5; + // expect unit norm + EXPECT_NEAR(10, sqrt(norm), kErrorBound); + } +} + +TYPED_TEST(NormalizeLayerTest, TestForwardEltWise) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + NormalizeParameter* norm_param = layer_param.mutable_norm_param(); + norm_param->set_across_spatial(false); + NormalizeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Test norm + int num = this->blob_bottom_->num(); + int channels = this->blob_bottom_->channels(); + int height = this->blob_bottom_->height(); + int width = this->blob_bottom_->width(); + + for (int i = 0; i < num; ++i) { + for (int k = 0; k < height; ++k) { + for (int l = 0; l < width; ++l) { + Dtype norm = 0; + for (int j = 0; j < channels; ++j) { + Dtype data = this->blob_top_->data_at(i, j, k, l); + norm += data * data; + } + const Dtype kErrorBound = 1e-5; + // expect unit norm + EXPECT_NEAR(1, sqrt(norm), kErrorBound); + } + } + } +} + +TYPED_TEST(NormalizeLayerTest, TestForwardEltWiseScale) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + NormalizeParameter* norm_param = layer_param.mutable_norm_param(); + 
norm_param->set_across_spatial(false); + norm_param->mutable_scale_filler()->set_type("constant"); + norm_param->mutable_scale_filler()->set_value(10); + NormalizeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Test norm + int num = this->blob_bottom_->num(); + int channels = this->blob_bottom_->channels(); + int height = this->blob_bottom_->height(); + int width = this->blob_bottom_->width(); + + for (int i = 0; i < num; ++i) { + for (int k = 0; k < height; ++k) { + for (int l = 0; l < width; ++l) { + Dtype norm = 0; + for (int j = 0; j < channels; ++j) { + Dtype data = this->blob_top_->data_at(i, j, k, l); + norm += data * data; + } + const Dtype kErrorBound = 1e-5; + // expect unit norm + EXPECT_NEAR(10, sqrt(norm), kErrorBound); + } + } + } +} + +TYPED_TEST(NormalizeLayerTest, TestForwardEltWiseScaleChannel) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + NormalizeParameter* norm_param = layer_param.mutable_norm_param(); + norm_param->set_across_spatial(false); + norm_param->set_channel_shared(false); + norm_param->mutable_scale_filler()->set_type("constant"); + norm_param->mutable_scale_filler()->set_value(10); + NormalizeLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Test norm + int num = this->blob_bottom_->num(); + int channels = this->blob_bottom_->channels(); + int height = this->blob_bottom_->height(); + int width = this->blob_bottom_->width(); + + for (int i = 0; i < num; ++i) { + for (int k = 0; k < height; ++k) { + for (int l = 0; l < width; ++l) { + Dtype norm = 0; + for (int j = 0; j < channels; ++j) { + Dtype data = this->blob_top_->data_at(i, j, k, l); + norm += data * data; + } + const Dtype kErrorBound = 1e-5; + // expect unit norm + EXPECT_NEAR(10, sqrt(norm), kErrorBound); + } + } + } +} + 
+TYPED_TEST(NormalizeLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + NormalizeLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_, 0); +} + +TYPED_TEST(NormalizeLayerTest, TestGradientScale) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + NormalizeParameter* norm_param = layer_param.mutable_norm_param(); + norm_param->mutable_scale_filler()->set_type("constant"); + norm_param->mutable_scale_filler()->set_value(3); + NormalizeLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NormalizeLayerTest, TestGradientScaleChannel) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + NormalizeParameter* norm_param = layer_param.mutable_norm_param(); + norm_param->set_channel_shared(false); + norm_param->mutable_scale_filler()->set_type("constant"); + norm_param->mutable_scale_filler()->set_value(3); + NormalizeLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NormalizeLayerTest, TestGradientEltWise) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + NormalizeParameter* norm_param = layer_param.mutable_norm_param(); + norm_param->set_across_spatial(false); + NormalizeLayer layer(layer_param); + GradientChecker checker(1e-3, 1e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NormalizeLayerTest, TestGradientEltWiseScale) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + NormalizeParameter* norm_param = layer_param.mutable_norm_param(); + norm_param->set_across_spatial(false); + 
norm_param->mutable_scale_filler()->set_type("constant"); + norm_param->mutable_scale_filler()->set_value(3); + NormalizeLayer layer(layer_param); + GradientChecker checker(1e-3, 2e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +TYPED_TEST(NormalizeLayerTest, TestGradientEltWiseScaleChannel) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + NormalizeParameter* norm_param = layer_param.mutable_norm_param(); + norm_param->set_across_spatial(false); + norm_param->set_channel_shared(false); + norm_param->mutable_scale_filler()->set_type("constant"); + norm_param->mutable_scale_filler()->set_value(3); + NormalizeLayer layer(layer_param); + GradientChecker checker(1e-3, 2e-3); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +} // namespace caffe diff --git a/src/caffe/test/test_permute_layer.cpp b/src/caffe/test/test_permute_layer.cpp new file mode 100644 index 00000000000..d98a9a37264 --- /dev/null +++ b/src/caffe/test/test_permute_layer.cpp @@ -0,0 +1,296 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/permute_layer.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +static const float eps = 1e-6; + +template +class PermuteLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + protected: + PermuteLayerTest() + : blob_bottom_(new Blob(2, 2, 2, 3)), + blob_top_(new Blob()) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_top_vec_.push_back(blob_top_); + } + + virtual ~PermuteLayerTest() { + delete blob_bottom_; + delete blob_top_; + } + + Blob* const blob_bottom_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(PermuteLayerTest, TestDtypesAndDevices); + +TYPED_TEST(PermuteLayerTest, TestSetUp) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PermuteParameter* permute_param = layer_param.mutable_permute_param(); + permute_param->add_order(0); + permute_param->add_order(2); + permute_param->add_order(3); + permute_param->add_order(1); + PermuteLayer layer(layer_param); + + this->blob_bottom_->Reshape(2, 3, 4, 5); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 4); + EXPECT_EQ(this->blob_top_->height(), 5); + EXPECT_EQ(this->blob_top_->width(), 3); +} + +TYPED_TEST(PermuteLayerTest, TestSetUpIdentity) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PermuteLayer layer(layer_param); + + this->blob_bottom_->Reshape(2, 3, 4, 5); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 2); + 
EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 4); + EXPECT_EQ(this->blob_top_->width(), 5); +} + +TYPED_TEST(PermuteLayerTest, TestFowardIdentity) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PermuteLayer layer(layer_param); + + this->blob_bottom_->Reshape(2, 3, 4, 5); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + EXPECT_EQ(this->blob_top_->num(), 2); + EXPECT_EQ(this->blob_top_->channels(), 3); + EXPECT_EQ(this->blob_top_->height(), 4); + EXPECT_EQ(this->blob_top_->width(), 5); + for (int i = 0; i < this->blob_bottom_->count(); ++i) { + EXPECT_NEAR(this->blob_bottom_->cpu_data()[i], + this->blob_top_->cpu_data()[i], eps); + } +} + +TYPED_TEST(PermuteLayerTest, TestFowrad2D) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PermuteParameter* permute_param = layer_param.mutable_permute_param(); + permute_param->add_order(0); + permute_param->add_order(1); + permute_param->add_order(3); + permute_param->add_order(2); + PermuteLayer layer(layer_param); + + const int num = 2; + const int channels = 3; + const int height = 2; + const int width = 3; + this->blob_bottom_->Reshape(num, channels, height, width); + // Input: 2 x 3 channels of: + // [1 2 3] + // [4 5 6] + for (int i = 0; i < height * width * num * channels; i += height * width) { + this->blob_bottom_->mutable_cpu_data()[i + 0] = 1; + this->blob_bottom_->mutable_cpu_data()[i + 1] = 2; + this->blob_bottom_->mutable_cpu_data()[i + 2] = 3; + this->blob_bottom_->mutable_cpu_data()[i + 3] = 4; + this->blob_bottom_->mutable_cpu_data()[i + 4] = 5; + this->blob_bottom_->mutable_cpu_data()[i + 5] = 6; + } + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Expected output: 2 x 3 channels of: + // [1 4] + // [2 5] + // [3 6] + for (int i = 0; i < height * width * num * 
channels; i += height * width) { + EXPECT_EQ(this->blob_top_->cpu_data()[i + 0], 1); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 1], 4); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 2], 2); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 3], 5); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 4], 3); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 5], 6); + } +} + +TYPED_TEST(PermuteLayerTest, TestFowrad3D) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PermuteParameter* permute_param = layer_param.mutable_permute_param(); + permute_param->add_order(0); + permute_param->add_order(2); + permute_param->add_order(3); + permute_param->add_order(1); + PermuteLayer layer(layer_param); + + const int num = 2; + const int channels = 2; + const int height = 2; + const int width = 3; + this->blob_bottom_->Reshape(num, channels, height, width); + // Input: 2 of: + // [1 2 3] + // [4 5 6] + // ======= + // [7 8 9] + // [10 11 12] + int inner_dim = channels * height * width; + for (int i = 0; i < num; ++i) { + for (int j = 0; j < inner_dim; ++j) { + this->blob_bottom_->mutable_cpu_data()[i * inner_dim + j] = j + 1; + } + } + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Expected output: 2 of: + // [1 7] + // [2 8] + // [3 9] + // ===== + // [4 10] + // [5 11] + // [6 12] + for (int i = 0; i < num * inner_dim; i += inner_dim) { + EXPECT_EQ(this->blob_top_->cpu_data()[i + 0], 1); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 1], 7); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 2], 2); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 3], 8); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 4], 3); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 5], 9); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 6], 4); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 7], 10); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 8], 5); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 9], 11); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 
10], 6); + EXPECT_EQ(this->blob_top_->cpu_data()[i + 11], 12); + } +} + +TYPED_TEST(PermuteLayerTest, TestTwoPermute) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PermuteParameter* permute_param = layer_param.mutable_permute_param(); + permute_param->add_order(0); + permute_param->add_order(2); + permute_param->add_order(3); + permute_param->add_order(1); + PermuteLayer layer1(layer_param); + + Blob input1(2, 3, 4, 5); + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(&input1); + Blob output1; + vector*> bottom_vec, top_vec; + bottom_vec.push_back(&input1); + top_vec.push_back(&output1); + layer1.SetUp(bottom_vec, top_vec); + layer1.Forward(bottom_vec, top_vec); + + EXPECT_EQ(output1.num(), 2); + EXPECT_EQ(output1.channels(), 4); + EXPECT_EQ(output1.height(), 5); + EXPECT_EQ(output1.width(), 3); + + // Create second permute layer which transfers back the original order.
+ permute_param->clear_order(); + permute_param->add_order(0); + permute_param->add_order(3); + permute_param->add_order(1); + permute_param->add_order(2); + PermuteLayer layer2(layer_param); + + Blob output2; + bottom_vec.clear(); + bottom_vec.push_back(&output1); + top_vec.clear(); + top_vec.push_back(&output2); + layer2.SetUp(bottom_vec, top_vec); + layer2.Forward(bottom_vec, top_vec); + + EXPECT_EQ(output2.num(), 2); + EXPECT_EQ(output2.channels(), 3); + EXPECT_EQ(output2.height(), 4); + EXPECT_EQ(output2.width(), 5); + + for (int i = 0; i < output2.count(); ++i) { + EXPECT_NEAR(input1.cpu_data()[i], output2.cpu_data()[i], eps); + } +} + +TYPED_TEST(PermuteLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + PermuteParameter* permute_param = layer_param.mutable_permute_param(); + permute_param->add_order(0); + permute_param->add_order(2); + permute_param->add_order(3); + permute_param->add_order(1); + PermuteLayer layer(layer_param); + GradientChecker checker(1e-2, 1e-2); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +} // namespace caffe diff --git a/src/caffe/test/test_platform.cpp b/src/caffe/test/test_platform.cpp index f3513e08814..61260e4bd10 100644 --- a/src/caffe/test/test_platform.cpp +++ b/src/caffe/test/test_platform.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifndef CPU_ONLY #include diff --git a/src/caffe/test/test_pooling_layer.cpp b/src/caffe/test/test_pooling_layer.cpp index bb95cae032d..7f76b394b48 100644 --- a/src/caffe/test/test_pooling_layer.cpp +++ b/src/caffe/test/test_pooling_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_power_layer.cpp b/src/caffe/test/test_power_layer.cpp index 1aa587ac97a..ddddc3bb92e 100644 --- a/src/caffe/test/test_power_layer.cpp +++ b/src/caffe/test/test_power_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_prior_box_layer.cpp b/src/caffe/test/test_prior_box_layer.cpp new file mode 100644 index 00000000000..d1eab1ad6ec --- /dev/null +++ b/src/caffe/test/test_prior_box_layer.cpp @@ -0,0 +1,599 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/prior_box_layer.hpp" + +#include "caffe/test/test_caffe_main.hpp" + +namespace caffe { + +template +class PriorBoxLayerTest : public CPUDeviceTest { + protected: + PriorBoxLayerTest() + : blob_bottom_(new Blob(10, 10, 10, 10)), + blob_data_(new Blob(10, 3, 100, 100)), + blob_top_(new Blob()), + min_size_(4), + max_size_(9) { + Caffe::set_random_seed(1701); + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_); + filler.Fill(this->blob_data_); + blob_bottom_vec_.push_back(blob_bottom_); + blob_bottom_vec_.push_back(blob_data_); + blob_top_vec_.push_back(blob_top_); + } + virtual ~PriorBoxLayerTest() + { + delete blob_top_; + delete blob_data_; + delete blob_bottom_; + } + Blob* const blob_bottom_; + Blob* const blob_data_; + Blob* const blob_top_; + vector*> blob_bottom_vec_; + vector*> 
blob_top_vec_; + size_t min_size_; + size_t max_size_; +}; + +TYPED_TEST_CASE(PriorBoxLayerTest, TestDtypes); + +TYPED_TEST(PriorBoxLayerTest, TestSetup) { + LayerParameter layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(this->min_size_); + prior_box_param->add_max_size(this->max_size_); + PriorBoxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 2); + EXPECT_EQ(this->blob_top_->height(), 100 * 2 * 4); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +TYPED_TEST(PriorBoxLayerTest, TestSetupMultiSize) { + LayerParameter layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(this->min_size_); + prior_box_param->add_min_size(this->min_size_ + 10); + prior_box_param->add_max_size(this->max_size_); + prior_box_param->add_max_size(this->max_size_ + 10); + PriorBoxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 2); + EXPECT_EQ(this->blob_top_->height(), 100 * 4 * 4); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +TYPED_TEST(PriorBoxLayerTest, TestSetupNoMaxSize) { + LayerParameter layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(this->min_size_); + PriorBoxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 2); + EXPECT_EQ(this->blob_top_->height(), 100 * 1 * 4); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +TYPED_TEST(PriorBoxLayerTest, TestSetupMultiSizeNoMaxSize) { + LayerParameter layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + 
prior_box_param->add_min_size(this->min_size_); + prior_box_param->add_min_size(this->min_size_ + 10); + PriorBoxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 2); + EXPECT_EQ(this->blob_top_->height(), 100 * 2 * 4); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +TYPED_TEST(PriorBoxLayerTest, TestSetupAspectRatio1) { + LayerParameter layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(this->min_size_); + prior_box_param->add_max_size(this->max_size_); + prior_box_param->add_aspect_ratio(1.); + prior_box_param->add_aspect_ratio(2.); + prior_box_param->set_flip(false); + PriorBoxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 2); + EXPECT_EQ(this->blob_top_->height(), 100 * 3 * 4); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +TYPED_TEST(PriorBoxLayerTest, TestSetupAspectRatioNoFlip) { + LayerParameter layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(this->min_size_); + prior_box_param->add_max_size(this->max_size_); + prior_box_param->add_aspect_ratio(2.); + prior_box_param->add_aspect_ratio(3.); + prior_box_param->set_flip(false); + PriorBoxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 2); + EXPECT_EQ(this->blob_top_->height(), 100 * 4 * 4); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +TYPED_TEST(PriorBoxLayerTest, TestSetupAspectRatio) { + LayerParameter layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(this->min_size_); + prior_box_param->add_max_size(this->max_size_); + 
prior_box_param->add_aspect_ratio(2.); + prior_box_param->add_aspect_ratio(3.); + PriorBoxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 2); + EXPECT_EQ(this->blob_top_->height(), 100 * 6 * 4); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +TYPED_TEST(PriorBoxLayerTest, TestSetupAspectRatioMultiSize) { + LayerParameter layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(this->min_size_); + prior_box_param->add_min_size(this->min_size_ + 10); + prior_box_param->add_max_size(this->max_size_); + prior_box_param->add_max_size(this->max_size_ + 10); + prior_box_param->add_aspect_ratio(2.); + prior_box_param->add_aspect_ratio(3.); + PriorBoxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + EXPECT_EQ(this->blob_top_->num(), 1); + EXPECT_EQ(this->blob_top_->channels(), 2); + EXPECT_EQ(this->blob_top_->height(), 100 * 12 * 4); + EXPECT_EQ(this->blob_top_->width(), 1); +} + +TYPED_TEST(PriorBoxLayerTest, TestCPU) { + const TypeParam eps = 1e-6; + LayerParameter layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(this->min_size_); + prior_box_param->add_max_size(this->max_size_); + PriorBoxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* top_data = this->blob_top_->cpu_data(); + int dim = this->blob_top_->height(); + // pick a few generated priors and compare against the expected number. 
+ // first prior + EXPECT_NEAR(top_data[0], 0.03, eps); + EXPECT_NEAR(top_data[1], 0.03, eps); + EXPECT_NEAR(top_data[2], 0.07, eps); + EXPECT_NEAR(top_data[3], 0.07, eps); + // second prior + EXPECT_NEAR(top_data[4], 0.02, eps); + EXPECT_NEAR(top_data[5], 0.02, eps); + EXPECT_NEAR(top_data[6], 0.08, eps); + EXPECT_NEAR(top_data[7], 0.08, eps); + // prior in the 5-th row and 5-th col + EXPECT_NEAR(top_data[4*10*2*4+4*2*4], 0.43, eps); + EXPECT_NEAR(top_data[4*10*2*4+4*2*4+1], 0.43, eps); + EXPECT_NEAR(top_data[4*10*2*4+4*2*4+2], 0.47, eps); + EXPECT_NEAR(top_data[4*10*2*4+4*2*4+3], 0.47, eps); + + // check variance + top_data += dim; + for (int d = 0; d < dim; ++d) { + EXPECT_NEAR(top_data[d], 0.1, eps); + } +} + +TYPED_TEST(PriorBoxLayerTest, TestCPUNoMaxSize) { + const TypeParam eps = 1e-6; + LayerParameter layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(this->min_size_); + PriorBoxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* top_data = this->blob_top_->cpu_data(); + int dim = this->blob_top_->height(); + // pick a few generated priors and compare against the expected number. 
+ // first prior + EXPECT_NEAR(top_data[0], 0.03, eps); + EXPECT_NEAR(top_data[1], 0.03, eps); + EXPECT_NEAR(top_data[2], 0.07, eps); + EXPECT_NEAR(top_data[3], 0.07, eps); + // prior in the 5-th row and 5-th col + EXPECT_NEAR(top_data[4*10*1*4+4*1*4], 0.43, eps); + EXPECT_NEAR(top_data[4*10*1*4+4*1*4+1], 0.43, eps); + EXPECT_NEAR(top_data[4*10*1*4+4*1*4+2], 0.47, eps); + EXPECT_NEAR(top_data[4*10*1*4+4*1*4+3], 0.47, eps); + + // check variance + top_data += dim; + for (int d = 0; d < dim; ++d) { + EXPECT_NEAR(top_data[d], 0.1, eps); + } +} + +TYPED_TEST(PriorBoxLayerTest, TestCPUVariance1) { + const TypeParam eps = 1e-6; + LayerParameter layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(this->min_size_); + prior_box_param->add_max_size(this->max_size_); + prior_box_param->add_variance(1.); + PriorBoxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* top_data = this->blob_top_->cpu_data(); + int dim = this->blob_top_->height(); + // pick a few generated priors and compare against the expected number. 
+ // first prior + EXPECT_NEAR(top_data[0], 0.03, eps); + EXPECT_NEAR(top_data[1], 0.03, eps); + EXPECT_NEAR(top_data[2], 0.07, eps); + EXPECT_NEAR(top_data[3], 0.07, eps); + // second prior + EXPECT_NEAR(top_data[4], 0.02, eps); + EXPECT_NEAR(top_data[5], 0.02, eps); + EXPECT_NEAR(top_data[6], 0.08, eps); + EXPECT_NEAR(top_data[7], 0.08, eps); + // prior in the 5-th row and 5-th col + EXPECT_NEAR(top_data[4*10*2*4+4*2*4], 0.43, eps); + EXPECT_NEAR(top_data[4*10*2*4+4*2*4+1], 0.43, eps); + EXPECT_NEAR(top_data[4*10*2*4+4*2*4+2], 0.47, eps); + EXPECT_NEAR(top_data[4*10*2*4+4*2*4+3], 0.47, eps); + + // check variance + top_data += dim; + for (int d = 0; d < dim; ++d) { + EXPECT_NEAR(top_data[d], 1., eps); + } +} + +TYPED_TEST(PriorBoxLayerTest, TestCPUVarianceMulti) { + const TypeParam eps = 1e-6; + LayerParameter layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(this->min_size_); + prior_box_param->add_max_size(this->max_size_); + prior_box_param->add_variance(0.1); + prior_box_param->add_variance(0.2); + prior_box_param->add_variance(0.3); + prior_box_param->add_variance(0.4); + PriorBoxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* top_data = this->blob_top_->cpu_data(); + int dim = this->blob_top_->height(); + // pick a few generated priors and compare against the expected number. 
+ // first prior + EXPECT_NEAR(top_data[0], 0.03, eps); + EXPECT_NEAR(top_data[1], 0.03, eps); + EXPECT_NEAR(top_data[2], 0.07, eps); + EXPECT_NEAR(top_data[3], 0.07, eps); + // second prior + EXPECT_NEAR(top_data[4], 0.02, eps); + EXPECT_NEAR(top_data[5], 0.02, eps); + EXPECT_NEAR(top_data[6], 0.08, eps); + EXPECT_NEAR(top_data[7], 0.08, eps); + // prior in the 5-th row and 5-th col + EXPECT_NEAR(top_data[4*10*2*4+4*2*4], 0.43, eps); + EXPECT_NEAR(top_data[4*10*2*4+4*2*4+1], 0.43, eps); + EXPECT_NEAR(top_data[4*10*2*4+4*2*4+2], 0.47, eps); + EXPECT_NEAR(top_data[4*10*2*4+4*2*4+3], 0.47, eps); + + // check variance + top_data += dim; + for (int d = 0; d < dim; ++d) { + EXPECT_NEAR(top_data[d], 0.1 * (d % 4 + 1), eps); + } +} + +TYPED_TEST(PriorBoxLayerTest, TestCPUAspectRatioNoFlip) { + const TypeParam eps = 1e-6; + LayerParameter layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(this->min_size_); + prior_box_param->add_max_size(this->max_size_); + prior_box_param->add_aspect_ratio(2.); + prior_box_param->set_flip(false); + PriorBoxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* top_data = this->blob_top_->cpu_data(); + int dim = this->blob_top_->height(); + // pick a few generated priors and compare against the expected number. 
+ // first prior + EXPECT_NEAR(top_data[0], 0.03, eps); + EXPECT_NEAR(top_data[1], 0.03, eps); + EXPECT_NEAR(top_data[2], 0.07, eps); + EXPECT_NEAR(top_data[3], 0.07, eps); + // second prior + EXPECT_NEAR(top_data[4], 0.02, eps); + EXPECT_NEAR(top_data[5], 0.02, eps); + EXPECT_NEAR(top_data[6], 0.08, eps); + EXPECT_NEAR(top_data[7], 0.08, eps); + // third prior + EXPECT_NEAR(top_data[8], 0.05 - 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[9], 0.05 - 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[10], 0.05 + 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[11], 0.05 + 0.01*sqrt(2.), eps); + // prior in the 5-th row and 5-th col + EXPECT_NEAR(top_data[4*10*3*4+4*3*4], 0.43, eps); + EXPECT_NEAR(top_data[4*10*3*4+4*3*4+1], 0.43, eps); + EXPECT_NEAR(top_data[4*10*3*4+4*3*4+2], 0.47, eps); + EXPECT_NEAR(top_data[4*10*3*4+4*3*4+3], 0.47, eps); + // prior with ratio 1:2 in the 5-th row and 5-th col + EXPECT_NEAR(top_data[4*10*3*4+4*3*4+8], 0.45 - 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[4*10*3*4+4*3*4+9], 0.45 - 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[4*10*3*4+4*3*4+10], 0.45 + 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[4*10*3*4+4*3*4+11], 0.45 + 0.01*sqrt(2.), eps); + + // check variance + top_data += dim; + for (int d = 0; d < dim; ++d) { + EXPECT_NEAR(top_data[d], 0.1, eps); + } +} + +TYPED_TEST(PriorBoxLayerTest, TestCPUAspectRatio) { + const TypeParam eps = 1e-6; + LayerParameter layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(this->min_size_); + prior_box_param->add_max_size(this->max_size_); + prior_box_param->add_aspect_ratio(2.); + PriorBoxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* top_data = this->blob_top_->cpu_data(); + int dim = this->blob_top_->height(); + // pick a few generated priors and compare against the expected number. 
+ // first prior + EXPECT_NEAR(top_data[0], 0.03, eps); + EXPECT_NEAR(top_data[1], 0.03, eps); + EXPECT_NEAR(top_data[2], 0.07, eps); + EXPECT_NEAR(top_data[3], 0.07, eps); + // second prior + EXPECT_NEAR(top_data[4], 0.02, eps); + EXPECT_NEAR(top_data[5], 0.02, eps); + EXPECT_NEAR(top_data[6], 0.08, eps); + EXPECT_NEAR(top_data[7], 0.08, eps); + // third prior + EXPECT_NEAR(top_data[8], 0.05 - 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[9], 0.05 - 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[10], 0.05 + 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[11], 0.05 + 0.01*sqrt(2.), eps); + // forth prior + EXPECT_NEAR(top_data[12], 0.05 - 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[13], 0.05 - 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[14], 0.05 + 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[15], 0.05 + 0.02*sqrt(2.), eps); + // prior in the 5-th row and 5-th col + EXPECT_NEAR(top_data[4*10*4*4+4*4*4], 0.43, eps); + EXPECT_NEAR(top_data[4*10*4*4+4*4*4+1], 0.43, eps); + EXPECT_NEAR(top_data[4*10*4*4+4*4*4+2], 0.47, eps); + EXPECT_NEAR(top_data[4*10*4*4+4*4*4+3], 0.47, eps); + // prior with ratio 1:2 in the 5-th row and 5-th col + EXPECT_NEAR(top_data[4*10*4*4+4*4*4+8], 0.45 - 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[4*10*4*4+4*4*4+9], 0.45 - 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[4*10*4*4+4*4*4+10], 0.45 + 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[4*10*4*4+4*4*4+11], 0.45 + 0.01*sqrt(2.), eps); + // prior with ratio 2:1 in the 5-th row and 5-th col + EXPECT_NEAR(top_data[4*10*4*4+4*4*4+12], 0.45 - 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[4*10*4*4+4*4*4+13], 0.45 - 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[4*10*4*4+4*4*4+14], 0.45 + 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[4*10*4*4+4*4*4+15], 0.45 + 0.02*sqrt(2.), eps); + + // check variance + top_data += dim; + for (int d = 0; d < dim; ++d) { + EXPECT_NEAR(top_data[d], 0.1, eps); + } +} + +TYPED_TEST(PriorBoxLayerTest, TestCPUAspectRatioMultiSize) { + const TypeParam eps = 1e-6; + LayerParameter 
layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(this->min_size_); + prior_box_param->add_min_size(this->min_size_ + 4); + prior_box_param->add_max_size(this->max_size_); + prior_box_param->add_max_size(this->max_size_ + 9); + prior_box_param->add_aspect_ratio(2.); + prior_box_param->set_clip(true); + PriorBoxLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* top_data = this->blob_top_->cpu_data(); + int dim = this->blob_top_->height(); + // pick a few generated priors and compare against the expected number. + // first prior + EXPECT_NEAR(top_data[0], 0.03, eps); + EXPECT_NEAR(top_data[1], 0.03, eps); + EXPECT_NEAR(top_data[2], 0.07, eps); + EXPECT_NEAR(top_data[3], 0.07, eps); + // second prior + EXPECT_NEAR(top_data[4], 0.02, eps); + EXPECT_NEAR(top_data[5], 0.02, eps); + EXPECT_NEAR(top_data[6], 0.08, eps); + EXPECT_NEAR(top_data[7], 0.08, eps); + // third prior + EXPECT_NEAR(top_data[8], 0.05 - 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[9], 0.05 - 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[10], 0.05 + 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[11], 0.05 + 0.01*sqrt(2.), eps); + // forth prior + EXPECT_NEAR(top_data[12], 0.05 - 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[13], 0.05 - 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[14], 0.05 + 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[15], 0.05 + 0.02*sqrt(2.), eps); + // fifth prior + EXPECT_NEAR(top_data[16], 0.01, eps); + EXPECT_NEAR(top_data[17], 0.01, eps); + EXPECT_NEAR(top_data[18], 0.09, eps); + EXPECT_NEAR(top_data[19], 0.09, eps); + // sixth prior + EXPECT_NEAR(top_data[20], 0.00, eps); + EXPECT_NEAR(top_data[21], 0.00, eps); + EXPECT_NEAR(top_data[22], 0.11, eps); + EXPECT_NEAR(top_data[23], 0.11, eps); + // seventh prior + EXPECT_NEAR(top_data[24], 0.00, eps); + EXPECT_NEAR(top_data[25], 
0.05 - 0.04/sqrt(2.), eps); + EXPECT_NEAR(top_data[26], 0.05 + 0.04*sqrt(2.), eps); + EXPECT_NEAR(top_data[27], 0.05 + 0.04/sqrt(2.), eps); + // forth prior + EXPECT_NEAR(top_data[28], 0.05 - 0.04/sqrt(2.), eps); + EXPECT_NEAR(top_data[29], 0.00, eps); + EXPECT_NEAR(top_data[30], 0.05 + 0.04/sqrt(2.), eps); + EXPECT_NEAR(top_data[31], 0.05 + 0.04*sqrt(2.), eps); + // prior in the 5-th row and 5-th col + EXPECT_NEAR(top_data[8*10*4*4+8*4*4], 0.43, eps); + EXPECT_NEAR(top_data[8*10*4*4+8*4*4+1], 0.43, eps); + EXPECT_NEAR(top_data[8*10*4*4+8*4*4+2], 0.47, eps); + EXPECT_NEAR(top_data[8*10*4*4+8*4*4+3], 0.47, eps); + // prior with ratio 1:2 in the 5-th row and 5-th col + EXPECT_NEAR(top_data[8*10*4*4+8*4*4+8], 0.45 - 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[8*10*4*4+8*4*4+9], 0.45 - 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[8*10*4*4+8*4*4+10], 0.45 + 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[8*10*4*4+8*4*4+11], 0.45 + 0.01*sqrt(2.), eps); + // prior with ratio 2:1 in the 5-th row and 5-th col + EXPECT_NEAR(top_data[8*10*4*4+8*4*4+12], 0.45 - 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[8*10*4*4+8*4*4+13], 0.45 - 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[8*10*4*4+8*4*4+14], 0.45 + 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[8*10*4*4+8*4*4+15], 0.45 + 0.02*sqrt(2.), eps); + + // check variance + top_data += dim; + for (int d = 0; d < dim; ++d) { + EXPECT_NEAR(top_data[d], 0.1, eps); + } +} + +TYPED_TEST(PriorBoxLayerTest, TestCPUFixStep) { + const TypeParam eps = 1e-6; + LayerParameter layer_param; + PriorBoxParameter* prior_box_param = layer_param.mutable_prior_box_param(); + prior_box_param->add_min_size(this->min_size_); + prior_box_param->add_max_size(this->max_size_); + prior_box_param->add_aspect_ratio(2.); + prior_box_param->set_img_size(100); + prior_box_param->set_step(10); + PriorBoxLayer layer(layer_param); + vector shape(4, 10); + shape[2] = 20; + this->blob_bottom_->Reshape(shape); + shape[1] = 3; + shape[2] = 200; + shape[3] = 100; + 
this->blob_data_->Reshape(shape); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + layer.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + // Now, check values + const TypeParam* top_data = this->blob_top_->cpu_data(); + int dim = this->blob_top_->height(); + // pick a few generated priors and compare against the expected number. + // first prior + EXPECT_NEAR(top_data[0], 0.03, eps); + EXPECT_NEAR(top_data[1], 0.03, eps); + EXPECT_NEAR(top_data[2], 0.07, eps); + EXPECT_NEAR(top_data[3], 0.07, eps); + // second prior + EXPECT_NEAR(top_data[4], 0.02, eps); + EXPECT_NEAR(top_data[5], 0.02, eps); + EXPECT_NEAR(top_data[6], 0.08, eps); + EXPECT_NEAR(top_data[7], 0.08, eps); + // third prior + EXPECT_NEAR(top_data[8], 0.05 - 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[9], 0.05 - 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[10], 0.05 + 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[11], 0.05 + 0.01*sqrt(2.), eps); + // forth prior + EXPECT_NEAR(top_data[12], 0.05 - 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[13], 0.05 - 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[14], 0.05 + 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[15], 0.05 + 0.02*sqrt(2.), eps); + // prior in the 15-th row and 5-th col + EXPECT_NEAR(top_data[14*10*4*4+4*4*4], 0.43, eps); + EXPECT_NEAR(top_data[14*10*4*4+4*4*4+1], 1.43, eps); + EXPECT_NEAR(top_data[14*10*4*4+4*4*4+2], 0.47, eps); + EXPECT_NEAR(top_data[14*10*4*4+4*4*4+3], 1.47, eps); + // prior with ratio 1:2 in the 15-th row and 5-th col + EXPECT_NEAR(top_data[14*10*4*4+4*4*4+8], 0.45 - 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[14*10*4*4+4*4*4+9], 1.45 - 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[14*10*4*4+4*4*4+10], 0.45 + 0.02*sqrt(2.), eps); + EXPECT_NEAR(top_data[14*10*4*4+4*4*4+11], 1.45 + 0.01*sqrt(2.), eps); + // prior with ratio 2:1 in the 15-th row and 5-th col + EXPECT_NEAR(top_data[14*10*4*4+4*4*4+12], 0.45 - 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[14*10*4*4+4*4*4+13], 1.45 - 0.02*sqrt(2.), eps); + 
EXPECT_NEAR(top_data[14*10*4*4+4*4*4+14], 0.45 + 0.01*sqrt(2.), eps); + EXPECT_NEAR(top_data[14*10*4*4+4*4*4+15], 1.45 + 0.02*sqrt(2.), eps); + + // check variance + top_data += dim; + for (int d = 0; d < dim; ++d) { + EXPECT_NEAR(top_data[d], 0.1, eps); + } +} + +} // namespace caffe diff --git a/src/caffe/test/test_protobuf.cpp b/src/caffe/test/test_protobuf.cpp index 01de461afdf..6b4f9619f7f 100644 --- a/src/caffe/test/test_protobuf.cpp +++ b/src/caffe/test/test_protobuf.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + // This is simply a script that tries serializing protocol buffer in text // format. Nothing special here and no actual code is being tested. #include diff --git a/src/caffe/test/test_random_number_generator.cpp b/src/caffe/test/test_random_number_generator.cpp index 833b0047b5d..e0eb20d021c 100644 --- a/src/caffe/test/test_random_number_generator.cpp +++ b/src/caffe/test/test_random_number_generator.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_reduction_layer.cpp b/src/caffe/test/test_reduction_layer.cpp index 6ed7cda6adc..119cfd4bcc5 100644 --- a/src/caffe/test/test_reduction_layer.cpp +++ b/src/caffe/test/test_reduction_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_reshape_layer.cpp b/src/caffe/test/test_reshape_layer.cpp index 4f2613868d4..d7427b5fdba 100644 --- a/src/caffe/test/test_reshape_layer.cpp +++ b/src/caffe/test/test_reshape_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_rnn_layer.cpp b/src/caffe/test/test_rnn_layer.cpp index dd8952d62d6..c06f1050b3c 100644 --- a/src/caffe/test/test_rnn_layer.cpp +++ b/src/caffe/test/test_rnn_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_scale_layer.cpp b/src/caffe/test/test_scale_layer.cpp index ad116795f44..5822e4414d3 100644 --- a/src/caffe/test/test_scale_layer.cpp +++ b/src/caffe/test/test_scale_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp index 5dfd7656db2..cf5f6b8ee9e 100644 --- a/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp +++ b/src/caffe/test/test_sigmoid_cross_entropy_loss_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_slice_layer.cpp b/src/caffe/test/test_slice_layer.cpp index c2b231e1ef4..d8c5500de7e 100644 --- a/src/caffe/test/test_slice_layer.cpp +++ b/src/caffe/test/test_slice_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_smooth_L1_loss_layer.cpp b/src/caffe/test/test_smooth_L1_loss_layer.cpp new file mode 100644 index 00000000000..6c5c41800f2 --- /dev/null +++ b/src/caffe/test/test_smooth_L1_loss_layer.cpp @@ -0,0 +1,126 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include + +#include "gtest/gtest.h" + +#include "caffe/blob.hpp" +#include "caffe/common.hpp" +#include "caffe/filler.hpp" +#include "caffe/layers/smooth_L1_loss_layer.hpp" + +#include "caffe/test/test_caffe_main.hpp" +#include "caffe/test/test_gradient_check_util.hpp" + +namespace caffe { + +template +class SmoothL1LossLayerTest : public MultiDeviceTest { + typedef typename TypeParam::Dtype Dtype; + + protected: + SmoothL1LossLayerTest() + : blob_bottom_data_(new Blob(10, 5, 1, 1)), + blob_bottom_label_(new Blob(10, 5, 1, 1)), + blob_top_loss_(new Blob()) { + // fill the values + FillerParameter filler_param; + GaussianFiller filler(filler_param); + filler.Fill(this->blob_bottom_data_); + blob_bottom_vec_.push_back(blob_bottom_data_); + filler.Fill(this->blob_bottom_label_); + blob_bottom_vec_.push_back(blob_bottom_label_); + blob_top_vec_.push_back(blob_top_loss_); + } + virtual ~SmoothL1LossLayerTest() { + delete blob_bottom_data_; + delete blob_bottom_label_; + delete blob_top_loss_; + } + + void TestForward() { + // Get the loss without a specified objective weight -- should be + // equivalent to explicitly specifying a weight of 1. 
+ LayerParameter layer_param; + SmoothL1LossLayer layer_weight_1(layer_param); + layer_weight_1.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype loss_weight_1 = + layer_weight_1.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + + // Get the loss again with a different objective weight; check that it is + // scaled appropriately. + const Dtype kLossWeight = 3.7; + layer_param.add_loss_weight(kLossWeight); + SmoothL1LossLayer layer_weight_2(layer_param); + layer_weight_2.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype loss_weight_2 = + layer_weight_2.Forward(this->blob_bottom_vec_, this->blob_top_vec_); + const Dtype kErrorMargin = 1e-5; + EXPECT_NEAR(loss_weight_1 * kLossWeight, loss_weight_2, kErrorMargin); + // Make sure the loss is non-trivial. + const Dtype kNonTrivialAbsThresh = 1e-1; + EXPECT_GE(fabs(loss_weight_1), kNonTrivialAbsThresh); + } + + Blob* const blob_bottom_data_; + Blob* const blob_bottom_label_; + Blob* const blob_top_loss_; + vector*> blob_bottom_vec_; + vector*> blob_top_vec_; +}; + +TYPED_TEST_CASE(SmoothL1LossLayerTest, TestDtypesAndDevices); + +TYPED_TEST(SmoothL1LossLayerTest, TestForward) { + this->TestForward(); +} + +TYPED_TEST(SmoothL1LossLayerTest, TestGradient) { + typedef typename TypeParam::Dtype Dtype; + LayerParameter layer_param; + const Dtype kLossWeight = 3.7; + layer_param.add_loss_weight(kLossWeight); + SmoothL1LossLayer layer(layer_param); + layer.SetUp(this->blob_bottom_vec_, this->blob_top_vec_); + GradientChecker checker(1e-2, 1e-2, 1701); + checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_, + this->blob_top_vec_); +} + +} // namespace caffe diff --git a/src/caffe/test/test_softmax_layer.cpp b/src/caffe/test/test_softmax_layer.cpp index 94443576724..b2c8382a3b3 100644 --- a/src/caffe/test/test_softmax_layer.cpp +++ b/src/caffe/test/test_softmax_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by 
the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include diff --git a/src/caffe/test/test_softmax_with_loss_layer.cpp b/src/caffe/test/test_softmax_with_loss_layer.cpp index c67f3e0d907..3d1eee837c2 100644 --- a/src/caffe/test/test_softmax_with_loss_layer.cpp +++ b/src/caffe/test/test_softmax_with_loss_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_solver.cpp b/src/caffe/test/test_solver.cpp index b181642681c..5de1274b162 100644 --- a/src/caffe/test/test_solver.cpp +++ b/src/caffe/test/test_solver.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include #include diff --git a/src/caffe/test/test_solver_factory.cpp b/src/caffe/test/test_solver_factory.cpp index eef5290fe2e..ef6b42e7fca 100644 --- a/src/caffe/test/test_solver_factory.cpp +++ b/src/caffe/test/test_solver_factory.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_split_layer.cpp b/src/caffe/test/test_split_layer.cpp index 007142126ea..dc7db6e195d 100644 --- a/src/caffe/test/test_split_layer.cpp +++ b/src/caffe/test/test_split_layer.cpp @@ -1,9 +1,43 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include -#include "google/protobuf/text_format.h" -#include "gtest/gtest.h" - #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" @@ -14,6 +48,9 @@ #include "caffe/test/test_caffe_main.hpp" #include "caffe/test/test_gradient_check_util.hpp" +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" + namespace caffe { template diff --git a/src/caffe/test/test_spp_layer.cpp b/src/caffe/test/test_spp_layer.cpp index 59a3af2aec1..66660fccfa7 100644 --- a/src/caffe/test/test_spp_layer.cpp +++ b/src/caffe/test/test_spp_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" @@ -41,7 +78,12 @@ class SPPLayerTest : public MultiDeviceTest { blob_bottom_vec_3_.push_back(blob_bottom_3_); blob_top_vec_.push_back(blob_top_); } - virtual ~SPPLayerTest() { delete blob_bottom_; delete blob_top_; } + virtual ~SPPLayerTest() { + delete blob_bottom_; + delete blob_top_; + delete blob_bottom_2_; + delete blob_bottom_3_; + } Blob* const blob_bottom_; Blob* const blob_bottom_2_; diff --git a/src/caffe/test/test_stochastic_pooling.cpp b/src/caffe/test/test_stochastic_pooling.cpp index cd5db8383ab..07b6b8b758b 100644 --- a/src/caffe/test/test_stochastic_pooling.cpp +++ b/src/caffe/test/test_stochastic_pooling.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_syncedmem.cpp b/src/caffe/test/test_syncedmem.cpp index 16dfb58230f..b20de65c25e 100644 --- a/src/caffe/test/test_syncedmem.cpp +++ b/src/caffe/test/test_syncedmem.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_tanh_layer.cpp b/src/caffe/test/test_tanh_layer.cpp index bb8699a8e91..238ffc14bf5 100644 --- a/src/caffe/test/test_tanh_layer.cpp +++ b/src/caffe/test/test_tanh_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include diff --git a/src/caffe/test/test_threshold_layer.cpp b/src/caffe/test/test_threshold_layer.cpp index 1e84cc5ab84..2b16ec3adb9 100644 --- a/src/caffe/test/test_threshold_layer.cpp +++ b/src/caffe/test/test_threshold_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_tile_layer.cpp b/src/caffe/test/test_tile_layer.cpp index 7ff75520e8e..9ae0791d354 100644 --- a/src/caffe/test/test_tile_layer.cpp +++ b/src/caffe/test/test_tile_layer.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "gtest/gtest.h" diff --git a/src/caffe/test/test_training_utils.cpp b/src/caffe/test/test_training_utils.cpp new file mode 100644 index 00000000000..396289f2ff2 --- /dev/null +++ b/src/caffe/test/test_training_utils.cpp @@ -0,0 +1,100 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include + +#include "boost/algorithm/string.hpp" +#include "boost/lexical_cast.hpp" + +#include "google/protobuf/text_format.h" +#include "gtest/gtest.h" + +#include "caffe/solver.hpp" +#include "caffe/test/test_caffe_main.hpp" + +#include "caffe/util/io.hpp" +#include "caffe/util/upgrade_proto.hpp" + +#include "caffe/training_utils.hpp" + +namespace caffe { + +class TrainingUtilsTest : public ::testing::Test { + protected: + TrainingUtilsTest() + : topology_file_name_( + CMAKE_SOURCE_DIR "caffe/test/test_data/test_topology.prototxt") { + } + + string topology_file_name_; +}; + +TEST_F(TrainingUtilsTest, GetStagesFromFlags) { + string stages_flag = "stage1,stage2, stage3"; + + EXPECT_EQ(get_stages_from_flags(stages_flag).size(), 3); +} + +TEST_F(TrainingUtilsTest, SetSolverParamsFromFlags) { + caffe::SolverParameter solver_param; + string solver; + string engine = "new engine"; + string stages = "stage1,stage2"; + int level = 1; + + use_flags(&solver_param, solver, engine, level, stages); + + EXPECT_EQ(solver_param.engine(), engine); + EXPECT_EQ(solver_param.train_state().stage_size(), 2); + EXPECT_EQ(solver_param.train_state().level(), 1); +} + +TEST_F(TrainingUtilsTest, MultiphaseTrainNegativeCpuMode) { + caffe::MultiPhaseSolverParameter multi_solver_param; + caffe::SolverBatchSizePair* solver_param_pair = + multi_solver_param.add_params_pair(); + + solver_param_pair->mutable_solver_params()->set_solver_mode( + caffe::SolverParameter_SolverMode_GPU); + + solver_param_pair->mutable_solver_params()->set_net( + this->topology_file_name_); + EXPECT_EQ(multiphase_train(&multi_solver_param, "", "", 0, ""), -1); +} + +} // namespace caffe diff --git a/src/caffe/test/test_upgrade_proto.cpp b/src/caffe/test/test_upgrade_proto.cpp index 9dcc2aa55ec..ac482c4ca74 100644 --- a/src/caffe/test/test_upgrade_proto.cpp +++ b/src/caffe/test/test_upgrade_proto.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All 
contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include diff --git a/src/caffe/test/test_util_blas.cpp b/src/caffe/test/test_util_blas.cpp index 9ee8818ff1d..c572bf8ef18 100644 --- a/src/caffe/test/test_util_blas.cpp +++ b/src/caffe/test/test_util_blas.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifndef CPU_ONLY // CPU-GPU test #include "gtest/gtest.h" diff --git a/src/caffe/util/bbox_util.cpp b/src/caffe/util/bbox_util.cpp new file mode 100644 index 00000000000..715f8468501 --- /dev/null +++ b/src/caffe/util/bbox_util.cpp @@ -0,0 +1,2289 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "boost/iterator/counting_iterator.hpp" + +#include "caffe/util/bbox_util.hpp" + +namespace caffe { + +bool SortBBoxAscend(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2) { + return bbox1.score() < bbox2.score(); +} + +bool SortBBoxDescend(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2) { + return bbox1.score() > bbox2.score(); +} + +template +bool SortScorePairAscend(const pair& pair1, + const pair& pair2) { + return pair1.first < pair2.first; +} + +// Explicit initialization. +template bool SortScorePairAscend(const pair& pair1, + const pair& pair2); +template bool SortScorePairAscend(const pair >& pair1, + const pair >& pair2); + +template +bool SortScorePairDescend(const pair& pair1, + const pair& pair2) { + return pair1.first > pair2.first; +} + +// Explicit initialization. 
+template bool SortScorePairDescend(const pair& pair1, + const pair& pair2); +template bool SortScorePairDescend(const pair >& pair1, + const pair >& pair2); + +NormalizedBBox UnitBBox() { + NormalizedBBox unit_bbox; + unit_bbox.set_xmin(0.); + unit_bbox.set_ymin(0.); + unit_bbox.set_xmax(1.); + unit_bbox.set_ymax(1.); + return unit_bbox; +} + +bool IsCrossBoundaryBBox(const NormalizedBBox& bbox) { + return bbox.xmin() < 0 || bbox.xmin() > 1 || + bbox.ymin() < 0 || bbox.ymin() > 1 || + bbox.xmax() < 0 || bbox.xmax() > 1 || + bbox.ymax() < 0 || bbox.ymax() > 1; +} + +void IntersectBBox(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2, + NormalizedBBox* intersect_bbox) { + if (bbox2.xmin() > bbox1.xmax() || bbox2.xmax() < bbox1.xmin() || + bbox2.ymin() > bbox1.ymax() || bbox2.ymax() < bbox1.ymin()) { + // Return [0, 0, 0, 0] if there is no intersection. + intersect_bbox->set_xmin(0); + intersect_bbox->set_ymin(0); + intersect_bbox->set_xmax(0); + intersect_bbox->set_ymax(0); + } else { + intersect_bbox->set_xmin(std::max(bbox1.xmin(), bbox2.xmin())); + intersect_bbox->set_ymin(std::max(bbox1.ymin(), bbox2.ymin())); + intersect_bbox->set_xmax(std::min(bbox1.xmax(), bbox2.xmax())); + intersect_bbox->set_ymax(std::min(bbox1.ymax(), bbox2.ymax())); + } +} + +float BBoxSize(const NormalizedBBox& bbox, const bool normalized) { + if (bbox.xmax() < bbox.xmin() || bbox.ymax() < bbox.ymin()) { + // If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0. + return 0; + } else { + if (bbox.has_size()) { + return bbox.size(); + } else { + float width = bbox.xmax() - bbox.xmin(); + float height = bbox.ymax() - bbox.ymin(); + if (normalized) { + return width * height; + } else { + // If bbox is not within range [0, 1]. + return (width + 1) * (height + 1); + } + } + } +} + +template +Dtype BBoxSize(const Dtype* bbox, const bool normalized) { + if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) { + // If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0. 
+ return Dtype(0.); + } else { + const Dtype width = bbox[2] - bbox[0]; + const Dtype height = bbox[3] - bbox[1]; + if (normalized) { + return width * height; + } else { + // If bbox is not within range [0, 1]. + return (width + 1) * (height + 1); + } + } +} + +template float BBoxSize(const float* bbox, const bool normalized); +template double BBoxSize(const double* bbox, const bool normalized); + +void ClipBBox(const NormalizedBBox& bbox, NormalizedBBox* clip_bbox) { + clip_bbox->set_xmin(std::max(std::min(bbox.xmin(), 1.f), 0.f)); + clip_bbox->set_ymin(std::max(std::min(bbox.ymin(), 1.f), 0.f)); + clip_bbox->set_xmax(std::max(std::min(bbox.xmax(), 1.f), 0.f)); + clip_bbox->set_ymax(std::max(std::min(bbox.ymax(), 1.f), 0.f)); + clip_bbox->clear_size(); + clip_bbox->set_size(BBoxSize(*clip_bbox)); + clip_bbox->set_difficult(bbox.difficult()); +} + +void ClipBBox(const NormalizedBBox& bbox, const float height, const float width, + NormalizedBBox* clip_bbox) { + clip_bbox->set_xmin(std::max(std::min(bbox.xmin(), width), 0.f)); + clip_bbox->set_ymin(std::max(std::min(bbox.ymin(), height), 0.f)); + clip_bbox->set_xmax(std::max(std::min(bbox.xmax(), width), 0.f)); + clip_bbox->set_ymax(std::max(std::min(bbox.ymax(), height), 0.f)); + clip_bbox->clear_size(); + clip_bbox->set_size(BBoxSize(*clip_bbox)); + clip_bbox->set_difficult(bbox.difficult()); +} + +void ScaleBBox(const NormalizedBBox& bbox, const int height, const int width, + NormalizedBBox* scale_bbox) { + scale_bbox->set_xmin(bbox.xmin() * width); + scale_bbox->set_ymin(bbox.ymin() * height); + scale_bbox->set_xmax(bbox.xmax() * width); + scale_bbox->set_ymax(bbox.ymax() * height); + scale_bbox->clear_size(); + bool normalized = !(width > 1 || height > 1); + scale_bbox->set_size(BBoxSize(*scale_bbox, normalized)); + scale_bbox->set_difficult(bbox.difficult()); +} + +void OutputBBox(const NormalizedBBox& bbox, const pair& img_size, + const bool has_resize, const ResizeParameter& resize_param, + NormalizedBBox* 
out_bbox) { + const int height = img_size.first; + const int width = img_size.second; + NormalizedBBox temp_bbox = bbox; + if (has_resize && resize_param.resize_mode()) { + float resize_height = resize_param.height(); + CHECK_GT(resize_height, 0); + float resize_width = resize_param.width(); + CHECK_GT(resize_width, 0); + float resize_aspect = resize_width / resize_height; + int height_scale = resize_param.height_scale(); + int width_scale = resize_param.width_scale(); + float aspect = static_cast(width) / height; + + float padding; + NormalizedBBox source_bbox; + switch (resize_param.resize_mode()) { + case ResizeParameter_Resize_mode_WARP: + ClipBBox(temp_bbox, &temp_bbox); + ScaleBBox(temp_bbox, height, width, out_bbox); + break; + case ResizeParameter_Resize_mode_FIT_LARGE_SIZE_AND_PAD: + if (aspect > resize_aspect) { + padding = (resize_height - resize_width / aspect) / 2; + source_bbox.set_xmin(0.); + source_bbox.set_ymin(padding / resize_height); + source_bbox.set_xmax(1.); + source_bbox.set_ymax(1. - padding / resize_height); + } else { + padding = (resize_width - resize_height * aspect) / 2; + source_bbox.set_xmin(padding / resize_width); + source_bbox.set_ymin(0.); + source_bbox.set_xmax(1. - padding / resize_width); + source_bbox.set_ymax(1.); + } + ProjectBBox(source_bbox, bbox, &temp_bbox); + ClipBBox(temp_bbox, &temp_bbox); + ScaleBBox(temp_bbox, height, width, out_bbox); + break; + case ResizeParameter_Resize_mode_FIT_SMALL_SIZE: + if (height_scale == 0 || width_scale == 0) { + ClipBBox(temp_bbox, &temp_bbox); + ScaleBBox(temp_bbox, height, width, out_bbox); + } else { + ScaleBBox(temp_bbox, height_scale, width_scale, out_bbox); + ClipBBox(*out_bbox, height, width, out_bbox); + } + break; + default: + LOG(FATAL) << "Unknown resize mode."; + } + } else { + // Clip the normalized bbox first. + ClipBBox(temp_bbox, &temp_bbox); + // Scale the bbox according to the original image size. 
+ ScaleBBox(temp_bbox, height, width, out_bbox); + } +} + +void LocateBBox(const NormalizedBBox& src_bbox, const NormalizedBBox& bbox, + NormalizedBBox* loc_bbox) { + float src_width = src_bbox.xmax() - src_bbox.xmin(); + float src_height = src_bbox.ymax() - src_bbox.ymin(); + loc_bbox->set_xmin(src_bbox.xmin() + bbox.xmin() * src_width); + loc_bbox->set_ymin(src_bbox.ymin() + bbox.ymin() * src_height); + loc_bbox->set_xmax(src_bbox.xmin() + bbox.xmax() * src_width); + loc_bbox->set_ymax(src_bbox.ymin() + bbox.ymax() * src_height); + loc_bbox->set_difficult(bbox.difficult()); +} + +bool ProjectBBox(const NormalizedBBox& src_bbox, const NormalizedBBox& bbox, + NormalizedBBox* proj_bbox) { + if (bbox.xmin() >= src_bbox.xmax() || bbox.xmax() <= src_bbox.xmin() || + bbox.ymin() >= src_bbox.ymax() || bbox.ymax() <= src_bbox.ymin()) { + return false; + } + float src_width = src_bbox.xmax() - src_bbox.xmin(); + float src_height = src_bbox.ymax() - src_bbox.ymin(); + proj_bbox->set_xmin((bbox.xmin() - src_bbox.xmin()) / src_width); + proj_bbox->set_ymin((bbox.ymin() - src_bbox.ymin()) / src_height); + proj_bbox->set_xmax((bbox.xmax() - src_bbox.xmin()) / src_width); + proj_bbox->set_ymax((bbox.ymax() - src_bbox.ymin()) / src_height); + proj_bbox->set_difficult(bbox.difficult()); + ClipBBox(*proj_bbox, proj_bbox); + if (BBoxSize(*proj_bbox) > 0) { + return true; + } else { + return false; + } +} + +void ExtrapolateBBox(const ResizeParameter& param, const int height, + const int width, const NormalizedBBox& crop_bbox, NormalizedBBox* bbox) { + float height_scale = param.height_scale(); + float width_scale = param.width_scale(); + if (height_scale > 0 && width_scale > 0 && + param.resize_mode() == ResizeParameter_Resize_mode_FIT_SMALL_SIZE) { + float orig_aspect = static_cast(width) / height; + float resize_height = param.height(); + float resize_width = param.width(); + float resize_aspect = resize_width / resize_height; + if (orig_aspect < resize_aspect) { + resize_height = 
resize_width / orig_aspect; + } else { + resize_width = resize_height * orig_aspect; + } + float crop_height = resize_height * (crop_bbox.ymax() - crop_bbox.ymin()); + float crop_width = resize_width * (crop_bbox.xmax() - crop_bbox.xmin()); + CHECK_GE(crop_width, width_scale); + CHECK_GE(crop_height, height_scale); + bbox->set_xmin(bbox->xmin() * crop_width / width_scale); + bbox->set_xmax(bbox->xmax() * crop_width / width_scale); + bbox->set_ymin(bbox->ymin() * crop_height / height_scale); + bbox->set_ymax(bbox->ymax() * crop_height / height_scale); + } +} + +float JaccardOverlap(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2, + const bool normalized) { + NormalizedBBox intersect_bbox; + IntersectBBox(bbox1, bbox2, &intersect_bbox); + float intersect_width, intersect_height; + if (normalized) { + intersect_width = intersect_bbox.xmax() - intersect_bbox.xmin(); + intersect_height = intersect_bbox.ymax() - intersect_bbox.ymin(); + } else { + intersect_width = intersect_bbox.xmax() - intersect_bbox.xmin() + 1; + intersect_height = intersect_bbox.ymax() - intersect_bbox.ymin() + 1; + } + if (intersect_width > 0 && intersect_height > 0) { + float intersect_size = intersect_width * intersect_height; + float bbox1_size = BBoxSize(bbox1); + float bbox2_size = BBoxSize(bbox2); + return intersect_size / (bbox1_size + bbox2_size - intersect_size); + } else { + return 0.; + } +} + +template +Dtype JaccardOverlap(const Dtype* bbox1, const Dtype* bbox2) { + if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] || + bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) { + return Dtype(0.); + } else { + const Dtype inter_xmin = std::max(bbox1[0], bbox2[0]); + const Dtype inter_ymin = std::max(bbox1[1], bbox2[1]); + const Dtype inter_xmax = std::min(bbox1[2], bbox2[2]); + const Dtype inter_ymax = std::min(bbox1[3], bbox2[3]); + + const Dtype inter_width = inter_xmax - inter_xmin; + const Dtype inter_height = inter_ymax - inter_ymin; + const Dtype inter_size = inter_width * inter_height; 
+ + const Dtype bbox1_size = BBoxSize(bbox1); + const Dtype bbox2_size = BBoxSize(bbox2); + + return inter_size / (bbox1_size + bbox2_size - inter_size); + } +} + +template float JaccardOverlap(const float* bbox1, const float* bbox2); +template double JaccardOverlap(const double* bbox1, const double* bbox2); + +float BBoxCoverage(const NormalizedBBox& bbox1, const NormalizedBBox& bbox2) { + NormalizedBBox intersect_bbox; + IntersectBBox(bbox1, bbox2, &intersect_bbox); + float intersect_size = BBoxSize(intersect_bbox); + if (intersect_size > 0) { + float bbox1_size = BBoxSize(bbox1); + CHECK_NE(bbox1_size, 0); + return intersect_size / bbox1_size; + } else { + return 0.; + } +} + +bool MeetEmitConstraint(const NormalizedBBox& src_bbox, + const NormalizedBBox& bbox, + const EmitConstraint& emit_constraint) { + EmitType emit_type = emit_constraint.emit_type(); + if (emit_type == EmitConstraint_EmitType_CENTER) { + float x_center = (bbox.xmin() + bbox.xmax()) / 2; + float y_center = (bbox.ymin() + bbox.ymax()) / 2; + if (x_center >= src_bbox.xmin() && x_center <= src_bbox.xmax() && + y_center >= src_bbox.ymin() && y_center <= src_bbox.ymax()) { + return true; + } else { + return false; + } + } else if (emit_type == EmitConstraint_EmitType_MIN_OVERLAP) { + float bbox_coverage = BBoxCoverage(bbox, src_bbox); + return bbox_coverage > emit_constraint.emit_overlap(); + } else { + LOG(FATAL) << "Unknown emit type."; + return false; + } +} + +void EncodeBBox( + const NormalizedBBox& prior_bbox, const vector& prior_variance, + const CodeType code_type, const bool encode_variance_in_target, + const NormalizedBBox& bbox, NormalizedBBox* encode_bbox) { + if (code_type == PriorBoxParameter_CodeType_CORNER) { + if (encode_variance_in_target) { + encode_bbox->set_xmin(bbox.xmin() - prior_bbox.xmin()); + encode_bbox->set_ymin(bbox.ymin() - prior_bbox.ymin()); + encode_bbox->set_xmax(bbox.xmax() - prior_bbox.xmax()); + encode_bbox->set_ymax(bbox.ymax() - prior_bbox.ymax()); + } else { 
+ // Encode variance in bbox. + CHECK_EQ(prior_variance.size(), 4); + for (int i = 0; i < prior_variance.size(); ++i) { + CHECK_GT(prior_variance[i], 0); + } + encode_bbox->set_xmin( + (bbox.xmin() - prior_bbox.xmin()) / prior_variance[0]); + encode_bbox->set_ymin( + (bbox.ymin() - prior_bbox.ymin()) / prior_variance[1]); + encode_bbox->set_xmax( + (bbox.xmax() - prior_bbox.xmax()) / prior_variance[2]); + encode_bbox->set_ymax( + (bbox.ymax() - prior_bbox.ymax()) / prior_variance[3]); + } + } else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) { + float prior_width = prior_bbox.xmax() - prior_bbox.xmin(); + CHECK_GT(prior_width, 0); + float prior_height = prior_bbox.ymax() - prior_bbox.ymin(); + CHECK_GT(prior_height, 0); + float prior_center_x = (prior_bbox.xmin() + prior_bbox.xmax()) / 2.; + float prior_center_y = (prior_bbox.ymin() + prior_bbox.ymax()) / 2.; + + float bbox_width = bbox.xmax() - bbox.xmin(); + CHECK_GT(bbox_width, 0); + float bbox_height = bbox.ymax() - bbox.ymin(); + CHECK_GT(bbox_height, 0); + float bbox_center_x = (bbox.xmin() + bbox.xmax()) / 2.; + float bbox_center_y = (bbox.ymin() + bbox.ymax()) / 2.; + + if (encode_variance_in_target) { + encode_bbox->set_xmin((bbox_center_x - prior_center_x) / prior_width); + encode_bbox->set_ymin((bbox_center_y - prior_center_y) / prior_height); + encode_bbox->set_xmax(log(bbox_width / prior_width)); + encode_bbox->set_ymax(log(bbox_height / prior_height)); + } else { + // Encode variance in bbox. 
+ encode_bbox->set_xmin( + (bbox_center_x - prior_center_x) / prior_width / prior_variance[0]); + encode_bbox->set_ymin( + (bbox_center_y - prior_center_y) / prior_height / prior_variance[1]); + encode_bbox->set_xmax( + log(bbox_width / prior_width) / prior_variance[2]); + encode_bbox->set_ymax( + log(bbox_height / prior_height) / prior_variance[3]); + } + } else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) { + float prior_width = prior_bbox.xmax() - prior_bbox.xmin(); + CHECK_GT(prior_width, 0); + float prior_height = prior_bbox.ymax() - prior_bbox.ymin(); + CHECK_GT(prior_height, 0); + if (encode_variance_in_target) { + encode_bbox->set_xmin((bbox.xmin() - prior_bbox.xmin()) / prior_width); + encode_bbox->set_ymin((bbox.ymin() - prior_bbox.ymin()) / prior_height); + encode_bbox->set_xmax((bbox.xmax() - prior_bbox.xmax()) / prior_width); + encode_bbox->set_ymax((bbox.ymax() - prior_bbox.ymax()) / prior_height); + } else { + // Encode variance in bbox. + CHECK_EQ(prior_variance.size(), 4); + for (int i = 0; i < prior_variance.size(); ++i) { + CHECK_GT(prior_variance[i], 0); + } + encode_bbox->set_xmin( + (bbox.xmin() - prior_bbox.xmin()) / prior_width / prior_variance[0]); + encode_bbox->set_ymin( + (bbox.ymin() - prior_bbox.ymin()) / prior_height / prior_variance[1]); + encode_bbox->set_xmax( + (bbox.xmax() - prior_bbox.xmax()) / prior_width / prior_variance[2]); + encode_bbox->set_ymax( + (bbox.ymax() - prior_bbox.ymax()) / prior_height / prior_variance[3]); + } + } else { + LOG(FATAL) << "Unknown LocLossType."; + } +} + +void DecodeBBox( + const NormalizedBBox& prior_bbox, const vector& prior_variance, + const CodeType code_type, const bool variance_encoded_in_target, + const bool clip_bbox, const NormalizedBBox& bbox, + NormalizedBBox* decode_bbox) { + if (code_type == PriorBoxParameter_CodeType_CORNER) { + if (variance_encoded_in_target) { + // variance is encoded in target, we simply need to add the offset + // predictions. 
+ decode_bbox->set_xmin(prior_bbox.xmin() + bbox.xmin()); + decode_bbox->set_ymin(prior_bbox.ymin() + bbox.ymin()); + decode_bbox->set_xmax(prior_bbox.xmax() + bbox.xmax()); + decode_bbox->set_ymax(prior_bbox.ymax() + bbox.ymax()); + } else { + // variance is encoded in bbox, we need to scale the offset accordingly. + decode_bbox->set_xmin( + prior_bbox.xmin() + prior_variance[0] * bbox.xmin()); + decode_bbox->set_ymin( + prior_bbox.ymin() + prior_variance[1] * bbox.ymin()); + decode_bbox->set_xmax( + prior_bbox.xmax() + prior_variance[2] * bbox.xmax()); + decode_bbox->set_ymax( + prior_bbox.ymax() + prior_variance[3] * bbox.ymax()); + } + } else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) { + float prior_width = prior_bbox.xmax() - prior_bbox.xmin(); + CHECK_GT(prior_width, 0); + float prior_height = prior_bbox.ymax() - prior_bbox.ymin(); + CHECK_GT(prior_height, 0); + float prior_center_x = (prior_bbox.xmin() + prior_bbox.xmax()) / 2.; + float prior_center_y = (prior_bbox.ymin() + prior_bbox.ymax()) / 2.; + + float decode_bbox_center_x, decode_bbox_center_y; + float decode_bbox_width, decode_bbox_height; + if (variance_encoded_in_target) { + // variance is encoded in target, we simply need to retore the offset + // predictions. + decode_bbox_center_x = bbox.xmin() * prior_width + prior_center_x; + decode_bbox_center_y = bbox.ymin() * prior_height + prior_center_y; + decode_bbox_width = exp(bbox.xmax()) * prior_width; + decode_bbox_height = exp(bbox.ymax()) * prior_height; + } else { + // variance is encoded in bbox, we need to scale the offset accordingly. 
+ decode_bbox_center_x = + prior_variance[0] * bbox.xmin() * prior_width + prior_center_x; + decode_bbox_center_y = + prior_variance[1] * bbox.ymin() * prior_height + prior_center_y; + decode_bbox_width = + exp(prior_variance[2] * bbox.xmax()) * prior_width; + decode_bbox_height = + exp(prior_variance[3] * bbox.ymax()) * prior_height; + } + + decode_bbox->set_xmin(decode_bbox_center_x - decode_bbox_width / 2.); + decode_bbox->set_ymin(decode_bbox_center_y - decode_bbox_height / 2.); + decode_bbox->set_xmax(decode_bbox_center_x + decode_bbox_width / 2.); + decode_bbox->set_ymax(decode_bbox_center_y + decode_bbox_height / 2.); + } else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) { + float prior_width = prior_bbox.xmax() - prior_bbox.xmin(); + CHECK_GT(prior_width, 0); + float prior_height = prior_bbox.ymax() - prior_bbox.ymin(); + CHECK_GT(prior_height, 0); + if (variance_encoded_in_target) { + // variance is encoded in target, we simply need to add the offset + // predictions. + decode_bbox->set_xmin(prior_bbox.xmin() + bbox.xmin() * prior_width); + decode_bbox->set_ymin(prior_bbox.ymin() + bbox.ymin() * prior_height); + decode_bbox->set_xmax(prior_bbox.xmax() + bbox.xmax() * prior_width); + decode_bbox->set_ymax(prior_bbox.ymax() + bbox.ymax() * prior_height); + } else { + // variance is encoded in bbox, we need to scale the offset accordingly. 
+ decode_bbox->set_xmin( + prior_bbox.xmin() + prior_variance[0] * bbox.xmin() * prior_width); + decode_bbox->set_ymin( + prior_bbox.ymin() + prior_variance[1] * bbox.ymin() * prior_height); + decode_bbox->set_xmax( + prior_bbox.xmax() + prior_variance[2] * bbox.xmax() * prior_width); + decode_bbox->set_ymax( + prior_bbox.ymax() + prior_variance[3] * bbox.ymax() * prior_height); + } + } else { + LOG(FATAL) << "Unknown LocLossType."; + } + float bbox_size = BBoxSize(*decode_bbox); + decode_bbox->set_size(bbox_size); + if (clip_bbox) { + ClipBBox(*decode_bbox, decode_bbox); + } +} + +void DecodeBBoxes( + const vector& prior_bboxes, + const vector >& prior_variances, + const CodeType code_type, const bool variance_encoded_in_target, + const bool clip_bbox, const vector& bboxes, + vector* decode_bboxes) { + CHECK_EQ(prior_bboxes.size(), prior_variances.size()); + CHECK_EQ(prior_bboxes.size(), bboxes.size()); + int num_bboxes = prior_bboxes.size(); + if (num_bboxes >= 1) { + CHECK_EQ(prior_variances[0].size(), 4); + } + decode_bboxes->clear(); + for (int i = 0; i < num_bboxes; ++i) { + NormalizedBBox decode_bbox; + DecodeBBox(prior_bboxes[i], prior_variances[i], code_type, + variance_encoded_in_target, clip_bbox, bboxes[i], &decode_bbox); + decode_bboxes->push_back(decode_bbox); + } +} + +void DecodeBBoxesAll(const vector& all_loc_preds, + const vector& prior_bboxes, + const vector >& prior_variances, + const int num, const bool share_location, + const int num_loc_classes, const int background_label_id, + const CodeType code_type, const bool variance_encoded_in_target, + const bool clip, vector* all_decode_bboxes) { + CHECK_EQ(all_loc_preds.size(), num); + all_decode_bboxes->clear(); + all_decode_bboxes->resize(num); + for (int i = 0; i < num; ++i) { + // Decode predictions into bboxes. + LabelBBox& decode_bboxes = (*all_decode_bboxes)[i]; + for (int c = 0; c < num_loc_classes; ++c) { + int label = share_location ? 
-1 : c; + if (label == background_label_id) { + // Ignore background class. + continue; + } + if (all_loc_preds[i].find(label) == all_loc_preds[i].end()) { + // Something bad happened if there are no predictions for current label. + LOG(FATAL) << "Could not find location predictions for label " << label; + } + const vector& label_loc_preds = + all_loc_preds[i].find(label)->second; + DecodeBBoxes(prior_bboxes, prior_variances, + code_type, variance_encoded_in_target, clip, + label_loc_preds, &(decode_bboxes[label])); + } + } +} + +void MatchBBox(const vector& gt_bboxes, + const vector& pred_bboxes, const int label, + const MatchType match_type, const float overlap_threshold, + const bool ignore_cross_boundary_bbox, + vector* match_indices, vector* match_overlaps) { + int num_pred = pred_bboxes.size(); + match_indices->clear(); + match_indices->resize(num_pred, -1); + match_overlaps->clear(); + match_overlaps->resize(num_pred, 0.); + + int num_gt = 0; + vector gt_indices; + if (label == -1) { + // label -1 means comparing against all ground truth. + num_gt = gt_bboxes.size(); + for (int i = 0; i < num_gt; ++i) { + gt_indices.push_back(i); + } + } else { + // Count number of ground truth boxes which has the desired label. + for (int i = 0; i < gt_bboxes.size(); ++i) { + if (gt_bboxes[i].label() == label) { + num_gt++; + gt_indices.push_back(i); + } + } + } + if (num_gt == 0) { + return; + } + + // Store the positive overlap between predictions and ground truth. + map > overlaps; + for (int i = 0; i < num_pred; ++i) { + if (ignore_cross_boundary_bbox && IsCrossBoundaryBBox(pred_bboxes[i])) { + (*match_indices)[i] = -2; + continue; + } + for (int j = 0; j < num_gt; ++j) { + float overlap = JaccardOverlap(pred_bboxes[i], gt_bboxes[gt_indices[j]]); + if (overlap > 1e-6) { + (*match_overlaps)[i] = std::max((*match_overlaps)[i], overlap); + overlaps[i][j] = overlap; + } + } + } + + // Bipartite matching. 
+ vector gt_pool; + for (int i = 0; i < num_gt; ++i) { + gt_pool.push_back(i); + } + while (gt_pool.size() > 0) { + // Find the most overlapped gt and cooresponding predictions. + int max_idx = -1; + int max_gt_idx = -1; + float max_overlap = -1; + for (map >::iterator it = overlaps.begin(); + it != overlaps.end(); ++it) { + int i = it->first; + if ((*match_indices)[i] != -1) { + // The prediction already has matched ground truth or is ignored. + continue; + } + for (int p = 0; p < gt_pool.size(); ++p) { + int j = gt_pool[p]; + if (it->second.find(j) == it->second.end()) { + // No overlap between the i-th prediction and j-th ground truth. + continue; + } + // Find the maximum overlapped pair. + if (it->second[j] > max_overlap) { + // If the prediction has not been matched to any ground truth, + // and the overlap is larger than maximum overlap, update. + max_idx = i; + max_gt_idx = j; + max_overlap = it->second[j]; + } + } + } + if (max_idx == -1) { + // Cannot find good match. + break; + } else { + CHECK_EQ((*match_indices)[max_idx], -1); + (*match_indices)[max_idx] = gt_indices[max_gt_idx]; + (*match_overlaps)[max_idx] = max_overlap; + // Erase the ground truth. + gt_pool.erase(std::find(gt_pool.begin(), gt_pool.end(), max_gt_idx)); + } + } + + switch (match_type) { + case MultiBoxLossParameter_MatchType_BIPARTITE: + // Already done. + break; + case MultiBoxLossParameter_MatchType_PER_PREDICTION: + // Get most overlaped for the rest prediction bboxes. + for (map >::iterator it = overlaps.begin(); + it != overlaps.end(); ++it) { + int i = it->first; + if ((*match_indices)[i] != -1) { + // The prediction already has matched ground truth or is ignored. + continue; + } + int max_gt_idx = -1; + float max_overlap = -1; + for (int j = 0; j < num_gt; ++j) { + if (it->second.find(j) == it->second.end()) { + // No overlap between the i-th prediction and j-th ground truth. + continue; + } + // Find the maximum overlapped pair. 
+ float overlap = it->second[j]; + if (overlap >= overlap_threshold && overlap > max_overlap) { + // If the prediction has not been matched to any ground truth, + // and the overlap is larger than maximum overlap, update. + max_gt_idx = j; + max_overlap = overlap; + } + } + if (max_gt_idx != -1) { + // Found a matched ground truth. + CHECK_EQ((*match_indices)[i], -1); + (*match_indices)[i] = gt_indices[max_gt_idx]; + (*match_overlaps)[i] = max_overlap; + } + } + break; + default: + LOG(FATAL) << "Unknown matching type."; + break; + } + + return; +} + +void FindMatches(const vector& all_loc_preds, + const map >& all_gt_bboxes, + const vector& prior_bboxes, + const vector >& prior_variances, + const MultiBoxLossParameter& multibox_loss_param, + vector > >* all_match_overlaps, + vector > >* all_match_indices) { + // all_match_overlaps->clear(); + // all_match_indices->clear(); + // Get parameters. + CHECK(multibox_loss_param.has_num_classes()) << "Must provide num_classes."; + const int num_classes = multibox_loss_param.num_classes(); + CHECK_GE(num_classes, 1) << "num_classes should not be less than 1."; + const bool share_location = multibox_loss_param.share_location(); + const int loc_classes = share_location ? 1 : num_classes; + const MatchType match_type = multibox_loss_param.match_type(); + const float overlap_threshold = multibox_loss_param.overlap_threshold(); + const bool use_prior_for_matching = + multibox_loss_param.use_prior_for_matching(); + const int background_label_id = multibox_loss_param.background_label_id(); + const CodeType code_type = multibox_loss_param.code_type(); + const bool encode_variance_in_target = + multibox_loss_param.encode_variance_in_target(); + const bool ignore_cross_boundary_bbox = + multibox_loss_param.ignore_cross_boundary_bbox(); + // Find the matches. + int num = all_loc_preds.size(); + for (int i = 0; i < num; ++i) { + map > match_indices; + map > match_overlaps; + // Check if there is ground truth for current image. 
+ if (all_gt_bboxes.find(i) == all_gt_bboxes.end()) { + // There is no gt for current image. All predictions are negative. + all_match_indices->push_back(match_indices); + all_match_overlaps->push_back(match_overlaps); + continue; + } + // Find match between predictions and ground truth. + const vector& gt_bboxes = all_gt_bboxes.find(i)->second; + if (!use_prior_for_matching) { + for (int c = 0; c < loc_classes; ++c) { + int label = share_location ? -1 : c; + if (!share_location && label == background_label_id) { + // Ignore background loc predictions. + continue; + } + // Decode the prediction into bbox first. + vector loc_bboxes; + bool clip_bbox = false; + DecodeBBoxes(prior_bboxes, prior_variances, + code_type, encode_variance_in_target, clip_bbox, + all_loc_preds[i].find(label)->second, &loc_bboxes); + MatchBBox(gt_bboxes, loc_bboxes, label, match_type, + overlap_threshold, ignore_cross_boundary_bbox, + &match_indices[label], &match_overlaps[label]); + } + } else { + // Use prior bboxes to match against all ground truth. + vector temp_match_indices; + vector temp_match_overlaps; + const int label = -1; + MatchBBox(gt_bboxes, prior_bboxes, label, match_type, overlap_threshold, + ignore_cross_boundary_bbox, &temp_match_indices, + &temp_match_overlaps); + if (share_location) { + match_indices[label] = temp_match_indices; + match_overlaps[label] = temp_match_overlaps; + } else { + // Get ground truth label for each ground truth bbox. + vector gt_labels; + for (int g = 0; g < gt_bboxes.size(); ++g) { + gt_labels.push_back(gt_bboxes[g].label()); + } + // Distribute the matching results to different loc_class. + for (int c = 0; c < loc_classes; ++c) { + if (c == background_label_id) { + // Ignore background loc predictions. 
+ continue; + } + match_indices[c].resize(temp_match_indices.size(), -1); + match_overlaps[c] = temp_match_overlaps; + for (int m = 0; m < temp_match_indices.size(); ++m) { + if (temp_match_indices[m] > -1) { + const int gt_idx = temp_match_indices[m]; + CHECK_LT(gt_idx, gt_labels.size()); + if (c == gt_labels[gt_idx]) { + match_indices[c][m] = gt_idx; + } + } + } + } + } + } + all_match_indices->push_back(match_indices); + all_match_overlaps->push_back(match_overlaps); + } +} + +int CountNumMatches(const vector > >& all_match_indices, + const int num) { + int num_matches = 0; + for (int i = 0; i < num; ++i) { + const map >& match_indices = all_match_indices[i]; + for (map >::const_iterator it = match_indices.begin(); + it != match_indices.end(); ++it) { + const vector& match_index = it->second; + for (int m = 0; m < match_index.size(); ++m) { + if (match_index[m] > -1) { + ++num_matches; + } + } + } + } + return num_matches; +} + +inline bool IsEligibleMining(const MiningType mining_type, const int match_idx, + const float match_overlap, const float neg_overlap) { + if (mining_type == MultiBoxLossParameter_MiningType_MAX_NEGATIVE) { + return match_idx == -1 && match_overlap < neg_overlap; + } else if (mining_type == MultiBoxLossParameter_MiningType_HARD_EXAMPLE) { + return true; + } else { + return false; + } +} + +template +void MineHardExamples(const Blob& conf_blob, + const vector& all_loc_preds, + const map >& all_gt_bboxes, + const vector& prior_bboxes, + const vector >& prior_variances, + const vector > >& all_match_overlaps, + const MultiBoxLossParameter& multibox_loss_param, + int* num_matches, int* num_negs, + vector > >* all_match_indices, + vector >* all_neg_indices) { + int num = all_loc_preds.size(); + // CHECK_EQ(num, all_match_overlaps.size()); + // CHECK_EQ(num, all_match_indices->size()); + // all_neg_indices->clear(); + *num_matches = CountNumMatches(*all_match_indices, num); + *num_negs = 0; + int num_priors = prior_bboxes.size(); + 
CHECK_EQ(num_priors, prior_variances.size()); + // Get parameters. + CHECK(multibox_loss_param.has_num_classes()) << "Must provide num_classes."; + const int num_classes = multibox_loss_param.num_classes(); + CHECK_GE(num_classes, 1) << "num_classes should not be less than 1."; + const int background_label_id = multibox_loss_param.background_label_id(); + const bool use_prior_for_nms = multibox_loss_param.use_prior_for_nms(); + const ConfLossType conf_loss_type = multibox_loss_param.conf_loss_type(); + const MiningType mining_type = multibox_loss_param.mining_type(); + if (mining_type == MultiBoxLossParameter_MiningType_NONE) { + return; + } + const LocLossType loc_loss_type = multibox_loss_param.loc_loss_type(); + const float neg_pos_ratio = multibox_loss_param.neg_pos_ratio(); + const float neg_overlap = multibox_loss_param.neg_overlap(); + const CodeType code_type = multibox_loss_param.code_type(); + const bool encode_variance_in_target = + multibox_loss_param.encode_variance_in_target(); + const bool has_nms_param = multibox_loss_param.has_nms_param(); + float nms_threshold = 0; + int top_k = -1; + if (has_nms_param) { + nms_threshold = multibox_loss_param.nms_param().nms_threshold(); + top_k = multibox_loss_param.nms_param().top_k(); + } + const int sample_size = multibox_loss_param.sample_size(); + // Compute confidence losses based on matching results. + vector > all_conf_loss; +#ifdef CPU_ONLY + ComputeConfLoss(conf_blob.cpu_data(), num, num_priors, num_classes, + background_label_id, conf_loss_type, *all_match_indices, all_gt_bboxes, + &all_conf_loss); +#else + ComputeConfLossGPU(conf_blob, num, num_priors, num_classes, + background_label_id, conf_loss_type, *all_match_indices, all_gt_bboxes, + &all_conf_loss); +#endif + vector > all_loc_loss; + if (mining_type == MultiBoxLossParameter_MiningType_HARD_EXAMPLE) { + // Compute localization losses based on matching results. 
+ Blob loc_pred, loc_gt; + if (*num_matches != 0) { + vector loc_shape(2, 1); + loc_shape[1] = *num_matches * 4; + loc_pred.Reshape(loc_shape); + loc_gt.Reshape(loc_shape); + Dtype* loc_pred_data = loc_pred.mutable_cpu_data(); + Dtype* loc_gt_data = loc_gt.mutable_cpu_data(); + EncodeLocPrediction(all_loc_preds, all_gt_bboxes, *all_match_indices, + prior_bboxes, prior_variances, multibox_loss_param, + loc_pred_data, loc_gt_data); + } + ComputeLocLoss(loc_pred, loc_gt, *all_match_indices, num, + num_priors, loc_loss_type, &all_loc_loss); + } else { + // No localization loss. + for (int i = 0; i < num; ++i) { + vector loc_loss(num_priors, 0.f); + all_loc_loss.push_back(loc_loss); + } + } + for (int i = 0; i < num; ++i) { + map >& match_indices = (*all_match_indices)[i]; + const map >& match_overlaps = all_match_overlaps[i]; + // loc + conf loss. + const vector& conf_loss = all_conf_loss[i]; + const vector& loc_loss = all_loc_loss[i]; + vector loss; + std::transform(conf_loss.begin(), conf_loss.end(), loc_loss.begin(), + std::back_inserter(loss), std::plus()); + // Pick negatives or hard examples based on loss. + set sel_indices; + vector neg_indices; + for (map >::iterator it = match_indices.begin(); + it != match_indices.end(); ++it) { + const int label = it->first; + int num_sel = 0; + // Get potential indices and loss pairs. 
+ vector > loss_indices; + for (int m = 0; m < match_indices[label].size(); ++m) { + if (IsEligibleMining(mining_type, match_indices[label][m], + match_overlaps.find(label)->second[m], neg_overlap)) { + loss_indices.push_back(std::make_pair(loss[m], m)); + ++num_sel; + } + } + if (mining_type == MultiBoxLossParameter_MiningType_MAX_NEGATIVE) { + int num_pos = 0; + for (int m = 0; m < match_indices[label].size(); ++m) { + if (match_indices[label][m] > -1) { + ++num_pos; + } + } + num_sel = std::min(static_cast(num_pos * neg_pos_ratio), num_sel); + } else if (mining_type == MultiBoxLossParameter_MiningType_HARD_EXAMPLE) { + CHECK_GT(sample_size, 0); + num_sel = std::min(sample_size, num_sel); + } + // Select samples. + if (has_nms_param && nms_threshold > 0) { + // Do nms before selecting samples. + vector sel_loss; + vector sel_bboxes; + if (use_prior_for_nms) { + for (int m = 0; m < match_indices[label].size(); ++m) { + if (IsEligibleMining(mining_type, match_indices[label][m], + match_overlaps.find(label)->second[m], neg_overlap)) { + sel_loss.push_back(loss[m]); + sel_bboxes.push_back(prior_bboxes[m]); + } + } + } else { + // Decode the prediction into bbox first. + vector loc_bboxes; + bool clip_bbox = false; + DecodeBBoxes(prior_bboxes, prior_variances, + code_type, encode_variance_in_target, clip_bbox, + all_loc_preds[i].find(label)->second, &loc_bboxes); + for (int m = 0; m < match_indices[label].size(); ++m) { + if (IsEligibleMining(mining_type, match_indices[label][m], + match_overlaps.find(label)->second[m], neg_overlap)) { + sel_loss.push_back(loss[m]); + sel_bboxes.push_back(loc_bboxes[m]); + } + } + } + // Do non-maximum suppression based on the loss. + vector nms_indices; + ApplyNMS(sel_bboxes, sel_loss, nms_threshold, top_k, &nms_indices); + if (nms_indices.size() < num_sel) { + LOG(INFO) << "not enough sample after nms: " << nms_indices.size(); + } + // Pick top example indices after nms. 
+ num_sel = std::min(static_cast(nms_indices.size()), num_sel); + for (int n = 0; n < num_sel; ++n) { + sel_indices.insert(loss_indices[nms_indices[n]].second); + } + } else { + // Pick top example indices based on loss. + std::sort(loss_indices.begin(), loss_indices.end(), + SortScorePairDescend); + for (int n = 0; n < num_sel; ++n) { + sel_indices.insert(loss_indices[n].second); + } + } + // Update the match_indices and select neg_indices. + for (int m = 0; m < match_indices[label].size(); ++m) { + if (match_indices[label][m] > -1) { + if (mining_type == MultiBoxLossParameter_MiningType_HARD_EXAMPLE && + sel_indices.find(m) == sel_indices.end()) { + match_indices[label][m] = -1; + *num_matches -= 1; + } + } else if (match_indices[label][m] == -1) { + if (sel_indices.find(m) != sel_indices.end()) { + neg_indices.push_back(m); + *num_negs += 1; + } + } + } + } + all_neg_indices->push_back(neg_indices); + } +} + +// Explicite initialization. +template void MineHardExamples(const Blob& conf_blob, + const vector& all_loc_preds, + const map >& all_gt_bboxes, + const vector& prior_bboxes, + const vector >& prior_variances, + const vector > >& all_match_overlaps, + const MultiBoxLossParameter& multibox_loss_param, + int* num_matches, int* num_negs, + vector > >* all_match_indices, + vector >* all_neg_indices); +template void MineHardExamples(const Blob& conf_blob, + const vector& all_loc_preds, + const map >& all_gt_bboxes, + const vector& prior_bboxes, + const vector >& prior_variances, + const vector > >& all_match_overlaps, + const MultiBoxLossParameter& multibox_loss_param, + int* num_matches, int* num_negs, + vector > >* all_match_indices, + vector >* all_neg_indices); + +template +void GetGroundTruth(const Dtype* gt_data, const int num_gt, + const int background_label_id, const bool use_difficult_gt, + map >* all_gt_bboxes) { + all_gt_bboxes->clear(); + for (int i = 0; i < num_gt; ++i) { + int start_idx = i * 8; + int item_id = gt_data[start_idx]; + if (item_id == 
-1) { + continue; + } + int label = gt_data[start_idx + 1]; + CHECK_NE(background_label_id, label) + << "Found background label in the dataset."; + bool difficult = static_cast(gt_data[start_idx + 7]); + if (!use_difficult_gt && difficult) { + // Skip reading difficult ground truth. + continue; + } + NormalizedBBox bbox; + bbox.set_label(label); + bbox.set_xmin(gt_data[start_idx + 3]); + bbox.set_ymin(gt_data[start_idx + 4]); + bbox.set_xmax(gt_data[start_idx + 5]); + bbox.set_ymax(gt_data[start_idx + 6]); + bbox.set_difficult(difficult); + float bbox_size = BBoxSize(bbox); + bbox.set_size(bbox_size); + (*all_gt_bboxes)[item_id].push_back(bbox); + } +} + +// Explicit initialization. +template void GetGroundTruth(const float* gt_data, const int num_gt, + const int background_label_id, const bool use_difficult_gt, + map >* all_gt_bboxes); +template void GetGroundTruth(const double* gt_data, const int num_gt, + const int background_label_id, const bool use_difficult_gt, + map >* all_gt_bboxes); + +template +void GetGroundTruth(const Dtype* gt_data, const int num_gt, + const int background_label_id, const bool use_difficult_gt, + map* all_gt_bboxes) { + all_gt_bboxes->clear(); + for (int i = 0; i < num_gt; ++i) { + int start_idx = i * 8; + int item_id = gt_data[start_idx]; + if (item_id == -1) { + break; + } + NormalizedBBox bbox; + int label = gt_data[start_idx + 1]; + CHECK_NE(background_label_id, label) + << "Found background label in the dataset."; + bool difficult = static_cast(gt_data[start_idx + 7]); + if (!use_difficult_gt && difficult) { + // Skip reading difficult ground truth. + continue; + } + bbox.set_xmin(gt_data[start_idx + 3]); + bbox.set_ymin(gt_data[start_idx + 4]); + bbox.set_xmax(gt_data[start_idx + 5]); + bbox.set_ymax(gt_data[start_idx + 6]); + bbox.set_difficult(difficult); + float bbox_size = BBoxSize(bbox); + bbox.set_size(bbox_size); + (*all_gt_bboxes)[item_id][label].push_back(bbox); + } +} + +// Explicit initialization. 
+template void GetGroundTruth(const float* gt_data, const int num_gt, + const int background_label_id, const bool use_difficult_gt, + map* all_gt_bboxes); +template void GetGroundTruth(const double* gt_data, const int num_gt, + const int background_label_id, const bool use_difficult_gt, + map* all_gt_bboxes); + +template +void GetLocPredictions(const Dtype* loc_data, const int num, + const int num_preds_per_class, const int num_loc_classes, + const bool share_location, vector* loc_preds) { + loc_preds->clear(); + if (share_location) { + CHECK_EQ(num_loc_classes, 1); + } + loc_preds->resize(num); + for (int i = 0; i < num; ++i) { + LabelBBox& label_bbox = (*loc_preds)[i]; + for (int p = 0; p < num_preds_per_class; ++p) { + int start_idx = p * num_loc_classes * 4; + for (int c = 0; c < num_loc_classes; ++c) { + int label = share_location ? -1 : c; + if (label_bbox.find(label) == label_bbox.end()) { + label_bbox[label].resize(num_preds_per_class); + } + label_bbox[label][p].set_xmin(loc_data[start_idx + c * 4]); + label_bbox[label][p].set_ymin(loc_data[start_idx + c * 4 + 1]); + label_bbox[label][p].set_xmax(loc_data[start_idx + c * 4 + 2]); + label_bbox[label][p].set_ymax(loc_data[start_idx + c * 4 + 3]); + } + } + loc_data += num_preds_per_class * num_loc_classes * 4; + } +} + +// Explicit initialization. 
+template void GetLocPredictions(const float* loc_data, const int num, + const int num_preds_per_class, const int num_loc_classes, + const bool share_location, vector* loc_preds); +template void GetLocPredictions(const double* loc_data, const int num, + const int num_preds_per_class, const int num_loc_classes, + const bool share_location, vector* loc_preds); + +template +void EncodeLocPrediction(const vector& all_loc_preds, + const map >& all_gt_bboxes, + const vector > >& all_match_indices, + const vector& prior_bboxes, + const vector >& prior_variances, + const MultiBoxLossParameter& multibox_loss_param, + Dtype* loc_pred_data, Dtype* loc_gt_data) { + int num = all_loc_preds.size(); + // CHECK_EQ(num, all_match_indices.size()); + // Get parameters. + const CodeType code_type = multibox_loss_param.code_type(); + const bool encode_variance_in_target = + multibox_loss_param.encode_variance_in_target(); + const bool bp_inside = multibox_loss_param.bp_inside(); + const bool use_prior_for_matching = + multibox_loss_param.use_prior_for_matching(); + int count = 0; + for (int i = 0; i < num; ++i) { + for (map >::const_iterator + it = all_match_indices[i].begin(); + it != all_match_indices[i].end(); ++it) { + const int label = it->first; + const vector& match_index = it->second; + CHECK(all_loc_preds[i].find(label) != all_loc_preds[i].end()); + const vector& loc_pred = + all_loc_preds[i].find(label)->second; + for (int j = 0; j < match_index.size(); ++j) { + if (match_index[j] <= -1) { + continue; + } + // Store encoded ground truth. 
+ const int gt_idx = match_index[j]; + CHECK(all_gt_bboxes.find(i) != all_gt_bboxes.end()); + CHECK_LT(gt_idx, all_gt_bboxes.find(i)->second.size()); + const NormalizedBBox& gt_bbox = all_gt_bboxes.find(i)->second[gt_idx]; + NormalizedBBox gt_encode; + CHECK_LT(j, prior_bboxes.size()); + EncodeBBox(prior_bboxes[j], prior_variances[j], code_type, + encode_variance_in_target, gt_bbox, >_encode); + loc_gt_data[count * 4] = gt_encode.xmin(); + loc_gt_data[count * 4 + 1] = gt_encode.ymin(); + loc_gt_data[count * 4 + 2] = gt_encode.xmax(); + loc_gt_data[count * 4 + 3] = gt_encode.ymax(); + // Store location prediction. + CHECK_LT(j, loc_pred.size()); + if (bp_inside) { + NormalizedBBox match_bbox = prior_bboxes[j]; + if (!use_prior_for_matching) { + const bool clip_bbox = false; + DecodeBBox(prior_bboxes[j], prior_variances[j], code_type, + encode_variance_in_target, clip_bbox, loc_pred[j], + &match_bbox); + } + // When a dimension of match_bbox is outside of image region, use + // gt_encode to simulate zero gradient. + loc_pred_data[count * 4] = + (match_bbox.xmin() < 0 || match_bbox.xmin() > 1) ? + gt_encode.xmin() : loc_pred[j].xmin(); + loc_pred_data[count * 4 + 1] = + (match_bbox.ymin() < 0 || match_bbox.ymin() > 1) ? + gt_encode.ymin() : loc_pred[j].ymin(); + loc_pred_data[count * 4 + 2] = + (match_bbox.xmax() < 0 || match_bbox.xmax() > 1) ? + gt_encode.xmax() : loc_pred[j].xmax(); + loc_pred_data[count * 4 + 3] = + (match_bbox.ymax() < 0 || match_bbox.ymax() > 1) ? 
+ gt_encode.ymax() : loc_pred[j].ymax(); + } else { + loc_pred_data[count * 4] = loc_pred[j].xmin(); + loc_pred_data[count * 4 + 1] = loc_pred[j].ymin(); + loc_pred_data[count * 4 + 2] = loc_pred[j].xmax(); + loc_pred_data[count * 4 + 3] = loc_pred[j].ymax(); + } + if (encode_variance_in_target) { + for (int k = 0; k < 4; ++k) { + CHECK_GT(prior_variances[j][k], 0); + loc_pred_data[count * 4 + k] /= prior_variances[j][k]; + loc_gt_data[count * 4 + k] /= prior_variances[j][k]; + } + } + ++count; + } + } + } +} + +// Explicit initialization. +template void EncodeLocPrediction(const vector& all_loc_preds, + const map >& all_gt_bboxes, + const vector > >& all_match_indices, + const vector& prior_bboxes, + const vector >& prior_variances, + const MultiBoxLossParameter& multibox_loss_param, + float* loc_pred_data, float* loc_gt_data); +template void EncodeLocPrediction(const vector& all_loc_preds, + const map >& all_gt_bboxes, + const vector > >& all_match_indices, + const vector& prior_bboxes, + const vector >& prior_variances, + const MultiBoxLossParameter& multibox_loss_param, + double* loc_pred_data, double* loc_gt_data); + +template +void ComputeLocLoss(const Blob& loc_pred, const Blob& loc_gt, + const vector > >& all_match_indices, + const int num, const int num_priors, const LocLossType loc_loss_type, + vector >* all_loc_loss) { + int loc_count = loc_pred.count(); + CHECK_EQ(loc_count, loc_gt.count()); + Blob diff; + const Dtype* diff_data = NULL; + if (loc_count != 0) { + diff.Reshape(loc_pred.shape()); + caffe_sub(loc_count, loc_pred.cpu_data(), loc_gt.cpu_data(), + diff.mutable_cpu_data()); + diff_data = diff.cpu_data(); + } + CHECK_NOTNULL(diff_data); + int count = 0; + for (int i = 0; i < num; ++i) { + vector loc_loss(num_priors, 0.f); + for (map >::const_iterator + it = all_match_indices[i].begin(); + it != all_match_indices[i].end(); ++it) { + const vector& match_index = it->second; + CHECK_EQ(num_priors, match_index.size()); + for (int j = 0; j < 
match_index.size(); ++j) { + if (match_index[j] <= -1) { + continue; + } + Dtype loss = 0; + for (int k = 0; k < 4; ++k) { + Dtype val = diff_data[count * 4 + k]; + if (loc_loss_type == MultiBoxLossParameter_LocLossType_SMOOTH_L1) { + Dtype abs_val = fabs(val); + if (abs_val < 1.) { + loss += 0.5 * val * val; + } else { + loss += abs_val - 0.5; + } + } else if (loc_loss_type == MultiBoxLossParameter_LocLossType_L2) { + loss += 0.5 * val * val; + } else { + LOG(FATAL) << "Unknown loc loss type."; + } + } + loc_loss[j] = loss; + ++count; + } + } + all_loc_loss->push_back(loc_loss); + } +} + +// Explicit initialization. +template void ComputeLocLoss(const Blob& loc_pred, + const Blob& loc_gt, + const vector > >& all_match_indices, + const int num, const int num_priors, const LocLossType loc_loss_type, + vector >* all_loc_loss); +template void ComputeLocLoss(const Blob& loc_pred, + const Blob& loc_gt, + const vector > >& all_match_indices, + const int num, const int num_priors, const LocLossType loc_loss_type, + vector >* all_loc_loss); + +template +void GetConfidenceScores(const Dtype* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + vector > >* conf_preds) { + conf_preds->clear(); + conf_preds->resize(num); + for (int i = 0; i < num; ++i) { + map >& label_scores = (*conf_preds)[i]; + for (int p = 0; p < num_preds_per_class; ++p) { + int start_idx = p * num_classes; + for (int c = 0; c < num_classes; ++c) { + label_scores[c].push_back(conf_data[start_idx + c]); + } + } + conf_data += num_preds_per_class * num_classes; + } +} + +// Explicit initialization. 
+template void GetConfidenceScores(const float* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + vector > >* conf_preds); +template void GetConfidenceScores(const double* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + vector > >* conf_preds); + +template +void GetConfidenceScores(const Dtype* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + const bool class_major, vector > >* conf_preds) { + conf_preds->clear(); + conf_preds->resize(num); + for (int i = 0; i < num; ++i) { + map >& label_scores = (*conf_preds)[i]; + if (class_major) { + for (int c = 0; c < num_classes; ++c) { + label_scores[c].assign(conf_data, conf_data + num_preds_per_class); + conf_data += num_preds_per_class; + } + } else { + for (int p = 0; p < num_preds_per_class; ++p) { + int start_idx = p * num_classes; + for (int c = 0; c < num_classes; ++c) { + label_scores[c].push_back(conf_data[start_idx + c]); + } + } + conf_data += num_preds_per_class * num_classes; + } + } +} + +// Explicit initialization. 
+template void GetConfidenceScores(const float* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + const bool class_major, vector > >* conf_preds); +template void GetConfidenceScores(const double* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + const bool class_major, vector > >* conf_preds); + +template +void ComputeConfLoss(const Dtype* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + const int background_label_id, const ConfLossType loss_type, + vector >* all_conf_loss) { + all_conf_loss->clear(); + for (int i = 0; i < num; ++i) { + vector conf_loss; + for (int p = 0; p < num_preds_per_class; ++p) { + int start_idx = p * num_classes; + int label = background_label_id; + Dtype loss = 0; + if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) { + CHECK_GE(label, 0); + CHECK_LT(label, num_classes); + // Compute softmax probability. + // We need to subtract the max to avoid numerical issues. + Dtype maxval = -FLT_MAX; + for (int c = 0; c < num_classes; ++c) { + maxval = std::max(conf_data[start_idx + c], maxval); + } + Dtype sum = 0.; + for (int c = 0; c < num_classes; ++c) { + sum += std::exp(conf_data[start_idx + c] - maxval); + } + Dtype prob = std::exp(conf_data[start_idx + label] - maxval) / sum; + loss = -log(std::max(prob, Dtype(FLT_MIN))); + } else if (loss_type == MultiBoxLossParameter_ConfLossType_LOGISTIC) { + int target = 0; + for (int c = 0; c < num_classes; ++c) { + if (c == label) { + target = 1; + } else { + target = 0; + } + Dtype input = conf_data[start_idx + c]; + loss -= input * (target - (input >= 0)) - + log(1 + exp(input - 2 * input * (input >= 0))); + } + } else { + LOG(FATAL) << "Unknown conf loss type."; + } + conf_loss.push_back(loss); + } + conf_data += num_preds_per_class * num_classes; + all_conf_loss->push_back(conf_loss); + } +} + +// Explicit initialization. 
+template void ComputeConfLoss(const float* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + const int background_label_id, const ConfLossType loss_type, + vector >* all_conf_loss); +template void ComputeConfLoss(const double* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + const int background_label_id, const ConfLossType loss_type, + vector >* all_conf_loss); + +template +void ComputeConfLoss(const Dtype* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + const int background_label_id, const ConfLossType loss_type, + const vector > >& all_match_indices, + const map >& all_gt_bboxes, + vector >* all_conf_loss) { + CHECK_LT(background_label_id, num_classes); + // CHECK_EQ(num, all_match_indices.size()); + all_conf_loss->clear(); + for (int i = 0; i < num; ++i) { + vector conf_loss; + const map >& match_indices = all_match_indices[i]; + for (int p = 0; p < num_preds_per_class; ++p) { + int start_idx = p * num_classes; + // Get the label index. + int label = background_label_id; + for (map >::const_iterator it = + match_indices.begin(); it != match_indices.end(); ++it) { + const vector& match_index = it->second; + CHECK_EQ(match_index.size(), num_preds_per_class); + if (match_index[p] > -1) { + CHECK(all_gt_bboxes.find(i) != all_gt_bboxes.end()); + const vector& gt_bboxes = + all_gt_bboxes.find(i)->second; + CHECK_LT(match_index[p], gt_bboxes.size()); + label = gt_bboxes[match_index[p]].label(); + CHECK_GE(label, 0); + CHECK_NE(label, background_label_id); + CHECK_LT(label, num_classes); + // A prior can only be matched to one gt bbox. + break; + } + } + Dtype loss = 0; + if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) { + CHECK_GE(label, 0); + CHECK_LT(label, num_classes); + // Compute softmax probability. + // We need to subtract the max to avoid numerical issues. 
+ Dtype maxval = conf_data[start_idx]; + for (int c = 1; c < num_classes; ++c) { + maxval = std::max(conf_data[start_idx + c], maxval); + } + Dtype sum = 0.; + for (int c = 0; c < num_classes; ++c) { + sum += std::exp(conf_data[start_idx + c] - maxval); + } + Dtype prob = std::exp(conf_data[start_idx + label] - maxval) / sum; + loss = -log(std::max(prob, Dtype(FLT_MIN))); + } else if (loss_type == MultiBoxLossParameter_ConfLossType_LOGISTIC) { + int target = 0; + for (int c = 0; c < num_classes; ++c) { + if (c == label) { + target = 1; + } else { + target = 0; + } + Dtype input = conf_data[start_idx + c]; + loss -= input * (target - (input >= 0)) - + log(1 + exp(input - 2 * input * (input >= 0))); + } + } else { + LOG(FATAL) << "Unknown conf loss type."; + } + conf_loss.push_back(loss); + } + conf_data += num_preds_per_class * num_classes; + all_conf_loss->push_back(conf_loss); + } +} + +// Explicit initialization. +template void ComputeConfLoss(const float* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + const int background_label_id, const ConfLossType loss_type, + const vector > >& all_match_indices, + const map >& all_gt_bboxes, + vector >* all_conf_loss); +template void ComputeConfLoss(const double* conf_data, const int num, + const int num_preds_per_class, const int num_classes, + const int background_label_id, const ConfLossType loss_type, + const vector > >& all_match_indices, + const map >& all_gt_bboxes, + vector >* all_conf_loss); + +template +void EncodeConfPrediction(const Dtype* conf_data, const int num, + const int num_priors, const MultiBoxLossParameter& multibox_loss_param, + const vector > >& all_match_indices, + const vector >& all_neg_indices, + const map >& all_gt_bboxes, + Dtype* conf_pred_data, Dtype* conf_gt_data) { + // CHECK_EQ(num, all_match_indices.size()); + // CHECK_EQ(num, all_neg_indices.size()); + // Retrieve parameters. 
+ CHECK(multibox_loss_param.has_num_classes()) << "Must provide num_classes."; + const int num_classes = multibox_loss_param.num_classes(); + CHECK_GE(num_classes, 1) << "num_classes should not be less than 1."; + const int background_label_id = multibox_loss_param.background_label_id(); + const bool map_object_to_agnostic = + multibox_loss_param.map_object_to_agnostic(); + if (map_object_to_agnostic) { + if (background_label_id >= 0) { + CHECK_EQ(num_classes, 2); + } else { + CHECK_EQ(num_classes, 1); + } + } + const MiningType mining_type = multibox_loss_param.mining_type(); + bool do_neg_mining; + if (multibox_loss_param.has_do_neg_mining()) { + LOG(WARNING) << "do_neg_mining is deprecated, use mining_type instead."; + do_neg_mining = multibox_loss_param.do_neg_mining(); + CHECK_EQ(do_neg_mining, + mining_type != MultiBoxLossParameter_MiningType_NONE); + } + do_neg_mining = mining_type != MultiBoxLossParameter_MiningType_NONE; + const ConfLossType conf_loss_type = multibox_loss_param.conf_loss_type(); + int count = 0; + for (int i = 0; i < num; ++i) { + if (all_gt_bboxes.find(i) != all_gt_bboxes.end()) { + // Save matched (positive) bboxes scores and labels. + const map >& match_indices = all_match_indices[i]; + for (map >::const_iterator it = + match_indices.begin(); it != match_indices.end(); ++it) { + const vector& match_index = it->second; + CHECK_EQ(match_index.size(), num_priors); + for (int j = 0; j < num_priors; ++j) { + if (match_index[j] <= -1) { + continue; + } + const int gt_label = map_object_to_agnostic ? + background_label_id + 1 : + all_gt_bboxes.find(i)->second[match_index[j]].label(); + int idx = do_neg_mining ? 
count : j; + switch (conf_loss_type) { + case MultiBoxLossParameter_ConfLossType_SOFTMAX: + conf_gt_data[idx] = gt_label; + break; + case MultiBoxLossParameter_ConfLossType_LOGISTIC: + conf_gt_data[idx * num_classes + gt_label] = 1; + break; + default: + LOG(FATAL) << "Unknown conf loss type."; + } + if (do_neg_mining) { + // Copy scores for matched bboxes. + caffe_copy(num_classes, conf_data + j * num_classes, + conf_pred_data + count * num_classes); + ++count; + } + } + } + // Go to next image. + if (do_neg_mining) { + // Save negative bboxes scores and labels. + for (int n = 0; n < all_neg_indices[i].size(); ++n) { + int j = all_neg_indices[i][n]; + CHECK_LT(j, num_priors); + caffe_copy(num_classes, conf_data + j * num_classes, + conf_pred_data + count * num_classes); + switch (conf_loss_type) { + case MultiBoxLossParameter_ConfLossType_SOFTMAX: + conf_gt_data[count] = background_label_id; + break; + case MultiBoxLossParameter_ConfLossType_LOGISTIC: + if (background_label_id >= 0 && + background_label_id < num_classes) { + conf_gt_data[count * num_classes + background_label_id] = 1; + } + break; + default: + LOG(FATAL) << "Unknown conf loss type."; + } + ++count; + } + } + } + if (do_neg_mining) { + conf_data += num_priors * num_classes; + } else { + conf_gt_data += num_priors; + } + } +} + +// Explicite initialization. 
+template void EncodeConfPrediction(const float* conf_data, const int num, + const int num_priors, const MultiBoxLossParameter& multibox_loss_param, + const vector > >& all_match_indices, + const vector >& all_neg_indices, + const map >& all_gt_bboxes, + float* conf_pred_data, float* conf_gt_data); +template void EncodeConfPrediction(const double* conf_data, const int num, + const int num_priors, const MultiBoxLossParameter& multibox_loss_param, + const vector > >& all_match_indices, + const vector >& all_neg_indices, + const map >& all_gt_bboxes, + double* conf_pred_data, double* conf_gt_data); + +template +void GetPriorBBoxes(const Dtype* prior_data, const int num_priors, + vector* prior_bboxes, + vector >* prior_variances) { + prior_bboxes->clear(); + prior_variances->clear(); + for (int i = 0; i < num_priors; ++i) { + int start_idx = i * 4; + NormalizedBBox bbox; + bbox.set_xmin(prior_data[start_idx]); + bbox.set_ymin(prior_data[start_idx + 1]); + bbox.set_xmax(prior_data[start_idx + 2]); + bbox.set_ymax(prior_data[start_idx + 3]); + float bbox_size = BBoxSize(bbox); + bbox.set_size(bbox_size); + prior_bboxes->push_back(bbox); + } + + for (int i = 0; i < num_priors; ++i) { + int start_idx = (num_priors + i) * 4; + vector var; + for (int j = 0; j < 4; ++j) { + var.push_back(prior_data[start_idx + j]); + } + prior_variances->push_back(var); + } +} + +// Explicit initialization. 
+template void GetPriorBBoxes(const float* prior_data, const int num_priors, + vector* prior_bboxes, + vector >* prior_variances); +template void GetPriorBBoxes(const double* prior_data, const int num_priors, + vector* prior_bboxes, + vector >* prior_variances); + +template +void GetDetectionResults(const Dtype* det_data, const int num_det, + const int background_label_id, + map > >* all_detections) { + all_detections->clear(); + for (int i = 0; i < num_det; ++i) { + int start_idx = i * 7; + int item_id = det_data[start_idx]; + if (item_id == -1) { + continue; + } + int label = det_data[start_idx + 1]; + CHECK_NE(background_label_id, label) + << "Found background label in the detection results."; + NormalizedBBox bbox; + bbox.set_score(det_data[start_idx + 2]); + bbox.set_xmin(det_data[start_idx + 3]); + bbox.set_ymin(det_data[start_idx + 4]); + bbox.set_xmax(det_data[start_idx + 5]); + bbox.set_ymax(det_data[start_idx + 6]); + float bbox_size = BBoxSize(bbox); + bbox.set_size(bbox_size); + (*all_detections)[item_id][label].push_back(bbox); + } +} + +// Explicit initialization. +template void GetDetectionResults(const float* det_data, const int num_det, + const int background_label_id, + map > >* all_detections); +template void GetDetectionResults(const double* det_data, const int num_det, + const int background_label_id, + map > >* all_detections); + +void GetTopKScoreIndex(const vector& scores, const vector& indices, + const int top_k, vector >* score_index_vec) { + CHECK_EQ(scores.size(), indices.size()); + + // Generate index score pairs. + for (int i = 0; i < scores.size(); ++i) { + score_index_vec->push_back(std::make_pair(scores[i], indices[i])); + } + + // Sort the score pair according to the scores in descending order + std::stable_sort(score_index_vec->begin(), score_index_vec->end(), + SortScorePairDescend); + + // Keep top_k scores if needed. 
+ if (top_k > -1 && top_k < score_index_vec->size()) { + score_index_vec->resize(top_k); + } +} + +void GetMaxScoreIndex(const vector& scores, const float threshold, + const int top_k, vector >* score_index_vec) { + // Generate index score pairs. + for (int i = 0; i < scores.size(); ++i) { + if (scores[i] > threshold) { + score_index_vec->push_back(std::make_pair(scores[i], i)); + } + } + + // Sort the score pair according to the scores in descending order + std::stable_sort(score_index_vec->begin(), score_index_vec->end(), + SortScorePairDescend); + + // Keep top_k scores if needed. + if (top_k > -1 && top_k < score_index_vec->size()) { + score_index_vec->resize(top_k); + } +} + +template +void GetMaxScoreIndex(const Dtype* scores, const int num, const float threshold, + const int top_k, vector >* score_index_vec) { + // Generate index score pairs. + for (int i = 0; i < num; ++i) { + if (scores[i] > threshold) { + score_index_vec->push_back(std::make_pair(scores[i], i)); + } + } + + // Sort the score pair according to the scores in descending order + std::sort(score_index_vec->begin(), score_index_vec->end(), + SortScorePairDescend); + + // Keep top_k scores if needed. + if (top_k > -1 && top_k < score_index_vec->size()) { + score_index_vec->resize(top_k); + } +} + +template +void GetMaxScoreIndex(const float* scores, const int num, const float threshold, + const int top_k, vector >* score_index_vec); +template +void GetMaxScoreIndex(const double* scores, const int num, + const float threshold, const int top_k, + vector >* score_index_vec); + +void ApplyNMS(const vector& bboxes, const vector& scores, + const float threshold, const int top_k, const bool reuse_overlaps, + map >* overlaps, vector* indices) { + // Sanity check. + CHECK_EQ(bboxes.size(), scores.size()) + << "bboxes and scores have different size."; + + // Get top_k scores (with corresponding indices). 
+ vector idx(boost::counting_iterator(0), + boost::counting_iterator(scores.size())); + vector > score_index_vec; + GetTopKScoreIndex(scores, idx, top_k, &score_index_vec); + + // Do nms. + indices->clear(); + while (score_index_vec.size() != 0) { + // Get the current highest score box. + int best_idx = score_index_vec.front().second; + const NormalizedBBox& best_bbox = bboxes[best_idx]; + if (BBoxSize(best_bbox) < 1e-5) { + // Erase small box. + score_index_vec.erase(score_index_vec.begin()); + continue; + } + indices->push_back(best_idx); + // Erase the best box. + score_index_vec.erase(score_index_vec.begin()); + + if (top_k > -1 && indices->size() >= top_k) { + // Stop if finding enough bboxes for nms. + break; + } + + // Compute overlap between best_bbox and other remaining bboxes. + // Remove a bbox if the overlap with best_bbox is larger than nms_threshold. + for (vector >::iterator it = score_index_vec.begin(); + it != score_index_vec.end(); ) { + int cur_idx = it->second; + const NormalizedBBox& cur_bbox = bboxes[cur_idx]; + if (BBoxSize(cur_bbox) < 1e-5) { + // Erase small box. + it = score_index_vec.erase(it); + continue; + } + float cur_overlap = 0.; + if (reuse_overlaps) { + if (overlaps->find(best_idx) != overlaps->end() && + overlaps->find(best_idx)->second.find(cur_idx) != + (*overlaps)[best_idx].end()) { + // Use the computed overlap. + cur_overlap = (*overlaps)[best_idx][cur_idx]; + } else if (overlaps->find(cur_idx) != overlaps->end() && + overlaps->find(cur_idx)->second.find(best_idx) != + (*overlaps)[cur_idx].end()) { + // Use the computed overlap. + cur_overlap = (*overlaps)[cur_idx][best_idx]; + } else { + cur_overlap = JaccardOverlap(best_bbox, cur_bbox); + // Store the overlap for future use. 
+ (*overlaps)[best_idx][cur_idx] = cur_overlap; + } + } else { + cur_overlap = JaccardOverlap(best_bbox, cur_bbox); + } + + // Remove it if necessary + if (cur_overlap > threshold) { + it = score_index_vec.erase(it); + } else { + ++it; + } + } + } +} + +void ApplyNMS(const vector& bboxes, const vector& scores, + const float threshold, const int top_k, vector* indices) { + bool reuse_overlap = false; + map > overlaps; + ApplyNMS(bboxes, scores, threshold, top_k, reuse_overlap, &overlaps, indices); +} + +void ApplyNMS(const bool* overlapped, const int num, vector* indices) { + vector index_vec(boost::counting_iterator(0), + boost::counting_iterator(num)); + // Do nms. + indices->clear(); + while (index_vec.size() != 0) { + // Get the current highest score box. + int best_idx = index_vec.front(); + indices->push_back(best_idx); + // Erase the best box. + index_vec.erase(index_vec.begin()); + + for (vector::iterator it = index_vec.begin(); it != index_vec.end();) { + int cur_idx = *it; + + // Remove it if necessary + if (overlapped[best_idx * num + cur_idx]) { + it = index_vec.erase(it); + } else { + ++it; + } + } + } +} + +inline int clamp(const int v, const int a, const int b) { + return v < a ? a : v > b ? b : v; +} + +void ApplyNMSFast(const vector& bboxes, + const vector& scores, const float score_threshold, + const float nms_threshold, const float eta, const int top_k, + vector* indices) { + // Sanity check. + CHECK_EQ(bboxes.size(), scores.size()) + << "bboxes and scores have different size."; + + // Get top_k scores (with corresponding indices). + vector > score_index_vec; + GetMaxScoreIndex(scores, score_threshold, top_k, &score_index_vec); + + // Do nms. 
+ float adaptive_threshold = nms_threshold; + indices->clear(); + while (score_index_vec.size() != 0) { + const int idx = score_index_vec.front().second; + bool keep = true; + for (int k = 0; k < indices->size(); ++k) { + if (keep) { + const int kept_idx = (*indices)[k]; + float overlap = JaccardOverlap(bboxes[idx], bboxes[kept_idx]); + keep = overlap <= adaptive_threshold; + } else { + break; + } + } + if (keep) { + indices->push_back(idx); + } + score_index_vec.erase(score_index_vec.begin()); + if (keep && eta < 1 && adaptive_threshold > 0.5) { + adaptive_threshold *= eta; + } + } +} + +template +void ApplyNMSFast(const Dtype* bboxes, const Dtype* scores, const int num, + const float score_threshold, const float nms_threshold, + const float eta, const int top_k, vector* indices) { + // Get top_k scores (with corresponding indices). + vector > score_index_vec; + GetMaxScoreIndex(scores, num, score_threshold, top_k, &score_index_vec); + + // Do nms. + float adaptive_threshold = nms_threshold; + indices->clear(); + while (score_index_vec.size() != 0) { + const int idx = score_index_vec.front().second; + bool keep = true; + for (int k = 0; k < indices->size(); ++k) { + if (keep) { + const int kept_idx = (*indices)[k]; + float overlap = JaccardOverlap(bboxes + idx * 4, bboxes + kept_idx * 4); + keep = overlap <= adaptive_threshold; + } else { + break; + } + } + if (keep) { + indices->push_back(idx); + } + score_index_vec.erase(score_index_vec.begin()); + if (keep && eta < 1 && adaptive_threshold > 0.5) { + adaptive_threshold *= eta; + } + } +} + +template +void ApplyNMSFast(const float* bboxes, const float* scores, const int num, + const float score_threshold, const float nms_threshold, + const float eta, const int top_k, vector* indices); +template +void ApplyNMSFast(const double* bboxes, const double* scores, const int num, + const float score_threshold, const float nms_threshold, + const float eta, const int top_k, vector* indices); + +void CumSum(const vector >& 
pairs, vector* cumsum) { + // Sort the pairs based on first item of the pair. + vector > sort_pairs = pairs; + std::stable_sort(sort_pairs.begin(), sort_pairs.end(), + SortScorePairDescend); + + cumsum->clear(); + for (int i = 0; i < sort_pairs.size(); ++i) { + if (i == 0) { + cumsum->push_back(sort_pairs[i].second); + } else { + cumsum->push_back(cumsum->back() + sort_pairs[i].second); + } + } +} + +void ComputeAP(const vector >& tp, const int num_pos, + const vector >& fp, const string ap_version, + vector* prec, vector* rec, float* ap) { + const float eps = 1e-6; + CHECK_EQ(tp.size(), fp.size()) << "tp must have same size as fp."; + const int num = tp.size(); + // Make sure that tp and fp have complement value. + for (int i = 0; i < num; ++i) { + CHECK_LE(fabs(tp[i].first - fp[i].first), eps); + CHECK_EQ(tp[i].second, 1 - fp[i].second); + } + prec->clear(); + rec->clear(); + *ap = 0; + if (tp.size() == 0 || num_pos == 0) { + return; + } + + // Compute cumsum of tp. + vector tp_cumsum; + CumSum(tp, &tp_cumsum); + CHECK_EQ(tp_cumsum.size(), num); + + // Compute cumsum of fp. + vector fp_cumsum; + CumSum(fp, &fp_cumsum); + CHECK_EQ(fp_cumsum.size(), num); + + // Compute precision. + for (int i = 0; i < num; ++i) { + prec->push_back(static_cast(tp_cumsum[i]) / + (tp_cumsum[i] + fp_cumsum[i])); + } + + // Compute recall. + for (int i = 0; i < num; ++i) { + CHECK_LE(tp_cumsum[i], num_pos); + rec->push_back(static_cast(tp_cumsum[i]) / num_pos); + } + + if (ap_version == "11point") { + // VOC2007 style for computing AP. + vector max_precs(11, 0.); + int start_idx = num - 1; + for (int j = 10; j >= 0; --j) { + for (int i = start_idx; i >= 0 ; --i) { + if ((*rec)[i] < j / 10.) 
{ + start_idx = i; + if (j > 0) { + max_precs[j-1] = max_precs[j]; + } + break; + } else { + if (max_precs[j] < (*prec)[i]) { + max_precs[j] = (*prec)[i]; + } + } + } + } + for (int j = 10; j >= 0; --j) { + *ap += max_precs[j] / 11; + } + } else if (ap_version == "MaxIntegral") { + // VOC2012 or ILSVRC style for computing AP. + float cur_rec = rec->back(); + float cur_prec = prec->back(); + for (int i = num - 2; i >= 0; --i) { + cur_prec = std::max((*prec)[i], cur_prec); + if (fabs(cur_rec - (*rec)[i]) > eps) { + *ap += cur_prec * fabs(cur_rec - (*rec)[i]); + } + cur_rec = (*rec)[i]; + } + *ap += cur_rec * cur_prec; + } else if (ap_version == "Integral") { + // Natural integral. + float prev_rec = 0.; + for (int i = 0; i < num; ++i) { + if (fabs((*rec)[i] - prev_rec) > eps) { + *ap += (*prec)[i] * fabs((*rec)[i] - prev_rec); + } + prev_rec = (*rec)[i]; + } + } else { + LOG(FATAL) << "Unknown ap_version: " << ap_version; + } +} + +#ifdef USE_OPENCV +cv::Scalar HSV2RGB(const float h, const float s, const float v) { + const int h_i = static_cast(h * 6); + const float f = h * 6 - h_i; + const float p = v * (1 - s); + const float q = v * (1 - f*s); + const float t = v * (1 - (1 - f) * s); + float r, g, b; + switch (h_i) { + case 0: + r = v; g = t; b = p; + break; + case 1: + r = q; g = v; b = p; + break; + case 2: + r = p; g = v; b = t; + break; + case 3: + r = p; g = q; b = v; + break; + case 4: + r = t; g = p; b = v; + break; + case 5: + r = v; g = p; b = q; + break; + default: + r = 1; g = 1; b = 1; + break; + } + return cv::Scalar(r * 255, g * 255, b * 255); +} + +// http://martin.ankerl.com/2009/12/09/how-to-create-random-colors-programmatically +vector GetColors(const int n) { + vector colors; + cv::RNG rng(12345); + const float golden_ratio_conjugate = 0.618033988749895; + const float s = 0.3; + const float v = 0.99; + for (int i = 0; i < n; ++i) { + const float h = std::fmod(rng.uniform(0.f, 1.f) + golden_ratio_conjugate, + 1.f); + colors.push_back(HSV2RGB(h, s, 
v)); + } + return colors; +} + +static clock_t start_clock = clock(); +static cv::VideoWriter cap_out; + +template +void VisualizeBBox(const vector& images, const Blob* detections, + const float threshold, const vector& colors, + const map& label_to_display_name, + const string& save_file) { + // Retrieve detections. + CHECK_EQ(detections->width(), 7); + const int num_det = detections->height(); + const int num_img = images.size(); + if (num_det == 0 || num_img == 0) { + return; + } + // Comute FPS. + float fps = num_img / (static_cast(clock() - start_clock) / + CLOCKS_PER_SEC); + + const Dtype* detections_data = detections->cpu_data(); + const int width = images[0].cols; + const int height = images[0].rows; + vector all_detections(num_img); + for (int i = 0; i < num_det; ++i) { + const int img_idx = detections_data[i * 7]; + CHECK_LT(img_idx, num_img); + const int label = detections_data[i * 7 + 1]; + const float score = detections_data[i * 7 + 2]; + if (score < threshold) { + continue; + } + NormalizedBBox bbox; + bbox.set_xmin(detections_data[i * 7 + 3] * width); + bbox.set_ymin(detections_data[i * 7 + 4] * height); + bbox.set_xmax(detections_data[i * 7 + 5] * width); + bbox.set_ymax(detections_data[i * 7 + 6] * height); + bbox.set_score(score); + all_detections[img_idx][label].push_back(bbox); + } + + int fontface = cv::FONT_HERSHEY_SIMPLEX; + double scale = 1; + int thickness = 2; + int baseline = 0; + char buffer[50]; + for (int i = 0; i < num_img; ++i) { + cv::Mat image = images[i]; + // Show FPS. + snprintf(buffer, sizeof(buffer), "FPS: %.2f", fps); + cv::Size text = cv::getTextSize(buffer, fontface, scale, thickness, + &baseline); + cv::rectangle(image, cv::Point(0, 0), + cv::Point(text.width, text.height + baseline), + CV_RGB(255, 255, 255), CV_FILLED); + cv::putText(image, buffer, cv::Point(0, text.height + baseline / 2.), + fontface, scale, CV_RGB(0, 0, 0), thickness, 8); + // Draw bboxes. 
+ for (map >::iterator it = + all_detections[i].begin(); it != all_detections[i].end(); ++it) { + int label = it->first; + string label_name = "Unknown"; + if (label_to_display_name.find(label) != label_to_display_name.end()) { + label_name = label_to_display_name.find(label)->second; + } + CHECK_LT(label, colors.size()); + const cv::Scalar& color = colors[label]; + const vector& bboxes = it->second; + for (int j = 0; j < bboxes.size(); ++j) { + cv::Point top_left_pt(bboxes[j].xmin(), bboxes[j].ymin()); + cv::Point bottom_right_pt(bboxes[j].xmax(), bboxes[j].ymax()); + cv::rectangle(image, top_left_pt, bottom_right_pt, color, 4); + cv::Point bottom_left_pt(bboxes[j].xmin(), bboxes[j].ymax()); + snprintf(buffer, sizeof(buffer), "%s: %.2f", label_name.c_str(), + bboxes[j].score()); + cv::Size text = cv::getTextSize(buffer, fontface, scale, thickness, + &baseline); + cv::rectangle( + image, bottom_left_pt + cv::Point(0, 0), + bottom_left_pt + cv::Point(text.width, -text.height-baseline), + color, CV_FILLED); + cv::putText(image, buffer, bottom_left_pt - cv::Point(0, baseline), + fontface, scale, CV_RGB(0, 0, 0), thickness, 8); + } + } + // Save result if required. 
+ if (!save_file.empty()) { + if (!cap_out.isOpened()) { + cv::Size size(image.size().width, image.size().height); + cv::VideoWriter outputVideo(save_file, CV_FOURCC('D', 'I', 'V', 'X'), + 30, size, true); + cap_out = outputVideo; + } + cap_out.write(image); + } + cv::imshow("detections", image); + if (cv::waitKey(1) == 27) { + raise(SIGINT); + } + } + start_clock = clock(); +} + +template +void VisualizeBBox(const vector& images, + const Blob* detections, + const float threshold, const vector& colors, + const map& label_to_display_name, + const string& save_file); +template +void VisualizeBBox(const vector& images, + const Blob* detections, + const float threshold, const vector& colors, + const map& label_to_display_name, + const string& save_file); + +#endif // USE_OPENCV + +} // namespace caffe diff --git a/src/caffe/util/bbox_util.cu b/src/caffe/util/bbox_util.cu new file mode 100644 index 00000000000..538c227ee7c --- /dev/null +++ b/src/caffe/util/bbox_util.cu @@ -0,0 +1,649 @@ +#include +#include +#include +#include + +#include "thrust/functional.h" +#include "thrust/sort.h" + +#include "caffe/common.hpp" +#include "caffe/util/bbox_util.hpp" + +namespace caffe { + +template +__host__ __device__ Dtype BBoxSizeGPU(const Dtype* bbox, + const bool normalized) { + if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) { + // If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0. + return Dtype(0.); + } else { + const Dtype width = bbox[2] - bbox[0]; + const Dtype height = bbox[3] - bbox[1]; + if (normalized) { + return width * height; + } else { + // If bbox is not within range [0, 1]. 
+ return (width + 1) * (height + 1); + } + } +} + +template __host__ __device__ float BBoxSizeGPU(const float* bbox, + const bool normalized); +template __host__ __device__ double BBoxSizeGPU(const double* bbox, + const bool normalized); + +template +__host__ __device__ Dtype JaccardOverlapGPU(const Dtype* bbox1, + const Dtype* bbox2) { + if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] || + bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) { + return Dtype(0.); + } else { + const Dtype inter_xmin = max(bbox1[0], bbox2[0]); + const Dtype inter_ymin = max(bbox1[1], bbox2[1]); + const Dtype inter_xmax = min(bbox1[2], bbox2[2]); + const Dtype inter_ymax = min(bbox1[3], bbox2[3]); + + const Dtype inter_width = inter_xmax - inter_xmin; + const Dtype inter_height = inter_ymax - inter_ymin; + const Dtype inter_size = inter_width * inter_height; + + const Dtype bbox1_size = BBoxSizeGPU(bbox1); + const Dtype bbox2_size = BBoxSizeGPU(bbox2); + + return inter_size / (bbox1_size + bbox2_size - inter_size); + } +} + +template __host__ __device__ float JaccardOverlapGPU(const float* bbox1, + const float* bbox2); +template __host__ __device__ double JaccardOverlapGPU(const double* bbox1, + const double* bbox2); + +template +__device__ Dtype Min(const Dtype x, const Dtype y) { + return x < y ? x : y; +} + +template +__device__ Dtype Max(const Dtype x, const Dtype y) { + return x > y ? 
x : y; +} + +template +__device__ void ClipBBoxGPU(const Dtype* bbox, Dtype* clip_bbox) { + for (int i = 0; i < 4; ++i) { + clip_bbox[i] = Max(Min(bbox[i], Dtype(1.)), Dtype(0.)); + } +} + +template __device__ void ClipBBoxGPU(const float* bbox, float* clip_bbox); +template __device__ void ClipBBoxGPU(const double* bbox, double* clip_bbox); + +template +__global__ void DecodeBBoxesKernel(const int nthreads, + const Dtype* loc_data, const Dtype* prior_data, + const CodeType code_type, const bool variance_encoded_in_target, + const int num_priors, const bool share_location, + const int num_loc_classes, const int background_label_id, + const bool clip_bbox, Dtype* bbox_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int i = index % 4; + const int c = (index / 4) % num_loc_classes; + const int d = (index / 4 / num_loc_classes) % num_priors; + if (!share_location && c == background_label_id) { + // Ignore background class if not share_location. + return; + } + const int pi = d * 4; + const int vi = pi + num_priors * 4; + if (code_type == PriorBoxParameter_CodeType_CORNER) { + if (variance_encoded_in_target) { + // variance is encoded in target, we simply need to add the offset + // predictions. + bbox_data[index] = prior_data[pi + i] + loc_data[index]; + } else { + // variance is encoded in bbox, we need to scale the offset accordingly. 
+ bbox_data[index] = + prior_data[pi + i] + loc_data[index] * prior_data[vi + i]; + } + } else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) { + const Dtype p_xmin = prior_data[pi]; + const Dtype p_ymin = prior_data[pi + 1]; + const Dtype p_xmax = prior_data[pi + 2]; + const Dtype p_ymax = prior_data[pi + 3]; + const Dtype prior_width = p_xmax - p_xmin; + const Dtype prior_height = p_ymax - p_ymin; + const Dtype prior_center_x = (p_xmin + p_xmax) / 2.; + const Dtype prior_center_y = (p_ymin + p_ymax) / 2.; + + const Dtype xmin = loc_data[index - i]; + const Dtype ymin = loc_data[index - i + 1]; + const Dtype xmax = loc_data[index - i + 2]; + const Dtype ymax = loc_data[index - i + 3]; + + Dtype decode_bbox_center_x, decode_bbox_center_y; + Dtype decode_bbox_width, decode_bbox_height; + if (variance_encoded_in_target) { + // variance is encoded in target, we simply need to retore the offset + // predictions. + decode_bbox_center_x = xmin * prior_width + prior_center_x; + decode_bbox_center_y = ymin * prior_height + prior_center_y; + decode_bbox_width = exp(xmax) * prior_width; + decode_bbox_height = exp(ymax) * prior_height; + } else { + // variance is encoded in bbox, we need to scale the offset accordingly. 
+ decode_bbox_center_x = + prior_data[vi] * xmin * prior_width + prior_center_x; + decode_bbox_center_y = + prior_data[vi + 1] * ymin * prior_height + prior_center_y; + decode_bbox_width = + exp(prior_data[vi + 2] * xmax) * prior_width; + decode_bbox_height = + exp(prior_data[vi + 3] * ymax) * prior_height; + } + + switch (i) { + case 0: + bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.; + break; + case 1: + bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.; + break; + case 2: + bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.; + break; + case 3: + bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.; + break; + } + } else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) { + const Dtype p_xmin = prior_data[pi]; + const Dtype p_ymin = prior_data[pi + 1]; + const Dtype p_xmax = prior_data[pi + 2]; + const Dtype p_ymax = prior_data[pi + 3]; + const Dtype prior_width = p_xmax - p_xmin; + const Dtype prior_height = p_ymax - p_ymin; + Dtype p_size; + if (i == 0 || i == 2) { + p_size = prior_width; + } else { + p_size = prior_height; + } + if (variance_encoded_in_target) { + // variance is encoded in target, we simply need to add the offset + // predictions. + bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size; + } else { + // variance is encoded in bbox, we need to scale the offset accordingly. + bbox_data[index] = + prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size; + } + } else { + // Unknown code type. 
+ } + if (clip_bbox) { + bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.)); + } + } +} + +template +void DecodeBBoxesGPU(const int nthreads, + const Dtype* loc_data, const Dtype* prior_data, + const CodeType code_type, const bool variance_encoded_in_target, + const int num_priors, const bool share_location, + const int num_loc_classes, const int background_label_id, + const bool clip_bbox, Dtype* bbox_data) { + // NOLINT_NEXT_LINE(whitespace/operators) + DecodeBBoxesKernel<<>>(nthreads, loc_data, prior_data, code_type, + variance_encoded_in_target, num_priors, share_location, num_loc_classes, + background_label_id, clip_bbox, bbox_data); + CUDA_POST_KERNEL_CHECK; +} + +template void DecodeBBoxesGPU(const int nthreads, + const float* loc_data, const float* prior_data, + const CodeType code_type, const bool variance_encoded_in_target, + const int num_priors, const bool share_location, + const int num_loc_classes, const int background_label_id, + const bool clip_bbox, float* bbox_data); +template void DecodeBBoxesGPU(const int nthreads, + const double* loc_data, const double* prior_data, + const CodeType code_type, const bool variance_encoded_in_target, + const int num_priors, const bool share_location, + const int num_loc_classes, const int background_label_id, + const bool clip_bbox, double* bbox_data); + +template +__global__ void PermuteDataKernel(const int nthreads, + const Dtype* data, const int num_classes, const int num_data, + const int num_dim, Dtype* new_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int i = index % num_dim; + const int c = (index / num_dim) % num_classes; + const int d = (index / num_dim / num_classes) % num_data; + const int n = index / num_dim / num_classes / num_data; + const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i; + new_data[new_index] = data[index]; + } +} + +template +void PermuteDataGPU(const int nthreads, + const Dtype* data, const int num_classes, const int num_data, + const 
int num_dim, Dtype* new_data) { + // NOLINT_NEXT_LINE(whitespace/operators) + PermuteDataKernel<<>>(nthreads, data, num_classes, num_data, + num_dim, new_data); + CUDA_POST_KERNEL_CHECK; +} + +template void PermuteDataGPU(const int nthreads, + const float* data, const int num_classes, const int num_data, + const int num_dim, float* new_data); +template void PermuteDataGPU(const int nthreads, + const double* data, const int num_classes, const int num_data, + const int num_dim, double* new_data); + +template +__global__ void kernel_channel_max(const int num, const int channels, + const int spatial_dim, const Dtype* data, Dtype* out) { + CUDA_KERNEL_LOOP(index, num * spatial_dim) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype maxval = -FLT_MAX; + for (int c = 0; c < channels; ++c) { + maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); + } + out[index] = maxval; + } +} + +template +__global__ void kernel_channel_subtract(const int count, + const int num, const int channels, + const int spatial_dim, const Dtype* channel_data, const Dtype* channel_max, + Dtype* data) { + CUDA_KERNEL_LOOP(index, count) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] = channel_data[index] - channel_max[n * spatial_dim + s]; + } +} + +template +__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { + CUDA_KERNEL_LOOP(index, count) { + out[index] = exp(data[index]); + } +} + +template +__global__ void kernel_channel_sum(const int num, const int channels, + const int spatial_dim, const Dtype* data, Dtype* channel_sum) { + CUDA_KERNEL_LOOP(index, num * spatial_dim) { + int n = index / spatial_dim; + int s = index % spatial_dim; + Dtype sum = 0; + for (int c = 0; c < channels; ++c) { + sum += data[(n * channels + c) * spatial_dim + s]; + } + channel_sum[index] = sum; + } +} + +template +__global__ void kernel_channel_div(const int count, + const int num, const int channels, + const int 
spatial_dim, const Dtype* channel_sum, Dtype* data) { + CUDA_KERNEL_LOOP(index, count) { + int n = index / channels / spatial_dim; + int s = index % spatial_dim; + data[index] /= channel_sum[n * spatial_dim + s]; + } +} + +template +void SoftMaxGPU(const Dtype* data, const int outer_num, + const int channels, const int inner_num, Dtype* prob) { + vector shape(4, 1); + shape[0] = outer_num; + shape[1] = channels; + shape[2] = inner_num; + Blob scale(shape); + Dtype* scale_data = scale.mutable_gpu_data(); + int count = outer_num * channels * inner_num; + // We need to subtract the max to avoid numerical issues, compute the exp, + // and then normalize. + // compute max + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_max<<>>(outer_num, channels, inner_num, data, + scale_data); + // subtract + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_subtract<<>>(count, outer_num, channels, inner_num, + data, scale_data, prob); + // exponentiate + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_exp<<>>( + count, prob, prob); + // sum after exp + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_sum<<>>(outer_num, channels, inner_num, prob, + scale_data); + // divide + // NOLINT_NEXT_LINE(whitespace/operators) + kernel_channel_div<<>>(count, outer_num, channels, inner_num, + scale_data, prob); +} + +template void SoftMaxGPU(const float* data, const int outer_num, + const int channels, const int inner_num, float* prob); +template void SoftMaxGPU(const double* data, const int outer_num, + const int channels, const int inner_num, double* prob); + +template +__global__ void ComputeOverlappedKernel(const int nthreads, + const Dtype* bbox_data, const int num_bboxes, const int num_classes, + const Dtype overlap_threshold, bool* overlapped_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int j = index % num_bboxes; + const int i = (index / num_bboxes) % num_bboxes; + if (i == j) { + // Ignore same bbox. 
+ return; + } + const int c = (index / num_bboxes / num_bboxes) % num_classes; + const int n = index / num_bboxes / num_bboxes / num_classes; + // Compute overlap between i-th bbox and j-th bbox. + const int start_loc_i = ((n * num_bboxes + i) * num_classes + c) * 4; + const int start_loc_j = ((n * num_bboxes + j) * num_classes + c) * 4; + const Dtype overlap = JaccardOverlapGPU(bbox_data + start_loc_i, + bbox_data + start_loc_j); + if (overlap > overlap_threshold) { + overlapped_data[index] = true; + } + } +} + +template +void ComputeOverlappedGPU(const int nthreads, + const Dtype* bbox_data, const int num_bboxes, const int num_classes, + const Dtype overlap_threshold, bool* overlapped_data) { + // NOLINT_NEXT_LINE(whitespace/operators) + ComputeOverlappedKernel<<>>(nthreads, bbox_data, num_bboxes, num_classes, + overlap_threshold, overlapped_data); + CUDA_POST_KERNEL_CHECK; +} + +template void ComputeOverlappedGPU(const int nthreads, + const float* bbox_data, const int num_bboxes, const int num_classes, + const float overlap_threshold, bool* overlapped_data); +template void ComputeOverlappedGPU(const int nthreads, + const double* bbox_data, const int num_bboxes, const int num_classes, + const double overlap_threshold, bool* overlapped_data); + +template +__global__ void ComputeOverlappedByIdxKernel(const int nthreads, + const Dtype* bbox_data, const Dtype overlap_threshold, + const int* idx, const int num_idx, bool* overlapped_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int j = index % num_idx; + const int i = (index / num_idx); + if (i == j) { + // Ignore same bbox. + return; + } + // Compute overlap between i-th bbox and j-th bbox. 
+ const int start_loc_i = idx[i] * 4; + const int start_loc_j = idx[j] * 4; + const Dtype overlap = JaccardOverlapGPU(bbox_data + start_loc_i, + bbox_data + start_loc_j); + if (overlap > overlap_threshold) { + overlapped_data[index] = true; + } + } +} + +template +void ComputeOverlappedByIdxGPU(const int nthreads, + const Dtype* bbox_data, const Dtype overlap_threshold, + const int* idx, const int num_idx, bool* overlapped_data) { + // NOLINT_NEXT_LINE(whitespace/operators) + ComputeOverlappedByIdxKernel<<>>(nthreads, bbox_data, overlap_threshold, + idx, num_idx, overlapped_data); + CUDA_POST_KERNEL_CHECK; +} + +template void ComputeOverlappedByIdxGPU(const int nthreads, + const float* bbox_data, const float overlap_threshold, + const int* idx, const int num_idx, bool* overlapped_data); +template void ComputeOverlappedByIdxGPU(const int nthreads, + const double* bbox_data, const double overlap_threshold, + const int* idx, const int num_idx, bool* overlapped_data); + +template +void ApplyNMSGPU(const Dtype* bbox_data, const Dtype* conf_data, + const int num_bboxes, const float confidence_threshold, + const int top_k, const float nms_threshold, vector* indices) { + // Keep part of detections whose scores are higher than confidence threshold. + vector idx; + vector confidences; + for (int i = 0; i < num_bboxes; ++i) { + if (conf_data[i] > confidence_threshold) { + idx.push_back(i); + confidences.push_back(conf_data[i]); + } + } + int num_remain = confidences.size(); + if (num_remain == 0) { + return; + } + // Sort detections based on score. + thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0], + thrust::greater()); + if (top_k > -1 && top_k < num_remain) { + num_remain = top_k; + } + + // Compute overlap between remaining detections. 
+ Blob idx_blob(1, 1, 1, num_remain); + int* idx_data = idx_blob.mutable_cpu_data(); + std::copy(idx.begin(), idx.begin() + num_remain, idx_data); + + Blob overlapped(1, 1, num_remain, num_remain); + const int total_bboxes = overlapped.count(); + bool* overlapped_data = overlapped.mutable_gpu_data(); + ComputeOverlappedByIdxGPU(total_bboxes, bbox_data, nms_threshold, + idx_blob.gpu_data(), num_remain, overlapped_data); + + // Do non-maximum suppression based on overlapped results. + const bool* overlapped_results = overlapped.cpu_data(); + vector selected_indices; + ApplyNMS(overlapped_results, num_remain, &selected_indices); + + // Put back the selected information. + for (int i = 0; i < selected_indices.size(); ++i) { + indices->push_back(idx[selected_indices[i]]); + } +} + +template +void ApplyNMSGPU(const float* bbox_data, const float* conf_data, + const int num_bboxes, const float confidence_threshold, + const int top_k, const float nms_threshold, vector* indices); +template +void ApplyNMSGPU(const double* bbox_data, const double* conf_data, + const int num_bboxes, const float confidence_threshold, + const int top_k, const float nms_threshold, vector* indices); + +template +__global__ void GetDetectionsKernel(const int nthreads, + const Dtype* bbox_data, const Dtype* conf_data, const int image_id, + const int label, const int* indices, const bool clip_bbox, + Dtype* detection_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + const int det_idx = indices[index]; + detection_data[index * 7] = image_id; + detection_data[index * 7 + 1] = label; + detection_data[index * 7 + 2] = conf_data[det_idx]; + if (clip_bbox) { + ClipBBoxGPU(&(bbox_data[det_idx * 4]), &(detection_data[index * 7 + 3])); + } else { + for (int i = 0; i < 4; ++i) { + detection_data[index * 7 + 3 + i] = bbox_data[det_idx * 4 + i]; + } + } + } +} + +template +void GetDetectionsGPU(const Dtype* bbox_data, const Dtype* conf_data, + const int image_id, const int label, const vector& indices, + const bool 
clip_bbox, Blob* detection_blob) { + // Store selected indices in array. + int num_det = indices.size(); + if (num_det == 0) { + return; + } + Blob idx_blob(1, 1, 1, num_det); + int* idx_data = idx_blob.mutable_cpu_data(); + std::copy(indices.begin(), indices.end(), idx_data); + // Prepare detection_blob. + detection_blob->Reshape(1, 1, num_det, 7); + Dtype* detection_data = detection_blob->mutable_gpu_data(); + // NOLINT_NEXT_LINE(whitespace/operators) + GetDetectionsKernel<<>>(num_det, bbox_data, conf_data, image_id, label, + idx_blob.gpu_data(), clip_bbox, detection_data); + CUDA_POST_KERNEL_CHECK; +} + +template void GetDetectionsGPU(const float* bbox_data, const float* conf_data, + const int image_id, const int label, const vector& indices, + const bool clip_bbox, Blob* detection_blob); +template void GetDetectionsGPU(const double* bbox_data, const double* conf_data, + const int image_id, const int label, const vector& indices, + const bool clip_bbox, Blob* detection_blob); + +template +__global__ void ComputeConfLossKernel(const int nthreads, + const Dtype* conf_data, const int num_preds_per_class, + const int num_classes, const ConfLossType loss_type, + const Dtype* match_data, Dtype* conf_loss_data) { + CUDA_KERNEL_LOOP(index, nthreads) { + int label = match_data[index]; + int num = index / num_preds_per_class; + int p = index % num_preds_per_class; + int start_idx = (num * num_preds_per_class + p) * num_classes; + Dtype loss = 0; + if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) { + // Compute softmax probability. 
+ Dtype prob = conf_data[start_idx + label]; + loss = -log(Max(prob, Dtype(FLT_MIN))); + } else if (loss_type == MultiBoxLossParameter_ConfLossType_LOGISTIC) { + int target = 0; + for (int c = 0; c < num_classes; ++c) { + if (c == label) { + target = 1; + } else { + target = 0; + } + Dtype input = conf_data[start_idx + c]; + loss -= input * (target - (input >= 0)) - + log(1 + exp(input - 2 * input * (input >= 0))); + } + } + conf_loss_data[index] = loss; + } +} + +template +void ComputeConfLossGPU(const Blob& conf_blob, const int num, + const int num_preds_per_class, const int num_classes, + const int background_label_id, const ConfLossType loss_type, + const vector > >& all_match_indices, + const map >& all_gt_bboxes, + vector >* all_conf_loss) { + CHECK_LT(background_label_id, num_classes); + Blob match_blob(num, num_preds_per_class, 1, 1); + Dtype* match_data = match_blob.mutable_cpu_data(); + for (int i = 0; i < num; ++i) { + const map >& match_indices = all_match_indices[i]; + for (int p = 0; p < num_preds_per_class; ++p) { + // Get the label index. + int label = background_label_id; + for (map >::const_iterator it = + match_indices.begin(); it != match_indices.end(); ++it) { + const vector& match_index = it->second; + CHECK_EQ(match_index.size(), num_preds_per_class); + if (match_index[p] > -1) { + CHECK(all_gt_bboxes.find(i) != all_gt_bboxes.end()); + const vector& gt_bboxes = + all_gt_bboxes.find(i)->second; + CHECK_LT(match_index[p], gt_bboxes.size()); + label = gt_bboxes[match_index[p]].label(); + CHECK_GE(label, 0); + CHECK_NE(label, background_label_id); + CHECK_LT(label, num_classes); + // A prior can only be matched to one gt bbox. + break; + } + } + match_data[i * num_preds_per_class + p] = label; + } + } + // Get probability data. 
+ const Dtype* conf_gpu_data = conf_blob.gpu_data(); + Blob prob_blob; + prob_blob.ReshapeLike(conf_blob); + if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) { + Dtype* prob_gpu_data = prob_blob.mutable_gpu_data(); + SoftMaxGPU(conf_blob.gpu_data(), num * num_preds_per_class, num_classes, 1, + prob_gpu_data); + conf_gpu_data = prob_blob.gpu_data(); + } + // Compute the loss. + Blob conf_loss_blob(num, num_preds_per_class, 1, 1); + Dtype* conf_loss_gpu_data = conf_loss_blob.mutable_gpu_data(); + const int num_threads = num * num_preds_per_class; + // NOLINT_NEXT_LINE(whitespace/operators) + ComputeConfLossKernel<<>>(num_threads, conf_gpu_data, num_preds_per_class, + num_classes, loss_type, match_blob.gpu_data(), conf_loss_gpu_data); + // Save the loss. + all_conf_loss->clear(); + const Dtype* loss_data = conf_loss_blob.cpu_data(); + for (int i = 0; i < num; ++i) { + vector conf_loss(loss_data, loss_data + num_preds_per_class); + all_conf_loss->push_back(conf_loss); + loss_data += num_preds_per_class; + } +} + +// Explicit initialization. 
+template void ComputeConfLossGPU(const Blob& conf_data, const int num, + const int num_preds_per_class, const int num_classes, + const int background_label_id, const ConfLossType loss_type, + const vector > >& all_match_indices, + const map >& all_gt_bboxes, + vector >* all_conf_loss); +template void ComputeConfLossGPU(const Blob& conf_data, const int num, + const int num_preds_per_class, const int num_classes, + const int background_label_id, const ConfLossType loss_type, + const vector > >& all_match_indices, + const map >& all_gt_bboxes, + vector >* all_conf_loss); + +} // namespace caffe diff --git a/src/caffe/util/benchmark.cpp b/src/caffe/util/benchmark.cpp index 1d269c351c1..a9058ba3234 100644 --- a/src/caffe/util/benchmark.cpp +++ b/src/caffe/util/benchmark.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include "caffe/common.hpp" diff --git a/src/caffe/util/blocking_queue.cpp b/src/caffe/util/blocking_queue.cpp index 058668fe28c..2231568a4a1 100644 --- a/src/caffe/util/blocking_queue.cpp +++ b/src/caffe/util/blocking_queue.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include @@ -88,9 +125,10 @@ size_t BlockingQueue::size() const { template class BlockingQueue*>; template class BlockingQueue*>; -template class BlockingQueue; +template class BlockingQueue; template class BlockingQueue >; template class BlockingQueue*>; template class BlockingQueue*>; +template class BlockingQueue; } // namespace caffe diff --git a/src/caffe/util/cpu_info.cpp b/src/caffe/util/cpu_info.cpp new file mode 100644 index 00000000000..ff535d819aa --- /dev/null +++ b/src/caffe/util/cpu_info.cpp @@ -0,0 +1,485 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include + +#include +#include +#include +#include + +#include "caffe/util/cpu_info.hpp" + +namespace caffe { +namespace cpu { + +Processor::Processor() { + processor = 0; + physicalId = 0; + siblings = 0; + coreId = 0; + cpuCores = 0; + speedMHz = 0; +} + +CpuInfo::CpuInfo() { + loadContentFromFile("/proc/cpuinfo"); +} + +CpuInfo::CpuInfo(const char *content) { + loadContent(content); +} + +void CpuInfo::loadContentFromFile(const char *fileName) { + std::ifstream file(fileName); + std::string content( + (std::istreambuf_iterator<char>(file)), + (std::istreambuf_iterator<char>())); + + loadContent(content.c_str()); +} + +void CpuInfo::loadContent(const char *content) { + size_t contentLength = strlen(content); + char *contentCopy = new char[contentLength + 1]; + snprintf(contentCopy, contentLength + 1, "%s", content); + + parseLines(contentCopy); + + fileContentBegin = contentCopy; + fileContentEnd = &contentCopy[contentLength]; + currentLine = NULL; +} + +CpuInfo::~CpuInfo() { + delete [] fileContentBegin; +} + +void CpuInfo::parseLines(char *content) { + for (; *content; content++) { + if (*content == '\n') { + *content = '\0'; + } + } +} + +const char *CpuInfo::getFirstLine() { + currentLine = fileContentBegin < fileContentEnd ? fileContentBegin : NULL; + return getNextLine(); +} + +const char *CpuInfo::getNextLine() { + if (!currentLine) { + return NULL; + } + + const char *savedCurrentLine = currentLine; + while (*(currentLine++)) { + } + + if (currentLine >= fileContentEnd) { + currentLine = NULL; + } + + return savedCurrentLine; +} + +Collection::Collection(CpuInfoInterface *cpuInfo) : cpuInfo(*cpuInfo) { + totalNumberOfSockets = 0; + totalNumberOfCpuCores = 0; + currentProcessor = NULL; + + processors.reserve(96); + + parseCpuInfo(); + collectBasicCpuInformation(); +} + +unsigned Collection::getProcessorSpeedMHz() { + return processors.size() ? 
processors[0].speedMHz : 0; +} + +unsigned Collection::getTotalNumberOfSockets() { + return totalNumberOfSockets; +} + +unsigned Collection::getTotalNumberOfCpuCores() { + return totalNumberOfCpuCores; +} + +unsigned Collection::getNumberOfProcessors() { + return processors.size(); +} + +const Processor &Collection::getProcessor(unsigned processorId) { + return processors[processorId]; +} + +void Collection::parseCpuInfo() { + const char *cpuInfoLine = cpuInfo.getFirstLine(); + for (; cpuInfoLine; cpuInfoLine = cpuInfo.getNextLine()) { + parseCpuInfoLine(cpuInfoLine); + } +} + +void Collection::parseCpuInfoLine(const char *cpuInfoLine) { + int delimiterPosition = strcspn(cpuInfoLine, ":"); + + if (cpuInfoLine[delimiterPosition] == '\0') { + currentProcessor = NULL; + } else { + parseValue(cpuInfoLine, &cpuInfoLine[delimiterPosition + 2]); + } +} + +void Collection::parseValue(const char *fieldName, const char *valueString) { + if (!currentProcessor) { + appendNewProcessor(); + } + + if (beginsWith(fieldName, "processor")) { + currentProcessor->processor = parseInteger(valueString); + } + + if (beginsWith(fieldName, "physical id")) { + currentProcessor->physicalId = parseInteger(valueString); + } + + if (beginsWith(fieldName, "siblings")) { + currentProcessor->siblings = parseInteger(valueString); + } + + if (beginsWith(fieldName, "core id")) { + currentProcessor->coreId = parseInteger(valueString); + } + + if (beginsWith(fieldName, "cpu cores")) { + currentProcessor->cpuCores = parseInteger(valueString); + } + + if (beginsWith(fieldName, "model name")) { + currentProcessor->speedMHz = extractSpeedFromModelName(valueString); + } +} + +void Collection::appendNewProcessor() { + processors.push_back(Processor()); + currentProcessor = &processors.back(); +} + +bool Collection::beginsWith(const char *lineBuffer, const char *text) const { + while (*text) { + if (*(lineBuffer++) != *(text++)) { + return false; + } + } + + return true; +} + +unsigned 
Collection::parseInteger(const char *text) const { + return atol(text); +} + +/* Function extracts CPU speed from model name. If unit is not set it is + assumed that values below 100 are specified in GHz, otherwise MHz */ +unsigned Collection::extractSpeedFromModelName(const char *text) const { + text = strstr(text, "@"); + if (!text) { + return 0; + } + + char *unit; + double speed = strtod(&text[1], &unit); + + while (isspace(*unit)) { + unit++; + } + + bool isMHz = !strncmp(unit, "MHz", 3); + bool isGHz = !strncmp(unit, "GHz", 3); + bool isGHzPossible = (speed < 100); + + if (isGHz || (isGHzPossible && !isMHz)) { + return 1000 * speed + 0.5; + } else { + return speed + 0.5; + } +} + +void Collection::collectBasicCpuInformation() { + std::set<unsigned> uniquePhysicalId; + std::vector<Processor>::iterator processor = processors.begin(); + for (; processor != processors.end(); processor++) { + uniquePhysicalId.insert(processor->physicalId); + updateCpuInformation(*processor, uniquePhysicalId.size()); + } +} + +void Collection::updateCpuInformation(const Processor &processor, + unsigned numberOfUniquePhysicalId) { + if (totalNumberOfSockets == numberOfUniquePhysicalId) { + return; + } + + totalNumberOfSockets = numberOfUniquePhysicalId; + totalNumberOfCpuCores += processor.cpuCores; +} + +#ifdef _OPENMP + +/* The OpenMpManager class is responsible for determining a set of all of + available CPU cores and delegating each core to perform other tasks. The + first of available cores is delegated for background threads, while other + remaining cores are dedicated for OpenMP threads. Each OpenMP thread owns + one core for exclusive use. The number of OpenMP threads is then limited + to the number of available cores minus one. The amount of CPU cores may + be limited by system eg. when numactl was used. 
*/ + +#include +#include + +static const char *openMpEnvVars[] = { + "OMP_CANCELLATION", "OMP_DISPLAY_ENV", "OMP_DEFAULT_DEVICE", "OMP_DYNAMIC", + "OMP_MAX_ACTIVE_LEVELS", "OMP_MAX_TASK_PRIORITY", "OMP_NESTED", + "OMP_NUM_THREADS", "OMP_PROC_BIND", "OMP_PLACES", "OMP_STACKSIZE", + "OMP_SCHEDULE", "OMP_THREAD_LIMIT", "OMP_WAIT_POLICY", "GOMP_CPU_AFFINITY", + "GOMP_DEBUG", "GOMP_STACKSIZE", "GOMP_SPINCOUNT", "GOMP_RTEMS_THREAD_POOLS", + "KMP_AFFINITY", "KMP_NUM_THREADS", "MIC_KMP_AFFINITY", + "MIC_OMP_NUM_THREADS", "MIC_OMP_PROC_BIND", "PHI_KMP_AFFINITY", + "PHI_OMP_NUM_THREADS", "PHI_KMP_PLACE_THREADS", "MKL_NUM_THREADS", + "MKL_DYNAMIC", "MKL_DOMAIN_NUM_THREADS" +}; + +static const unsigned numberOfOpenMpEnvVars = + sizeof(openMpEnvVars) / sizeof(openMpEnvVars[0]); + +OpenMpManager::OpenMpManager(Collection *collection) : + mainThreadId(boost::this_thread::get_id()), + collection(*collection) { + getOpenMpEnvVars(); + getCurrentCpuSet(); + getCurrentCoreSet(); +} + +OpenMpManager &OpenMpManager::getInstance() { + static CpuInfo cpuInfo; + static Collection collection(&cpuInfo); + static OpenMpManager openMpManager(&collection); + return openMpManager; +} + +void OpenMpManager::setGpuEnabled() { + OpenMpManager &openMpManager = getInstance(); + openMpManager.isGpuEnabled = true; +} + +void OpenMpManager::setGpuDisabled() { + OpenMpManager &openMpManager = getInstance(); + openMpManager.isGpuEnabled = false; +} + +bool OpenMpManager::isMajorThread(boost::thread::id currentThread) { + OpenMpManager &openMpManager = getInstance(); + return (boost::this_thread::get_id() == openMpManager.mainThreadId); +} + +// Ideally bind given thread to secondary logical core, if +// only one thread exists then bind to primary one +void OpenMpManager::bindCurrentThreadToNonPrimaryCoreIfPossible() { + OpenMpManager &openMpManager = getInstance(); + if (openMpManager.isThreadsBindAllowed()) { + int totalNumberOfAvailableCores = CPU_COUNT(&openMpManager.currentCoreSet); + int 
logicalCoreToBindTo = totalNumberOfAvailableCores > 1 ? 1 : 0; + openMpManager.bindCurrentThreadToLogicalCoreCpus(logicalCoreToBindTo); + } +} + +void OpenMpManager::bindOpenMpThreads() { + OpenMpManager &openMpManager = getInstance(); + + if (!openMpManager.isThreadsBindAllowed()) + return; + + openMpManager.setOpenMpThreadNumberLimit(); + #pragma omp parallel + { + unsigned logicalCoreId = omp_get_thread_num(); + openMpManager.bindCurrentThreadToLogicalCoreCpu(logicalCoreId); + } +} + +void OpenMpManager::getOpenMpEnvVars() { + isAnyOpenMpEnvVarSpecified = false; + for (unsigned i = 0; i < numberOfOpenMpEnvVars; i++) { + if (getenv(openMpEnvVars[i])) { + isAnyOpenMpEnvVarSpecified = true; + } + } +} + +void OpenMpManager::getCurrentCpuSet() { + if (sched_getaffinity(0, sizeof(currentCpuSet), &currentCpuSet)) { + getDefaultCpuSet(&currentCpuSet); + } +} + +void OpenMpManager::getDefaultCpuSet(cpu_set_t *defaultCpuSet) { + CPU_ZERO(defaultCpuSet); + unsigned numberOfProcessors = collection.getNumberOfProcessors(); + for (int processorId = 0; processorId < numberOfProcessors; processorId++) { + CPU_SET(processorId, defaultCpuSet); + } +} + +/* Function getCurrentCoreSet() fills currentCoreSet variable with a set of + available CPUs, where only one CPU per core is chosen. When multiple CPUs + of single core are used, function is selecting only first one of all + available. 
*/ + +void OpenMpManager::getCurrentCoreSet() { + unsigned numberOfProcessors = collection.getNumberOfProcessors(); + unsigned totalNumberOfCpuCores = collection.getTotalNumberOfCpuCores(); + + cpu_set_t usedCoreSet; + CPU_ZERO(&usedCoreSet); + CPU_ZERO(&currentCoreSet); + + for (int processorId = 0; processorId < numberOfProcessors; processorId++) { + if (CPU_ISSET(processorId, &currentCpuSet)) { + unsigned coreId = processorId % totalNumberOfCpuCores; + if (!CPU_ISSET(coreId, &usedCoreSet)) { + CPU_SET(coreId, &usedCoreSet); + CPU_SET(processorId, &currentCoreSet); + } + } + } +} + +void OpenMpManager::selectAllCoreCpus(cpu_set_t *set, unsigned physicalCoreId) { + unsigned numberOfProcessors = collection.getNumberOfProcessors(); + unsigned totalNumberOfCpuCores = collection.getTotalNumberOfCpuCores(); + + int processorId = physicalCoreId % totalNumberOfCpuCores; + while (processorId < numberOfProcessors) { + if (CPU_ISSET(processorId, &currentCpuSet)) { + CPU_SET(processorId, set); + } + + processorId += totalNumberOfCpuCores; + } +} + +unsigned OpenMpManager::getPhysicalCoreId(unsigned logicalCoreId) { + unsigned numberOfProcessors = collection.getNumberOfProcessors(); + + for (int processorId = 0; processorId < numberOfProcessors; processorId++) { + if (CPU_ISSET(processorId, &currentCoreSet)) { + if (!logicalCoreId--) { + return processorId; + } + } + } + + LOG(FATAL) << "This should never happen!"; + return 0; +} + +bool OpenMpManager::isThreadsBindAllowed() { + return !isAnyOpenMpEnvVarSpecified && !isGpuEnabled; +} + +// Limit of threads to number of logical cores available +void OpenMpManager::setOpenMpThreadNumberLimit() { + omp_set_num_threads(CPU_COUNT(&currentCoreSet)); +} + +void OpenMpManager::bindCurrentThreadToLogicalCoreCpu(unsigned logicalCoreId) { + unsigned physicalCoreId = getPhysicalCoreId(logicalCoreId); + + cpu_set_t set; + CPU_ZERO(&set); + CPU_SET(physicalCoreId, &set); + sched_setaffinity(0, sizeof(set), &set); +} + +void 
OpenMpManager::bindCurrentThreadToLogicalCoreCpus(unsigned logicalCoreId) { + unsigned physicalCoreId = getPhysicalCoreId(logicalCoreId); + + cpu_set_t set; + CPU_ZERO(&set); + selectAllCoreCpus(&set, physicalCoreId); + sched_setaffinity(0, sizeof(set), &set); +} + +void OpenMpManager::printVerboseInformation() { + OpenMpManager &openMpManager = getInstance(); + + LOG(INFO) << "Processor speed [MHz]: " + << openMpManager.collection.getProcessorSpeedMHz(); + + LOG(INFO) << "Total number of sockets: " + << openMpManager.collection.getTotalNumberOfSockets(); + + LOG(INFO) << "Total number of CPU cores: " + << openMpManager.collection.getTotalNumberOfCpuCores(); + + LOG(INFO) << "Total number of processors: " + << openMpManager.collection.getNumberOfProcessors(); + + LOG(INFO) << "GPU is used: " + << (openMpManager.isGpuEnabled ? "yes" : "no"); + + LOG(INFO) << "OpenMP environmental variables are specified: " + << (openMpManager.isAnyOpenMpEnvVarSpecified ? "yes" : "no"); + + LOG(INFO) << "OpenMP thread bind allowed: " + << (openMpManager.isThreadsBindAllowed() ? "yes" : "no"); + + LOG(INFO) << "Number of OpenMP threads: " + << omp_get_max_threads(); +} + +unsigned OpenMpManager::getProcessorSpeedMHz() { + OpenMpManager &openMpManager = getInstance(); + return openMpManager.collection.getProcessorSpeedMHz(); +} + +#endif // _OPENMP + +} // namespace cpu +} // namespace caffe diff --git a/src/caffe/util/cudnn.cpp b/src/caffe/util/cudnn.cpp index 1772f0099ce..c15ab5ac649 100644 --- a/src/caffe/util/cudnn.cpp +++ b/src/caffe/util/cudnn.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #ifdef USE_CUDNN #include "caffe/util/cudnn.hpp" diff --git a/src/caffe/util/db.cpp b/src/caffe/util/db.cpp index 7f22509b56e..3ded12fffd3 100644 --- a/src/caffe/util/db.cpp +++ b/src/caffe/util/db.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include "caffe/util/db.hpp" #include "caffe/util/db_leveldb.hpp" #include "caffe/util/db_lmdb.hpp" diff --git a/src/caffe/util/db_leveldb.cpp b/src/caffe/util/db_leveldb.cpp index f5c4d8a660d..7dc6b310874 100644 --- a/src/caffe/util/db_leveldb.cpp +++ b/src/caffe/util/db_leveldb.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifdef USE_LEVELDB #include "caffe/util/db_leveldb.hpp" diff --git a/src/caffe/util/db_lmdb.cpp b/src/caffe/util/db_lmdb.cpp index fb1d4956aa1..257ae9153d9 100644 --- a/src/caffe/util/db_lmdb.cpp +++ b/src/caffe/util/db_lmdb.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #ifdef USE_LMDB #include "caffe/util/db_lmdb.hpp" diff --git a/src/caffe/util/hdf5.cpp b/src/caffe/util/hdf5.cpp index 7730e76ab87..63de1dce1b0 100644 --- a/src/caffe/util/hdf5.cpp +++ b/src/caffe/util/hdf5.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include "caffe/util/hdf5.hpp" #include @@ -163,6 +200,42 @@ void hdf5_save_int(hid_t loc_id, const string& dataset_name, int i) { << "Failed to save int dataset with name " << dataset_name; } +template <> +float hdf5_load_float(hid_t loc_id, const string& dataset_name) { + float val; + herr_t status = H5LTread_dataset_float(loc_id, dataset_name.c_str(), &val); + CHECK_GE(status, 0) + << "Failed to load int dataset with name " << dataset_name; + return val; +} +template <> +double hdf5_load_float(hid_t loc_id, const string& dataset_name) { + double val; + herr_t status = H5LTread_dataset_double(loc_id, dataset_name.c_str(), &val); + CHECK_GE(status, 0) + << "Failed to load int dataset with name " << dataset_name; + return val; +} + +template <> +void hdf5_save_float(hid_t loc_id, + const string& dataset_name, float f) { + hsize_t one = 1; + herr_t status = \ + H5LTmake_dataset_float(loc_id, dataset_name.c_str(), 1, &one, &f); + CHECK_GE(status, 0) + << "Failed to save int dataset with name " << dataset_name; +} +template <> +void hdf5_save_float(hid_t loc_id, + const string& dataset_name, double f) { + hsize_t one = 1; + herr_t status = \ + H5LTmake_dataset_double(loc_id, dataset_name.c_str(), 1, &one, &f); + CHECK_GE(status, 0) + << "Failed to save int dataset with name " << dataset_name; +} + int hdf5_get_num_links(hid_t loc_id) { H5G_info_t info; herr_t status = H5Gget_info(loc_id, &info); diff --git a/src/caffe/util/im2col.cpp b/src/caffe/util/im2col.cpp old mode 100644 new mode 100755 index 114a86cb81e..43f07cbf877 --- a/src/caffe/util/im2col.cpp +++ b/src/caffe/util/im2col.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include "caffe/util/im2col.hpp" @@ -22,6 +59,7 @@ void im2col_cpu(const Dtype* data_im, const int channels, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, Dtype* data_col) { +#if 0 const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int output_w = (width + 2 * pad_w - @@ -52,6 +90,85 @@ void im2col_cpu(const Dtype* data_im, const int channels, } } } +#else + int dil_kernel_h = (kernel_h - 1) * dilation_h + 1; + int dil_kernel_w = (kernel_w - 1) * dilation_w + 1; + int height_col = (height + 2 * pad_h - dil_kernel_h) / stride_h + 1; + int width_col = (width + 2 * pad_w - dil_kernel_w) / stride_w + 1; + int channels_col = channels * kernel_h * kernel_w; + #ifdef _OPENMP + #pragma omp parallel for + #endif + for (int c = 0; c < channels_col; ++c) { + int w_offset = c % kernel_w; + int h_offset = (c / kernel_w) % kernel_h; + int c_im = c / kernel_h / kernel_w; + + const int hc0 = h_offset * dilation_h - pad_h; + const int wc0 = w_offset * dilation_w - pad_w; + for (int h = 0; h < height_col; ++h) { + int h_pad = h * stride_h + hc0; + + const int row_offset = (c * height_col + h) * width_col; + const int srow_offset = (c_im * height + h_pad) * width; + for (int w = 0; w < width_col; ++w) { + int w_pad = w * stride_w + wc0; + if ((((unsigned)h_pad) < ((unsigned)height)) && (((unsigned)w_pad) < ((unsigned)width))) + data_col[row_offset + w] = data_im[srow_offset + w_pad]; + else { + data_col[row_offset + w] = 0.; + } + } + } + } +#endif +} + +template +void im3d2col_cpu(const Dtype* data_im, const int channels, + const int depth, const int height, const int width, + const int kernel_d, const int kernel_h, const int kernel_w, + const int pad_d, const int pad_h, const int pad_w, + const int stride_d, const int stride_h, const int stride_w, + const int dilation_d, const int dilation_h, const int dilation_w, + Dtype* data_col) { + // LOG(ERROR) << "image size: " << depth << ", 
" << height << ", " << width; + // LOG(ERROR) << "kernel size: " << kernel_d << ", " << kernel_h << ", " << kernel_w; + + // Implicit dilated kernel size + long dil_kernel_h = (kernel_h - 1) * dilation_h + 1; + long dil_kernel_w = (kernel_w - 1) * dilation_w + 1; + long dil_kernel_d = (kernel_d - 1) * dilation_d + 1; + long height_col = (height + 2 * pad_h - dil_kernel_h) / stride_h + 1; + long width_col = (width + 2 * pad_w - dil_kernel_w) / stride_w + 1; + long depth_col = (depth + 2 * pad_d - dil_kernel_d) / stride_d + 1; + long channels_col = channels * kernel_h * kernel_w * kernel_d; + #ifdef _OPENMP + #pragma omp parallel for + #endif + for (long c = 0; c < channels_col; ++c) { + long w_offset = c % kernel_w; + long h_offset = (c / kernel_w) % kernel_h; + long d_offset = (c / kernel_w / kernel_h) % kernel_d; + long c_im = c / kernel_h / kernel_w / kernel_d; + for (int d = 0; d < depth_col; ++d) { + long d_pad = d * stride_d - pad_d + d_offset * dilation_d; + for (long h = 0; h < height_col; ++h) { + long h_pad = h * stride_h - pad_h + h_offset * dilation_h; + for (long w = 0; w < width_col; ++w) { + long w_pad = w * stride_w - pad_w + w_offset * dilation_w; + if (((unsigned long)h_pad < (unsigned long)height) && + ((unsigned long)w_pad < (unsigned long)width) && + ((unsigned long)d_pad < (unsigned long)depth)) { + data_col[((c * depth_col + d) * height_col + h) * width_col + w] = + data_im[((c_im * depth + d_pad) * height + h_pad) * width + w_pad]; + } else { + data_col[((c * depth_col + d) * height_col + h) * width_col + w] = 0.; + } + } + } + } + } } // Explicit instantiation @@ -65,6 +182,20 @@ template void im2col_cpu(const double* data_im, const int channels, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, double* data_col); +template void im3d2col_cpu(const float* data_im, const int channels, + const int depth, const int height, const int width, + const int kernel_d, const int 
kernel_h, const int kernel_w, + const int pad_d, const int pad_h, const int pad_w, + const int stride_d, const int stride_h, const int stride_w, + const int dilation_d, const int dilation_h, const int dilation_w, + float* data_col); +template void im3d2col_cpu(const double* data_im, const int channels, + const int depth, const int height, const int width, + const int kernel_d, const int kernel_h, const int kernel_w, + const int pad_d, const int pad_h, const int pad_w, + const int stride_d, const int stride_h, const int stride_w, + const int dilation_d, const int dilation_h, const int dilation_w, + double* data_col); template inline void im2col_nd_core_cpu(const Dtype* data_input, const bool im2col, @@ -166,6 +297,7 @@ void col2im_cpu(const Dtype* data_col, const int channels, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, Dtype* data_im) { +#if 0 caffe_set(height * width * channels, Dtype(0), data_im); const int output_h = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; @@ -194,6 +326,93 @@ void col2im_cpu(const Dtype* data_col, const int channels, } } } +#else + int dil_patch_h = (kernel_h - 1) * dilation_h + 1; + int dil_patch_w = (kernel_w - 1) * dilation_w + 1; + int height_col = (height + 2 * pad_h - dil_patch_h) / stride_h + 1; + int width_col = (width + 2 * pad_w - dil_patch_w) / stride_w + 1; + long chunk_len = kernel_h * kernel_w; + + caffe_set(height * width * channels, Dtype(0), data_im); + + #ifdef _OPENMP + #pragma omp parallel for if (channels > 1) + #endif + for (int idx = 0; idx < channels; ++idx) { + for (int inner_idx = 0; inner_idx < chunk_len; ++inner_idx) { + int c = idx * chunk_len + inner_idx; + int w_offset = c % kernel_w; + int h_offset = (c / kernel_w) % kernel_h; + int c_im = c / kernel_h / kernel_w; + + const int hc0 = h_offset * dilation_h - pad_h; + const int wc0 = w_offset * dilation_w - pad_w; + for (int h = 0; h < height_col; ++h) { + for (int w = 0; w < width_col; ++w) { 
+ int h_pad = h * stride_h + hc0; + const int srow_offset = (c_im * height + h_pad) * width; + const int row_offset = (c * height_col + h) * width_col; + int w_pad = w * stride_w + wc0; + if ((((unsigned)h_pad) < ((unsigned)height)) && (((unsigned)w_pad) < ((unsigned)width))) { + data_im[srow_offset + w_pad] += data_col[row_offset + w]; + } + } + } + } + } +#endif +} + +template +void col2im3d_cpu(const Dtype* data_col, const int channels, + const int depth, const int height, const int width, + const int kernel_d, const int kernel_h, const int kernel_w, + const int pad_d, const int pad_h, const int pad_w, + const int stride_d, const int stride_h, const int stride_w, + const int dilation_d, const int dilation_h, const int dilation_w, + Dtype* data_im) { + // Implicit dilated patch + long dil_patch_h = (kernel_h - 1) * dilation_h + 1; + long dil_patch_w = (kernel_w - 1) * dilation_w + 1; + long dil_patch_d = (kernel_d - 1) * dilation_d + 1; + long height_col = (height + 2 * pad_h - dil_patch_h) / stride_h + 1; + long width_col = (width + 2 * pad_w - dil_patch_w) / stride_w + 1; + long depth_col = (depth + 2 * pad_d - dil_patch_d) / stride_d + 1; + long num_kernels = channels * height * width * depth; + long chunk_len = kernel_h * kernel_w * kernel_d; + + caffe_set(num_kernels, Dtype(0), data_im); + + #ifdef _OPENMP + #pragma omp parallel for if (channels > 1) + #endif + for (long c_im = 0; c_im < channels; ++c_im) { + for (long c = c_im * chunk_len; c < chunk_len * (c_im + 1); ++c) { + long w_offset = c % kernel_w; + long h_offset = (c / kernel_w) % kernel_h; + long d_offset = (c / kernel_w / kernel_h) % kernel_d; + + long dc0 = d_offset * dilation_d - pad_d; + long hc0 = h_offset * dilation_h - pad_h; + long wc0 = w_offset * dilation_w - pad_w; + for (long d = 0; d < depth_col; ++d) { + long d_pad = d * stride_d + dc0; + for (long h = 0; h < height_col; ++h) { + long h_pad = h * stride_h + hc0; + for (long w = 0; w < width_col; ++w) { + long w_pad = w * stride_w + 
wc0; + + if (((unsigned long)h_pad < (unsigned long)height) && + ((unsigned long)w_pad < (unsigned long)width) && + ((unsigned long)d_pad < (unsigned long)depth)) { + data_im[((c_im * depth + d_pad) * height + h_pad) * width + w_pad] += + data_col[((c * depth_col + d) * height_col + h) * width_col + w]; + } + } + } + } + } + } } // Explicit instantiation @@ -207,6 +426,20 @@ template void col2im_cpu(const double* data_col, const int channels, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, double* data_im); +template void col2im3d_cpu(const float* data_col, const int channels, + const int depth, const int height, const int width, + const int kernel_d, const int kernel_h, const int kernel_w, + const int pad_d, const int pad_h, const int pad_w, + const int stride_d, const int stride_h, const int stride_w, + const int dilation_d, const int dilation_h, const int dilation_w, + float* data_im); +template void col2im3d_cpu(const double* data_col, const int channels, + const int depth, const int height, const int width, + const int kernel_d, const int kernel_h, const int kernel_w, + const int pad_d, const int pad_h, const int pad_w, + const int stride_d, const int stride_h, const int stride_w, + const int dilation_d, const int dilation_h, const int dilation_w, + double* data_im); template void col2im_nd_cpu(const Dtype* data_col, const int num_spatial_axes, diff --git a/src/caffe/util/im_transforms.cpp b/src/caffe/util/im_transforms.cpp new file mode 100644 index 00000000000..1f20bd2c5cb --- /dev/null +++ b/src/caffe/util/im_transforms.cpp @@ -0,0 +1,770 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. 
+For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifdef USE_OPENCV +#include + +#if CV_VERSION_MAJOR == 3 +#include +#define CV_GRAY2BGR cv::COLOR_GRAY2BGR +#define CV_BGR2GRAY cv::COLOR_BGR2GRAY +#define CV_BGR2YCrCb cv::COLOR_BGR2YCrCb +#define CV_YCrCb2BGR cv::COLOR_YCrCb2BGR +#define CV_IMWRITE_JPEG_QUALITY cv::IMWRITE_JPEG_QUALITY +#define CV_LOAD_IMAGE_COLOR cv::IMREAD_COLOR +#define CV_THRESH_BINARY_INV cv::THRESH_BINARY_INV +#define CV_THRESH_OTSU cv::THRESH_OTSU +#endif +#endif // USE_OPENCV + +#include +#include +#include + +#include "caffe/util/im_transforms.hpp" +#include "caffe/util/math_functions.hpp" + +namespace caffe { + +const float prob_eps = 0.01; + +int roll_weighted_die(const vector& probabilities) { + vector cumulative; + std::partial_sum(&probabilities[0], &probabilities[0] + probabilities.size(), + std::back_inserter(cumulative)); + float val; + caffe_rng_uniform(1, static_cast(0), cumulative.back(), &val); + + // Find the position within the sequence and add 1 + return (std::lower_bound(cumulative.begin(), cumulative.end(), val) + - cumulative.begin()); +} + +void UpdateBBoxByResizePolicy(const ResizeParameter& param, + const int old_width, const int old_height, + NormalizedBBox* bbox) { + float new_height = param.height(); + float new_width = param.width(); + float orig_aspect = static_cast(old_width) / old_height; + float new_aspect = new_width / new_height; + + float x_min = bbox->xmin() * old_width; + float y_min = bbox->ymin() * old_height; + float x_max = bbox->xmax() * old_width; + float y_max = bbox->ymax() * old_height; + float padding; + switch (param.resize_mode()) { + case ResizeParameter_Resize_mode_WARP: + x_min = std::max(0.f, x_min * new_width / old_width); + x_max = std::min(new_width, x_max * new_width / old_width); + y_min = std::max(0.f, y_min * new_height / old_height); + y_max = std::min(new_height, y_max * new_height / old_height); + break; + case ResizeParameter_Resize_mode_FIT_LARGE_SIZE_AND_PAD: + if (orig_aspect > new_aspect) { + padding = (new_height - 
new_width / orig_aspect) / 2; + x_min = std::max(0.f, x_min * new_width / old_width); + x_max = std::min(new_width, x_max * new_width / old_width); + y_min = y_min * (new_height - 2 * padding) / old_height; + y_min = padding + std::max(0.f, y_min); + y_max = y_max * (new_height - 2 * padding) / old_height; + y_max = padding + std::min(new_height, y_max); + } else { + padding = (new_width - orig_aspect * new_height) / 2; + x_min = x_min * (new_width - 2 * padding) / old_width; + x_min = padding + std::max(0.f, x_min); + x_max = x_max * (new_width - 2 * padding) / old_width; + x_max = padding + std::min(new_width, x_max); + y_min = std::max(0.f, y_min * new_height / old_height); + y_max = std::min(new_height, y_max * new_height / old_height); + } + break; + case ResizeParameter_Resize_mode_FIT_SMALL_SIZE: + if (orig_aspect < new_aspect) { + new_height = new_width / orig_aspect; + } else { + new_width = orig_aspect * new_height; + } + x_min = std::max(0.f, x_min * new_width / old_width); + x_max = std::min(new_width, x_max * new_width / old_width); + y_min = std::max(0.f, y_min * new_height / old_height); + y_max = std::min(new_height, y_max * new_height / old_height); + break; + default: + LOG(FATAL) << "Unknown resize mode."; + } + bbox->set_xmin(x_min / new_width); + bbox->set_ymin(y_min / new_height); + bbox->set_xmax(x_max / new_width); + bbox->set_ymax(y_max / new_height); +} + +void InferNewSize(const ResizeParameter& resize_param, + const int old_width, const int old_height, + int* new_width, int* new_height) { + int height = resize_param.height(); + int width = resize_param.width(); + float orig_aspect = static_cast(old_width) / old_height; + float aspect = static_cast(width) / height; + + switch (resize_param.resize_mode()) { + case ResizeParameter_Resize_mode_WARP: + break; + case ResizeParameter_Resize_mode_FIT_LARGE_SIZE_AND_PAD: + break; + case ResizeParameter_Resize_mode_FIT_SMALL_SIZE: + if (orig_aspect < aspect) { + height = static_cast(width / 
orig_aspect); + } else { + width = static_cast(orig_aspect * height); + } + break; + default: + LOG(FATAL) << "Unknown resize mode."; + } + *new_height = height; + *new_width = width; +} + +#ifdef USE_OPENCV +template +bool is_border(const cv::Mat& edge, T color) { + cv::Mat im = edge.clone().reshape(0, 1); + bool res = true; + for (int i = 0; i < im.cols; ++i) { + res &= (color == im.at(0, i)); + } + return res; +} + +template +bool is_border(const cv::Mat& edge, uchar color); + +template +cv::Rect CropMask(const cv::Mat& src, T point, int padding) { + cv::Rect win(0, 0, src.cols, src.rows); + + vector edges; + edges.push_back(cv::Rect(0, 0, src.cols, 1)); + edges.push_back(cv::Rect(src.cols-2, 0, 1, src.rows)); + edges.push_back(cv::Rect(0, src.rows-2, src.cols, 1)); + edges.push_back(cv::Rect(0, 0, 1, src.rows)); + + cv::Mat edge; + int nborder = 0; + T color = src.at(0, 0); + for (int i = 0; i < edges.size(); ++i) { + edge = src(edges[i]); + nborder += is_border(edge, color); + } + + if (nborder < 4) { + return win; + } + + bool next; + do { + edge = src(cv::Rect(win.x, win.height - 2, win.width, 1)); + next = is_border(edge, color); + if (next) { + win.height--; + } + } while (next && (win.height > 0)); + + do { + edge = src(cv::Rect(win.width - 2, win.y, 1, win.height)); + next = is_border(edge, color); + if (next) { + win.width--; + } + } while (next && (win.width > 0)); + + do { + edge = src(cv::Rect(win.x, win.y, win.width, 1)); + next = is_border(edge, color); + if (next) { + win.y++; + win.height--; + } + } while (next && (win.y <= src.rows)); + + do { + edge = src(cv::Rect(win.x, win.y, 1, win.height)); + next = is_border(edge, color); + if (next) { + win.x++; + win.width--; + } + } while (next && (win.x <= src.cols)); + + // add padding + if (win.x > padding) { + win.x -= padding; + } + if (win.y > padding) { + win.y -= padding; + } + if ((win.width + win.x + padding) < src.cols) { + win.width += padding; + } + if ((win.height + win.y + padding) < 
src.rows) { + win.height += padding; + } + + return win; +} + +template +cv::Rect CropMask(const cv::Mat& src, uchar point, int padding); + +cv::Mat colorReduce(const cv::Mat& image, int div) { + cv::Mat out_img; + cv::Mat lookUpTable(1, 256, CV_8U); + uchar* p = lookUpTable.data; + const int div_2 = div / 2; + for ( int i = 0; i < 256; ++i ) { + p[i] = i / div * div + div_2; + } + cv::LUT(image, lookUpTable, out_img); + return out_img; +} + +void fillEdgeImage(const cv::Mat& edgesIn, cv::Mat* filledEdgesOut) { + cv::Mat edgesNeg = edgesIn.clone(); + cv::Scalar val(255, 255, 255); + cv::floodFill(edgesNeg, cv::Point(0, 0), val); + cv::floodFill(edgesNeg, cv::Point(edgesIn.cols - 1, edgesIn.rows - 1), val); + cv::floodFill(edgesNeg, cv::Point(0, edgesIn.rows - 1), val); + cv::floodFill(edgesNeg, cv::Point(edgesIn.cols - 1, 0), val); + cv::bitwise_not(edgesNeg, edgesNeg); + *filledEdgesOut = (edgesNeg | edgesIn); + return; +} + +void CenterObjectAndFillBg(const cv::Mat& in_img, const bool fill_bg, + cv::Mat* out_img) { + cv::Mat mask, crop_mask; + if (in_img.channels() > 1) { + cv::Mat in_img_gray; + cv::cvtColor(in_img, in_img_gray, CV_BGR2GRAY); + cv::threshold(in_img_gray, mask, 0, 255, + CV_THRESH_BINARY_INV | CV_THRESH_OTSU); + } else { + cv::threshold(in_img, mask, 0, 255, + CV_THRESH_BINARY_INV | CV_THRESH_OTSU); + } + cv::Rect crop_rect = CropMask(mask, mask.at(0, 0), 2); + + if (fill_bg) { + cv::Mat temp_img = in_img(crop_rect); + fillEdgeImage(mask, &mask); + crop_mask = mask(crop_rect).clone(); + *out_img = cv::Mat::zeros(crop_rect.size(), in_img.type()); + temp_img.copyTo(*out_img, crop_mask); + } else { + *out_img = in_img(crop_rect).clone(); + } +} + +cv::Mat AspectKeepingResizeAndPad(const cv::Mat& in_img, + const int new_width, const int new_height, + const int pad_type, const cv::Scalar pad_val, + const int interp_mode) { + cv::Mat img_resized; + float orig_aspect = static_cast(in_img.cols) / in_img.rows; + float new_aspect = static_cast(new_width) / 
new_height; + + if (orig_aspect > new_aspect) { + int height = floor(static_cast(new_width) / orig_aspect); + cv::resize(in_img, img_resized, cv::Size(new_width, height), 0, 0, + interp_mode); + cv::Size resSize = img_resized.size(); + int padding = floor((new_height - resSize.height) / 2.0); + cv::copyMakeBorder(img_resized, img_resized, padding, + new_height - resSize.height - padding, 0, 0, + pad_type, pad_val); + } else { + int width = floor(orig_aspect * new_height); + cv::resize(in_img, img_resized, cv::Size(width, new_height), 0, 0, + interp_mode); + cv::Size resSize = img_resized.size(); + int padding = floor((new_width - resSize.width) / 2.0); + cv::copyMakeBorder(img_resized, img_resized, 0, 0, padding, + new_width - resSize.width - padding, + pad_type, pad_val); + } + return img_resized; +} + +cv::Mat AspectKeepingResizeBySmall(const cv::Mat& in_img, + const int new_width, + const int new_height, + const int interp_mode) { + cv::Mat img_resized; + float orig_aspect = static_cast(in_img.cols) / in_img.rows; + float new_aspect = static_cast (new_width) / new_height; + + if (orig_aspect < new_aspect) { + int height = floor(static_cast(new_width) / orig_aspect); + cv::resize(in_img, img_resized, cv::Size(new_width, height), 0, 0, + interp_mode); + } else { + int width = floor(orig_aspect * new_height); + cv::resize(in_img, img_resized, cv::Size(width, new_height), 0, 0, + interp_mode); + } + return img_resized; +} + +void constantNoise(const int n, const vector& val, cv::Mat* image) { + const int cols = image->cols; + const int rows = image->rows; + + if (image->channels() == 1) { + for (int k = 0; k < n; ++k) { + const int i = caffe_rng_rand() % cols; + const int j = caffe_rng_rand() % rows; + uchar* ptr = image->ptr(j); + ptr[i]= val[0]; + } + } else if (image->channels() == 3) { // color image + for (int k = 0; k < n; ++k) { + const int i = caffe_rng_rand() % cols; + const int j = caffe_rng_rand() % rows; + cv::Vec3b* ptr = image->ptr(j); + (ptr[i])[0] = 
val[0]; + (ptr[i])[1] = val[1]; + (ptr[i])[2] = val[2]; + } + } +} + +cv::Mat ApplyResize(const cv::Mat& in_img, const ResizeParameter& param) { + cv::Mat out_img; + + // Reading parameters + const int new_height = param.height(); + const int new_width = param.width(); + + int pad_mode = cv::BORDER_CONSTANT; + switch (param.pad_mode()) { + case ResizeParameter_Pad_mode_CONSTANT: + break; + case ResizeParameter_Pad_mode_MIRRORED: + pad_mode = cv::BORDER_REFLECT101; + break; + case ResizeParameter_Pad_mode_REPEAT_NEAREST: + pad_mode = cv::BORDER_REPLICATE; + break; + default: + LOG(FATAL) << "Unknown pad mode."; + } + + int interp_mode = cv::INTER_LINEAR; + int num_interp_mode = param.interp_mode_size(); + if (num_interp_mode > 0) { + vector probs(num_interp_mode, 1.f / num_interp_mode); + int prob_num = roll_weighted_die(probs); + switch (param.interp_mode(prob_num)) { + case ResizeParameter_Interp_mode_AREA: + interp_mode = cv::INTER_AREA; + break; + case ResizeParameter_Interp_mode_CUBIC: + interp_mode = cv::INTER_CUBIC; + break; + case ResizeParameter_Interp_mode_LINEAR: + interp_mode = cv::INTER_LINEAR; + break; + case ResizeParameter_Interp_mode_NEAREST: + interp_mode = cv::INTER_NEAREST; + break; + case ResizeParameter_Interp_mode_LANCZOS4: + interp_mode = cv::INTER_LANCZOS4; + break; + default: + LOG(FATAL) << "Unknown interp mode."; + } + } + + cv::Scalar pad_val = cv::Scalar(0, 0, 0); + const int img_channels = in_img.channels(); + if (param.pad_value_size() > 0) { + CHECK(param.pad_value_size() == 1 || + param.pad_value_size() == img_channels) << + "Specify either 1 pad_value or as many as channels: " << img_channels; + vector pad_values; + for (int i = 0; i < param.pad_value_size(); ++i) { + pad_values.push_back(param.pad_value(i)); + } + if (img_channels > 1 && param.pad_value_size() == 1) { + // Replicate the pad_value for simplicity + for (int c = 1; c < img_channels; ++c) { + pad_values.push_back(pad_values[0]); + } + } + pad_val = 
cv::Scalar(pad_values[0], pad_values[1], pad_values[2]); + } + + switch (param.resize_mode()) { + case ResizeParameter_Resize_mode_WARP: + cv::resize(in_img, out_img, cv::Size(new_width, new_height), 0, 0, + interp_mode); + break; + case ResizeParameter_Resize_mode_FIT_LARGE_SIZE_AND_PAD: + out_img = AspectKeepingResizeAndPad(in_img, new_width, new_height, + pad_mode, pad_val, interp_mode); + break; + case ResizeParameter_Resize_mode_FIT_SMALL_SIZE: + out_img = AspectKeepingResizeBySmall(in_img, new_width, new_height, + interp_mode); + break; + default: + LOG(INFO) << "Unknown resize mode."; + } + return out_img; +} + +cv::Mat ApplyNoise(const cv::Mat& in_img, const NoiseParameter& param) { + cv::Mat out_img; + + if (param.decolorize()) { + cv::Mat grayscale_img; + cv::cvtColor(in_img, grayscale_img, CV_BGR2GRAY); + cv::cvtColor(grayscale_img, out_img, CV_GRAY2BGR); + } else { + out_img = in_img; + } + + if (param.gauss_blur()) { + cv::GaussianBlur(out_img, out_img, cv::Size(7, 7), 1.5); + } + + if (param.hist_eq()) { + if (out_img.channels() > 1) { + cv::Mat ycrcb_image; + cv::cvtColor(out_img, ycrcb_image, CV_BGR2YCrCb); + // Extract the L channel + vector ycrcb_planes(3); + cv::split(ycrcb_image, ycrcb_planes); + // now we have the L image in ycrcb_planes[0] + cv::Mat dst; + cv::equalizeHist(ycrcb_planes[0], dst); + ycrcb_planes[0] = dst; + cv::merge(ycrcb_planes, ycrcb_image); + // convert back to RGB + cv::cvtColor(ycrcb_image, out_img, CV_YCrCb2BGR); + } else { + cv::Mat temp_img; + cv::equalizeHist(out_img, temp_img); + out_img = temp_img; + } + } + + if (param.clahe()) { + cv::Ptr clahe = cv::createCLAHE(); + clahe->setClipLimit(4); + if (out_img.channels() > 1) { + cv::Mat ycrcb_image; + cv::cvtColor(out_img, ycrcb_image, CV_BGR2YCrCb); + // Extract the L channel + vector ycrcb_planes(3); + cv::split(ycrcb_image, ycrcb_planes); + // now we have the L image in ycrcb_planes[0] + cv::Mat dst; + clahe->apply(ycrcb_planes[0], dst); + ycrcb_planes[0] = dst; + 
cv::merge(ycrcb_planes, ycrcb_image); + // convert back to RGB + cv::cvtColor(ycrcb_image, out_img, CV_YCrCb2BGR); + } else { + cv::Ptr clahe = cv::createCLAHE(); + clahe->setClipLimit(4); + cv::Mat temp_img; + clahe->apply(out_img, temp_img); + out_img = temp_img; + } + } + + if (param.jpeg() > 0) { + vector buf; + vector params; + params.push_back(CV_IMWRITE_JPEG_QUALITY); + params.push_back(param.jpeg()); + cv::imencode(".jpg", out_img, buf, params); + out_img = cv::imdecode(buf, CV_LOAD_IMAGE_COLOR); + } + + if (param.erode()) { + cv::Mat element = cv::getStructuringElement( + 2, cv::Size(3, 3), cv::Point(1, 1)); + cv::erode(out_img, out_img, element); + } + + if (param.posterize()) { + cv::Mat tmp_img; + tmp_img = colorReduce(out_img); + out_img = tmp_img; + } + + if (param.inverse()) { + cv::Mat tmp_img; + cv::bitwise_not(out_img, tmp_img); + out_img = tmp_img; + } + + vector noise_values; + if (param.saltpepper_param().value_size() > 0) { + CHECK(param.saltpepper_param().value_size() == 1 + || param.saltpepper_param().value_size() == out_img.channels()) + << "Specify either 1 pad_value or as many as channels: " + << out_img.channels(); + + for (int i = 0; i < param.saltpepper_param().value_size(); i++) { + noise_values.push_back(uchar(param.saltpepper_param().value(i))); + } + if (out_img.channels() > 1 + && param.saltpepper_param().value_size() == 1) { + // Replicate the pad_value for simplicity + for (int c = 1; c < out_img.channels(); ++c) { + noise_values.push_back(uchar(noise_values[0])); + } + } + } + if (param.saltpepper()) { + const int noise_pixels_num = + floor(param.saltpepper_param().fraction() + * out_img.cols * out_img.rows); + constantNoise(noise_pixels_num, noise_values, &out_img); + } + + if (param.convert_to_hsv()) { + cv::Mat hsv_image; + cv::cvtColor(out_img, hsv_image, CV_BGR2HSV); + out_img = hsv_image; + } + if (param.convert_to_lab()) { + cv::Mat lab_image; + out_img.convertTo(lab_image, CV_32F); + lab_image *= 1.0 / 255; + 
cv::cvtColor(lab_image, out_img, CV_BGR2Lab); + } + return out_img; +} + +void RandomBrightness(const cv::Mat& in_img, cv::Mat* out_img, + const float brightness_prob, const float brightness_delta) { + float prob; + caffe_rng_uniform(1, 0.f, 1.f, &prob); + if (prob < brightness_prob) { + CHECK_GE(brightness_delta, 0) << "brightness_delta must be non-negative."; + float delta; + caffe_rng_uniform(1, -brightness_delta, brightness_delta, &delta); + AdjustBrightness(in_img, delta, out_img); + } else { + *out_img = in_img; + } +} + +void AdjustBrightness(const cv::Mat& in_img, const float delta, + cv::Mat* out_img) { + if (fabs(delta) > 0) { + in_img.convertTo(*out_img, -1, 1, delta); + } else { + *out_img = in_img; + } +} + +void RandomContrast(const cv::Mat& in_img, cv::Mat* out_img, + const float contrast_prob, const float lower, const float upper) { + float prob; + caffe_rng_uniform(1, 0.f, 1.f, &prob); + if (prob < contrast_prob) { + CHECK_GE(upper, lower) << "contrast upper must be >= lower."; + CHECK_GE(lower, 0) << "contrast lower must be non-negative."; + float delta; + caffe_rng_uniform(1, lower, upper, &delta); + AdjustContrast(in_img, delta, out_img); + } else { + *out_img = in_img; + } +} + +void AdjustContrast(const cv::Mat& in_img, const float delta, + cv::Mat* out_img) { + if (fabs(delta - 1.f) > 1e-3) { + in_img.convertTo(*out_img, -1, delta, 0); + } else { + *out_img = in_img; + } +} + +void RandomSaturation(const cv::Mat& in_img, cv::Mat* out_img, + const float saturation_prob, const float lower, const float upper) { + float prob; + caffe_rng_uniform(1, 0.f, 1.f, &prob); + if (prob < saturation_prob) { + CHECK_GE(upper, lower) << "saturation upper must be >= lower."; + CHECK_GE(lower, 0) << "saturation lower must be non-negative."; + float delta; + caffe_rng_uniform(1, lower, upper, &delta); + AdjustSaturation(in_img, delta, out_img); + } else { + *out_img = in_img; + } +} + +void AdjustSaturation(const cv::Mat& in_img, const float delta, + cv::Mat* 
out_img) { + if (fabs(delta - 1.f) != 1e-3) { + // Convert to HSV colorspae. + cv::cvtColor(in_img, *out_img, CV_BGR2HSV); + + // Split the image to 3 channels. + vector channels; + cv::split(*out_img, channels); + + // Adjust the saturation. + channels[1].convertTo(channels[1], -1, delta, 0); + cv::merge(channels, *out_img); + + // Back to BGR colorspace. + cvtColor(*out_img, *out_img, CV_HSV2BGR); + } else { + *out_img = in_img; + } +} + +void RandomHue(const cv::Mat& in_img, cv::Mat* out_img, + const float hue_prob, const float hue_delta) { + float prob; + caffe_rng_uniform(1, 0.f, 1.f, &prob); + if (prob < hue_prob) { + CHECK_GE(hue_delta, 0) << "hue_delta must be non-negative."; + float delta; + caffe_rng_uniform(1, -hue_delta, hue_delta, &delta); + AdjustHue(in_img, delta, out_img); + } else { + *out_img = in_img; + } +} + +void AdjustHue(const cv::Mat& in_img, const float delta, cv::Mat* out_img) { + if (fabs(delta) > 0) { + // Convert to HSV colorspae. + cv::cvtColor(in_img, *out_img, CV_BGR2HSV); + + // Split the image to 3 channels. + vector channels; + cv::split(*out_img, channels); + + // Adjust the hue. + channels[0].convertTo(channels[0], -1, 1, delta); + cv::merge(channels, *out_img); + + // Back to BGR colorspace. + cvtColor(*out_img, *out_img, CV_HSV2BGR); + } else { + *out_img = in_img; + } +} + +void RandomOrderChannels(const cv::Mat& in_img, cv::Mat* out_img, + const float random_order_prob) { + float prob; + caffe_rng_uniform(1, 0.f, 1.f, &prob); + if (prob < random_order_prob) { + // Split the image to 3 channels. + vector channels; + cv::split(*out_img, channels); + CHECK_EQ(channels.size(), 3); + + // Shuffle the channels. 
+ std::random_shuffle(channels.begin(), channels.end()); + cv::merge(channels, *out_img); + } else { + *out_img = in_img; + } +} + +cv::Mat ApplyDistort(const cv::Mat& in_img, const DistortionParameter& param) { + cv::Mat out_img = in_img; + float prob; + caffe_rng_uniform(1, 0.f, 1.f, &prob); + + if (prob > 0.5) { + // Do random brightness distortion. + RandomBrightness(out_img, &out_img, param.brightness_prob(), + param.brightness_delta()); + + // Do random contrast distortion. + RandomContrast(out_img, &out_img, param.contrast_prob(), + param.contrast_lower(), param.contrast_upper()); + + // Do random saturation distortion. + RandomSaturation(out_img, &out_img, param.saturation_prob(), + param.saturation_lower(), param.saturation_upper()); + + // Do random hue distortion. + RandomHue(out_img, &out_img, param.hue_prob(), param.hue_delta()); + + // Do random reordering of the channels. + RandomOrderChannels(out_img, &out_img, param.random_order_prob()); + } else { + // Do random brightness distortion. + RandomBrightness(out_img, &out_img, param.brightness_prob(), + param.brightness_delta()); + + // Do random saturation distortion. + RandomSaturation(out_img, &out_img, param.saturation_prob(), + param.saturation_lower(), param.saturation_upper()); + + // Do random hue distortion. + RandomHue(out_img, &out_img, param.hue_prob(), param.hue_delta()); + + // Do random contrast distortion. + RandomContrast(out_img, &out_img, param.contrast_prob(), + param.contrast_lower(), param.contrast_upper()); + + // Do random reordering of the channels. 
+ RandomOrderChannels(out_img, &out_img, param.random_order_prob()); + } + + return out_img; +} +#endif // USE_OPENCV + +} // namespace caffe diff --git a/src/caffe/util/insert_splits.cpp b/src/caffe/util/insert_splits.cpp index 7a899c69787..63556b49d90 100644 --- a/src/caffe/util/insert_splits.cpp +++ b/src/caffe/util/insert_splits.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include #include diff --git a/src/caffe/util/io.cpp b/src/caffe/util/io.cpp index 835d2d4e4ff..e172923bf65 100644 --- a/src/caffe/util/io.cpp +++ b/src/caffe/util/io.cpp @@ -1,3 +1,47 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include +#include +#include +#include +#include #include #include #include @@ -12,6 +56,7 @@ #include #include // NOLINT(readability/streams) +#include #include #include @@ -23,6 +68,7 @@ const int kProtoReadBytesLimit = INT_MAX; // Max size of 2 GB minus 1 byte. namespace caffe { +using namespace boost::property_tree; // NOLINT(build/namespaces) using google::protobuf::io::FileInputStream; using google::protobuf::io::FileOutputStream; using google::protobuf::io::ZeroCopyInputStream; @@ -105,7 +151,7 @@ cv::Mat ReadImageToCVMat(const string& filename) { // Do the file extension and encoding match? static bool matchExt(const std::string & fn, std::string en) { - size_t p = fn.rfind('.'); + size_t p = fn.rfind('.') + 1; std::string ext = p != fn.npos ? 
fn.substr(p) : fn; std::transform(ext.begin(), ext.end(), ext.begin(), ::tolower); std::transform(en.begin(), en.end(), en.begin(), ::tolower); @@ -119,27 +165,117 @@ static bool matchExt(const std::string & fn, bool ReadImageToDatum(const string& filename, const int label, const int height, const int width, const bool is_color, const std::string & encoding, Datum* datum) { - cv::Mat cv_img = ReadImageToCVMat(filename, height, width, is_color); - if (cv_img.data) { - if (encoding.size()) { - if ( (cv_img.channels() == 3) == is_color && !height && !width && - matchExt(filename, encoding) ) - return ReadFileToDatum(filename, label, datum); - std::vector buf; - cv::imencode("."+encoding, cv_img, buf); - datum->set_data(std::string(reinterpret_cast(&buf[0]), - buf.size())); + if (!encoding.size()) { + cv::Mat cv_img = ReadImageToCVMat(filename, height, width, is_color); + if (cv_img.data) { + CVMatToDatum(cv_img, datum); datum->set_label(label); - datum->set_encoded(true); return true; + } else { + return false; } - CVMatToDatum(cv_img, datum); + } else { + cv::Mat cv_img = cv::imread(filename, -1); + if (!cv_img.data) { + LOG(ERROR) << "Could not open or find file " << filename; + return false; + } + + bool is_img_grayscale = cv_img.channels() == 1; + bool is_img_bgr = cv_img.channels() == 3; + bool is_img_bgra = cv_img.channels() == 4; + + if ( !(is_img_grayscale || is_img_bgr || is_img_bgra) ) { + LOG(ERROR) << "Images with " << cv_img.channels() << + " channels unsupported: " << filename; + return false; + } + + bool need_convert = is_img_bgra || (is_img_grayscale == is_color); + bool need_resize = height > 0 && width > 0; + + if (!need_convert && !need_resize && matchExt(filename, encoding)) { + datum->set_channels(cv_img.channels()); + datum->set_height(cv_img.rows); + datum->set_width(cv_img.cols); + return ReadFileToDatum(filename, label, datum); + } + + if (need_resize) + cv::resize(cv_img, cv_img, cv::Size(width, height)); + + if (need_convert) { + int 
conv_code = + (is_img_grayscale && is_color) ? cv::COLOR_GRAY2BGR + : (is_img_bgr && !is_color) ? cv::COLOR_BGR2GRAY + : (is_img_bgra && is_color) ? cv::COLOR_BGRA2BGR + : (is_img_bgra && !is_color) ? cv::COLOR_BGRA2GRAY + : -1; + + cv::cvtColor(cv_img, cv_img, conv_code); + } + + std::vector buf; + cv::imencode("."+encoding, cv_img, buf); + datum->set_data(std::string(reinterpret_cast(&buf[0]), + buf.size())); datum->set_label(label); + datum->set_encoded(true); return true; - } else { - return false; } } + +void GetImageSize(const string& filename, int* height, int* width) { + cv::Mat cv_img = cv::imread(filename); + if (!cv_img.data) { + LOG(ERROR) << "Could not open or find file " << filename; + return; + } + *height = cv_img.rows; + *width = cv_img.cols; +} + +bool ReadRichImageToAnnotatedDatum(const string& filename, + const string& labelfile, const int height, const int width, + const int min_dim, const int max_dim, const bool is_color, + const string& encoding, const AnnotatedDatum_AnnotationType type, + const string& labeltype, const std::map& name_to_label, + AnnotatedDatum* anno_datum) { + // Read image to datum. 
+ bool status = ReadImageToDatum(filename, -1, height, width, + is_color, encoding, + anno_datum->mutable_datum()); + if (status == false) { + return status; + } + anno_datum->clear_annotation_group(); + if (!boost::filesystem::exists(labelfile)) { + return true; + } + switch (type) { + case AnnotatedDatum_AnnotationType_BBOX: + int ori_height, ori_width; + GetImageSize(filename, &ori_height, &ori_width); + if (labeltype == "xml") { + return ReadXMLToAnnotatedDatum(labelfile, ori_height, ori_width, + name_to_label, anno_datum); + } else if (labeltype == "json") { + return ReadJSONToAnnotatedDatum(labelfile, ori_height, ori_width, + name_to_label, anno_datum); + } else if (labeltype == "txt") { + return ReadTxtToAnnotatedDatum(labelfile, ori_height, ori_width, + anno_datum); + } else { + LOG(FATAL) << "Unknown label file type."; + return false; + } + break; + default: + LOG(FATAL) << "Unknown annotation type."; + return false; + } +} + #endif // USE_OPENCV bool ReadFileToDatum(const string& filename, const int label, @@ -162,6 +298,399 @@ bool ReadFileToDatum(const string& filename, const int label, } } +// Parse VOC/ILSVRC detection annotation. +bool ReadXMLToAnnotatedDatum(const string& labelfile, const int img_height, + const int img_width, const std::map& name_to_label, + AnnotatedDatum* anno_datum) { + ptree pt; + read_xml(labelfile, pt); + + // Parse annotation. 
+ int width = 0, height = 0; + try { + height = pt.get("annotation.size.height"); + width = pt.get("annotation.size.width"); + } catch (const ptree_error &e) { + LOG(WARNING) << "When parsing " << labelfile << ": " << e.what(); + height = img_height; + width = img_width; + } + LOG_IF(WARNING, height != img_height) << labelfile << + " inconsistent image height."; + LOG_IF(WARNING, width != img_width) << labelfile << + " inconsistent image width."; + CHECK(width != 0 && height != 0) << labelfile << + " no valid image width/height."; + int instance_id = 0; + BOOST_FOREACH(ptree::value_type &v1, pt.get_child("annotation")) { + ptree pt1 = v1.second; + if (v1.first == "object") { + Annotation* anno = NULL; + bool difficult = false; + ptree object = v1.second; + BOOST_FOREACH(ptree::value_type &v2, object.get_child("")) { + ptree pt2 = v2.second; + if (v2.first == "name") { + string name = pt2.data(); + if (name_to_label.find(name) == name_to_label.end()) { + LOG(FATAL) << "Unknown name: " << name; + } + int label = name_to_label.find(name)->second; + bool found_group = false; + for (int g = 0; g < anno_datum->annotation_group_size(); ++g) { + AnnotationGroup* anno_group = + anno_datum->mutable_annotation_group(g); + if (label == anno_group->group_label()) { + if (anno_group->annotation_size() == 0) { + instance_id = 0; + } else { + instance_id = anno_group->annotation( + anno_group->annotation_size() - 1).instance_id() + 1; + } + anno = anno_group->add_annotation(); + found_group = true; + } + } + if (!found_group) { + // If there is no such annotation_group, create a new one. 
+ AnnotationGroup* anno_group = anno_datum->add_annotation_group(); + anno_group->set_group_label(label); + anno = anno_group->add_annotation(); + instance_id = 0; + } + anno->set_instance_id(instance_id++); + } else if (v2.first == "difficult") { + difficult = pt2.data() == "1"; + } else if (v2.first == "bndbox") { + int xmin = pt2.get("xmin", 0); + int ymin = pt2.get("ymin", 0); + int xmax = pt2.get("xmax", 0); + int ymax = pt2.get("ymax", 0); + CHECK_NOTNULL(anno); + LOG_IF(WARNING, xmin > width) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, ymin > height) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, xmax > width) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, ymax > height) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, xmin < 0) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, ymin < 0) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, xmax < 0) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, ymax < 0) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, xmin > xmax) << labelfile << + " bounding box irregular."; + LOG_IF(WARNING, ymin > ymax) << labelfile << + " bounding box irregular."; + // Store the normalized bounding box. + NormalizedBBox* bbox = anno->mutable_bbox(); + CHECK_NOTNULL(bbox); + bbox->set_xmin(static_cast(xmin) / width); + bbox->set_ymin(static_cast(ymin) / height); + bbox->set_xmax(static_cast(xmax) / width); + bbox->set_ymax(static_cast(ymax) / height); + bbox->set_difficult(difficult); + } + } + } + } + return true; +} + +// Parse MSCOCO detection annotation. +bool ReadJSONToAnnotatedDatum(const string& labelfile, const int img_height, + const int img_width, const std::map& name_to_label, + AnnotatedDatum* anno_datum) { + ptree pt; + read_json(labelfile, pt); + + // Get image info. 
+ int width = 0, height = 0; + try { + height = pt.get("image.height"); + width = pt.get("image.width"); + } catch (const ptree_error &e) { + LOG(WARNING) << "When parsing " << labelfile << ": " << e.what(); + height = img_height; + width = img_width; + } + LOG_IF(WARNING, height != img_height) << labelfile << + " inconsistent image height."; + LOG_IF(WARNING, width != img_width) << labelfile << + " inconsistent image width."; + CHECK(width != 0 && height != 0) << labelfile << + " no valid image width/height."; + + // Get annotation info. + int instance_id = 0; + BOOST_FOREACH(ptree::value_type& v1, pt.get_child("annotation")) { + Annotation* anno = NULL; + bool iscrowd = false; + ptree object = v1.second; + // Get category_id. + string name = object.get("category_id"); + if (name_to_label.find(name) == name_to_label.end()) { + LOG(FATAL) << "Unknown name: " << name; + } + int label = name_to_label.find(name)->second; + bool found_group = false; + for (int g = 0; g < anno_datum->annotation_group_size(); ++g) { + AnnotationGroup* anno_group = + anno_datum->mutable_annotation_group(g); + if (label == anno_group->group_label()) { + if (anno_group->annotation_size() == 0) { + instance_id = 0; + } else { + instance_id = anno_group->annotation( + anno_group->annotation_size() - 1).instance_id() + 1; + } + anno = anno_group->add_annotation(); + found_group = true; + } + } + if (!found_group) { + // If there is no such annotation_group, create a new one. + AnnotationGroup* anno_group = anno_datum->add_annotation_group(); + anno_group->set_group_label(label); + anno = anno_group->add_annotation(); + instance_id = 0; + } + anno->set_instance_id(instance_id++); + + // Get iscrowd. + iscrowd = object.get("iscrowd", 0); + + // Get bbox. 
+ vector bbox_items; + BOOST_FOREACH(ptree::value_type& v2, object.get_child("bbox")) { + bbox_items.push_back(v2.second.get_value()); + } + CHECK_EQ(bbox_items.size(), 4); + float xmin = bbox_items[0]; + float ymin = bbox_items[1]; + float xmax = bbox_items[0] + bbox_items[2]; + float ymax = bbox_items[1] + bbox_items[3]; + CHECK_NOTNULL(anno); + LOG_IF(WARNING, xmin > width) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, ymin > height) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, xmax > width) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, ymax > height) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, xmin < 0) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, ymin < 0) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, xmax < 0) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, ymax < 0) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, xmin > xmax) << labelfile << + " bounding box irregular."; + LOG_IF(WARNING, ymin > ymax) << labelfile << + " bounding box irregular."; + // Store the normalized bounding box. + NormalizedBBox* bbox = anno->mutable_bbox(); + bbox->set_xmin(xmin / width); + bbox->set_ymin(ymin / height); + bbox->set_xmax(xmax / width); + bbox->set_ymax(ymax / height); + bbox->set_difficult(iscrowd); + } + return true; +} + +// Parse plain txt detection annotation: label_id, xmin, ymin, xmax, ymax. 
+bool ReadTxtToAnnotatedDatum(const string& labelfile, const int height, + const int width, AnnotatedDatum* anno_datum) { + std::ifstream infile(labelfile.c_str()); + if (!infile.good()) { + LOG(INFO) << "Cannot open " << labelfile; + return false; + } + int label; + float xmin, ymin, xmax, ymax; + while (infile >> label >> xmin >> ymin >> xmax >> ymax) { + Annotation* anno = NULL; + int instance_id = 0; + bool found_group = false; + for (int g = 0; g < anno_datum->annotation_group_size(); ++g) { + AnnotationGroup* anno_group = anno_datum->mutable_annotation_group(g); + if (label == anno_group->group_label()) { + if (anno_group->annotation_size() == 0) { + instance_id = 0; + } else { + instance_id = anno_group->annotation( + anno_group->annotation_size() - 1).instance_id() + 1; + } + anno = anno_group->add_annotation(); + found_group = true; + } + } + if (!found_group) { + // If there is no such annotation_group, create a new one. + AnnotationGroup* anno_group = anno_datum->add_annotation_group(); + anno_group->set_group_label(label); + anno = anno_group->add_annotation(); + instance_id = 0; + } + anno->set_instance_id(instance_id++); + LOG_IF(WARNING, xmin > width) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, ymin > height) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, xmax > width) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, ymax > height) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, xmin < 0) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, ymin < 0) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, xmax < 0) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, ymax < 0) << labelfile << + " bounding box exceeds image boundary."; + LOG_IF(WARNING, xmin > xmax) << labelfile << + " bounding box irregular."; + LOG_IF(WARNING, ymin > ymax) << labelfile << + " 
bounding box irregular."; + // Store the normalized bounding box. + NormalizedBBox* bbox = anno->mutable_bbox(); + bbox->set_xmin(xmin / width); + bbox->set_ymin(ymin / height); + bbox->set_xmax(xmax / width); + bbox->set_ymax(ymax / height); + bbox->set_difficult(false); + } + return true; +} + +bool ReadLabelFileToLabelMap(const string& filename, bool include_background, + const string& delimiter, LabelMap* map) { + // cleanup + map->Clear(); + + std::ifstream file(filename.c_str()); + string line; + // Every line can have [1, 3] number of fields. + // The delimiter between fields can be one of " :;". + // The order of the fields are: + // name [label] [display_name] + // ... + int field_size = -1; + int label = 0; + LabelMapItem* map_item; + // Add background (none_of_the_above) class. + if (include_background) { + map_item = map->add_item(); + map_item->set_name("none_of_the_above"); + map_item->set_label(label++); + map_item->set_display_name("background"); + } + while (std::getline(file, line)) { + vector fields; + fields.clear(); + boost::split(fields, line, boost::is_any_of(delimiter)); + if (field_size == -1) { + field_size = fields.size(); + } else { + CHECK_EQ(field_size, fields.size()) + << "Inconsistent number of fields per line."; + } + map_item = map->add_item(); + map_item->set_name(fields[0]); + switch (field_size) { + case 1: + map_item->set_label(label++); + map_item->set_display_name(fields[0]); + break; + case 2: + label = std::atoi(fields[1].c_str()); + map_item->set_label(label); + map_item->set_display_name(fields[0]); + break; + case 3: + label = std::atoi(fields[1].c_str()); + map_item->set_label(label); + map_item->set_display_name(fields[2]); + break; + default: + LOG(FATAL) << "The number of fields should be [1, 3]."; + break; + } + } + return true; +} + +bool MapNameToLabel(const LabelMap& map, const bool strict_check, + std::map* name_to_label) { + // cleanup + name_to_label->clear(); + + for (int i = 0; i < map.item_size(); ++i) { + 
const string& name = map.item(i).name(); + const int label = map.item(i).label(); + if (strict_check) { + if (!name_to_label->insert(std::make_pair(name, label)).second) { + LOG(FATAL) << "There are many duplicates of name: " << name; + return false; + } + } else { + (*name_to_label)[name] = label; + } + } + return true; +} + +bool MapLabelToName(const LabelMap& map, const bool strict_check, + std::map* label_to_name) { + // cleanup + label_to_name->clear(); + + for (int i = 0; i < map.item_size(); ++i) { + const string& name = map.item(i).name(); + const int label = map.item(i).label(); + if (strict_check) { + if (!label_to_name->insert(std::make_pair(label, name)).second) { + LOG(FATAL) << "There are many duplicates of label: " << label; + return false; + } + } else { + (*label_to_name)[label] = name; + } + } + return true; +} + +bool MapLabelToDisplayName(const LabelMap& map, const bool strict_check, + std::map* label_to_display_name) { + // cleanup + label_to_display_name->clear(); + + for (int i = 0; i < map.item_size(); ++i) { + const string& display_name = map.item(i).display_name(); + const int label = map.item(i).label(); + if (strict_check) { + if (!label_to_display_name->insert( + std::make_pair(label, display_name)).second) { + LOG(FATAL) << "There are many duplicates of label: " << label; + return false; + } + } else { + (*label_to_display_name)[label] = display_name; + } + } + return true; +} + #ifdef USE_OPENCV cv::Mat DecodeDatumToCVMatNative(const Datum& datum) { cv::Mat cv_img; @@ -209,6 +738,18 @@ bool DecodeDatum(Datum* datum, bool is_color) { } } +void EncodeCVMatToDatum(const cv::Mat& cv_img, const string& encoding, + Datum* datum) { + std::vector buf; + cv::imencode("."+encoding, cv_img, buf); + datum->set_data(std::string(reinterpret_cast(&buf[0]), + buf.size())); + datum->set_channels(cv_img.channels()); + datum->set_height(cv_img.rows); + datum->set_width(cv_img.cols); + datum->set_encoded(true); +} + void CVMatToDatum(const cv::Mat& 
cv_img, Datum* datum) { CHECK(cv_img.depth() == CV_8U) << "Image data type must be unsigned byte"; datum->set_channels(cv_img.channels()); diff --git a/src/caffe/util/math_functions.cpp b/src/caffe/util/math_functions.cpp index 71c02274a75..3052c09a21d 100644 --- a/src/caffe/util/math_functions.cpp +++ b/src/caffe/util/math_functions.cpp @@ -1,9 +1,57 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#if USE_MKL +#include +#include +#endif + +#ifdef _OPENMP +#include +#endif + #include #include +#include #include #include "caffe/common.hpp" +#include "caffe/util/cpu_info.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/rng.hpp" @@ -46,188 +94,275 @@ void caffe_cpu_gemv(const CBLAS_TRANSPOSE TransA, const int M, } template <> -void caffe_axpy(const int N, const float alpha, const float* X, +void caffe_axpy(const long N, const float alpha, const float* X, float* Y) { cblas_saxpy(N, alpha, X, 1, Y, 1); } template <> -void caffe_axpy(const int N, const double alpha, const double* X, +void caffe_axpy(const long N, const double alpha, const double* X, double* Y) { cblas_daxpy(N, alpha, X, 1, Y, 1); } template -void caffe_set(const int N, const Dtype alpha, Dtype* Y) { - if (alpha == 0) { - memset(Y, 0, sizeof(Dtype) * N); // NOLINT(caffe/alt_fn) +void caffe_set(const size_t N, const Dtype alpha, Dtype* Y) { + // If we are executing parallel region already then do not start another one + // if also number of data to be processed is smaller than arbitrary: + // threashold 12*4 cachelines per thread then no parallelization is to be made + #ifdef _OPENMP + + int nthr = omp_get_max_threads(); + int threshold = nthr * caffe::cpu::OpenMpManager::getProcessorSpeedMHz() / 3; + bool run_parallel = // Do not do parallel computation from non major threads + caffe::cpu::OpenMpManager::isMajorThread(boost::this_thread::get_id()); + + // Note: we 
Assume GPU's CPU path is single threaded + if (omp_in_parallel() == 0) { + // inactive parallel region may mean also batch 1, + // but no new threads are to be created + run_parallel = run_parallel && (Caffe::mode() != Caffe::GPU) && + (N >= threshold); + } else { + // If we are running active parallel region then it is CPU + run_parallel = run_parallel && (N >= threshold); + } + + if (run_parallel) { + #pragma omp parallel for + for (size_t i = 0; i < N; ++i) { + Y[i] = alpha; + } + return; } - for (int i = 0; i < N; ++i) { - Y[i] = alpha; + + #endif + + if (alpha == 0) { + memset(Y, 0, sizeof(Dtype) * N); // NOLINT(caffe/alt_fn) + } else { + std::fill(Y, Y + N, alpha); } } -template void caffe_set(const int N, const int alpha, int* Y); -template void caffe_set(const int N, const float alpha, float* Y); -template void caffe_set(const int N, const double alpha, double* Y); +template void caffe_set(const size_t N, const char alpha, char* Y); +template void caffe_set(const size_t N, const int alpha, int* Y); +template void caffe_set(const size_t N, const float alpha, float* Y); +template void caffe_set(const size_t N, const double alpha, double* Y); +template void caffe_set(const size_t N, const size_t alpha, size_t* Y); template <> -void caffe_add_scalar(const int N, const float alpha, float* Y) { - for (int i = 0; i < N; ++i) { +void caffe_add_scalar(const long N, const float alpha, float* Y) { + for (long i = 0; i < N; ++i) { Y[i] += alpha; } } template <> -void caffe_add_scalar(const int N, const double alpha, double* Y) { - for (int i = 0; i < N; ++i) { +void caffe_add_scalar(const long N, const double alpha, double* Y) { + for (long i = 0; i < N; ++i) { Y[i] += alpha; } } template -void caffe_copy(const int N, const Dtype* X, Dtype* Y) { +void caffe_cpu_copy(const size_t N, const Dtype* X, Dtype* Y) { + if (X == Y) return; + +#ifdef _OPENMP + static const int threshold = omp_get_max_threads() * + caffe::cpu::OpenMpManager::getProcessorSpeedMHz() / 3; + const 
bool run_parallel = + (N >= threshold) && + (omp_in_parallel() == 0) && + (Caffe::mode() != Caffe::GPU) && + (caffe::cpu::OpenMpManager::isMajorThread(boost::this_thread::get_id())); + + if (run_parallel) { + const int block_mem_size = 256 * 1024; + const int block_size = block_mem_size / sizeof(Dtype); + #pragma omp parallel for + for (size_t i = 0; i < N; i += block_size) + memcpy(Y + i, X + i, + (i + block_size > N) ? (N - i) * sizeof(Dtype) : block_mem_size); + + return; + } +#endif + + memcpy(Y, X, sizeof(Dtype) * N); // NOLINT(caffe/alt_fn) +} + +template void caffe_cpu_copy(const size_t N, const int* X, int* Y); +template void caffe_cpu_copy(const size_t N, const unsigned int* X, + unsigned int* Y); +template void caffe_cpu_copy(const size_t N, const float* X, float* Y); +template void caffe_cpu_copy(const size_t N, const double* X, double* Y); + +template +void caffe_copy(const size_t N, const Dtype* X, Dtype* Y) { if (X != Y) { - if (Caffe::mode() == Caffe::GPU) { #ifndef CPU_ONLY + if ( +#ifdef _OPENMP + // If there are more than one openmp thread (we are in active region) + // then checking Caffe::mode can create additional GPU Context + (omp_in_parallel() == 0) && +#endif + (Caffe::mode() == Caffe::GPU)) { // NOLINT_NEXT_LINE(caffe/alt_fn) CUDA_CHECK(cudaMemcpy(Y, X, sizeof(Dtype) * N, cudaMemcpyDefault)); -#else - NO_GPU; -#endif } else { - memcpy(Y, X, sizeof(Dtype) * N); // NOLINT(caffe/alt_fn) +#endif + caffe_cpu_copy(N, X, Y); +#ifndef CPU_ONLY } +#endif } } -template void caffe_copy(const int N, const int* X, int* Y); -template void caffe_copy(const int N, const unsigned int* X, +template void caffe_copy(const size_t N, const bool* X, bool* Y); +template void caffe_copy(const size_t N, const int* X, int* Y); +template void caffe_copy(const size_t N, const unsigned int* X, unsigned int* Y); -template void caffe_copy(const int N, const float* X, float* Y); -template void caffe_copy(const int N, const double* X, double* Y); +template void 
caffe_copy(const size_t N, const float* X, float* Y); +template void caffe_copy(const size_t N, const double* X, double* Y); +template void caffe_copy(const size_t N, const char* X, char* Y); +template void caffe_copy(const size_t N, const size_t* X, size_t* Y); template <> -void caffe_scal(const int N, const float alpha, float *X) { +void caffe_scal(const long N, const float alpha, float *X) { cblas_sscal(N, alpha, X, 1); } template <> -void caffe_scal(const int N, const double alpha, double *X) { +void caffe_scal(const long N, const double alpha, double *X) { cblas_dscal(N, alpha, X, 1); } template <> -void caffe_cpu_axpby(const int N, const float alpha, const float* X, +void caffe_scal(const long N, const size_t alpha, size_t *X) { +} + +template <> +void caffe_cpu_axpby(const long N, const float alpha, const float* X, const float beta, float* Y) { cblas_saxpby(N, alpha, X, 1, beta, Y, 1); } template <> -void caffe_cpu_axpby(const int N, const double alpha, const double* X, +void caffe_cpu_axpby(const long N, const double alpha, const double* X, const double beta, double* Y) { cblas_daxpby(N, alpha, X, 1, beta, Y, 1); } template <> -void caffe_add(const int n, const float* a, const float* b, +void caffe_axpy(const long N, const size_t alpha, const size_t* X, + size_t* Y) { } + +template <> +void caffe_add(const long n, const float* a, const float* b, float* y) { vsAdd(n, a, b, y); } template <> -void caffe_add(const int n, const double* a, const double* b, +void caffe_add(const long n, const double* a, const double* b, double* y) { vdAdd(n, a, b, y); } template <> -void caffe_sub(const int n, const float* a, const float* b, +void caffe_sub(const long n, const float* a, const float* b, float* y) { vsSub(n, a, b, y); } template <> -void caffe_sub(const int n, const double* a, const double* b, +void caffe_sub(const long n, const double* a, const double* b, double* y) { vdSub(n, a, b, y); } template <> -void caffe_mul(const int n, const float* a, const float* b, 
+void caffe_mul(const long n, const float* a, const float* b, float* y) { vsMul(n, a, b, y); } template <> -void caffe_mul(const int n, const double* a, const double* b, +void caffe_mul(const long n, const double* a, const double* b, double* y) { vdMul(n, a, b, y); } template <> -void caffe_div(const int n, const float* a, const float* b, +void caffe_div(const long n, const float* a, const float* b, float* y) { vsDiv(n, a, b, y); } template <> -void caffe_div(const int n, const double* a, const double* b, +void caffe_div(const long n, const double* a, const double* b, double* y) { vdDiv(n, a, b, y); } template <> -void caffe_powx(const int n, const float* a, const float b, +void caffe_powx(const long n, const float* a, const float b, float* y) { vsPowx(n, a, b, y); } template <> -void caffe_powx(const int n, const double* a, const double b, +void caffe_powx(const long n, const double* a, const double b, double* y) { vdPowx(n, a, b, y); } template <> -void caffe_sqr(const int n, const float* a, float* y) { +void caffe_sqr(const long n, const float* a, float* y) { vsSqr(n, a, y); } template <> -void caffe_sqr(const int n, const double* a, double* y) { +void caffe_sqr(const long n, const double* a, double* y) { vdSqr(n, a, y); } template <> -void caffe_exp(const int n, const float* a, float* y) { +void caffe_exp(const long n, const float* a, float* y) { vsExp(n, a, y); } template <> -void caffe_exp(const int n, const double* a, double* y) { +void caffe_exp(const long n, const double* a, double* y) { vdExp(n, a, y); } template <> -void caffe_log(const int n, const float* a, float* y) { +void caffe_log(const long n, const float* a, float* y) { vsLn(n, a, y); } template <> -void caffe_log(const int n, const double* a, double* y) { +void caffe_log(const long n, const double* a, double* y) { vdLn(n, a, y); } template <> -void caffe_abs(const int n, const float* a, float* y) { +void caffe_abs(const long n, const float* a, float* y) { vsAbs(n, a, y); } template <> -void 
caffe_abs(const int n, const double* a, double* y) { +void caffe_abs(const long n, const double* a, double* y) { vdAbs(n, a, y); } unsigned int caffe_rng_rand() { - return (*caffe_rng())(); +#ifdef DETERMINISTIC + return 5153; +#else + return (*caffe_rng())(); +#endif } template @@ -243,28 +378,28 @@ template double caffe_nextafter(const double b); template -void caffe_rng_uniform(const int n, const Dtype a, const Dtype b, Dtype* r) { +void caffe_rng_uniform(const long n, const Dtype a, const Dtype b, Dtype* r) { CHECK_GE(n, 0); CHECK(r); CHECK_LE(a, b); boost::uniform_real random_distribution(a, caffe_nextafter(b)); boost::variate_generator > variate_generator(caffe_rng(), random_distribution); - for (int i = 0; i < n; ++i) { + for (long i = 0; i < n; ++i) { r[i] = variate_generator(); } } template -void caffe_rng_uniform(const int n, const float a, const float b, +void caffe_rng_uniform(const long n, const float a, const float b, float* r); template -void caffe_rng_uniform(const int n, const double a, const double b, +void caffe_rng_uniform(const long n, const double a, const double b, double* r); template -void caffe_rng_gaussian(const int n, const Dtype a, +void caffe_rng_gaussian(const long n, const Dtype a, const Dtype sigma, Dtype* r) { CHECK_GE(n, 0); CHECK(r); @@ -272,101 +407,162 @@ void caffe_rng_gaussian(const int n, const Dtype a, boost::normal_distribution random_distribution(a, sigma); boost::variate_generator > variate_generator(caffe_rng(), random_distribution); - for (int i = 0; i < n; ++i) { + for (long i = 0; i < n; ++i) { r[i] = variate_generator(); } } template -void caffe_rng_gaussian(const int n, const float mu, +void caffe_rng_gaussian(const long n, const float mu, const float sigma, float* r); template -void caffe_rng_gaussian(const int n, const double mu, +void caffe_rng_gaussian(const long n, const double mu, const double sigma, double* r); +#ifdef USE_MKL +static void bernoulli_generate(long n, double p, int* r) { + int seed = 17 + 
caffe_rng_rand() % 4096; + +#ifdef _OPENMP + int nthr = omp_get_max_threads(); + int threshold = nthr * caffe::cpu::OpenMpManager::getProcessorSpeedMHz() / 3; + bool run_parallel = + (Caffe::mode() != Caffe::GPU) && + (omp_in_parallel() == 0) && + (n >= threshold); + if (!run_parallel) nthr = 1; + +# pragma omp parallel num_threads(nthr) + { + const int ithr = omp_get_thread_num(); + const long avg_amount = (n + nthr - 1) / nthr; + const long my_offset = ithr * avg_amount; + const long my_amount = std::min(my_offset + avg_amount, n) - my_offset; +#else + { + const long my_amount = n; + const long my_offset = 0; +#endif + + if (my_amount > 0) { + VSLStreamStatePtr stream; + vslNewStream(&stream, VSL_BRNG_MCG31, seed); + vslSkipAheadStream(stream, my_offset); + viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, + r + my_offset, p); + vslDeleteStream(&stream); + } + } +} +#endif + template -void caffe_rng_bernoulli(const int n, const Dtype p, int* r) { +void caffe_rng_bernoulli(const long n, const Dtype p, int* r) { CHECK_GE(n, 0); CHECK(r); CHECK_GE(p, 0); CHECK_LE(p, 1); +#ifdef USE_MKL + bernoulli_generate(n, p, r); +#else boost::bernoulli_distribution random_distribution(p); boost::variate_generator > variate_generator(caffe_rng(), random_distribution); - for (int i = 0; i < n; ++i) { + for (long i = 0; i < n; ++i) { r[i] = variate_generator(); } +#endif } template -void caffe_rng_bernoulli(const int n, const double p, int* r); +void caffe_rng_bernoulli(const long n, const double p, int* r); template -void caffe_rng_bernoulli(const int n, const float p, int* r); +void caffe_rng_bernoulli(const long n, const float p, int* r); template -void caffe_rng_bernoulli(const int n, const Dtype p, unsigned int* r) { +void caffe_rng_bernoulli(const long n, const Dtype p, unsigned int* r) { CHECK_GE(n, 0); CHECK(r); CHECK_GE(p, 0); CHECK_LE(p, 1); +#ifdef USE_MKL + bernoulli_generate(n, p, reinterpret_cast(r)); +#else boost::bernoulli_distribution 
random_distribution(p); boost::variate_generator > variate_generator(caffe_rng(), random_distribution); - for (int i = 0; i < n; ++i) { + for (long i = 0; i < n; ++i) { r[i] = static_cast(variate_generator()); } +#endif } template -void caffe_rng_bernoulli(const int n, const double p, unsigned int* r); +void caffe_rng_bernoulli(const long n, const double p, unsigned int* r); template -void caffe_rng_bernoulli(const int n, const float p, unsigned int* r); +void caffe_rng_bernoulli(const long n, const float p, unsigned int* r); template <> -float caffe_cpu_strided_dot(const int n, const float* x, const int incx, +float caffe_cpu_strided_dot(const long n, const float* x, const int incx, const float* y, const int incy) { return cblas_sdot(n, x, incx, y, incy); } template <> -double caffe_cpu_strided_dot(const int n, const double* x, +double caffe_cpu_strided_dot(const long n, const double* x, const int incx, const double* y, const int incy) { return cblas_ddot(n, x, incx, y, incy); } +template <> +size_t caffe_cpu_strided_dot(const long n, const size_t* x, + const int incx, const size_t* y, const int incy) { + NOT_IMPLEMENTED; + return 0; +} + template -Dtype caffe_cpu_dot(const int n, const Dtype* x, const Dtype* y) { +Dtype caffe_cpu_dot(const long n, const Dtype* x, const Dtype* y) { return caffe_cpu_strided_dot(n, x, 1, y, 1); } template -float caffe_cpu_dot(const int n, const float* x, const float* y); +float caffe_cpu_dot(const long n, const float* x, const float* y); + +template +double caffe_cpu_dot(const long n, const double* x, const double* y); template -double caffe_cpu_dot(const int n, const double* x, const double* y); +size_t caffe_cpu_dot(const long n, const size_t* x, const size_t* y); template <> -float caffe_cpu_asum(const int n, const float* x) { +float caffe_cpu_asum(const long n, const float* x) { return cblas_sasum(n, x, 1); } template <> -double caffe_cpu_asum(const int n, const double* x) { +double caffe_cpu_asum(const long n, const double* x) 
{ return cblas_dasum(n, x, 1); } template <> -void caffe_cpu_scale(const int n, const float alpha, const float *x, +size_t caffe_cpu_asum(const long n, const size_t* x) { + NOT_IMPLEMENTED; + return 0; +} + +template <> +void caffe_cpu_scale(const long n, const float alpha, const float *x, float* y) { cblas_scopy(n, x, 1, y, 1); cblas_sscal(n, alpha, y, 1); } template <> -void caffe_cpu_scale(const int n, const double alpha, const double *x, +void caffe_cpu_scale(const long n, const double alpha, const double *x, double* y) { cblas_dcopy(n, x, 1, y, 1); cblas_dscal(n, alpha, y, 1); diff --git a/src/caffe/util/math_functions.cu b/src/caffe/util/math_functions.cu index 4c587537435..42f93a27d0b 100644 --- a/src/caffe/util/math_functions.cu +++ b/src/caffe/util/math_functions.cu @@ -74,6 +74,12 @@ void caffe_gpu_axpy(const int N, const double alpha, const double* X, CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1)); } +template <> +void caffe_gpu_axpy(const int N, const size_t alpha, const size_t* X, + size_t* Y) { + NOT_IMPLEMENTED; +} + void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) { if (X != Y) { CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn) @@ -91,6 +97,12 @@ void caffe_gpu_scal(const int N, const double alpha, double *X) { } template <> +void caffe_gpu_scal(const int N, const size_t alpha, size_t *X) { + NOT_IMPLEMENTED; +} + + +template <> void caffe_gpu_axpby(const int N, const float alpha, const float* X, const float beta, float* Y) { caffe_gpu_scal(N, beta, Y); @@ -105,6 +117,12 @@ void caffe_gpu_axpby(const int N, const double alpha, const double* X, } template <> +void caffe_gpu_axpby(const int N, const size_t alpha, const size_t* X, + const size_t beta, size_t* Y) { + NOT_IMPLEMENTED; +} + +template <> void caffe_gpu_dot(const int n, const float* x, const float* y, float* out) { CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out)); @@ -117,6 +135,13 @@ void 
caffe_gpu_dot(const int n, const double* x, const double* y, } template <> +void caffe_gpu_dot(const int n, const size_t* x, const size_t* y, + size_t* out) { + NOT_IMPLEMENTED; +} + + +template <> void caffe_gpu_asum(const int n, const float* x, float* y) { CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y)); } @@ -127,6 +152,11 @@ void caffe_gpu_asum(const int n, const double* x, double* y) { } template <> +void caffe_gpu_asum(const int n, const size_t* x, size_t* y) { + NOT_IMPLEMENTED; +} + +template <> void caffe_gpu_scale(const int n, const float alpha, const float *x, float* y) { CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1)); diff --git a/src/caffe/util/remove_batch_norm.cpp b/src/caffe/util/remove_batch_norm.cpp new file mode 100644 index 00000000000..8c56639fc7c --- /dev/null +++ b/src/caffe/util/remove_batch_norm.cpp @@ -0,0 +1,306 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include "caffe/blob.hpp" +#include "caffe/util/remove_batch_norm.hpp" +#include "caffe/util/math_functions.hpp" +#include "caffe/net.hpp" +namespace caffe { + +template +void RecoverScaleFromBN(const LayerParameter& bn_layer_param, LayerParameter& scale_layer_param, Dtype default_scale_weights, Dtype default_scale_bias) { + CHECK(bn_layer_param.blobs_size() >= 3) << "BatchNorm Layer's blob size must be 3 at least!" << std::endl; + CHECK(bn_layer_param.type().compare("BatchNorm") == 0) << "Scale layer can only be recovered from batch norm layer!" 
<< std::endl; + scale_layer_param.set_name("scale_" + bn_layer_param.name()); + scale_layer_param.set_type("Scale"); + scale_layer_param.set_phase(TEST); + //Assume the scale layer commonly use in-place top/bottom + scale_layer_param.add_top(const_cast(bn_layer_param.top(0))); + scale_layer_param.add_bottom(const_cast(bn_layer_param.top(0))); + int bn_layer_blob_size = bn_layer_param.blobs_size(); + //Pre-assumption: scale layer weight and bias blob have same shape + if (bn_layer_blob_size == 5) { + scale_layer_param.add_blobs()->CopyFrom(bn_layer_param.blobs(3)); + scale_layer_param.add_blobs()->CopyFrom(bn_layer_param.blobs(4)); + } else if (bn_layer_blob_size == 4) { + scale_layer_param.add_blobs()->CopyFrom(bn_layer_param.blobs(3)); + Blob scale_bias_blob, scale_weight_blob; + scale_weight_blob.FromProto(scale_layer_param.blobs(0)); + scale_bias_blob.ReshapeLike(scale_weight_blob); + caffe_set(scale_bias_blob.count(), default_scale_bias, scale_bias_blob.mutable_cpu_data()); + BlobProto scale_bias_blob_proto; + scale_bias_blob.ToProto(&scale_bias_blob_proto, false); + scale_layer_param.add_blobs()->CopyFrom(scale_bias_blob_proto); + } else { + Blob scale_weight_blob, scale_bias_blob, bn_mean_blob; + BlobProto scale_weight_blob_proto, scale_bias_blob_proto; + bn_mean_blob.FromProto(bn_layer_param.blobs(0)); + vector scale_shape_vec; + scale_shape_vec.resize(1); + scale_shape_vec[0] = bn_mean_blob.shape(0); + scale_weight_blob.Reshape(scale_shape_vec); + scale_bias_blob.Reshape(scale_shape_vec); + caffe_set(scale_weight_blob.count(), default_scale_weights, scale_weight_blob.mutable_cpu_data()); + caffe_set(scale_bias_blob.count(), default_scale_bias, scale_bias_blob.mutable_cpu_data()); + scale_weight_blob.ToProto(&scale_weight_blob_proto, false); + scale_bias_blob.ToProto(&scale_bias_blob_proto, false); + scale_layer_param.add_blobs()->CopyFrom(scale_weight_blob_proto); + scale_layer_param.add_blobs()->CopyFrom(scale_bias_blob_proto); + } +} +void 
MergeLayer(LayerParameter &layer1, + const LayerParameter &layer2) +{ + string &layer1_top_blob_name = const_cast(layer1.top(0)); + const string &layer2_top_blob_name = layer2.top(0); + + // Replace Conv top name with Scale top name + layer1_top_blob_name.resize(layer2_top_blob_name.size()); + layer1_top_blob_name.replace(0, layer2_top_blob_name.size(), layer2_top_blob_name); + return; +} + + +template +void AdjustConvLayer(LayerParameter &conv_layer, + const LayerParameter &batch_norm_layer, + const LayerParameter &scale_layer, bool is_net_init) { + if (is_net_init) { + if (!conv_layer.convolution_param().bias_term()) { + //We will merge batch norm and scale layer to con layer, if conv layer doesn't use bias, adjust it! + conv_layer.mutable_convolution_param()->set_bias_term(true); + } + } else { + Blob conv_weight_blob, conv_bias_blob; + Blob scale_weight_blob, scale_bias_blob; + Blob bn_mean_blob, bn_variance_blob, bn_scale_factor_blob; + Dtype bn_scale_factor; + Dtype bn_eps = batch_norm_layer.batch_norm_param().eps(); + + conv_weight_blob.FromProto(conv_layer.blobs(0), true); + if (!conv_layer.convolution_param().bias_term()) { + conv_layer.mutable_convolution_param()->set_bias_term(true); + vector conv_bias_shape_vec; + conv_bias_shape_vec.resize(1); + conv_bias_shape_vec[0] = conv_weight_blob.shape(0); + conv_bias_blob.Reshape(conv_bias_shape_vec); + caffe_set(conv_bias_blob.count(), (Dtype)0, conv_bias_blob.mutable_cpu_data()); + BlobProto conv_bias_blob_proto; + conv_bias_blob.ToProto(&conv_bias_blob_proto, false); + conv_layer.add_blobs()->CopyFrom(conv_bias_blob_proto); + } else { + conv_bias_blob.FromProto(conv_layer.blobs(1), true); + } + + //We assume scale layer use weight & bias, but is bias necessary? Need confirm! 
+ scale_weight_blob.FromProto(scale_layer.blobs(0), true); + scale_bias_blob.FromProto(scale_layer.blobs(1), true); + bn_mean_blob.FromProto(batch_norm_layer.blobs(0), true); + bn_variance_blob.FromProto(batch_norm_layer.blobs(1), true); + bn_scale_factor_blob.FromProto(batch_norm_layer.blobs(2), true); + bn_scale_factor = bn_scale_factor_blob.cpu_data()[0] == 0 ? 1 : (1 / bn_scale_factor_blob.cpu_data()[0]); + CHECK_EQ(bn_variance_blob.shape(0), scale_weight_blob.shape(0)); + CHECK_EQ(conv_weight_blob.shape(0), scale_weight_blob.shape(0)); + CHECK_EQ(scale_weight_blob.count(), bn_variance_blob.count()); + int alpha_count = scale_weight_blob.count(); + Dtype alpha, scale_weight_val, bn_variance_val; + Dtype * conv_weight_buf = conv_weight_blob.mutable_cpu_data(); + Dtype * conv_bias_buf = conv_bias_blob.mutable_cpu_data(); + const Dtype * scale_bias_buf = scale_bias_blob.cpu_data(); + const Dtype * bn_mean_buf = bn_mean_blob.cpu_data(); + int weight_count = conv_weight_blob.count() / conv_weight_blob.shape(0); + for (int i = 0; i < alpha_count; i++) { + scale_weight_val = scale_weight_blob.cpu_data()[i]; + bn_variance_val = bn_variance_blob.cpu_data()[i]; + alpha = scale_weight_val / (std::sqrt(bn_variance_val * bn_scale_factor + bn_eps)); + conv_bias_buf[i] = conv_bias_buf[i] * alpha + (scale_bias_buf[i] -(bn_mean_buf[i] * bn_scale_factor * alpha)); + Dtype * weight_area = conv_weight_buf + i * weight_count; + caffe_scal(weight_count, alpha, weight_area); + + } + BlobProto *updated_weight_blob_proto = conv_layer.mutable_blobs(0); + BlobProto *updated_bias_blob_proto = conv_layer.mutable_blobs(1); + conv_weight_blob.ToProto(updated_weight_blob_proto); + conv_bias_blob.ToProto(updated_bias_blob_proto); + } + +} + + +template +void RecoverBNScaleMergedNet(NetParameter * net_param, NetParameter* recovered_net_param) { + CHECK(net_param != NULL && recovered_net_param != NULL) << "Can NOT recover a NULL network!" 
<< std::endl; + int kept_bn_layers_num = net_param->compile_net_state().kept_bn_layers_size(); + int idx; + bool in_kept_list = false; + for (int i = 0; i < net_param->layer_size(); ++i) { + const LayerParameter layer_param = net_param->layer(i); + recovered_net_param->add_layer()->CopyFrom(layer_param); + + if (layer_param.type().compare("BatchNorm") == 0 && layer_param.blobs_size() >= 3) { + for (idx = 0; idx < kept_bn_layers_num; ++idx) { + if (layer_param.name().compare(net_param->compile_net_state().kept_bn_layers(idx)) == 0) { + in_kept_list = true; + break; + } + } + + if (in_kept_list) continue; + shared_ptr scale_layer_param(new LayerParameter()); + RecoverScaleFromBN(layer_param, *scale_layer_param, (Dtype)1, (Dtype)0); + recovered_net_param->add_layer()->CopyFrom(*scale_layer_param); + } + } +} + +template +void RemoveBNScale(const NetParameter& param, NetParameter* param_compiled) { + + // - In TEST Phase, if we detect sequential layers conv->batch norm ->scale, + // We will merge batch norm and scale layer into conv layer. + if(param.state().phase() != TEST) { + param_compiled->CopyFrom(param); + param_compiled->mutable_compile_net_state()->set_bn_scale_remove(false); + return ; + } + + bool bn_scale_remove = false; + bool is_net_init = param.compile_net_state().is_init(); + std::set layers_to_drop; + for (int i = 0; i < param.layer_size(); ++i) { + LayerParameter *layer_param = (const_cast(param)).mutable_layer(i); + bool layer_included = true; + bool bn_use_global_stats_set = true; + if (layer_param->type().compare("Convolution") == 0) { + std::vector child_layers_params; + Net::GetBlobConsumers(child_layers_params, layer_param->top(0), param, i + 1 < param.layer_size() ? i + 1 : i); + const LayerParameter &child_layer_param = child_layers_params.size() > 0 ? 
*(child_layers_params[0]) : *layer_param; + // check whether child layer is BatchNorm + if (child_layer_param.type().compare("BatchNorm") == 0) { + BatchNormParameter bn_param = child_layer_param.batch_norm_param(); + if (is_net_init) { + //Testing Network init process + bool bn_use_global_stats = true; + if (bn_param.has_use_global_stats()) { + bn_use_global_stats = bn_param.use_global_stats(); + } + if (!bn_use_global_stats) { + //This bn layer's use_global_stats is set manually! Don't remove it. + //remained_bn_layer_names.push_back(child_layer_param.name()); + param_compiled->mutable_compile_net_state()->add_kept_bn_layers(child_layer_param.name()); + bn_use_global_stats_set = false; + } + } else { + int kept_bn_layers_num = param.compile_net_state().kept_bn_layers_size(); + bool in_kept_list = false; + for (int idx = 0; idx < kept_bn_layers_num; ++idx) { + if (child_layer_param.name().compare(param.compile_net_state().kept_bn_layers(idx)) == 0) { + in_kept_list = true; + break; + } + } + if (in_kept_list) { + bn_use_global_stats_set = false; + } + } + + if (!bn_use_global_stats_set) { + //Even in caffe TEST phase, current batch norm layer has set use_global_stats = false in protxt file, so we won't + //merge this layer into convolution layer. + param_compiled->add_layer()->CopyFrom(*layer_param); + continue; + } + std::vector grandchild_layers_params; + Net::GetBlobConsumers(grandchild_layers_params, child_layer_param.top(0), param, i + 2 < param.layer_size() ? i + 2 : i); + const LayerParameter &grandchild_layer_param = (grandchild_layers_params.size() > 0) ? 
*(grandchild_layers_params[0]) : child_layer_param; + if (grandchild_layer_param.type().compare("Scale") == 0) { + MergeLayer(*layer_param, grandchild_layer_param); + AdjustConvLayer(*layer_param, child_layer_param, grandchild_layer_param, is_net_init); + if (bn_scale_remove == false) bn_scale_remove = true; + layers_to_drop.insert(child_layer_param.name()); + layers_to_drop.insert(grandchild_layer_param.name()); + } else if (&child_layer_param != &grandchild_layer_param) { + //In fact, conv-->batchnorm can also be optimized. In such case, we check the blob size of batch norm layer + //if is 3, it means current net hasn't used scale layer, this is equivalent to scale layer with all 1 weights and 0 bias + //if is 4 or 5, it means intel caffe compilation rule 1 works here, we can recover the scale layer from batch norm layer + MergeLayer(*layer_param, child_layer_param); + if (!is_net_init) { + shared_ptr scale_layer_param(new LayerParameter()); + RecoverScaleFromBN(child_layer_param, *scale_layer_param, (Dtype)1, (Dtype)0); + AdjustConvLayer(*layer_param, child_layer_param, *scale_layer_param, is_net_init); + } else { + AdjustConvLayer(*layer_param, child_layer_param, grandchild_layer_param, true); + } + if (bn_scale_remove == false) bn_scale_remove = true; + layers_to_drop.insert(child_layer_param.name()); + } + } + } + if (layers_to_drop.find(layer_param->name()) != layers_to_drop.end()) { + LOG_IF(INFO, Caffe::root_solver()) << "Dropped Layer: "<< layer_param->name() << std::endl; + layer_included = false; + // Remove dropped layer from the list of layers to be dropped + layers_to_drop.erase(layers_to_drop.find(layer_param->name())); + } + if (layer_included) { + if (layer_param->type().compare("BatchNorm") == 0) { + param_compiled->mutable_compile_net_state()->add_kept_bn_layers(layer_param->name()); + } + param_compiled->add_layer()->CopyFrom(*layer_param); + } + } + + param_compiled->mutable_compile_net_state()->set_bn_scale_remove(bn_scale_remove); +} + 
+template void RecoverScaleFromBN(const LayerParameter& bn_layer_param, LayerParameter& scale_layer_param, float default_scale_weights, float default_scale_bias); +template void RecoverScaleFromBN(const LayerParameter& bn_layer_param, LayerParameter& scale_layer_param, double default_scale_weights, double default_scale_bias); +template void AdjustConvLayer(LayerParameter &conv_layer, + const LayerParameter &batch_norm_layer, + const LayerParameter &scale_layer, bool is_net_init); + +template void AdjustConvLayer(LayerParameter &conv_layer, + const LayerParameter &batch_norm_layer, + const LayerParameter &scale_layer, bool is_net_init); + +template void RecoverBNScaleMergedNet(NetParameter * net_param, NetParameter* recovered_net_param); +template void RecoverBNScaleMergedNet(NetParameter * net_param, NetParameter* recovered_net_param); +template void RemoveBNScale(const NetParameter& param, NetParameter* param_compiled); +template void RemoveBNScale(const NetParameter& param, NetParameter* param_compiled); +} diff --git a/src/caffe/util/sampler.cpp b/src/caffe/util/sampler.cpp new file mode 100644 index 00000000000..f47a78dd86b --- /dev/null +++ b/src/caffe/util/sampler.cpp @@ -0,0 +1,200 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include + +#include "caffe/util/bbox_util.hpp" +#include "caffe/util/sampler.hpp" + +namespace caffe { + +void GroupObjectBBoxes(const AnnotatedDatum& anno_datum, + vector* object_bboxes) { + object_bboxes->clear(); + for (int i = 0; i < anno_datum.annotation_group_size(); ++i) { + const AnnotationGroup& anno_group = anno_datum.annotation_group(i); + for (int j = 0; j < anno_group.annotation_size(); ++j) { + const Annotation& anno = anno_group.annotation(j); + object_bboxes->push_back(anno.bbox()); + } + } +} + +bool SatisfySampleConstraint(const NormalizedBBox& sampled_bbox, + const vector& object_bboxes, + const SampleConstraint& sample_constraint) { + bool has_jaccard_overlap = sample_constraint.has_min_jaccard_overlap() || + sample_constraint.has_max_jaccard_overlap(); + bool has_sample_coverage = sample_constraint.has_min_sample_coverage() || + sample_constraint.has_max_sample_coverage(); + bool has_object_coverage = sample_constraint.has_min_object_coverage() || + sample_constraint.has_max_object_coverage(); + bool satisfy = !has_jaccard_overlap && !has_sample_coverage && + !has_object_coverage; + if (satisfy) { + // By default, the sampled_bbox is "positive" if no constraints are defined. + return true; + } + // Check constraints. + bool found = false; + for (int i = 0; i < object_bboxes.size(); ++i) { + const NormalizedBBox& object_bbox = object_bboxes[i]; + // Test jaccard overlap. + if (has_jaccard_overlap) { + const float jaccard_overlap = JaccardOverlap(sampled_bbox, object_bbox); + if (sample_constraint.has_min_jaccard_overlap() && + jaccard_overlap < sample_constraint.min_jaccard_overlap()) { + continue; + } + if (sample_constraint.has_max_jaccard_overlap() && + jaccard_overlap > sample_constraint.max_jaccard_overlap()) { + continue; + } + found = true; + } + // Test sample coverage. 
+ if (has_sample_coverage) { + const float sample_coverage = BBoxCoverage(sampled_bbox, object_bbox); + if (sample_constraint.has_min_sample_coverage() && + sample_coverage < sample_constraint.min_sample_coverage()) { + continue; + } + if (sample_constraint.has_max_sample_coverage() && + sample_coverage > sample_constraint.max_sample_coverage()) { + continue; + } + found = true; + } + // Test object coverage. + if (has_object_coverage) { + const float object_coverage = BBoxCoverage(object_bbox, sampled_bbox); + if (sample_constraint.has_min_object_coverage() && + object_coverage < sample_constraint.min_object_coverage()) { + continue; + } + if (sample_constraint.has_max_object_coverage() && + object_coverage > sample_constraint.max_object_coverage()) { + continue; + } + found = true; + } + if (found) { + return true; + } + } + return found; +} + +void SampleBBox(const Sampler& sampler, NormalizedBBox* sampled_bbox) { + // Get random scale. + CHECK_GE(sampler.max_scale(), sampler.min_scale()); + CHECK_GT(sampler.min_scale(), 0.); + CHECK_LE(sampler.max_scale(), 1.); + float scale; + caffe_rng_uniform(1, sampler.min_scale(), sampler.max_scale(), &scale); + + // Get random aspect ratio. + CHECK_GE(sampler.max_aspect_ratio(), sampler.min_aspect_ratio()); + CHECK_GT(sampler.min_aspect_ratio(), 0.); + CHECK_LT(sampler.max_aspect_ratio(), FLT_MAX); + float aspect_ratio; + float min_aspect_ratio = std::max(sampler.min_aspect_ratio(), + std::pow(scale, 2.)); + float max_aspect_ratio = std::min(sampler.max_aspect_ratio(), + 1 / std::pow(scale, 2.)); + caffe_rng_uniform(1, min_aspect_ratio, max_aspect_ratio, &aspect_ratio); + + // Figure out bbox dimension. + float bbox_width = scale * sqrt(aspect_ratio); + float bbox_height = scale / sqrt(aspect_ratio); + + // Figure out top left coordinates. 
+ float w_off, h_off; + caffe_rng_uniform(1, 0.f, 1 - bbox_width, &w_off); + caffe_rng_uniform(1, 0.f, 1 - bbox_height, &h_off); + + sampled_bbox->set_xmin(w_off); + sampled_bbox->set_ymin(h_off); + sampled_bbox->set_xmax(w_off + bbox_width); + sampled_bbox->set_ymax(h_off + bbox_height); +} + +void GenerateSamples(const NormalizedBBox& source_bbox, + const vector& object_bboxes, + const BatchSampler& batch_sampler, + vector* sampled_bboxes) { + int found = 0; + for (int i = 0; i < batch_sampler.max_trials(); ++i) { + if (batch_sampler.has_max_sample() && + found >= batch_sampler.max_sample()) { + break; + } + // Generate sampled_bbox in the normalized space [0, 1]. + NormalizedBBox sampled_bbox; + SampleBBox(batch_sampler.sampler(), &sampled_bbox); + // Transform the sampled_bbox w.r.t. source_bbox. + LocateBBox(source_bbox, sampled_bbox, &sampled_bbox); + // Determine if the sampled bbox is positive or negative by the constraint. + if (SatisfySampleConstraint(sampled_bbox, object_bboxes, + batch_sampler.sample_constraint())) { + ++found; + sampled_bboxes->push_back(sampled_bbox); + } + } +} + +void GenerateBatchSamples(const AnnotatedDatum& anno_datum, + const vector& batch_samplers, + vector* sampled_bboxes) { + sampled_bboxes->clear(); + vector object_bboxes; + GroupObjectBBoxes(anno_datum, &object_bboxes); + for (int i = 0; i < batch_samplers.size(); ++i) { + if (batch_samplers[i].use_original_image()) { + NormalizedBBox unit_bbox; + unit_bbox.set_xmin(0); + unit_bbox.set_ymin(0); + unit_bbox.set_xmax(1); + unit_bbox.set_ymax(1); + GenerateSamples(unit_bbox, object_bboxes, batch_samplers[i], + sampled_bboxes); + } + } +} + +} // namespace caffe diff --git a/src/caffe/util/signal_handler.cpp b/src/caffe/util/signal_handler.cpp index 5d764ec524f..8be5fa79a55 100644 --- a/src/caffe/util/signal_handler.cpp +++ b/src/caffe/util/signal_handler.cpp @@ -1,3 +1,40 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the 
University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + #include #include diff --git a/src/caffe/util/upgrade_proto.cpp b/src/caffe/util/upgrade_proto.cpp index 9e186915b43..05a743bc50f 100644 --- a/src/caffe/util/upgrade_proto.cpp +++ b/src/caffe/util/upgrade_proto.cpp @@ -1,7 +1,46 @@ +/* +All modification made by Intel Corporation: © 2016 Intel Corporation + +All contributions by the University of California: +Copyright (c) 2014, 2015, The Regents of the University of California (Regents) +All rights reserved. + +All other contributions: +Copyright (c) 2014, 2015, the respective contributors +All rights reserved. +For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Intel Corporation nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + #include #include #include +#include + #include #include @@ -10,6 +49,10 @@ #include "caffe/util/io.hpp" #include "caffe/util/upgrade_proto.hpp" +#ifdef USE_MLSL +#include "caffe/multinode/mlsl.hpp" +#endif /* USE_MLSL */ + namespace caffe { bool NetNeedsUpgrade(const NetParameter& net_param) { @@ -78,6 +121,9 @@ void ReadNetParamsFromTextFileOrDie(const string& param_file, NetParameter* param) { CHECK(ReadProtoFromTextFile(param_file, param)) << "Failed to parse NetParameter file: " << param_file; +#ifdef USE_MLSL + ReplaceMultinodeNetParams(param); +#endif UpgradeNetAsNeeded(param_file, param); } @@ -1065,4 +1111,54 @@ void ReadSolverParamsFromTextFileOrDie(const string& param_file, UpgradeSolverAsNeeded(param_file, param); } +#ifdef USE_MLSL +static std::string getNodeId() { + return std::to_string(mn::get_node_id()); +} + +static std::string getNumNodes() { + return std::to_string(mn::get_nodes_count()); +} + +void ReplaceMultinodeSolverParams(SolverParameter* param) { + std::string node_id = getNodeId(); + std::string num_nodes = getNumNodes(); + + if (param->has_train_net()) { + std::string* train_net = param->mutable_train_net(); + if (train_net) { + boost::replace_all(*train_net, "%#", node_id); + boost::replace_all(*train_net, "%*", num_nodes); + } + } + + if (param->has_snapshot_prefix()) { + std::string* prefix = param->mutable_snapshot_prefix(); + if (prefix) { + boost::replace_all(*prefix, "%#", node_id); + boost::replace_all(*prefix, 
"%*", num_nodes); + } + } +} + +void ReplaceMultinodeNetParams(NetParameter* param) { + for (int i = 0; i < param->layer_size(); ++i) { + std::string* source = nullptr; + + if (param->layer(i).has_data_param()) { + source = param->mutable_layer(i)->mutable_data_param()-> + mutable_source(); + } else if (param->layer(i).has_image_data_param()) { + source = param->mutable_layer(i)->mutable_image_data_param()-> + mutable_source(); + } + + if (source) { + boost::replace_all(*source, "%#", getNodeId()); + boost::replace_all(*source, "%*", getNumNodes()); + } + } +} +#endif + } // namespace caffe diff --git a/src/gmock/CHANGES b/src/gmock/CHANGES new file mode 100644 index 00000000000..d6f2f760e34 --- /dev/null +++ b/src/gmock/CHANGES @@ -0,0 +1,126 @@ +Changes for 1.7.0: + +* All new improvements in Google Test 1.7.0. +* New feature: matchers DoubleNear(), FloatNear(), + NanSensitiveDoubleNear(), NanSensitiveFloatNear(), + UnorderedElementsAre(), UnorderedElementsAreArray(), WhenSorted(), + WhenSortedBy(), IsEmpty(), and SizeIs(). +* Improvement: Google Mock can now be built as a DLL. +* Improvement: when compiled by a C++11 compiler, matchers AllOf() + and AnyOf() can accept an arbitrary number of matchers. +* Improvement: when compiled by a C++11 compiler, matchers + ElementsAreArray() can accept an initializer list. +* Improvement: when exceptions are enabled, a mock method with no + default action now throws instead crashing the test. +* Improvement: added class testing::StringMatchResultListener to aid + definition of composite matchers. +* Improvement: function return types used in MOCK_METHOD*() macros can + now contain unprotected commas. +* Improvement (potentially breaking): EXPECT_THAT() and ASSERT_THAT() + are now more strict in ensuring that the value type and the matcher + type are compatible, catching potential bugs in tests. +* Improvement: Pointee() now works on an optional. 
+* Improvement: the ElementsAreArray() matcher can now take a vector or + iterator range as input, and makes a copy of its input elements + before the conversion to a Matcher. +* Improvement: the Google Mock Generator can now generate mocks for + some class templates. +* Bug fix: mock object destruction triggerred by another mock object's + destruction no longer hangs. +* Improvement: Google Mock Doctor works better with newer Clang and + GCC now. +* Compatibility fixes. +* Bug/warning fixes. + +Changes for 1.6.0: + +* Compilation is much faster and uses much less memory, especially + when the constructor and destructor of a mock class are moved out of + the class body. +* New matchers: Pointwise(), Each(). +* New actions: ReturnPointee() and ReturnRefOfCopy(). +* CMake support. +* Project files for Visual Studio 2010. +* AllOf() and AnyOf() can handle up-to 10 arguments now. +* Google Mock doctor understands Clang error messages now. +* SetArgPointee<> now accepts string literals. +* gmock_gen.py handles storage specifier macros and template return + types now. +* Compatibility fixes. +* Bug fixes and implementation clean-ups. +* Potentially incompatible changes: disables the harmful 'make install' + command in autotools. + +Potentially breaking changes: + +* The description string for MATCHER*() changes from Python-style + interpolation to an ordinary C++ string expression. +* SetArgumentPointee is deprecated in favor of SetArgPointee. +* Some non-essential project files for Visual Studio 2005 are removed. + +Changes for 1.5.0: + + * New feature: Google Mock can be safely used in multi-threaded tests + on platforms having pthreads. + * New feature: function for printing a value of arbitrary type. + * New feature: function ExplainMatchResult() for easy definition of + composite matchers. + * The new matcher API lets user-defined matchers generate custom + explanations more directly and efficiently. + * Better failure messages all around. 
+ * NotNull() and IsNull() now work with smart pointers. + * Field() and Property() now work when the matcher argument is a pointer + passed by reference. + * Regular expression matchers on all platforms. + * Added GCC 4.0 support for Google Mock Doctor. + * Added gmock_all_test.cc for compiling most Google Mock tests + in a single file. + * Significantly cleaned up compiler warnings. + * Bug fixes, better test coverage, and implementation clean-ups. + + Potentially breaking changes: + + * Custom matchers defined using MatcherInterface or MakePolymorphicMatcher() + need to be updated after upgrading to Google Mock 1.5.0; matchers defined + using MATCHER or MATCHER_P* aren't affected. + * Dropped support for 'make install'. + +Changes for 1.4.0 (we skipped 1.2.* and 1.3.* to match the version of +Google Test): + + * Works in more environments: Symbian and minGW, Visual C++ 7.1. + * Lighter weight: comes with our own implementation of TR1 tuple (no + more dependency on Boost!). + * New feature: --gmock_catch_leaked_mocks for detecting leaked mocks. + * New feature: ACTION_TEMPLATE for defining templatized actions. + * New feature: the .After() clause for specifying expectation order. + * New feature: the .With() clause for for specifying inter-argument + constraints. + * New feature: actions ReturnArg(), ReturnNew(...), and + DeleteArg(). + * New feature: matchers Key(), Pair(), Args<...>(), AllArgs(), IsNull(), + and Contains(). + * New feature: utility class MockFunction, useful for checkpoints, etc. + * New feature: functions Value(x, m) and SafeMatcherCast(m). + * New feature: copying a mock object is rejected at compile time. + * New feature: a script for fusing all Google Mock and Google Test + source files for easy deployment. + * Improved the Google Mock doctor to diagnose more diseases. + * Improved the Google Mock generator script. + * Compatibility fixes for Mac OS X and gcc. + * Bug fixes and implementation clean-ups. 
+ +Changes for 1.1.0: + + * New feature: ability to use Google Mock with any testing framework. + * New feature: macros for easily defining new matchers + * New feature: macros for easily defining new actions. + * New feature: more container matchers. + * New feature: actions for accessing function arguments and throwing + exceptions. + * Improved the Google Mock doctor script for diagnosing compiler errors. + * Bug fixes and implementation clean-ups. + +Changes for 1.0.0: + + * Initial Open Source release of Google Mock diff --git a/src/gmock/CMakeLists.txt b/src/gmock/CMakeLists.txt new file mode 100644 index 00000000000..d812bbefed8 --- /dev/null +++ b/src/gmock/CMakeLists.txt @@ -0,0 +1,152 @@ +######################################################################## +# CMake build script for Google Mock. +# +# To run the tests for Google Mock itself on Linux, use 'make test' or +# ctest. You can select which tests to run using 'ctest -R regex'. +# For more options, run 'ctest --help'. + +# BUILD_SHARED_LIBS is a standard CMake variable, but we declare it here to +# make it prominent in the GUI. +option(BUILD_SHARED_LIBS "Build shared libraries (DLLs)." OFF) + +option(gmock_build_tests "Build all of Google Mock's own tests." OFF) + +# A directory to find Google Test sources. +if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/gtest/CMakeLists.txt") + set(gtest_dir gtest) +else() + set(gtest_dir ../gtest) +endif() + +# Defines pre_project_set_up_hermetic_build() and set_up_hermetic_build(). +include("${gtest_dir}/cmake/hermetic_build.cmake" OPTIONAL) + +if (COMMAND pre_project_set_up_hermetic_build) + # Google Test also calls hermetic setup functions from add_subdirectory, + # although its changes will not affect things at the current scope. + pre_project_set_up_hermetic_build() +endif() + +######################################################################## +# +# Project-wide settings + +# Name of the project. 
+# +# CMake files in this project can refer to the root source directory +# as ${gmock_SOURCE_DIR} and to the root binary directory as +# ${gmock_BINARY_DIR}. +# Language "C" is required for find_package(Threads). +project(gmock CXX C) +cmake_minimum_required(VERSION 2.6.2) + +if (COMMAND set_up_hermetic_build) + set_up_hermetic_build() +endif() + +# Instructs CMake to process Google Test's CMakeLists.txt and add its +# targets to the current scope. We are placing Google Test's binary +# directory in a subdirectory of our own as VC compilation may break +# if they are the same (the default). +add_subdirectory("${gtest_dir}" "${gmock_BINARY_DIR}/gtest") + +# Although Google Test's CMakeLists.txt calls this function, the +# changes there don't affect the current scope. Therefore we have to +# call it again here. +config_compiler_and_linker() # from ${gtest_dir}/cmake/internal_utils.cmake + +# Adds Google Mock's and Google Test's header directories to the search path. +include_directories("${gmock_SOURCE_DIR}/include" + "${gmock_SOURCE_DIR}" + "${gtest_SOURCE_DIR}/include" + # This directory is needed to build directly from Google + # Test sources. + "${gtest_SOURCE_DIR}") + +######################################################################## +# +# Defines the gmock & gmock_main libraries. User tests should link +# with one of them. + +# Google Mock libraries. We build them using more strict warnings than what +# are used for other targets, to ensure that Google Mock can be compiled by +# a user aggressive about warnings. +cxx_library(gmock + "${cxx_strict}" + "${gtest_dir}/src/gtest-all.cpp" + src/gmock-all.cpp) + +######################################################################## +# +# Google Mock's own tests. +# +# You can skip this section if you aren't interested in testing +# Google Mock itself. +# +# The tests are not built by default. To build them, set the +# gmock_build_tests option to ON. 
You can do it by running ccmake +# or specifying the -Dgmock_build_tests=ON flag when running cmake. + +if (gmock_build_tests) + # This must be set in the root directory for the tests to be run by + # 'make test' or ctest. + enable_testing() + + ############################################################ + # C++ tests built with standard compiler flags. + + cxx_test(gmock-actions_test gmock_main) + cxx_test(gmock-cardinalities_test gmock_main) + cxx_test(gmock_ex_test gmock_main) + cxx_test(gmock-generated-actions_test gmock_main) + cxx_test(gmock-generated-function-mockers_test gmock_main) + cxx_test(gmock-generated-internal-utils_test gmock_main) + cxx_test(gmock-generated-matchers_test gmock_main) + cxx_test(gmock-internal-utils_test gmock_main) + cxx_test(gmock-matchers_test gmock_main) + cxx_test(gmock-more-actions_test gmock_main) + cxx_test(gmock-nice-strict_test gmock_main) + cxx_test(gmock-port_test gmock_main) + cxx_test(gmock-spec-builders_test gmock_main) + cxx_test(gmock_link_test gmock_main test/gmock_link2_test.cc) + cxx_test(gmock_test gmock_main) + + if (CMAKE_USE_PTHREADS_INIT) + cxx_test(gmock_stress_test gmock) + endif() + + # gmock_all_test is commented to save time building and running tests. + # Uncomment if necessary. + # cxx_test(gmock_all_test gmock_main) + + ############################################################ + # C++ tests built with non-standard compiler flags. + + cxx_test_with_flags(gmock-more-actions_no_exception_test "${cxx_no_exception}" + gmock_main_no_exception test/gmock-more-actions_test.cc) + + cxx_test_with_flags(gmock_no_rtti_test "${cxx_no_rtti}" + gmock_main_no_rtti test/gmock-spec-builders_test.cc) + + cxx_test_with_flags(gmock_use_own_tuple_test "${cxx_use_own_tuple}" + gmock_main_use_own_tuple test/gmock-spec-builders_test.cc) + + # Tests that a binary can be built with Google Mock as a shared library. 
On + # some system configurations, it may not possible to run the binary without + # knowing more details about the system configurations. We do not try to run + # this binary. To get a more robust shared library coverage, configure with + # -DBUILD_SHARED_LIBS=ON. + cxx_executable_with_flags(shared_gmock_test_ "${cxx_default}" + shared_gmock_main test/gmock-spec-builders_test.cc) + set_target_properties(shared_gmock_test_ + PROPERTIES + COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1") + + ############################################################ + # Python tests. + + py_test(gmock_leak_test) + + cxx_executable(gmock_output_test_ test gmock) + py_test(gmock_output_test) +endif() diff --git a/src/gmock/CONTRIBUTORS b/src/gmock/CONTRIBUTORS new file mode 100644 index 00000000000..6e9ae362b60 --- /dev/null +++ b/src/gmock/CONTRIBUTORS @@ -0,0 +1,40 @@ +# This file contains a list of people who've made non-trivial +# contribution to the Google C++ Mocking Framework project. People +# who commit code to the project are encouraged to add their names +# here. Please keep the list sorted by first names. + +Benoit Sigoure +Bogdan Piloca +Chandler Carruth +Dave MacLachlan +David Anderson +Dean Sturtevant +Gene Volovich +Hal Burch +Jeffrey Yasskin +Jim Keller +Joe Walnes +Jon Wray +Keir Mierle +Keith Ray +Kostya Serebryany +Lev Makhlis +Manuel Klimek +Mario Tanev +Mark Paskin +Markus Heule +Matthew Simmons +Mike Bland +Neal Norwitz +Nermin Ozkiranartli +Owen Carlsen +Paneendra Ba +Paul Menage +Piotr Kaminski +Russ Rufer +Sverre Sundsdal +Takeshi Yoshino +Vadim Berman +Vlad Losev +Wolfgang Klier +Zhanyong Wan diff --git a/src/gmock/LICENSE b/src/gmock/LICENSE new file mode 100644 index 00000000000..1941a11f8ce --- /dev/null +++ b/src/gmock/LICENSE @@ -0,0 +1,28 @@ +Copyright 2008, Google Inc. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/gmock/README b/src/gmock/README new file mode 100644 index 00000000000..ed2e69bac30 --- /dev/null +++ b/src/gmock/README @@ -0,0 +1,369 @@ +Google C++ Mocking Framework +============================ + +http://code.google.com/p/googlemock/ + +Overview +-------- + +Google's framework for writing and using C++ mock classes on a variety +of platforms (Linux, Mac OS X, Windows, Windows CE, Symbian, etc). 
+Inspired by jMock, EasyMock, and Hamcrest, and designed with C++'s +specifics in mind, it can help you derive better designs of your +system and write better tests. + +Google Mock: + +- provides a declarative syntax for defining mocks, +- can easily define partial (hybrid) mocks, which are a cross of real + and mock objects, +- handles functions of arbitrary types and overloaded functions, +- comes with a rich set of matchers for validating function arguments, +- uses an intuitive syntax for controlling the behavior of a mock, +- does automatic verification of expectations (no record-and-replay + needed), +- allows arbitrary (partial) ordering constraints on + function calls to be expressed, +- lets a user extend it by defining new matchers and actions. +- does not use exceptions, and +- is easy to learn and use. + +Please see the project page above for more information as well as the +mailing list for questions, discussions, and development. There is +also an IRC channel on OFTC (irc.oftc.net) #gtest available. Please +join us! + +Please note that code under scripts/generator/ is from the cppclean +project (http://code.google.com/p/cppclean/) and under the Apache +License, which is different from Google Mock's license. + +Requirements for End Users +-------------------------- + +Google Mock is implemented on top of the Google Test C++ testing +framework (http://code.google.com/p/googletest/), and includes the +latter as part of the SVN repositary and distribution package. You +must use the bundled version of Google Test when using Google Mock, or +you may get compiler/linker errors. + +You can also easily configure Google Mock to work with another testing +framework of your choice; although it will still need Google Test as +an internal dependency. Please read +http://code.google.com/p/googlemock/wiki/ForDummies#Using_Google_Mock_with_Any_Testing_Framework +for how to do it. 
+ +Google Mock depends on advanced C++ features and thus requires a more +modern compiler. The following are needed to use Google Mock: + +### Linux Requirements ### + +These are the base requirements to build and use Google Mock from a source +package (as described below): + + * GNU-compatible Make or "gmake" + * POSIX-standard shell + * POSIX(-2) Regular Expressions (regex.h) + * C++98-standard-compliant compiler (e.g. GCC 3.4 or newer) + +### Windows Requirements ### + + * Microsoft Visual C++ 8.0 SP1 or newer + +### Mac OS X Requirements ### + + * Mac OS X 10.4 Tiger or newer + * Developer Tools Installed + +Requirements for Contributors +----------------------------- + +We welcome patches. If you plan to contribute a patch, you need to +build Google Mock and its own tests from an SVN checkout (described +below), which has further requirements: + + * Automake version 1.9 or newer + * Autoconf version 2.59 or newer + * Libtool / Libtoolize + * Python version 2.3 or newer (for running some of the tests and + re-generating certain source files from templates) + +Getting the Source +------------------ + +There are two primary ways of getting Google Mock's source code: you +can download a stable source release in your preferred archive format, +or directly check out the source from our Subversion (SVN) repositary. +The SVN checkout requires a few extra steps and some extra software +packages on your system, but lets you track development and make +patches much more easily, so we highly encourage it. + +### Source Package ### + +Google Mock is released in versioned source packages which can be +downloaded from the download page [1]. Several different archive +formats are provided, but the only difference is the tools needed to +extract their contents, and the size of the resulting file. Download +whichever you are most comfortable with. 
+ + [1] http://code.google.com/p/googlemock/downloads/list + +Once downloaded expand the archive using whichever tools you prefer +for that type. This will always result in a new directory with the +name "gmock-X.Y.Z" which contains all of the source code. Here are +some examples on Linux: + + tar -xvzf gmock-X.Y.Z.tar.gz + tar -xvjf gmock-X.Y.Z.tar.bz2 + unzip gmock-X.Y.Z.zip + +### SVN Checkout ### + +To check out the main branch (also known as the "trunk") of Google +Mock, run the following Subversion command: + + svn checkout http://googlemock.googlecode.com/svn/trunk/ gmock-svn + +If you are using a *nix system and plan to use the GNU Autotools build +system to build Google Mock (described below), you'll need to +configure it now. Otherwise you are done with getting the source +files. + +To prepare the Autotools build system, enter the target directory of +the checkout command you used ('gmock-svn') and proceed with the +following command: + + autoreconf -fvi + +Once you have completed this step, you are ready to build the library. +Note that you should only need to complete this step once. The +subsequent 'make' invocations will automatically re-generate the bits +of the build system that need to be changed. + +If your system uses older versions of the autotools, the above command +will fail. You may need to explicitly specify a version to use. For +instance, if you have both GNU Automake 1.4 and 1.9 installed and +'automake' would invoke the 1.4, use instead: + + AUTOMAKE=automake-1.9 ACLOCAL=aclocal-1.9 autoreconf -fvi + +Make sure you're using the same version of automake and aclocal. + +Setting up the Build +-------------------- + +To build Google Mock and your tests that use it, you need to tell your +build system where to find its headers and source files. The exact +way to do it depends on which build system you use, and is usually +straightforward. 
+ +### Generic Build Instructions ### + +This section shows how you can integrate Google Mock into your +existing build system. + +Suppose you put Google Mock in directory ${GMOCK_DIR} and Google Test +in ${GTEST_DIR} (the latter is ${GMOCK_DIR}/gtest by default). To +build Google Mock, create a library build target (or a project as +called by Visual Studio and Xcode) to compile + + ${GTEST_DIR}/src/gtest-all.cc and ${GMOCK_DIR}/src/gmock-all.cc + +with + + ${GTEST_DIR}/include and ${GMOCK_DIR}/include + +in the system header search path, and + + ${GTEST_DIR} and ${GMOCK_DIR} + +in the normal header search path. Assuming a Linux-like system and gcc, +something like the following will do: + + g++ -isystem ${GTEST_DIR}/include -I${GTEST_DIR} \ + -isystem ${GMOCK_DIR}/include -I${GMOCK_DIR} \ + -pthread -c ${GTEST_DIR}/src/gtest-all.cc + g++ -isystem ${GTEST_DIR}/include -I${GTEST_DIR} \ + -isystem ${GMOCK_DIR}/include -I${GMOCK_DIR} \ + -pthread -c ${GMOCK_DIR}/src/gmock-all.cc + ar -rv libgmock.a gtest-all.o gmock-all.o + +(We need -pthread as Google Test and Google Mock use threads.) + +Next, you should compile your test source file with +${GTEST_DIR}/include and ${GMOCK_DIR}/include in the header search +path, and link it with gmock and any other necessary libraries: + + g++ -isystem ${GTEST_DIR}/include -isystem ${GMOCK_DIR}/include \ + -pthread path/to/your_test.cc libgmock.a -o your_test + +As an example, the make/ directory contains a Makefile that you can +use to build Google Mock on systems where GNU make is available +(e.g. Linux, Mac OS X, and Cygwin). It doesn't try to build Google +Mock's own tests. Instead, it just builds the Google Mock library and +a sample test. You can use it as a starting point for your own build +script. 
+
+If the default settings are correct for your environment, the
+following commands should succeed:
+
+    cd ${GMOCK_DIR}/make
+    make
+    ./gmock_test
+
+If you see errors, try to tweak the contents of make/Makefile to make
+them go away.  There are instructions in make/Makefile on how to do
+it.
+
+### Windows ###
+
+The msvc/2005 directory contains VC++ 2005 projects and the msvc/2010
+directory contains VC++ 2010 projects for building Google Mock and
+selected tests.
+
+Change to the appropriate directory and run "msbuild gmock.sln" to
+build the library and tests (or open the gmock.sln in the MSVC IDE).
+If you want to create your own project to use with Google Mock, you'll
+have to configure it to use the gmock_config property sheet.  For that:
+
+ * Open the Property Manager window (View | Other Windows | Property Manager)
+ * Right-click on your project and select "Add Existing Property Sheet..."
+ * Navigate to gmock_config.vsprops or gmock_config.props and select it.
+ * In Project Properties | Configuration Properties | General | Additional
+   Include Directories, type <path to gmock>/include.
+
+Tweaking Google Mock
+--------------------
+
+Google Mock can be used in diverse environments.  The default
+configuration may not work (or may not work well) out of the box in
+some environments.  However, you can easily tweak Google Mock by
+defining control macros on the compiler command line.  Generally,
+these macros are named like GTEST_XYZ and you define them to either 1
+or 0 to enable or disable a certain feature.
+
+We list the most frequently used macros below.  For a complete list,
+see file ${GTEST_DIR}/include/gtest/internal/gtest-port.h.
+
+### Choosing a TR1 Tuple Library ###
+
+Google Mock uses the C++ Technical Report 1 (TR1) tuple library
+heavily.  Unfortunately TR1 tuple is not yet widely available with all
+compilers.  The good news is that Google Test 1.4.0+ implements a
+subset of TR1 tuple that's enough for Google Mock's need.  Google Mock
+will automatically use that implementation when the compiler doesn't
+provide TR1 tuple.
+
+Usually you don't need to care about which tuple library Google Test
+and Google Mock use.  However, if your project already uses TR1 tuple,
+you need to tell Google Test and Google Mock to use the same TR1 tuple
+library the rest of your project uses, or the two tuple
+implementations will clash.  To do that, add
+
+  -DGTEST_USE_OWN_TR1_TUPLE=0
+
+to the compiler flags while compiling Google Test, Google Mock, and
+your tests.  If you want to force Google Test and Google Mock to use
+their own tuple library, just add
+
+  -DGTEST_USE_OWN_TR1_TUPLE=1
+
+to the compiler flags instead.
+
+If you want to use Boost's TR1 tuple library with Google Mock, please
+refer to the Boost website (http://www.boost.org/) for how to obtain
+it and set it up.
+
+### As a Shared Library (DLL) ###
+
+Google Mock is compact, so most users can build and link it as a static
+library for simplicity.  Google Mock can be used as a DLL, but the
+same DLL must contain Google Test as well.  See Google Test's README
+file for instructions on how to set up necessary compiler settings.
+
+### Tweaking Google Mock ###
+
+Most of Google Test's control macros apply to Google Mock as well.
+Please see file ${GTEST_DIR}/README for how to tweak them.
+
+Upgrading from an Earlier Version
+---------------------------------
+
+We strive to keep Google Mock releases backward compatible.
+Sometimes, though, we have to make some breaking changes for the
+users' long-term benefits.  This section describes what you'll need to
+do if you are upgrading from an earlier version of Google Mock.
+
+### Upgrading from 1.1.0 or Earlier ###
+
+You may need to explicitly enable or disable Google Test's own TR1
+tuple library.  See the instructions in section "Choosing a TR1 Tuple
+Library".
+ +### Upgrading from 1.4.0 or Earlier ### + +On platforms where the pthread library is available, Google Test and +Google Mock use it in order to be thread-safe. For this to work, you +may need to tweak your compiler and/or linker flags. Please see the +"Multi-threaded Tests" section in file ${GTEST_DIR}/README for what +you may need to do. + +If you have custom matchers defined using MatcherInterface or +MakePolymorphicMatcher(), you'll need to update their definitions to +use the new matcher API [2]. Matchers defined using MATCHER() or +MATCHER_P*() aren't affected. + + [2] http://code.google.com/p/googlemock/wiki/CookBook#Writing_New_Monomorphic_Matchers, + http://code.google.com/p/googlemock/wiki/CookBook#Writing_New_Polymorphic_Matchers + +Developing Google Mock +---------------------- + +This section discusses how to make your own changes to Google Mock. + +### Testing Google Mock Itself ### + +To make sure your changes work as intended and don't break existing +functionality, you'll want to compile and run Google Test's own tests. +For that you'll need Autotools. First, make sure you have followed +the instructions in section "SVN Checkout" to configure Google Mock. +Then, create a build output directory and enter it. Next, + + ${GMOCK_DIR}/configure # Standard GNU configure script, --help for more info + +Once you have successfully configured Google Mock, the build steps are +standard for GNU-style OSS packages. + + make # Standard makefile following GNU conventions + make check # Builds and runs all tests - all should pass. + +Note that when building your project against Google Mock, you are building +against Google Test as well. There is no need to configure Google Test +separately. + +### Regenerating Source Files ### + +Some of Google Mock's source files are generated from templates (not +in the C++ sense) using a script. A template file is named FOO.pump, +where FOO is the name of the file it will generate. 
For example, the +file include/gmock/gmock-generated-actions.h.pump is used to generate +gmock-generated-actions.h in the same directory. + +Normally you don't need to worry about regenerating the source files, +unless you need to modify them. In that case, you should modify the +corresponding .pump files instead and run the 'pump' script (for Pump +is Useful for Meta Programming) to regenerate them. You can find +pump.py in the ${GTEST_DIR}/scripts/ directory. Read the Pump manual +[3] for how to use it. + + [3] http://code.google.com/p/googletest/wiki/PumpManual. + +### Contributing a Patch ### + +We welcome patches. Please read the Google Mock developer's guide [4] +for how you can contribute. In particular, make sure you have signed +the Contributor License Agreement, or we won't be able to accept the +patch. + + [4] http://code.google.com/p/googlemock/wiki/DevGuide + +Happy testing! diff --git a/src/gmock/configure.ac b/src/gmock/configure.ac new file mode 100644 index 00000000000..d268d5d7351 --- /dev/null +++ b/src/gmock/configure.ac @@ -0,0 +1,146 @@ +m4_include(gtest/m4/acx_pthread.m4) + +AC_INIT([Google C++ Mocking Framework], + [1.7.0], + [googlemock@googlegroups.com], + [gmock]) + +# Provide various options to initialize the Autoconf and configure processes. +AC_PREREQ([2.59]) +AC_CONFIG_SRCDIR([./LICENSE]) +AC_CONFIG_AUX_DIR([build-aux]) +AC_CONFIG_HEADERS([build-aux/config.h]) +AC_CONFIG_FILES([Makefile]) +AC_CONFIG_FILES([scripts/gmock-config], [chmod +x scripts/gmock-config]) + +# Initialize Automake with various options. We require at least v1.9, prevent +# pedantic complaints about package files, and enable various distribution +# targets. +AM_INIT_AUTOMAKE([1.9 dist-bzip2 dist-zip foreign subdir-objects]) + +# Check for programs used in building Google Test. 
+AC_PROG_CC +AC_PROG_CXX +AC_LANG([C++]) +AC_PROG_LIBTOOL + +# TODO(chandlerc@google.com): Currently we aren't running the Python tests +# against the interpreter detected by AM_PATH_PYTHON, and so we condition +# HAVE_PYTHON by requiring "python" to be in the PATH, and that interpreter's +# version to be >= 2.3. This will allow the scripts to use a "/usr/bin/env" +# hashbang. +PYTHON= # We *do not* allow the user to specify a python interpreter +AC_PATH_PROG([PYTHON],[python],[:]) +AS_IF([test "$PYTHON" != ":"], + [AM_PYTHON_CHECK_VERSION([$PYTHON],[2.3],[:],[PYTHON=":"])]) +AM_CONDITIONAL([HAVE_PYTHON],[test "$PYTHON" != ":"]) + +# TODO(chandlerc@google.com) Check for the necessary system headers. + +# Configure pthreads. +AC_ARG_WITH([pthreads], + [AS_HELP_STRING([--with-pthreads], + [use pthreads (default is yes)])], + [with_pthreads=$withval], + [with_pthreads=check]) + +have_pthreads=no +AS_IF([test "x$with_pthreads" != "xno"], + [ACX_PTHREAD( + [], + [AS_IF([test "x$with_pthreads" != "xcheck"], + [AC_MSG_FAILURE( + [--with-pthreads was specified, but unable to be used])])]) + have_pthreads="$acx_pthread_ok"]) +AM_CONDITIONAL([HAVE_PTHREADS],[test "x$have_pthreads" == "xyes"]) +AC_SUBST(PTHREAD_CFLAGS) +AC_SUBST(PTHREAD_LIBS) + +# GoogleMock currently has hard dependencies upon GoogleTest above and beyond +# running its own test suite, so we both provide our own version in +# a subdirectory and provide some logic to use a custom version or a system +# installed version. +AC_ARG_WITH([gtest], + [AS_HELP_STRING([--with-gtest], + [Specifies how to find the gtest package. If no + arguments are given, the default behavior, a + system installed gtest will be used if present, + and an internal version built otherwise. 
If a + path is provided, the gtest built or installed at + that prefix will be used.])], + [], + [with_gtest=yes]) +AC_ARG_ENABLE([external-gtest], + [AS_HELP_STRING([--disable-external-gtest], + [Disables any detection or use of a system + installed or user provided gtest. Any option to + '--with-gtest' is ignored. (Default is enabled.)]) + ], [], [enable_external_gtest=yes]) +AS_IF([test "x$with_gtest" == "xno"], + [AC_MSG_ERROR([dnl +Support for GoogleTest was explicitly disabled. Currently GoogleMock has a hard +dependency upon GoogleTest to build, please provide a version, or allow +GoogleMock to use any installed version and fall back upon its internal +version.])]) + +# Setup various GTEST variables. TODO(chandlerc@google.com): When these are +# used below, they should be used such that any pre-existing values always +# trump values we set them to, so that they can be used to selectively override +# details of the detection process. +AC_ARG_VAR([GTEST_CONFIG], + [The exact path of Google Test's 'gtest-config' script.]) +AC_ARG_VAR([GTEST_CPPFLAGS], + [C-like preprocessor flags for Google Test.]) +AC_ARG_VAR([GTEST_CXXFLAGS], + [C++ compile flags for Google Test.]) +AC_ARG_VAR([GTEST_LDFLAGS], + [Linker path and option flags for Google Test.]) +AC_ARG_VAR([GTEST_LIBS], + [Library linking flags for Google Test.]) +AC_ARG_VAR([GTEST_VERSION], + [The version of Google Test available.]) +HAVE_BUILT_GTEST="no" + +GTEST_MIN_VERSION="1.7.0" + +AS_IF([test "x${enable_external_gtest}" = "xyes"], + [# Begin filling in variables as we are able. 
+ AS_IF([test "x${with_gtest}" != "xyes"], + [AS_IF([test -x "${with_gtest}/scripts/gtest-config"], + [GTEST_CONFIG="${with_gtest}/scripts/gtest-config"], + [GTEST_CONFIG="${with_gtest}/bin/gtest-config"]) + AS_IF([test -x "${GTEST_CONFIG}"], [], + [AC_MSG_ERROR([dnl +Unable to locate either a built or installed Google Test at '${with_gtest}'.]) + ])]) + + AS_IF([test -x "${GTEST_CONFIG}"], [], + [AC_PATH_PROG([GTEST_CONFIG], [gtest-config])]) + AS_IF([test -x "${GTEST_CONFIG}"], + [AC_MSG_CHECKING([for Google Test version >= ${GTEST_MIN_VERSION}]) + AS_IF([${GTEST_CONFIG} --min-version=${GTEST_MIN_VERSION}], + [AC_MSG_RESULT([yes]) + HAVE_BUILT_GTEST="yes"], + [AC_MSG_RESULT([no])])])]) + +AS_IF([test "x${HAVE_BUILT_GTEST}" = "xyes"], + [GTEST_CPPFLAGS=`${GTEST_CONFIG} --cppflags` + GTEST_CXXFLAGS=`${GTEST_CONFIG} --cxxflags` + GTEST_LDFLAGS=`${GTEST_CONFIG} --ldflags` + GTEST_LIBS=`${GTEST_CONFIG} --libs` + GTEST_VERSION=`${GTEST_CONFIG} --version`], + [AC_CONFIG_SUBDIRS([gtest]) + # GTEST_CONFIG needs to be executable both in a Makefile environmont and + # in a shell script environment, so resolve an absolute path for it here. + GTEST_CONFIG="`pwd -P`/gtest/scripts/gtest-config" + GTEST_CPPFLAGS='-I$(top_srcdir)/gtest/include' + GTEST_CXXFLAGS='-g' + GTEST_LDFLAGS='' + GTEST_LIBS='$(top_builddir)/gtest/lib/libgtest.la' + GTEST_VERSION="${GTEST_MIN_VERSION}"]) + +# TODO(chandlerc@google.com) Check the types, structures, and other compiler +# and architecture characteristics. + +# Output the generated files. No further autoconf macros may be used. +AC_OUTPUT diff --git a/src/gmock/include/gmock/gmock-actions.h b/src/gmock/include/gmock/gmock-actions.h new file mode 100644 index 00000000000..7e9708ec29e --- /dev/null +++ b/src/gmock/include/gmock/gmock-actions.h @@ -0,0 +1,1078 @@ +// Copyright 2007, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Google Mock - a framework for writing C++ mock classes. +// +// This file implements some commonly used actions. 
+ +#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_ +#define GMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_ + +#ifndef _WIN32_WCE +# include +#endif + +#include +#include + +#include "gmock/internal/gmock-internal-utils.h" +#include "gmock/internal/gmock-port.h" + +namespace testing { + +// To implement an action Foo, define: +// 1. a class FooAction that implements the ActionInterface interface, and +// 2. a factory function that creates an Action object from a +// const FooAction*. +// +// The two-level delegation design follows that of Matcher, providing +// consistency for extension developers. It also eases ownership +// management as Action objects can now be copied like plain values. + +namespace internal { + +template +class ActionAdaptor; + +// BuiltInDefaultValue::Get() returns the "built-in" default +// value for type T, which is NULL when T is a pointer type, 0 when T +// is a numeric type, false when T is bool, or "" when T is string or +// std::string. For any other type T, this value is undefined and the +// function will abort the process. +template +class BuiltInDefaultValue { + public: + // This function returns true iff type T has a built-in default value. + static bool Exists() { return false; } + static T Get() { + Assert(false, __FILE__, __LINE__, + "Default action undefined for the function return type."); + return internal::Invalid(); + // The above statement will never be reached, but is required in + // order for this function to compile. + } +}; + +// This partial specialization says that we use the same built-in +// default value for T and const T. +template +class BuiltInDefaultValue { + public: + static bool Exists() { return BuiltInDefaultValue::Exists(); } + static T Get() { return BuiltInDefaultValue::Get(); } +}; + +// This partial specialization defines the default values for pointer +// types. 
+template +class BuiltInDefaultValue { + public: + static bool Exists() { return true; } + static T* Get() { return NULL; } +}; + +// The following specializations define the default values for +// specific types we care about. +#define GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(type, value) \ + template <> \ + class BuiltInDefaultValue { \ + public: \ + static bool Exists() { return true; } \ + static type Get() { return value; } \ + } + +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(void, ); // NOLINT +#if GTEST_HAS_GLOBAL_STRING +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(::string, ""); +#endif // GTEST_HAS_GLOBAL_STRING +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(::std::string, ""); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(bool, false); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned char, '\0'); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed char, '\0'); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(char, '\0'); + +// There's no need for a default action for signed wchar_t, as that +// type is the same as wchar_t for gcc, and invalid for MSVC. +// +// There's also no need for a default action for unsigned wchar_t, as +// that type is the same as unsigned int for gcc, and invalid for +// MSVC. 
+#if GMOCK_WCHAR_T_IS_NATIVE_ +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(wchar_t, 0U); // NOLINT +#endif + +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned short, 0U); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed short, 0); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned int, 0U); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed int, 0); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned long, 0UL); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed long, 0L); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(UInt64, 0); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(Int64, 0); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(float, 0); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(double, 0); + +#undef GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_ + +} // namespace internal + +// When an unexpected function call is encountered, Google Mock will +// let it return a default value if the user has specified one for its +// return type, or if the return type has a built-in default value; +// otherwise Google Mock won't know what value to return and will have +// to abort the process. +// +// The DefaultValue class allows a user to specify the +// default value for a type T that is both copyable and publicly +// destructible (i.e. anything that can be used as a function return +// type). The usage is: +// +// // Sets the default value for type T to be foo. +// DefaultValue::Set(foo); +template +class DefaultValue { + public: + // Sets the default value for type T; requires T to be + // copy-constructable and have a public destructor. + static void Set(T x) { + delete value_; + value_ = new T(x); + } + + // Unsets the default value for type T. + static void Clear() { + delete value_; + value_ = NULL; + } + + // Returns true iff the user has set the default value for type T. 
+ static bool IsSet() { return value_ != NULL; } + + // Returns true if T has a default return value set by the user or there + // exists a built-in default value. + static bool Exists() { + return IsSet() || internal::BuiltInDefaultValue::Exists(); + } + + // Returns the default value for type T if the user has set one; + // otherwise returns the built-in default value if there is one; + // otherwise aborts the process. + static T Get() { + return value_ == NULL ? + internal::BuiltInDefaultValue::Get() : *value_; + } + + private: + static const T* value_; +}; + +// This partial specialization allows a user to set default values for +// reference types. +template +class DefaultValue { + public: + // Sets the default value for type T&. + static void Set(T& x) { // NOLINT + address_ = &x; + } + + // Unsets the default value for type T&. + static void Clear() { + address_ = NULL; + } + + // Returns true iff the user has set the default value for type T&. + static bool IsSet() { return address_ != NULL; } + + // Returns true if T has a default return value set by the user or there + // exists a built-in default value. + static bool Exists() { + return IsSet() || internal::BuiltInDefaultValue::Exists(); + } + + // Returns the default value for type T& if the user has set one; + // otherwise returns the built-in default value if there is one; + // otherwise aborts the process. + static T& Get() { + return address_ == NULL ? + internal::BuiltInDefaultValue::Get() : *address_; + } + + private: + static T* address_; +}; + +// This specialization allows DefaultValue::Get() to +// compile. +template <> +class DefaultValue { + public: + static bool Exists() { return true; } + static void Get() {} +}; + +// Points to the user-set default value for type T. +template +const T* DefaultValue::value_ = NULL; + +// Points to the user-set default value for type T&. +template +T* DefaultValue::address_ = NULL; + +// Implement this interface to define an action for function type F. 
+template +class ActionInterface { + public: + typedef typename internal::Function::Result Result; + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + ActionInterface() {} + virtual ~ActionInterface() {} + + // Performs the action. This method is not const, as in general an + // action can have side effects and be stateful. For example, a + // get-the-next-element-from-the-collection action will need to + // remember the current element. + virtual Result Perform(const ArgumentTuple& args) = 0; + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(ActionInterface); +}; + +// An Action is a copyable and IMMUTABLE (except by assignment) +// object that represents an action to be taken when a mock function +// of type F is called. The implementation of Action is just a +// linked_ptr to const ActionInterface, so copying is fairly cheap. +// Don't inherit from Action! +// +// You can view an object implementing ActionInterface as a +// concrete action (including its current state), and an Action +// object as a handle to it. +template +class Action { + public: + typedef typename internal::Function::Result Result; + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + // Constructs a null Action. Needed for storing Action objects in + // STL containers. + Action() : impl_(NULL) {} + + // Constructs an Action from its implementation. A NULL impl is + // used to represent the "do-default" action. + explicit Action(ActionInterface* impl) : impl_(impl) {} + + // Copy constructor. + Action(const Action& action) : impl_(action.impl_) {} + + // This constructor allows us to turn an Action object into an + // Action, as long as F's arguments can be implicitly converted + // to Func's and Func's return type can be implicitly converted to + // F's. + template + explicit Action(const Action& action); + + // Returns true iff this is the DoDefault() action. + bool IsDoDefault() const { return impl_.get() == NULL; } + + // Performs the action. 
Note that this method is const even though + // the corresponding method in ActionInterface is not. The reason + // is that a const Action means that it cannot be re-bound to + // another concrete action, not that the concrete action it binds to + // cannot change state. (Think of the difference between a const + // pointer and a pointer to const.) + Result Perform(const ArgumentTuple& args) const { + internal::Assert( + !IsDoDefault(), __FILE__, __LINE__, + "You are using DoDefault() inside a composite action like " + "DoAll() or WithArgs(). This is not supported for technical " + "reasons. Please instead spell out the default action, or " + "assign the default action to an Action variable and use " + "the variable in various places."); + return impl_->Perform(args); + } + + private: + template + friend class internal::ActionAdaptor; + + internal::linked_ptr > impl_; +}; + +// The PolymorphicAction class template makes it easy to implement a +// polymorphic action (i.e. an action that can be used in mock +// functions of than one type, e.g. Return()). +// +// To define a polymorphic action, a user first provides a COPYABLE +// implementation class that has a Perform() method template: +// +// class FooAction { +// public: +// template +// Result Perform(const ArgumentTuple& args) const { +// // Processes the arguments and returns a result, using +// // tr1::get(args) to get the N-th (0-based) argument in the tuple. +// } +// ... +// }; +// +// Then the user creates the polymorphic action using +// MakePolymorphicAction(object) where object has type FooAction. See +// the definition of Return(void) and SetArgumentPointee(value) for +// complete examples. 
+template +class PolymorphicAction { + public: + explicit PolymorphicAction(const Impl& impl) : impl_(impl) {} + + template + operator Action() const { + return Action(new MonomorphicImpl(impl_)); + } + + private: + template + class MonomorphicImpl : public ActionInterface { + public: + typedef typename internal::Function::Result Result; + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + explicit MonomorphicImpl(const Impl& impl) : impl_(impl) {} + + virtual Result Perform(const ArgumentTuple& args) { + return impl_.template Perform(args); + } + + private: + Impl impl_; + + GTEST_DISALLOW_ASSIGN_(MonomorphicImpl); + }; + + Impl impl_; + + GTEST_DISALLOW_ASSIGN_(PolymorphicAction); +}; + +// Creates an Action from its implementation and returns it. The +// created Action object owns the implementation. +template +Action MakeAction(ActionInterface* impl) { + return Action(impl); +} + +// Creates a polymorphic action from its implementation. This is +// easier to use than the PolymorphicAction constructor as it +// doesn't require you to explicitly write the template argument, e.g. +// +// MakePolymorphicAction(foo); +// vs +// PolymorphicAction(foo); +template +inline PolymorphicAction MakePolymorphicAction(const Impl& impl) { + return PolymorphicAction(impl); +} + +namespace internal { + +// Allows an Action object to pose as an Action, as long as F2 +// and F1 are compatible. 
+template +class ActionAdaptor : public ActionInterface { + public: + typedef typename internal::Function::Result Result; + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + explicit ActionAdaptor(const Action& from) : impl_(from.impl_) {} + + virtual Result Perform(const ArgumentTuple& args) { + return impl_->Perform(args); + } + + private: + const internal::linked_ptr > impl_; + + GTEST_DISALLOW_ASSIGN_(ActionAdaptor); +}; + +// Implements the polymorphic Return(x) action, which can be used in +// any function that returns the type of x, regardless of the argument +// types. +// +// Note: The value passed into Return must be converted into +// Function::Result when this action is cast to Action rather than +// when that action is performed. This is important in scenarios like +// +// MOCK_METHOD1(Method, T(U)); +// ... +// { +// Foo foo; +// X x(&foo); +// EXPECT_CALL(mock, Method(_)).WillOnce(Return(x)); +// } +// +// In the example above the variable x holds reference to foo which leaves +// scope and gets destroyed. If copying X just copies a reference to foo, +// that copy will be left with a hanging reference. If conversion to T +// makes a copy of foo, the above code is safe. To support that scenario, we +// need to make sure that the type conversion happens inside the EXPECT_CALL +// statement, and conversion of the result of Return to Action is a +// good place for that. +// +template +class ReturnAction { + public: + // Constructs a ReturnAction object from the value to be returned. + // 'value' is passed by value instead of by const reference in order + // to allow Return("string literal") to compile. + explicit ReturnAction(R value) : value_(value) {} + + // This template type conversion operator allows Return(x) to be + // used in ANY function that returns x's type. + template + operator Action() const { + // Assert statement belongs here because this is the best place to verify + // conditions on F. 
It produces the clearest error messages + // in most compilers. + // Impl really belongs in this scope as a local class but can't + // because MSVC produces duplicate symbols in different translation units + // in this case. Until MS fixes that bug we put Impl into the class scope + // and put the typedef both here (for use in assert statement) and + // in the Impl class. But both definitions must be the same. + typedef typename Function::Result Result; + GTEST_COMPILE_ASSERT_( + !internal::is_reference::value, + use_ReturnRef_instead_of_Return_to_return_a_reference); + return Action(new Impl(value_)); + } + + private: + // Implements the Return(x) action for a particular function type F. + template + class Impl : public ActionInterface { + public: + typedef typename Function::Result Result; + typedef typename Function::ArgumentTuple ArgumentTuple; + + // The implicit cast is necessary when Result has more than one + // single-argument constructor (e.g. Result is std::vector) and R + // has a type conversion operator template. In that case, value_(value) + // won't compile as the compiler doesn't known which constructor of + // Result to call. ImplicitCast_ forces the compiler to convert R to + // Result without considering explicit constructors, thus resolving the + // ambiguity. value_ is then initialized using its copy constructor. + explicit Impl(R value) + : value_(::testing::internal::ImplicitCast_(value)) {} + + virtual Result Perform(const ArgumentTuple&) { return value_; } + + private: + GTEST_COMPILE_ASSERT_(!internal::is_reference::value, + Result_cannot_be_a_reference_type); + Result value_; + + GTEST_DISALLOW_ASSIGN_(Impl); + }; + + R value_; + + GTEST_DISALLOW_ASSIGN_(ReturnAction); +}; + +// Implements the ReturnNull() action. +class ReturnNullAction { + public: + // Allows ReturnNull() to be used in any pointer-returning function. 
+ template + static Result Perform(const ArgumentTuple&) { + GTEST_COMPILE_ASSERT_(internal::is_pointer::value, + ReturnNull_can_be_used_to_return_a_pointer_only); + return NULL; + } +}; + +// Implements the Return() action. +class ReturnVoidAction { + public: + // Allows Return() to be used in any void-returning function. + template + static void Perform(const ArgumentTuple&) { + CompileAssertTypesEqual(); + } +}; + +// Implements the polymorphic ReturnRef(x) action, which can be used +// in any function that returns a reference to the type of x, +// regardless of the argument types. +template +class ReturnRefAction { + public: + // Constructs a ReturnRefAction object from the reference to be returned. + explicit ReturnRefAction(T& ref) : ref_(ref) {} // NOLINT + + // This template type conversion operator allows ReturnRef(x) to be + // used in ANY function that returns a reference to x's type. + template + operator Action() const { + typedef typename Function::Result Result; + // Asserts that the function return type is a reference. This + // catches the user error of using ReturnRef(x) when Return(x) + // should be used, and generates some helpful error message. + GTEST_COMPILE_ASSERT_(internal::is_reference::value, + use_Return_instead_of_ReturnRef_to_return_a_value); + return Action(new Impl(ref_)); + } + + private: + // Implements the ReturnRef(x) action for a particular function type F. 
+ template + class Impl : public ActionInterface { + public: + typedef typename Function::Result Result; + typedef typename Function::ArgumentTuple ArgumentTuple; + + explicit Impl(T& ref) : ref_(ref) {} // NOLINT + + virtual Result Perform(const ArgumentTuple&) { + return ref_; + } + + private: + T& ref_; + + GTEST_DISALLOW_ASSIGN_(Impl); + }; + + T& ref_; + + GTEST_DISALLOW_ASSIGN_(ReturnRefAction); +}; + +// Implements the polymorphic ReturnRefOfCopy(x) action, which can be +// used in any function that returns a reference to the type of x, +// regardless of the argument types. +template +class ReturnRefOfCopyAction { + public: + // Constructs a ReturnRefOfCopyAction object from the reference to + // be returned. + explicit ReturnRefOfCopyAction(const T& value) : value_(value) {} // NOLINT + + // This template type conversion operator allows ReturnRefOfCopy(x) to be + // used in ANY function that returns a reference to x's type. + template + operator Action() const { + typedef typename Function::Result Result; + // Asserts that the function return type is a reference. This + // catches the user error of using ReturnRefOfCopy(x) when Return(x) + // should be used, and generates some helpful error message. + GTEST_COMPILE_ASSERT_( + internal::is_reference::value, + use_Return_instead_of_ReturnRefOfCopy_to_return_a_value); + return Action(new Impl(value_)); + } + + private: + // Implements the ReturnRefOfCopy(x) action for a particular function type F. + template + class Impl : public ActionInterface { + public: + typedef typename Function::Result Result; + typedef typename Function::ArgumentTuple ArgumentTuple; + + explicit Impl(const T& value) : value_(value) {} // NOLINT + + virtual Result Perform(const ArgumentTuple&) { + return value_; + } + + private: + T value_; + + GTEST_DISALLOW_ASSIGN_(Impl); + }; + + const T value_; + + GTEST_DISALLOW_ASSIGN_(ReturnRefOfCopyAction); +}; + +// Implements the polymorphic DoDefault() action. 
+class DoDefaultAction { + public: + // This template type conversion operator allows DoDefault() to be + // used in any function. + template + operator Action() const { return Action(NULL); } +}; + +// Implements the Assign action to set a given pointer referent to a +// particular value. +template +class AssignAction { + public: + AssignAction(T1* ptr, T2 value) : ptr_(ptr), value_(value) {} + + template + void Perform(const ArgumentTuple& /* args */) const { + *ptr_ = value_; + } + + private: + T1* const ptr_; + const T2 value_; + + GTEST_DISALLOW_ASSIGN_(AssignAction); +}; + +#if !GTEST_OS_WINDOWS_MOBILE + +// Implements the SetErrnoAndReturn action to simulate return from +// various system calls and libc functions. +template +class SetErrnoAndReturnAction { + public: + SetErrnoAndReturnAction(int errno_value, T result) + : errno_(errno_value), + result_(result) {} + template + Result Perform(const ArgumentTuple& /* args */) const { + errno = errno_; + return result_; + } + + private: + const int errno_; + const T result_; + + GTEST_DISALLOW_ASSIGN_(SetErrnoAndReturnAction); +}; + +#endif // !GTEST_OS_WINDOWS_MOBILE + +// Implements the SetArgumentPointee(x) action for any function +// whose N-th argument (0-based) is a pointer to x's type. The +// template parameter kIsProto is true iff type A is ProtocolMessage, +// proto2::Message, or a sub-class of those. +template +class SetArgumentPointeeAction { + public: + // Constructs an action that sets the variable pointed to by the + // N-th function argument to 'value'. 
+ explicit SetArgumentPointeeAction(const A& value) : value_(value) {} + + template + void Perform(const ArgumentTuple& args) const { + CompileAssertTypesEqual(); + *::std::tr1::get(args) = value_; + } + + private: + const A value_; + + GTEST_DISALLOW_ASSIGN_(SetArgumentPointeeAction); +}; + +template +class SetArgumentPointeeAction { + public: + // Constructs an action that sets the variable pointed to by the + // N-th function argument to 'proto'. Both ProtocolMessage and + // proto2::Message have the CopyFrom() method, so the same + // implementation works for both. + explicit SetArgumentPointeeAction(const Proto& proto) : proto_(new Proto) { + proto_->CopyFrom(proto); + } + + template + void Perform(const ArgumentTuple& args) const { + CompileAssertTypesEqual(); + ::std::tr1::get(args)->CopyFrom(*proto_); + } + + private: + const internal::linked_ptr proto_; + + GTEST_DISALLOW_ASSIGN_(SetArgumentPointeeAction); +}; + +// Implements the InvokeWithoutArgs(f) action. The template argument +// FunctionImpl is the implementation type of f, which can be either a +// function pointer or a functor. InvokeWithoutArgs(f) can be used as an +// Action as long as f's type is compatible with F (i.e. f can be +// assigned to a tr1::function). +template +class InvokeWithoutArgsAction { + public: + // The c'tor makes a copy of function_impl (either a function + // pointer or a functor). + explicit InvokeWithoutArgsAction(FunctionImpl function_impl) + : function_impl_(function_impl) {} + + // Allows InvokeWithoutArgs(f) to be used as any action whose type is + // compatible with f. + template + Result Perform(const ArgumentTuple&) { return function_impl_(); } + + private: + FunctionImpl function_impl_; + + GTEST_DISALLOW_ASSIGN_(InvokeWithoutArgsAction); +}; + +// Implements the InvokeWithoutArgs(object_ptr, &Class::Method) action. 
+template +class InvokeMethodWithoutArgsAction { + public: + InvokeMethodWithoutArgsAction(Class* obj_ptr, MethodPtr method_ptr) + : obj_ptr_(obj_ptr), method_ptr_(method_ptr) {} + + template + Result Perform(const ArgumentTuple&) const { + return (obj_ptr_->*method_ptr_)(); + } + + private: + Class* const obj_ptr_; + const MethodPtr method_ptr_; + + GTEST_DISALLOW_ASSIGN_(InvokeMethodWithoutArgsAction); +}; + +// Implements the IgnoreResult(action) action. +template +class IgnoreResultAction { + public: + explicit IgnoreResultAction(const A& action) : action_(action) {} + + template + operator Action() const { + // Assert statement belongs here because this is the best place to verify + // conditions on F. It produces the clearest error messages + // in most compilers. + // Impl really belongs in this scope as a local class but can't + // because MSVC produces duplicate symbols in different translation units + // in this case. Until MS fixes that bug we put Impl into the class scope + // and put the typedef both here (for use in assert statement) and + // in the Impl class. But both definitions must be the same. + typedef typename internal::Function::Result Result; + + // Asserts at compile time that F returns void. + CompileAssertTypesEqual(); + + return Action(new Impl(action_)); + } + + private: + template + class Impl : public ActionInterface { + public: + typedef typename internal::Function::Result Result; + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + explicit Impl(const A& action) : action_(action) {} + + virtual void Perform(const ArgumentTuple& args) { + // Performs the action and ignores its result. + action_.Perform(args); + } + + private: + // Type OriginalFunction is the same as F except that its return + // type is IgnoredValue. 
+ typedef typename internal::Function::MakeResultIgnoredValue + OriginalFunction; + + const Action action_; + + GTEST_DISALLOW_ASSIGN_(Impl); + }; + + const A action_; + + GTEST_DISALLOW_ASSIGN_(IgnoreResultAction); +}; + +// A ReferenceWrapper object represents a reference to type T, +// which can be either const or not. It can be explicitly converted +// from, and implicitly converted to, a T&. Unlike a reference, +// ReferenceWrapper can be copied and can survive template type +// inference. This is used to support by-reference arguments in the +// InvokeArgument(...) action. The idea was from "reference +// wrappers" in tr1, which we don't have in our source tree yet. +template +class ReferenceWrapper { + public: + // Constructs a ReferenceWrapper object from a T&. + explicit ReferenceWrapper(T& l_value) : pointer_(&l_value) {} // NOLINT + + // Allows a ReferenceWrapper object to be implicitly converted to + // a T&. + operator T&() const { return *pointer_; } + private: + T* pointer_; +}; + +// Allows the expression ByRef(x) to be printed as a reference to x. +template +void PrintTo(const ReferenceWrapper& ref, ::std::ostream* os) { + T& value = ref; + UniversalPrinter::Print(value, os); +} + +// Does two actions sequentially. Used for implementing the DoAll(a1, +// a2, ...) action. +template +class DoBothAction { + public: + DoBothAction(Action1 action1, Action2 action2) + : action1_(action1), action2_(action2) {} + + // This template type conversion operator allows DoAll(a1, ..., a_n) + // to be used in ANY function of compatible type. + template + operator Action() const { + return Action(new Impl(action1_, action2_)); + } + + private: + // Implements the DoAll(...) action for a particular function type F. 
+ template + class Impl : public ActionInterface { + public: + typedef typename Function::Result Result; + typedef typename Function::ArgumentTuple ArgumentTuple; + typedef typename Function::MakeResultVoid VoidResult; + + Impl(const Action& action1, const Action& action2) + : action1_(action1), action2_(action2) {} + + virtual Result Perform(const ArgumentTuple& args) { + action1_.Perform(args); + return action2_.Perform(args); + } + + private: + const Action action1_; + const Action action2_; + + GTEST_DISALLOW_ASSIGN_(Impl); + }; + + Action1 action1_; + Action2 action2_; + + GTEST_DISALLOW_ASSIGN_(DoBothAction); +}; + +} // namespace internal + +// An Unused object can be implicitly constructed from ANY value. +// This is handy when defining actions that ignore some or all of the +// mock function arguments. For example, given +// +// MOCK_METHOD3(Foo, double(const string& label, double x, double y)); +// MOCK_METHOD3(Bar, double(int index, double x, double y)); +// +// instead of +// +// double DistanceToOriginWithLabel(const string& label, double x, double y) { +// return sqrt(x*x + y*y); +// } +// double DistanceToOriginWithIndex(int index, double x, double y) { +// return sqrt(x*x + y*y); +// } +// ... +// EXEPCT_CALL(mock, Foo("abc", _, _)) +// .WillOnce(Invoke(DistanceToOriginWithLabel)); +// EXEPCT_CALL(mock, Bar(5, _, _)) +// .WillOnce(Invoke(DistanceToOriginWithIndex)); +// +// you could write +// +// // We can declare any uninteresting argument as Unused. +// double DistanceToOrigin(Unused, double x, double y) { +// return sqrt(x*x + y*y); +// } +// ... +// EXEPCT_CALL(mock, Foo("abc", _, _)).WillOnce(Invoke(DistanceToOrigin)); +// EXEPCT_CALL(mock, Bar(5, _, _)).WillOnce(Invoke(DistanceToOrigin)); +typedef internal::IgnoredValue Unused; + +// This constructor allows us to turn an Action object into an +// Action, as long as To's arguments can be implicitly converted +// to From's and From's return type cann be implicitly converted to +// To's. 
+template +template +Action::Action(const Action& from) + : impl_(new internal::ActionAdaptor(from)) {} + +// Creates an action that returns 'value'. 'value' is passed by value +// instead of const reference - otherwise Return("string literal") +// will trigger a compiler error about using array as initializer. +template +internal::ReturnAction Return(R value) { + return internal::ReturnAction(value); +} + +// Creates an action that returns NULL. +inline PolymorphicAction ReturnNull() { + return MakePolymorphicAction(internal::ReturnNullAction()); +} + +// Creates an action that returns from a void function. +inline PolymorphicAction Return() { + return MakePolymorphicAction(internal::ReturnVoidAction()); +} + +// Creates an action that returns the reference to a variable. +template +inline internal::ReturnRefAction ReturnRef(R& x) { // NOLINT + return internal::ReturnRefAction(x); +} + +// Creates an action that returns the reference to a copy of the +// argument. The copy is created when the action is constructed and +// lives as long as the action. +template +inline internal::ReturnRefOfCopyAction ReturnRefOfCopy(const R& x) { + return internal::ReturnRefOfCopyAction(x); +} + +// Creates an action that does the default action for the give mock function. +inline internal::DoDefaultAction DoDefault() { + return internal::DoDefaultAction(); +} + +// Creates an action that sets the variable pointed by the N-th +// (0-based) function argument to 'value'. +template +PolymorphicAction< + internal::SetArgumentPointeeAction< + N, T, internal::IsAProtocolMessage::value> > +SetArgPointee(const T& x) { + return MakePolymorphicAction(internal::SetArgumentPointeeAction< + N, T, internal::IsAProtocolMessage::value>(x)); +} + +#if !((GTEST_GCC_VER_ && GTEST_GCC_VER_ < 40000) || GTEST_OS_SYMBIAN) +// This overload allows SetArgPointee() to accept a string literal. 
+// GCC prior to the version 4.0 and Symbian C++ compiler cannot distinguish +// this overload from the templated version and emit a compile error. +template +PolymorphicAction< + internal::SetArgumentPointeeAction > +SetArgPointee(const char* p) { + return MakePolymorphicAction(internal::SetArgumentPointeeAction< + N, const char*, false>(p)); +} + +template +PolymorphicAction< + internal::SetArgumentPointeeAction > +SetArgPointee(const wchar_t* p) { + return MakePolymorphicAction(internal::SetArgumentPointeeAction< + N, const wchar_t*, false>(p)); +} +#endif + +// The following version is DEPRECATED. +template +PolymorphicAction< + internal::SetArgumentPointeeAction< + N, T, internal::IsAProtocolMessage::value> > +SetArgumentPointee(const T& x) { + return MakePolymorphicAction(internal::SetArgumentPointeeAction< + N, T, internal::IsAProtocolMessage::value>(x)); +} + +// Creates an action that sets a pointer referent to a given value. +template +PolymorphicAction > Assign(T1* ptr, T2 val) { + return MakePolymorphicAction(internal::AssignAction(ptr, val)); +} + +#if !GTEST_OS_WINDOWS_MOBILE + +// Creates an action that sets errno and returns the appropriate error. +template +PolymorphicAction > +SetErrnoAndReturn(int errval, T result) { + return MakePolymorphicAction( + internal::SetErrnoAndReturnAction(errval, result)); +} + +#endif // !GTEST_OS_WINDOWS_MOBILE + +// Various overloads for InvokeWithoutArgs(). + +// Creates an action that invokes 'function_impl' with no argument. +template +PolymorphicAction > +InvokeWithoutArgs(FunctionImpl function_impl) { + return MakePolymorphicAction( + internal::InvokeWithoutArgsAction(function_impl)); +} + +// Creates an action that invokes the given method on the given object +// with no argument. 
+template +PolymorphicAction > +InvokeWithoutArgs(Class* obj_ptr, MethodPtr method_ptr) { + return MakePolymorphicAction( + internal::InvokeMethodWithoutArgsAction( + obj_ptr, method_ptr)); +} + +// Creates an action that performs an_action and throws away its +// result. In other words, it changes the return type of an_action to +// void. an_action MUST NOT return void, or the code won't compile. +template +inline internal::IgnoreResultAction IgnoreResult(const A& an_action) { + return internal::IgnoreResultAction(an_action); +} + +// Creates a reference wrapper for the given L-value. If necessary, +// you can explicitly specify the type of the reference. For example, +// suppose 'derived' is an object of type Derived, ByRef(derived) +// would wrap a Derived&. If you want to wrap a const Base& instead, +// where Base is a base class of Derived, just write: +// +// ByRef(derived) +template +inline internal::ReferenceWrapper ByRef(T& l_value) { // NOLINT + return internal::ReferenceWrapper(l_value); +} + +} // namespace testing + +#endif // GMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_ diff --git a/src/gmock/include/gmock/gmock-cardinalities.h b/src/gmock/include/gmock/gmock-cardinalities.h new file mode 100644 index 00000000000..fc315f92ab5 --- /dev/null +++ b/src/gmock/include/gmock/gmock-cardinalities.h @@ -0,0 +1,147 @@ +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Google Mock - a framework for writing C++ mock classes. +// +// This file implements some commonly used cardinalities. More +// cardinalities can be defined by the user implementing the +// CardinalityInterface interface if necessary. + +#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_ +#define GMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_ + +#include +#include // NOLINT +#include "gmock/internal/gmock-port.h" +#include "gtest/gtest.h" + +namespace testing { + +// To implement a cardinality Foo, define: +// 1. a class FooCardinality that implements the +// CardinalityInterface interface, and +// 2. a factory function that creates a Cardinality object from a +// const FooCardinality*. +// +// The two-level delegation design follows that of Matcher, providing +// consistency for extension developers. It also eases ownership +// management as Cardinality objects can now be copied like plain values. + +// The implementation of a cardinality. 
+class CardinalityInterface { + public: + virtual ~CardinalityInterface() {} + + // Conservative estimate on the lower/upper bound of the number of + // calls allowed. + virtual int ConservativeLowerBound() const { return 0; } + virtual int ConservativeUpperBound() const { return INT_MAX; } + + // Returns true iff call_count calls will satisfy this cardinality. + virtual bool IsSatisfiedByCallCount(int call_count) const = 0; + + // Returns true iff call_count calls will saturate this cardinality. + virtual bool IsSaturatedByCallCount(int call_count) const = 0; + + // Describes self to an ostream. + virtual void DescribeTo(::std::ostream* os) const = 0; +}; + +// A Cardinality is a copyable and IMMUTABLE (except by assignment) +// object that specifies how many times a mock function is expected to +// be called. The implementation of Cardinality is just a linked_ptr +// to const CardinalityInterface, so copying is fairly cheap. +// Don't inherit from Cardinality! +class GTEST_API_ Cardinality { + public: + // Constructs a null cardinality. Needed for storing Cardinality + // objects in STL containers. + Cardinality() {} + + // Constructs a Cardinality from its implementation. + explicit Cardinality(const CardinalityInterface* impl) : impl_(impl) {} + + // Conservative estimate on the lower/upper bound of the number of + // calls allowed. + int ConservativeLowerBound() const { return impl_->ConservativeLowerBound(); } + int ConservativeUpperBound() const { return impl_->ConservativeUpperBound(); } + + // Returns true iff call_count calls will satisfy this cardinality. + bool IsSatisfiedByCallCount(int call_count) const { + return impl_->IsSatisfiedByCallCount(call_count); + } + + // Returns true iff call_count calls will saturate this cardinality. + bool IsSaturatedByCallCount(int call_count) const { + return impl_->IsSaturatedByCallCount(call_count); + } + + // Returns true iff call_count calls will over-saturate this + // cardinality, i.e. 
exceed the maximum number of allowed calls. + bool IsOverSaturatedByCallCount(int call_count) const { + return impl_->IsSaturatedByCallCount(call_count) && + !impl_->IsSatisfiedByCallCount(call_count); + } + + // Describes self to an ostream + void DescribeTo(::std::ostream* os) const { impl_->DescribeTo(os); } + + // Describes the given actual call count to an ostream. + static void DescribeActualCallCountTo(int actual_call_count, + ::std::ostream* os); + + private: + internal::linked_ptr impl_; +}; + +// Creates a cardinality that allows at least n calls. +GTEST_API_ Cardinality AtLeast(int n); + +// Creates a cardinality that allows at most n calls. +GTEST_API_ Cardinality AtMost(int n); + +// Creates a cardinality that allows any number of calls. +GTEST_API_ Cardinality AnyNumber(); + +// Creates a cardinality that allows between min and max calls. +GTEST_API_ Cardinality Between(int min, int max); + +// Creates a cardinality that allows exactly n calls. +GTEST_API_ Cardinality Exactly(int n); + +// Creates a cardinality from its implementation. +inline Cardinality MakeCardinality(const CardinalityInterface* c) { + return Cardinality(c); +} + +} // namespace testing + +#endif // GMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_ diff --git a/src/gmock/include/gmock/gmock-generated-actions.h b/src/gmock/include/gmock/gmock-generated-actions.h new file mode 100644 index 00000000000..2327393d6b1 --- /dev/null +++ b/src/gmock/include/gmock/gmock-generated-actions.h @@ -0,0 +1,2415 @@ +// This file was GENERATED by a script. DO NOT EDIT BY HAND!!! + +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Google Mock - a framework for writing C++ mock classes. +// +// This file implements some commonly used variadic actions. + +#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_ +#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_ + +#include "gmock/gmock-actions.h" +#include "gmock/internal/gmock-port.h" + +namespace testing { +namespace internal { + +// InvokeHelper knows how to unpack an N-tuple and invoke an N-ary +// function or method with the unpacked values, where F is a function +// type that takes N arguments. 
+template +class InvokeHelper; + +template +class InvokeHelper > { + public: + template + static R Invoke(Function function, const ::std::tr1::tuple<>&) { + return function(); + } + + template + static R InvokeMethod(Class* obj_ptr, + MethodPtr method_ptr, + const ::std::tr1::tuple<>&) { + return (obj_ptr->*method_ptr)(); + } +}; + +template +class InvokeHelper > { + public: + template + static R Invoke(Function function, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return function(get<0>(args)); + } + + template + static R InvokeMethod(Class* obj_ptr, + MethodPtr method_ptr, + const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return (obj_ptr->*method_ptr)(get<0>(args)); + } +}; + +template +class InvokeHelper > { + public: + template + static R Invoke(Function function, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return function(get<0>(args), get<1>(args)); + } + + template + static R InvokeMethod(Class* obj_ptr, + MethodPtr method_ptr, + const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args)); + } +}; + +template +class InvokeHelper > { + public: + template + static R Invoke(Function function, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return function(get<0>(args), get<1>(args), get<2>(args)); + } + + template + static R InvokeMethod(Class* obj_ptr, + MethodPtr method_ptr, + const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), get<2>(args)); + } +}; + +template +class InvokeHelper > { + public: + template + static R Invoke(Function function, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return function(get<0>(args), get<1>(args), get<2>(args), get<3>(args)); + } + + template + static R InvokeMethod(Class* obj_ptr, + MethodPtr method_ptr, + const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), 
get<2>(args), + get<3>(args)); + } +}; + +template +class InvokeHelper > { + public: + template + static R Invoke(Function function, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return function(get<0>(args), get<1>(args), get<2>(args), get<3>(args), + get<4>(args)); + } + + template + static R InvokeMethod(Class* obj_ptr, + MethodPtr method_ptr, + const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), get<2>(args), + get<3>(args), get<4>(args)); + } +}; + +template +class InvokeHelper > { + public: + template + static R Invoke(Function function, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return function(get<0>(args), get<1>(args), get<2>(args), get<3>(args), + get<4>(args), get<5>(args)); + } + + template + static R InvokeMethod(Class* obj_ptr, + MethodPtr method_ptr, + const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), get<2>(args), + get<3>(args), get<4>(args), get<5>(args)); + } +}; + +template +class InvokeHelper > { + public: + template + static R Invoke(Function function, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return function(get<0>(args), get<1>(args), get<2>(args), get<3>(args), + get<4>(args), get<5>(args), get<6>(args)); + } + + template + static R InvokeMethod(Class* obj_ptr, + MethodPtr method_ptr, + const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), get<2>(args), + get<3>(args), get<4>(args), get<5>(args), get<6>(args)); + } +}; + +template +class InvokeHelper > { + public: + template + static R Invoke(Function function, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return function(get<0>(args), get<1>(args), get<2>(args), get<3>(args), + get<4>(args), get<5>(args), get<6>(args), get<7>(args)); + } + + template + static R InvokeMethod(Class* obj_ptr, + MethodPtr method_ptr, + const 
::std::tr1::tuple& args) { + using ::std::tr1::get; + return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), get<2>(args), + get<3>(args), get<4>(args), get<5>(args), get<6>(args), get<7>(args)); + } +}; + +template +class InvokeHelper > { + public: + template + static R Invoke(Function function, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return function(get<0>(args), get<1>(args), get<2>(args), get<3>(args), + get<4>(args), get<5>(args), get<6>(args), get<7>(args), get<8>(args)); + } + + template + static R InvokeMethod(Class* obj_ptr, + MethodPtr method_ptr, + const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), get<2>(args), + get<3>(args), get<4>(args), get<5>(args), get<6>(args), get<7>(args), + get<8>(args)); + } +}; + +template +class InvokeHelper > { + public: + template + static R Invoke(Function function, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return function(get<0>(args), get<1>(args), get<2>(args), get<3>(args), + get<4>(args), get<5>(args), get<6>(args), get<7>(args), get<8>(args), + get<9>(args)); + } + + template + static R InvokeMethod(Class* obj_ptr, + MethodPtr method_ptr, + const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return (obj_ptr->*method_ptr)(get<0>(args), get<1>(args), get<2>(args), + get<3>(args), get<4>(args), get<5>(args), get<6>(args), get<7>(args), + get<8>(args), get<9>(args)); + } +}; + +// CallableHelper has static methods for invoking "callables", +// i.e. function pointers and functors. It uses overloading to +// provide a uniform interface for invoking different kinds of +// callables. In particular, you can use: +// +// CallableHelper::Call(callable, a1, a2, ..., an) +// +// to invoke an n-ary callable, where R is its return type. If an +// argument, say a2, needs to be passed by reference, you should write +// ByRef(a2) instead of a2 in the above expression. 
+template +class CallableHelper { + public: + // Calls a nullary callable. + template + static R Call(Function function) { return function(); } + + // Calls a unary callable. + + // We deliberately pass a1 by value instead of const reference here + // in case it is a C-string literal. If we had declared the + // parameter as 'const A1& a1' and write Call(function, "Hi"), the + // compiler would've thought A1 is 'char[3]', which causes trouble + // when you need to copy a value of type A1. By declaring the + // parameter as 'A1 a1', the compiler will correctly infer that A1 + // is 'const char*' when it sees Call(function, "Hi"). + // + // Since this function is defined inline, the compiler can get rid + // of the copying of the arguments. Therefore the performance won't + // be hurt. + template + static R Call(Function function, A1 a1) { return function(a1); } + + // Calls a binary callable. + template + static R Call(Function function, A1 a1, A2 a2) { + return function(a1, a2); + } + + // Calls a ternary callable. + template + static R Call(Function function, A1 a1, A2 a2, A3 a3) { + return function(a1, a2, a3); + } + + // Calls a 4-ary callable. + template + static R Call(Function function, A1 a1, A2 a2, A3 a3, A4 a4) { + return function(a1, a2, a3, a4); + } + + // Calls a 5-ary callable. + template + static R Call(Function function, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5) { + return function(a1, a2, a3, a4, a5); + } + + // Calls a 6-ary callable. + template + static R Call(Function function, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6) { + return function(a1, a2, a3, a4, a5, a6); + } + + // Calls a 7-ary callable. + template + static R Call(Function function, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, + A7 a7) { + return function(a1, a2, a3, a4, a5, a6, a7); + } + + // Calls a 8-ary callable. 
+ template + static R Call(Function function, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, + A7 a7, A8 a8) { + return function(a1, a2, a3, a4, a5, a6, a7, a8); + } + + // Calls a 9-ary callable. + template + static R Call(Function function, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, + A7 a7, A8 a8, A9 a9) { + return function(a1, a2, a3, a4, a5, a6, a7, a8, a9); + } + + // Calls a 10-ary callable. + template + static R Call(Function function, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, + A7 a7, A8 a8, A9 a9, A10 a10) { + return function(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10); + } +}; // class CallableHelper + +// An INTERNAL macro for extracting the type of a tuple field. It's +// subject to change without notice - DO NOT USE IN USER CODE! +#define GMOCK_FIELD_(Tuple, N) \ + typename ::std::tr1::tuple_element::type + +// SelectArgs::type is the +// type of an n-ary function whose i-th (1-based) argument type is the +// k{i}-th (0-based) field of ArgumentTuple, which must be a tuple +// type, and whose return type is Result. For example, +// SelectArgs, 0, 3>::type +// is int(bool, long). +// +// SelectArgs::Select(args) +// returns the selected fields (k1, k2, ..., k_n) of args as a tuple. +// For example, +// SelectArgs, 2, 0>::Select( +// ::std::tr1::make_tuple(true, 'a', 2.5)) +// returns ::std::tr1::tuple (2.5, true). +// +// The numbers in list k1, k2, ..., k_n must be >= 0, where n can be +// in the range [0, 10]. Duplicates are allowed and they don't have +// to be in an ascending or descending order. 
+ +template +class SelectArgs { + public: + typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1), + GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3), + GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5), + GMOCK_FIELD_(ArgumentTuple, k6), GMOCK_FIELD_(ArgumentTuple, k7), + GMOCK_FIELD_(ArgumentTuple, k8), GMOCK_FIELD_(ArgumentTuple, k9), + GMOCK_FIELD_(ArgumentTuple, k10)); + typedef typename Function::ArgumentTuple SelectedArgs; + static SelectedArgs Select(const ArgumentTuple& args) { + using ::std::tr1::get; + return SelectedArgs(get(args), get(args), get(args), + get(args), get(args), get(args), get(args), + get(args), get(args), get(args)); + } +}; + +template +class SelectArgs { + public: + typedef Result type(); + typedef typename Function::ArgumentTuple SelectedArgs; + static SelectedArgs Select(const ArgumentTuple& /* args */) { + using ::std::tr1::get; + return SelectedArgs(); + } +}; + +template +class SelectArgs { + public: + typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1)); + typedef typename Function::ArgumentTuple SelectedArgs; + static SelectedArgs Select(const ArgumentTuple& args) { + using ::std::tr1::get; + return SelectedArgs(get(args)); + } +}; + +template +class SelectArgs { + public: + typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1), + GMOCK_FIELD_(ArgumentTuple, k2)); + typedef typename Function::ArgumentTuple SelectedArgs; + static SelectedArgs Select(const ArgumentTuple& args) { + using ::std::tr1::get; + return SelectedArgs(get(args), get(args)); + } +}; + +template +class SelectArgs { + public: + typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1), + GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3)); + typedef typename Function::ArgumentTuple SelectedArgs; + static SelectedArgs Select(const ArgumentTuple& args) { + using ::std::tr1::get; + return SelectedArgs(get(args), get(args), get(args)); + } +}; + +template +class SelectArgs { + public: + typedef Result 
type(GMOCK_FIELD_(ArgumentTuple, k1), + GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3), + GMOCK_FIELD_(ArgumentTuple, k4)); + typedef typename Function::ArgumentTuple SelectedArgs; + static SelectedArgs Select(const ArgumentTuple& args) { + using ::std::tr1::get; + return SelectedArgs(get(args), get(args), get(args), + get(args)); + } +}; + +template +class SelectArgs { + public: + typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1), + GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3), + GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5)); + typedef typename Function::ArgumentTuple SelectedArgs; + static SelectedArgs Select(const ArgumentTuple& args) { + using ::std::tr1::get; + return SelectedArgs(get(args), get(args), get(args), + get(args), get(args)); + } +}; + +template +class SelectArgs { + public: + typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1), + GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3), + GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5), + GMOCK_FIELD_(ArgumentTuple, k6)); + typedef typename Function::ArgumentTuple SelectedArgs; + static SelectedArgs Select(const ArgumentTuple& args) { + using ::std::tr1::get; + return SelectedArgs(get(args), get(args), get(args), + get(args), get(args), get(args)); + } +}; + +template +class SelectArgs { + public: + typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1), + GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3), + GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5), + GMOCK_FIELD_(ArgumentTuple, k6), GMOCK_FIELD_(ArgumentTuple, k7)); + typedef typename Function::ArgumentTuple SelectedArgs; + static SelectedArgs Select(const ArgumentTuple& args) { + using ::std::tr1::get; + return SelectedArgs(get(args), get(args), get(args), + get(args), get(args), get(args), get(args)); + } +}; + +template +class SelectArgs { + public: + typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1), + 
GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3), + GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5), + GMOCK_FIELD_(ArgumentTuple, k6), GMOCK_FIELD_(ArgumentTuple, k7), + GMOCK_FIELD_(ArgumentTuple, k8)); + typedef typename Function::ArgumentTuple SelectedArgs; + static SelectedArgs Select(const ArgumentTuple& args) { + using ::std::tr1::get; + return SelectedArgs(get(args), get(args), get(args), + get(args), get(args), get(args), get(args), + get(args)); + } +}; + +template +class SelectArgs { + public: + typedef Result type(GMOCK_FIELD_(ArgumentTuple, k1), + GMOCK_FIELD_(ArgumentTuple, k2), GMOCK_FIELD_(ArgumentTuple, k3), + GMOCK_FIELD_(ArgumentTuple, k4), GMOCK_FIELD_(ArgumentTuple, k5), + GMOCK_FIELD_(ArgumentTuple, k6), GMOCK_FIELD_(ArgumentTuple, k7), + GMOCK_FIELD_(ArgumentTuple, k8), GMOCK_FIELD_(ArgumentTuple, k9)); + typedef typename Function::ArgumentTuple SelectedArgs; + static SelectedArgs Select(const ArgumentTuple& args) { + using ::std::tr1::get; + return SelectedArgs(get(args), get(args), get(args), + get(args), get(args), get(args), get(args), + get(args), get(args)); + } +}; + +#undef GMOCK_FIELD_ + +// Implements the WithArgs action. 
+template +class WithArgsAction { + public: + explicit WithArgsAction(const InnerAction& action) : action_(action) {} + + template + operator Action() const { return MakeAction(new Impl(action_)); } + + private: + template + class Impl : public ActionInterface { + public: + typedef typename Function::Result Result; + typedef typename Function::ArgumentTuple ArgumentTuple; + + explicit Impl(const InnerAction& action) : action_(action) {} + + virtual Result Perform(const ArgumentTuple& args) { + return action_.Perform(SelectArgs::Select(args)); + } + + private: + typedef typename SelectArgs::type InnerFunctionType; + + Action action_; + }; + + const InnerAction action_; + + GTEST_DISALLOW_ASSIGN_(WithArgsAction); +}; + +// A macro from the ACTION* family (defined later in this file) +// defines an action that can be used in a mock function. Typically, +// these actions only care about a subset of the arguments of the mock +// function. For example, if such an action only uses the second +// argument, it can be used in any mock function that takes >= 2 +// arguments where the type of the second argument is compatible. +// +// Therefore, the action implementation must be prepared to take more +// arguments than it needs. The ExcessiveArg type is used to +// represent those excessive arguments. In order to keep the compiler +// error messages tractable, we define it in the testing namespace +// instead of testing::internal. However, this is an INTERNAL TYPE +// and subject to change without notice, so a user MUST NOT USE THIS +// TYPE DIRECTLY. +struct ExcessiveArg {}; + +// A helper class needed for implementing the ACTION* macros. 
+template +class ActionHelper { + public: + static Result Perform(Impl* impl, const ::std::tr1::tuple<>& args) { + using ::std::tr1::get; + return impl->template gmock_PerformImpl<>(args, ExcessiveArg(), + ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), + ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), + ExcessiveArg()); + } + + template + static Result Perform(Impl* impl, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return impl->template gmock_PerformImpl(args, get<0>(args), + ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), + ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), + ExcessiveArg()); + } + + template + static Result Perform(Impl* impl, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return impl->template gmock_PerformImpl(args, get<0>(args), + get<1>(args), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), + ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), + ExcessiveArg()); + } + + template + static Result Perform(Impl* impl, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return impl->template gmock_PerformImpl(args, get<0>(args), + get<1>(args), get<2>(args), ExcessiveArg(), ExcessiveArg(), + ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), + ExcessiveArg()); + } + + template + static Result Perform(Impl* impl, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return impl->template gmock_PerformImpl(args, get<0>(args), + get<1>(args), get<2>(args), get<3>(args), ExcessiveArg(), + ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), + ExcessiveArg()); + } + + template + static Result Perform(Impl* impl, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return impl->template gmock_PerformImpl(args, + get<0>(args), get<1>(args), get<2>(args), get<3>(args), get<4>(args), + ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), + ExcessiveArg()); + } + + template + static Result 
Perform(Impl* impl, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return impl->template gmock_PerformImpl(args, + get<0>(args), get<1>(args), get<2>(args), get<3>(args), get<4>(args), + get<5>(args), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), + ExcessiveArg()); + } + + template + static Result Perform(Impl* impl, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return impl->template gmock_PerformImpl(args, + get<0>(args), get<1>(args), get<2>(args), get<3>(args), get<4>(args), + get<5>(args), get<6>(args), ExcessiveArg(), ExcessiveArg(), + ExcessiveArg()); + } + + template + static Result Perform(Impl* impl, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return impl->template gmock_PerformImpl(args, get<0>(args), get<1>(args), get<2>(args), get<3>(args), + get<4>(args), get<5>(args), get<6>(args), get<7>(args), ExcessiveArg(), + ExcessiveArg()); + } + + template + static Result Perform(Impl* impl, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return impl->template gmock_PerformImpl(args, get<0>(args), get<1>(args), get<2>(args), get<3>(args), + get<4>(args), get<5>(args), get<6>(args), get<7>(args), get<8>(args), + ExcessiveArg()); + } + + template + static Result Perform(Impl* impl, const ::std::tr1::tuple& args) { + using ::std::tr1::get; + return impl->template gmock_PerformImpl(args, get<0>(args), get<1>(args), get<2>(args), get<3>(args), + get<4>(args), get<5>(args), get<6>(args), get<7>(args), get<8>(args), + get<9>(args)); + } +}; + +} // namespace internal + +// Various overloads for Invoke(). + +// WithArgs(an_action) creates an action that passes +// the selected arguments of the mock function to an_action and +// performs it. It serves as an adaptor between actions with +// different argument lists. C++ doesn't support default arguments for +// function templates, so we have to overload it. 
+template +inline internal::WithArgsAction +WithArgs(const InnerAction& action) { + return internal::WithArgsAction(action); +} + +template +inline internal::WithArgsAction +WithArgs(const InnerAction& action) { + return internal::WithArgsAction(action); +} + +template +inline internal::WithArgsAction +WithArgs(const InnerAction& action) { + return internal::WithArgsAction(action); +} + +template +inline internal::WithArgsAction +WithArgs(const InnerAction& action) { + return internal::WithArgsAction(action); +} + +template +inline internal::WithArgsAction +WithArgs(const InnerAction& action) { + return internal::WithArgsAction(action); +} + +template +inline internal::WithArgsAction +WithArgs(const InnerAction& action) { + return internal::WithArgsAction(action); +} + +template +inline internal::WithArgsAction +WithArgs(const InnerAction& action) { + return internal::WithArgsAction(action); +} + +template +inline internal::WithArgsAction +WithArgs(const InnerAction& action) { + return internal::WithArgsAction(action); +} + +template +inline internal::WithArgsAction +WithArgs(const InnerAction& action) { + return internal::WithArgsAction(action); +} + +template +inline internal::WithArgsAction +WithArgs(const InnerAction& action) { + return internal::WithArgsAction(action); +} + +// Creates an action that does actions a1, a2, ..., sequentially in +// each invocation. 
+template +inline internal::DoBothAction +DoAll(Action1 a1, Action2 a2) { + return internal::DoBothAction(a1, a2); +} + +template +inline internal::DoBothAction > +DoAll(Action1 a1, Action2 a2, Action3 a3) { + return DoAll(a1, DoAll(a2, a3)); +} + +template +inline internal::DoBothAction > > +DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4) { + return DoAll(a1, DoAll(a2, a3, a4)); +} + +template +inline internal::DoBothAction > > > +DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5) { + return DoAll(a1, DoAll(a2, a3, a4, a5)); +} + +template +inline internal::DoBothAction > > > > +DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5, Action6 a6) { + return DoAll(a1, DoAll(a2, a3, a4, a5, a6)); +} + +template +inline internal::DoBothAction > > > > > +DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5, Action6 a6, + Action7 a7) { + return DoAll(a1, DoAll(a2, a3, a4, a5, a6, a7)); +} + +template +inline internal::DoBothAction > > > > > > +DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5, Action6 a6, + Action7 a7, Action8 a8) { + return DoAll(a1, DoAll(a2, a3, a4, a5, a6, a7, a8)); +} + +template +inline internal::DoBothAction > > > > > > > +DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5, Action6 a6, + Action7 a7, Action8 a8, Action9 a9) { + return DoAll(a1, DoAll(a2, a3, a4, a5, a6, a7, a8, a9)); +} + +template +inline internal::DoBothAction > > > > > > > > +DoAll(Action1 a1, Action2 a2, Action3 a3, Action4 a4, Action5 a5, Action6 a6, + Action7 a7, Action8 a8, Action9 a9, Action10 a10) { + return DoAll(a1, DoAll(a2, a3, a4, a5, a6, a7, a8, a9, a10)); +} + +} // namespace testing + +// The ACTION* family of macros can be used in a namespace scope to +// define custom actions easily. The syntax: +// +// ACTION(name) { statements; } +// +// will define an action with the given name that executes the +// statements. 
The value returned by the statements will be used as +// the return value of the action. Inside the statements, you can +// refer to the K-th (0-based) argument of the mock function by +// 'argK', and refer to its type by 'argK_type'. For example: +// +// ACTION(IncrementArg1) { +// arg1_type temp = arg1; +// return ++(*temp); +// } +// +// allows you to write +// +// ...WillOnce(IncrementArg1()); +// +// You can also refer to the entire argument tuple and its type by +// 'args' and 'args_type', and refer to the mock function type and its +// return type by 'function_type' and 'return_type'. +// +// Note that you don't need to specify the types of the mock function +// arguments. However rest assured that your code is still type-safe: +// you'll get a compiler error if *arg1 doesn't support the ++ +// operator, or if the type of ++(*arg1) isn't compatible with the +// mock function's return type, for example. +// +// Sometimes you'll want to parameterize the action. For that you can use +// another macro: +// +// ACTION_P(name, param_name) { statements; } +// +// For example: +// +// ACTION_P(Add, n) { return arg0 + n; } +// +// will allow you to write: +// +// ...WillOnce(Add(5)); +// +// Note that you don't need to provide the type of the parameter +// either. If you need to reference the type of a parameter named +// 'foo', you can write 'foo_type'. For example, in the body of +// ACTION_P(Add, n) above, you can write 'n_type' to refer to the type +// of 'n'. +// +// We also provide ACTION_P2, ACTION_P3, ..., up to ACTION_P10 to support +// multi-parameter actions. +// +// For the purpose of typing, you can view +// +// ACTION_Pk(Foo, p1, ..., pk) { ... } +// +// as shorthand for +// +// template +// FooActionPk Foo(p1_type p1, ..., pk_type pk) { ... 
} +// +// In particular, you can provide the template type arguments +// explicitly when invoking Foo(), as in Foo(5, false); +// although usually you can rely on the compiler to infer the types +// for you automatically. You can assign the result of expression +// Foo(p1, ..., pk) to a variable of type FooActionPk. This can be useful when composing actions. +// +// You can also overload actions with different numbers of parameters: +// +// ACTION_P(Plus, a) { ... } +// ACTION_P2(Plus, a, b) { ... } +// +// While it's tempting to always use the ACTION* macros when defining +// a new action, you should also consider implementing ActionInterface +// or using MakePolymorphicAction() instead, especially if you need to +// use the action a lot. While these approaches require more work, +// they give you more control on the types of the mock function +// arguments and the action parameters, which in general leads to +// better compiler error messages that pay off in the long run. They +// also allow overloading actions based on parameter types (as opposed +// to just based on the number of parameters). +// +// CAVEAT: +// +// ACTION*() can only be used in a namespace scope. The reason is +// that C++ doesn't yet allow function-local types to be used to +// instantiate templates. The up-coming C++0x standard will fix this. +// Once that's done, we'll consider supporting using ACTION*() inside +// a function. +// +// MORE INFORMATION: +// +// To learn more about using these macros, please search for 'ACTION' +// on http://code.google.com/p/googlemock/wiki/CookBook. + +// An internal macro needed for implementing ACTION*(). 
+#define GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_\ + const args_type& args GTEST_ATTRIBUTE_UNUSED_, \ + arg0_type arg0 GTEST_ATTRIBUTE_UNUSED_, \ + arg1_type arg1 GTEST_ATTRIBUTE_UNUSED_, \ + arg2_type arg2 GTEST_ATTRIBUTE_UNUSED_, \ + arg3_type arg3 GTEST_ATTRIBUTE_UNUSED_, \ + arg4_type arg4 GTEST_ATTRIBUTE_UNUSED_, \ + arg5_type arg5 GTEST_ATTRIBUTE_UNUSED_, \ + arg6_type arg6 GTEST_ATTRIBUTE_UNUSED_, \ + arg7_type arg7 GTEST_ATTRIBUTE_UNUSED_, \ + arg8_type arg8 GTEST_ATTRIBUTE_UNUSED_, \ + arg9_type arg9 GTEST_ATTRIBUTE_UNUSED_ + +// Sometimes you want to give an action explicit template parameters +// that cannot be inferred from its value parameters. ACTION() and +// ACTION_P*() don't support that. ACTION_TEMPLATE() remedies that +// and can be viewed as an extension to ACTION() and ACTION_P*(). +// +// The syntax: +// +// ACTION_TEMPLATE(ActionName, +// HAS_m_TEMPLATE_PARAMS(kind1, name1, ..., kind_m, name_m), +// AND_n_VALUE_PARAMS(p1, ..., p_n)) { statements; } +// +// defines an action template that takes m explicit template +// parameters and n value parameters. name_i is the name of the i-th +// template parameter, and kind_i specifies whether it's a typename, +// an integral constant, or a template. p_i is the name of the i-th +// value parameter. +// +// Example: +// +// // DuplicateArg(output) converts the k-th argument of the mock +// // function to type T and copies it to *output. +// ACTION_TEMPLATE(DuplicateArg, +// HAS_2_TEMPLATE_PARAMS(int, k, typename, T), +// AND_1_VALUE_PARAMS(output)) { +// *output = T(std::tr1::get(args)); +// } +// ... +// int n; +// EXPECT_CALL(mock, Foo(_, _)) +// .WillOnce(DuplicateArg<1, unsigned char>(&n)); +// +// To create an instance of an action template, write: +// +// ActionName(v1, ..., v_n) +// +// where the ts are the template arguments and the vs are the value +// arguments. The value argument types are inferred by the compiler. 
+// If you want to explicitly specify the value argument types, you can +// provide additional template arguments: +// +// ActionName(v1, ..., v_n) +// +// where u_i is the desired type of v_i. +// +// ACTION_TEMPLATE and ACTION/ACTION_P* can be overloaded on the +// number of value parameters, but not on the number of template +// parameters. Without the restriction, the meaning of the following +// is unclear: +// +// OverloadedAction(x); +// +// Are we using a single-template-parameter action where 'bool' refers +// to the type of x, or are we using a two-template-parameter action +// where the compiler is asked to infer the type of x? +// +// Implementation notes: +// +// GMOCK_INTERNAL_*_HAS_m_TEMPLATE_PARAMS and +// GMOCK_INTERNAL_*_AND_n_VALUE_PARAMS are internal macros for +// implementing ACTION_TEMPLATE. The main trick we use is to create +// new macro invocations when expanding a macro. For example, we have +// +// #define ACTION_TEMPLATE(name, template_params, value_params) +// ... GMOCK_INTERNAL_DECL_##template_params ... +// +// which causes ACTION_TEMPLATE(..., HAS_1_TEMPLATE_PARAMS(typename, T), ...) +// to expand to +// +// ... GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS(typename, T) ... +// +// Since GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS is a macro, the +// preprocessor will continue to expand it to +// +// ... typename T ... +// +// This technique conforms to the C++ standard and is portable. It +// allows us to implement action templates using O(N) code, where N is +// the maximum number of template/value parameters supported. Without +// using it, we'd have to devote O(N^2) amount of code to implement all +// combinations of m and n. + +// Declares the template parameters. 
+#define GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS(kind0, name0) kind0 name0 +#define GMOCK_INTERNAL_DECL_HAS_2_TEMPLATE_PARAMS(kind0, name0, kind1, \ + name1) kind0 name0, kind1 name1 +#define GMOCK_INTERNAL_DECL_HAS_3_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ + kind2, name2) kind0 name0, kind1 name1, kind2 name2 +#define GMOCK_INTERNAL_DECL_HAS_4_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ + kind2, name2, kind3, name3) kind0 name0, kind1 name1, kind2 name2, \ + kind3 name3 +#define GMOCK_INTERNAL_DECL_HAS_5_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ + kind2, name2, kind3, name3, kind4, name4) kind0 name0, kind1 name1, \ + kind2 name2, kind3 name3, kind4 name4 +#define GMOCK_INTERNAL_DECL_HAS_6_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ + kind2, name2, kind3, name3, kind4, name4, kind5, name5) kind0 name0, \ + kind1 name1, kind2 name2, kind3 name3, kind4 name4, kind5 name5 +#define GMOCK_INTERNAL_DECL_HAS_7_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ + kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \ + name6) kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4, \ + kind5 name5, kind6 name6 +#define GMOCK_INTERNAL_DECL_HAS_8_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ + kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \ + kind7, name7) kind0 name0, kind1 name1, kind2 name2, kind3 name3, \ + kind4 name4, kind5 name5, kind6 name6, kind7 name7 +#define GMOCK_INTERNAL_DECL_HAS_9_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ + kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \ + kind7, name7, kind8, name8) kind0 name0, kind1 name1, kind2 name2, \ + kind3 name3, kind4 name4, kind5 name5, kind6 name6, kind7 name7, \ + kind8 name8 +#define GMOCK_INTERNAL_DECL_HAS_10_TEMPLATE_PARAMS(kind0, name0, kind1, \ + name1, kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \ + name6, kind7, name7, kind8, name8, kind9, name9) kind0 name0, \ + kind1 name1, kind2 name2, kind3 name3, kind4 
name4, kind5 name5, \ + kind6 name6, kind7 name7, kind8 name8, kind9 name9 + +// Lists the template parameters. +#define GMOCK_INTERNAL_LIST_HAS_1_TEMPLATE_PARAMS(kind0, name0) name0 +#define GMOCK_INTERNAL_LIST_HAS_2_TEMPLATE_PARAMS(kind0, name0, kind1, \ + name1) name0, name1 +#define GMOCK_INTERNAL_LIST_HAS_3_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ + kind2, name2) name0, name1, name2 +#define GMOCK_INTERNAL_LIST_HAS_4_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ + kind2, name2, kind3, name3) name0, name1, name2, name3 +#define GMOCK_INTERNAL_LIST_HAS_5_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ + kind2, name2, kind3, name3, kind4, name4) name0, name1, name2, name3, \ + name4 +#define GMOCK_INTERNAL_LIST_HAS_6_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ + kind2, name2, kind3, name3, kind4, name4, kind5, name5) name0, name1, \ + name2, name3, name4, name5 +#define GMOCK_INTERNAL_LIST_HAS_7_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ + kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \ + name6) name0, name1, name2, name3, name4, name5, name6 +#define GMOCK_INTERNAL_LIST_HAS_8_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ + kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \ + kind7, name7) name0, name1, name2, name3, name4, name5, name6, name7 +#define GMOCK_INTERNAL_LIST_HAS_9_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ + kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \ + kind7, name7, kind8, name8) name0, name1, name2, name3, name4, name5, \ + name6, name7, name8 +#define GMOCK_INTERNAL_LIST_HAS_10_TEMPLATE_PARAMS(kind0, name0, kind1, \ + name1, kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \ + name6, kind7, name7, kind8, name8, kind9, name9) name0, name1, name2, \ + name3, name4, name5, name6, name7, name8, name9 + +// Declares the types of value parameters. 
+#define GMOCK_INTERNAL_DECL_TYPE_AND_0_VALUE_PARAMS() +#define GMOCK_INTERNAL_DECL_TYPE_AND_1_VALUE_PARAMS(p0) , typename p0##_type +#define GMOCK_INTERNAL_DECL_TYPE_AND_2_VALUE_PARAMS(p0, p1) , \ + typename p0##_type, typename p1##_type +#define GMOCK_INTERNAL_DECL_TYPE_AND_3_VALUE_PARAMS(p0, p1, p2) , \ + typename p0##_type, typename p1##_type, typename p2##_type +#define GMOCK_INTERNAL_DECL_TYPE_AND_4_VALUE_PARAMS(p0, p1, p2, p3) , \ + typename p0##_type, typename p1##_type, typename p2##_type, \ + typename p3##_type +#define GMOCK_INTERNAL_DECL_TYPE_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) , \ + typename p0##_type, typename p1##_type, typename p2##_type, \ + typename p3##_type, typename p4##_type +#define GMOCK_INTERNAL_DECL_TYPE_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) , \ + typename p0##_type, typename p1##_type, typename p2##_type, \ + typename p3##_type, typename p4##_type, typename p5##_type +#define GMOCK_INTERNAL_DECL_TYPE_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ + p6) , typename p0##_type, typename p1##_type, typename p2##_type, \ + typename p3##_type, typename p4##_type, typename p5##_type, \ + typename p6##_type +#define GMOCK_INTERNAL_DECL_TYPE_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ + p6, p7) , typename p0##_type, typename p1##_type, typename p2##_type, \ + typename p3##_type, typename p4##_type, typename p5##_type, \ + typename p6##_type, typename p7##_type +#define GMOCK_INTERNAL_DECL_TYPE_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ + p6, p7, p8) , typename p0##_type, typename p1##_type, typename p2##_type, \ + typename p3##_type, typename p4##_type, typename p5##_type, \ + typename p6##_type, typename p7##_type, typename p8##_type +#define GMOCK_INTERNAL_DECL_TYPE_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ + p6, p7, p8, p9) , typename p0##_type, typename p1##_type, \ + typename p2##_type, typename p3##_type, typename p4##_type, \ + typename p5##_type, typename p6##_type, typename p7##_type, \ + typename p8##_type, typename p9##_type 
+ +// Initializes the value parameters. +#define GMOCK_INTERNAL_INIT_AND_0_VALUE_PARAMS()\ + () +#define GMOCK_INTERNAL_INIT_AND_1_VALUE_PARAMS(p0)\ + (p0##_type gmock_p0) : p0(gmock_p0) +#define GMOCK_INTERNAL_INIT_AND_2_VALUE_PARAMS(p0, p1)\ + (p0##_type gmock_p0, p1##_type gmock_p1) : p0(gmock_p0), p1(gmock_p1) +#define GMOCK_INTERNAL_INIT_AND_3_VALUE_PARAMS(p0, p1, p2)\ + (p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2) +#define GMOCK_INTERNAL_INIT_AND_4_VALUE_PARAMS(p0, p1, p2, p3)\ + (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3) +#define GMOCK_INTERNAL_INIT_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4)\ + (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4) : p0(gmock_p0), p1(gmock_p1), \ + p2(gmock_p2), p3(gmock_p3), p4(gmock_p4) +#define GMOCK_INTERNAL_INIT_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5)\ + (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, \ + p5##_type gmock_p5) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4), p5(gmock_p5) +#define GMOCK_INTERNAL_INIT_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6)\ + (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6) +#define GMOCK_INTERNAL_INIT_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7)\ + (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6, p7##_type gmock_p7) : p0(gmock_p0), p1(gmock_p1), \ + p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \ + p7(gmock_p7) +#define GMOCK_INTERNAL_INIT_AND_9_VALUE_PARAMS(p0, 
p1, p2, p3, p4, p5, p6, \ + p7, p8)\ + (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6, p7##_type gmock_p7, \ + p8##_type gmock_p8) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \ + p8(gmock_p8) +#define GMOCK_INTERNAL_INIT_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ + p7, p8, p9)\ + (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8, \ + p9##_type gmock_p9) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \ + p8(gmock_p8), p9(gmock_p9) + +// Declares the fields for storing the value parameters. +#define GMOCK_INTERNAL_DEFN_AND_0_VALUE_PARAMS() +#define GMOCK_INTERNAL_DEFN_AND_1_VALUE_PARAMS(p0) p0##_type p0; +#define GMOCK_INTERNAL_DEFN_AND_2_VALUE_PARAMS(p0, p1) p0##_type p0; \ + p1##_type p1; +#define GMOCK_INTERNAL_DEFN_AND_3_VALUE_PARAMS(p0, p1, p2) p0##_type p0; \ + p1##_type p1; p2##_type p2; +#define GMOCK_INTERNAL_DEFN_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0##_type p0; \ + p1##_type p1; p2##_type p2; p3##_type p3; +#define GMOCK_INTERNAL_DEFN_AND_5_VALUE_PARAMS(p0, p1, p2, p3, \ + p4) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; +#define GMOCK_INTERNAL_DEFN_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, \ + p5) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; \ + p5##_type p5; +#define GMOCK_INTERNAL_DEFN_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ + p6) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; \ + p5##_type p5; p6##_type p6; +#define GMOCK_INTERNAL_DEFN_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ + p7) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; \ + p5##_type p5; p6##_type p6; 
p7##_type p7; +#define GMOCK_INTERNAL_DEFN_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ + p7, p8) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; \ + p4##_type p4; p5##_type p5; p6##_type p6; p7##_type p7; p8##_type p8; +#define GMOCK_INTERNAL_DEFN_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ + p7, p8, p9) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; \ + p4##_type p4; p5##_type p5; p6##_type p6; p7##_type p7; p8##_type p8; \ + p9##_type p9; + +// Lists the value parameters. +#define GMOCK_INTERNAL_LIST_AND_0_VALUE_PARAMS() +#define GMOCK_INTERNAL_LIST_AND_1_VALUE_PARAMS(p0) p0 +#define GMOCK_INTERNAL_LIST_AND_2_VALUE_PARAMS(p0, p1) p0, p1 +#define GMOCK_INTERNAL_LIST_AND_3_VALUE_PARAMS(p0, p1, p2) p0, p1, p2 +#define GMOCK_INTERNAL_LIST_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0, p1, p2, p3 +#define GMOCK_INTERNAL_LIST_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) p0, p1, \ + p2, p3, p4 +#define GMOCK_INTERNAL_LIST_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) p0, \ + p1, p2, p3, p4, p5 +#define GMOCK_INTERNAL_LIST_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ + p6) p0, p1, p2, p3, p4, p5, p6 +#define GMOCK_INTERNAL_LIST_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ + p7) p0, p1, p2, p3, p4, p5, p6, p7 +#define GMOCK_INTERNAL_LIST_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ + p7, p8) p0, p1, p2, p3, p4, p5, p6, p7, p8 +#define GMOCK_INTERNAL_LIST_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ + p7, p8, p9) p0, p1, p2, p3, p4, p5, p6, p7, p8, p9 + +// Lists the value parameter types. 
+#define GMOCK_INTERNAL_LIST_TYPE_AND_0_VALUE_PARAMS() +#define GMOCK_INTERNAL_LIST_TYPE_AND_1_VALUE_PARAMS(p0) , p0##_type +#define GMOCK_INTERNAL_LIST_TYPE_AND_2_VALUE_PARAMS(p0, p1) , p0##_type, \ + p1##_type +#define GMOCK_INTERNAL_LIST_TYPE_AND_3_VALUE_PARAMS(p0, p1, p2) , p0##_type, \ + p1##_type, p2##_type +#define GMOCK_INTERNAL_LIST_TYPE_AND_4_VALUE_PARAMS(p0, p1, p2, p3) , \ + p0##_type, p1##_type, p2##_type, p3##_type +#define GMOCK_INTERNAL_LIST_TYPE_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) , \ + p0##_type, p1##_type, p2##_type, p3##_type, p4##_type +#define GMOCK_INTERNAL_LIST_TYPE_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) , \ + p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type +#define GMOCK_INTERNAL_LIST_TYPE_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ + p6) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type, \ + p6##_type +#define GMOCK_INTERNAL_LIST_TYPE_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ + p6, p7) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \ + p5##_type, p6##_type, p7##_type +#define GMOCK_INTERNAL_LIST_TYPE_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ + p6, p7, p8) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \ + p5##_type, p6##_type, p7##_type, p8##_type +#define GMOCK_INTERNAL_LIST_TYPE_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ + p6, p7, p8, p9) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \ + p5##_type, p6##_type, p7##_type, p8##_type, p9##_type + +// Declares the value parameters. 
+#define GMOCK_INTERNAL_DECL_AND_0_VALUE_PARAMS() +#define GMOCK_INTERNAL_DECL_AND_1_VALUE_PARAMS(p0) p0##_type p0 +#define GMOCK_INTERNAL_DECL_AND_2_VALUE_PARAMS(p0, p1) p0##_type p0, \ + p1##_type p1 +#define GMOCK_INTERNAL_DECL_AND_3_VALUE_PARAMS(p0, p1, p2) p0##_type p0, \ + p1##_type p1, p2##_type p2 +#define GMOCK_INTERNAL_DECL_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0##_type p0, \ + p1##_type p1, p2##_type p2, p3##_type p3 +#define GMOCK_INTERNAL_DECL_AND_5_VALUE_PARAMS(p0, p1, p2, p3, \ + p4) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4 +#define GMOCK_INTERNAL_DECL_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, \ + p5) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \ + p5##_type p5 +#define GMOCK_INTERNAL_DECL_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ + p6) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \ + p5##_type p5, p6##_type p6 +#define GMOCK_INTERNAL_DECL_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ + p7) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \ + p5##_type p5, p6##_type p6, p7##_type p7 +#define GMOCK_INTERNAL_DECL_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ + p7, p8) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ + p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8 +#define GMOCK_INTERNAL_DECL_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ + p7, p8, p9) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ + p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8, \ + p9##_type p9 + +// The suffix of the class template implementing the action template. 
+#define GMOCK_INTERNAL_COUNT_AND_0_VALUE_PARAMS() +#define GMOCK_INTERNAL_COUNT_AND_1_VALUE_PARAMS(p0) P +#define GMOCK_INTERNAL_COUNT_AND_2_VALUE_PARAMS(p0, p1) P2 +#define GMOCK_INTERNAL_COUNT_AND_3_VALUE_PARAMS(p0, p1, p2) P3 +#define GMOCK_INTERNAL_COUNT_AND_4_VALUE_PARAMS(p0, p1, p2, p3) P4 +#define GMOCK_INTERNAL_COUNT_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) P5 +#define GMOCK_INTERNAL_COUNT_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) P6 +#define GMOCK_INTERNAL_COUNT_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6) P7 +#define GMOCK_INTERNAL_COUNT_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ + p7) P8 +#define GMOCK_INTERNAL_COUNT_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ + p7, p8) P9 +#define GMOCK_INTERNAL_COUNT_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ + p7, p8, p9) P10 + +// The name of the class template implementing the action template. +#define GMOCK_ACTION_CLASS_(name, value_params)\ + GTEST_CONCAT_TOKEN_(name##Action, GMOCK_INTERNAL_COUNT_##value_params) + +#define ACTION_TEMPLATE(name, template_params, value_params)\ + template \ + class GMOCK_ACTION_CLASS_(name, value_params) {\ + public:\ + GMOCK_ACTION_CLASS_(name, value_params)\ + GMOCK_INTERNAL_INIT_##value_params {}\ + template \ + class gmock_Impl : public ::testing::ActionInterface {\ + public:\ + typedef F function_type;\ + typedef typename ::testing::internal::Function::Result return_type;\ + typedef typename ::testing::internal::Function::ArgumentTuple\ + args_type;\ + explicit gmock_Impl GMOCK_INTERNAL_INIT_##value_params {}\ + virtual return_type Perform(const args_type& args) {\ + return ::testing::internal::ActionHelper::\ + Perform(this, args);\ + }\ + template \ + return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \ + arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \ + arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \ + arg9_type arg9) const;\ + GMOCK_INTERNAL_DEFN_##value_params\ + private:\ + 
GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template operator ::testing::Action() const {\ + return ::testing::Action(\ + new gmock_Impl(GMOCK_INTERNAL_LIST_##value_params));\ + }\ + GMOCK_INTERNAL_DEFN_##value_params\ + private:\ + GTEST_DISALLOW_ASSIGN_(GMOCK_ACTION_CLASS_(name, value_params));\ + };\ + template \ + inline GMOCK_ACTION_CLASS_(name, value_params)<\ + GMOCK_INTERNAL_LIST_##template_params\ + GMOCK_INTERNAL_LIST_TYPE_##value_params> name(\ + GMOCK_INTERNAL_DECL_##value_params) {\ + return GMOCK_ACTION_CLASS_(name, value_params)<\ + GMOCK_INTERNAL_LIST_##template_params\ + GMOCK_INTERNAL_LIST_TYPE_##value_params>(\ + GMOCK_INTERNAL_LIST_##value_params);\ + }\ + template \ + template \ + template \ + typename ::testing::internal::Function::Result\ + GMOCK_ACTION_CLASS_(name, value_params)<\ + GMOCK_INTERNAL_LIST_##template_params\ + GMOCK_INTERNAL_LIST_TYPE_##value_params>::gmock_Impl::\ + gmock_PerformImpl(\ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +#define ACTION(name)\ + class name##Action {\ + public:\ + name##Action() {}\ + template \ + class gmock_Impl : public ::testing::ActionInterface {\ + public:\ + typedef F function_type;\ + typedef typename ::testing::internal::Function::Result return_type;\ + typedef typename ::testing::internal::Function::ArgumentTuple\ + args_type;\ + gmock_Impl() {}\ + virtual return_type Perform(const args_type& args) {\ + return ::testing::internal::ActionHelper::\ + Perform(this, args);\ + }\ + template \ + return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \ + arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \ + arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \ + arg9_type arg9) const;\ + private:\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template operator ::testing::Action() const {\ + return ::testing::Action(new gmock_Impl());\ + }\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##Action);\ + };\ + inline name##Action name() {\ + return 
name##Action();\ + }\ + template \ + template \ + typename ::testing::internal::Function::Result\ + name##Action::gmock_Impl::gmock_PerformImpl(\ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +#define ACTION_P(name, p0)\ + template \ + class name##ActionP {\ + public:\ + name##ActionP(p0##_type gmock_p0) : p0(gmock_p0) {}\ + template \ + class gmock_Impl : public ::testing::ActionInterface {\ + public:\ + typedef F function_type;\ + typedef typename ::testing::internal::Function::Result return_type;\ + typedef typename ::testing::internal::Function::ArgumentTuple\ + args_type;\ + explicit gmock_Impl(p0##_type gmock_p0) : p0(gmock_p0) {}\ + virtual return_type Perform(const args_type& args) {\ + return ::testing::internal::ActionHelper::\ + Perform(this, args);\ + }\ + template \ + return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \ + arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \ + arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \ + arg9_type arg9) const;\ + p0##_type p0;\ + private:\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template operator ::testing::Action() const {\ + return ::testing::Action(new gmock_Impl(p0));\ + }\ + p0##_type p0;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##ActionP);\ + };\ + template \ + inline name##ActionP name(p0##_type p0) {\ + return name##ActionP(p0);\ + }\ + template \ + template \ + template \ + typename ::testing::internal::Function::Result\ + name##ActionP::gmock_Impl::gmock_PerformImpl(\ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +#define ACTION_P2(name, p0, p1)\ + template \ + class name##ActionP2 {\ + public:\ + name##ActionP2(p0##_type gmock_p0, p1##_type gmock_p1) : p0(gmock_p0), \ + p1(gmock_p1) {}\ + template \ + class gmock_Impl : public ::testing::ActionInterface {\ + public:\ + typedef F function_type;\ + typedef typename ::testing::internal::Function::Result return_type;\ + typedef typename ::testing::internal::Function::ArgumentTuple\ + 
args_type;\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1) : p0(gmock_p0), \ + p1(gmock_p1) {}\ + virtual return_type Perform(const args_type& args) {\ + return ::testing::internal::ActionHelper::\ + Perform(this, args);\ + }\ + template \ + return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \ + arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \ + arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \ + arg9_type arg9) const;\ + p0##_type p0;\ + p1##_type p1;\ + private:\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template operator ::testing::Action() const {\ + return ::testing::Action(new gmock_Impl(p0, p1));\ + }\ + p0##_type p0;\ + p1##_type p1;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##ActionP2);\ + };\ + template \ + inline name##ActionP2 name(p0##_type p0, \ + p1##_type p1) {\ + return name##ActionP2(p0, p1);\ + }\ + template \ + template \ + template \ + typename ::testing::internal::Function::Result\ + name##ActionP2::gmock_Impl::gmock_PerformImpl(\ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +#define ACTION_P3(name, p0, p1, p2)\ + template \ + class name##ActionP3 {\ + public:\ + name##ActionP3(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2) {}\ + template \ + class gmock_Impl : public ::testing::ActionInterface {\ + public:\ + typedef F function_type;\ + typedef typename ::testing::internal::Function::Result return_type;\ + typedef typename ::testing::internal::Function::ArgumentTuple\ + args_type;\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2) {}\ + virtual return_type Perform(const args_type& args) {\ + return ::testing::internal::ActionHelper::\ + Perform(this, args);\ + }\ + template \ + return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \ + arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \ + arg5_type arg5, arg6_type arg6, 
arg7_type arg7, arg8_type arg8, \ + arg9_type arg9) const;\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + private:\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template operator ::testing::Action() const {\ + return ::testing::Action(new gmock_Impl(p0, p1, p2));\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##ActionP3);\ + };\ + template \ + inline name##ActionP3 name(p0##_type p0, \ + p1##_type p1, p2##_type p2) {\ + return name##ActionP3(p0, p1, p2);\ + }\ + template \ + template \ + template \ + typename ::testing::internal::Function::Result\ + name##ActionP3::gmock_Impl::gmock_PerformImpl(\ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +#define ACTION_P4(name, p0, p1, p2, p3)\ + template \ + class name##ActionP4 {\ + public:\ + name##ActionP4(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2, p3##_type gmock_p3) : p0(gmock_p0), p1(gmock_p1), \ + p2(gmock_p2), p3(gmock_p3) {}\ + template \ + class gmock_Impl : public ::testing::ActionInterface {\ + public:\ + typedef F function_type;\ + typedef typename ::testing::internal::Function::Result return_type;\ + typedef typename ::testing::internal::Function::ArgumentTuple\ + args_type;\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3) {}\ + virtual return_type Perform(const args_type& args) {\ + return ::testing::internal::ActionHelper::\ + Perform(this, args);\ + }\ + template \ + return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \ + arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \ + arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \ + arg9_type arg9) const;\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + private:\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template operator ::testing::Action() const {\ + return ::testing::Action(new gmock_Impl(p0, p1, 
p2, p3));\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##ActionP4);\ + };\ + template \ + inline name##ActionP4 name(p0##_type p0, p1##_type p1, p2##_type p2, \ + p3##_type p3) {\ + return name##ActionP4(p0, p1, \ + p2, p3);\ + }\ + template \ + template \ + template \ + typename ::testing::internal::Function::Result\ + name##ActionP4::gmock_Impl::gmock_PerformImpl(\ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +#define ACTION_P5(name, p0, p1, p2, p3, p4)\ + template \ + class name##ActionP5 {\ + public:\ + name##ActionP5(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2, p3##_type gmock_p3, \ + p4##_type gmock_p4) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4) {}\ + template \ + class gmock_Impl : public ::testing::ActionInterface {\ + public:\ + typedef F function_type;\ + typedef typename ::testing::internal::Function::Result return_type;\ + typedef typename ::testing::internal::Function::ArgumentTuple\ + args_type;\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4) : p0(gmock_p0), \ + p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), p4(gmock_p4) {}\ + virtual return_type Perform(const args_type& args) {\ + return ::testing::internal::ActionHelper::\ + Perform(this, args);\ + }\ + template \ + return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \ + arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \ + arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \ + arg9_type arg9) const;\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + private:\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template operator ::testing::Action() const {\ + return ::testing::Action(new gmock_Impl(p0, p1, p2, p3, p4));\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + private:\ + 
GTEST_DISALLOW_ASSIGN_(name##ActionP5);\ + };\ + template \ + inline name##ActionP5 name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ + p4##_type p4) {\ + return name##ActionP5(p0, p1, p2, p3, p4);\ + }\ + template \ + template \ + template \ + typename ::testing::internal::Function::Result\ + name##ActionP5::gmock_Impl::gmock_PerformImpl(\ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +#define ACTION_P6(name, p0, p1, p2, p3, p4, p5)\ + template \ + class name##ActionP6 {\ + public:\ + name##ActionP6(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ + p5##_type gmock_p5) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4), p5(gmock_p5) {}\ + template \ + class gmock_Impl : public ::testing::ActionInterface {\ + public:\ + typedef F function_type;\ + typedef typename ::testing::internal::Function::Result return_type;\ + typedef typename ::testing::internal::Function::ArgumentTuple\ + args_type;\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, \ + p5##_type gmock_p5) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4), p5(gmock_p5) {}\ + virtual return_type Perform(const args_type& args) {\ + return ::testing::internal::ActionHelper::\ + Perform(this, args);\ + }\ + template \ + return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \ + arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \ + arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \ + arg9_type arg9) const;\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + private:\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template operator ::testing::Action() const {\ + return ::testing::Action(new gmock_Impl(p0, p1, p2, p3, p4, p5));\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type 
p5;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##ActionP6);\ + };\ + template \ + inline name##ActionP6 name(p0##_type p0, p1##_type p1, p2##_type p2, \ + p3##_type p3, p4##_type p4, p5##_type p5) {\ + return name##ActionP6(p0, p1, p2, p3, p4, p5);\ + }\ + template \ + template \ + template \ + typename ::testing::internal::Function::Result\ + name##ActionP6::gmock_Impl::gmock_PerformImpl(\ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +#define ACTION_P7(name, p0, p1, p2, p3, p4, p5, p6)\ + template \ + class name##ActionP7 {\ + public:\ + name##ActionP7(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ + p5##_type gmock_p5, p6##_type gmock_p6) : p0(gmock_p0), p1(gmock_p1), \ + p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), \ + p6(gmock_p6) {}\ + template \ + class gmock_Impl : public ::testing::ActionInterface {\ + public:\ + typedef F function_type;\ + typedef typename ::testing::internal::Function::Result return_type;\ + typedef typename ::testing::internal::Function::ArgumentTuple\ + args_type;\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6) {}\ + virtual return_type Perform(const args_type& args) {\ + return ::testing::internal::ActionHelper::\ + Perform(this, args);\ + }\ + template \ + return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \ + arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \ + arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \ + arg9_type arg9) const;\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + p6##_type p6;\ + private:\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template operator ::testing::Action() const {\ + return ::testing::Action(new 
gmock_Impl(p0, p1, p2, p3, p4, p5, \ + p6));\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + p6##_type p6;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##ActionP7);\ + };\ + template \ + inline name##ActionP7 name(p0##_type p0, p1##_type p1, \ + p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \ + p6##_type p6) {\ + return name##ActionP7(p0, p1, p2, p3, p4, p5, p6);\ + }\ + template \ + template \ + template \ + typename ::testing::internal::Function::Result\ + name##ActionP7::gmock_Impl::gmock_PerformImpl(\ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +#define ACTION_P8(name, p0, p1, p2, p3, p4, p5, p6, p7)\ + template \ + class name##ActionP8 {\ + public:\ + name##ActionP8(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ + p5##_type gmock_p5, p6##_type gmock_p6, \ + p7##_type gmock_p7) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \ + p7(gmock_p7) {}\ + template \ + class gmock_Impl : public ::testing::ActionInterface {\ + public:\ + typedef F function_type;\ + typedef typename ::testing::internal::Function::Result return_type;\ + typedef typename ::testing::internal::Function::ArgumentTuple\ + args_type;\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6, p7##_type gmock_p7) : p0(gmock_p0), \ + p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), \ + p5(gmock_p5), p6(gmock_p6), p7(gmock_p7) {}\ + virtual return_type Perform(const args_type& args) {\ + return ::testing::internal::ActionHelper::\ + Perform(this, args);\ + }\ + template \ + return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \ + arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \ + arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \ + arg9_type arg9) const;\ 
+ p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + p6##_type p6;\ + p7##_type p7;\ + private:\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template operator ::testing::Action() const {\ + return ::testing::Action(new gmock_Impl(p0, p1, p2, p3, p4, p5, \ + p6, p7));\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + p6##_type p6;\ + p7##_type p7;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##ActionP8);\ + };\ + template \ + inline name##ActionP8 name(p0##_type p0, \ + p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \ + p6##_type p6, p7##_type p7) {\ + return name##ActionP8(p0, p1, p2, p3, p4, p5, \ + p6, p7);\ + }\ + template \ + template \ + template \ + typename ::testing::internal::Function::Result\ + name##ActionP8::gmock_Impl::gmock_PerformImpl(\ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +#define ACTION_P9(name, p0, p1, p2, p3, p4, p5, p6, p7, p8)\ + template \ + class name##ActionP9 {\ + public:\ + name##ActionP9(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ + p5##_type gmock_p5, p6##_type gmock_p6, p7##_type gmock_p7, \ + p8##_type gmock_p8) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \ + p8(gmock_p8) {}\ + template \ + class gmock_Impl : public ::testing::ActionInterface {\ + public:\ + typedef F function_type;\ + typedef typename ::testing::internal::Function::Result return_type;\ + typedef typename ::testing::internal::Function::ArgumentTuple\ + args_type;\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6, p7##_type gmock_p7, \ + p8##_type gmock_p8) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \ + p7(gmock_p7), 
p8(gmock_p8) {}\ + virtual return_type Perform(const args_type& args) {\ + return ::testing::internal::ActionHelper::\ + Perform(this, args);\ + }\ + template \ + return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \ + arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \ + arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \ + arg9_type arg9) const;\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + p6##_type p6;\ + p7##_type p7;\ + p8##_type p8;\ + private:\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template operator ::testing::Action() const {\ + return ::testing::Action(new gmock_Impl(p0, p1, p2, p3, p4, p5, \ + p6, p7, p8));\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + p6##_type p6;\ + p7##_type p7;\ + p8##_type p8;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##ActionP9);\ + };\ + template \ + inline name##ActionP9 name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ + p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, \ + p8##_type p8) {\ + return name##ActionP9(p0, p1, p2, \ + p3, p4, p5, p6, p7, p8);\ + }\ + template \ + template \ + template \ + typename ::testing::internal::Function::Result\ + name##ActionP9::gmock_Impl::gmock_PerformImpl(\ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +#define ACTION_P10(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)\ + template \ + class name##ActionP10 {\ + public:\ + name##ActionP10(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ + p5##_type gmock_p5, p6##_type gmock_p6, p7##_type gmock_p7, \ + p8##_type gmock_p8, p9##_type gmock_p9) : p0(gmock_p0), p1(gmock_p1), \ + p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \ + p7(gmock_p7), p8(gmock_p8), p9(gmock_p9) {}\ + template \ + class gmock_Impl : public ::testing::ActionInterface {\ + public:\ + typedef F 
function_type;\ + typedef typename ::testing::internal::Function::Result return_type;\ + typedef typename ::testing::internal::Function::ArgumentTuple\ + args_type;\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8, \ + p9##_type gmock_p9) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \ + p7(gmock_p7), p8(gmock_p8), p9(gmock_p9) {}\ + virtual return_type Perform(const args_type& args) {\ + return ::testing::internal::ActionHelper::\ + Perform(this, args);\ + }\ + template \ + return_type gmock_PerformImpl(const args_type& args, arg0_type arg0, \ + arg1_type arg1, arg2_type arg2, arg3_type arg3, arg4_type arg4, \ + arg5_type arg5, arg6_type arg6, arg7_type arg7, arg8_type arg8, \ + arg9_type arg9) const;\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + p6##_type p6;\ + p7##_type p7;\ + p8##_type p8;\ + p9##_type p9;\ + private:\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template operator ::testing::Action() const {\ + return ::testing::Action(new gmock_Impl(p0, p1, p2, p3, p4, p5, \ + p6, p7, p8, p9));\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + p6##_type p6;\ + p7##_type p7;\ + p8##_type p8;\ + p9##_type p9;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##ActionP10);\ + };\ + template \ + inline name##ActionP10 name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ + p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8, \ + p9##_type p9) {\ + return name##ActionP10(p0, \ + p1, p2, p3, p4, p5, p6, p7, p8, p9);\ + }\ + template \ + template \ + template \ + typename ::testing::internal::Function::Result\ + name##ActionP10::gmock_Impl::gmock_PerformImpl(\ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +namespace 
testing { + +// The ACTION*() macros trigger warning C4100 (unreferenced formal +// parameter) in MSVC with -W4. Unfortunately they cannot be fixed in +// the macro definition, as the warnings are generated when the macro +// is expanded and macro expansion cannot contain #pragma. Therefore +// we suppress them here. +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable:4100) +#endif + +// Various overloads for InvokeArgument(). +// +// The InvokeArgument(a1, a2, ..., a_k) action invokes the N-th +// (0-based) argument, which must be a k-ary callable, of the mock +// function, with arguments a1, a2, ..., a_k. +// +// Notes: +// +// 1. The arguments are passed by value by default. If you need to +// pass an argument by reference, wrap it inside ByRef(). For +// example, +// +// InvokeArgument<1>(5, string("Hello"), ByRef(foo)) +// +// passes 5 and string("Hello") by value, and passes foo by +// reference. +// +// 2. If the callable takes an argument by reference but ByRef() is +// not used, it will receive the reference to a copy of the value, +// instead of the original value. For example, when the 0-th +// argument of the mock function takes a const string&, the action +// +// InvokeArgument<0>(string("Hello")) +// +// makes a copy of the temporary string("Hello") object and passes a +// reference of the copy, instead of the original temporary object, +// to the callable. This makes it easy for a user to define an +// InvokeArgument action from temporary values and have it performed +// later. 
+ +ACTION_TEMPLATE(InvokeArgument, + HAS_1_TEMPLATE_PARAMS(int, k), + AND_0_VALUE_PARAMS()) { + return internal::CallableHelper::Call( + ::std::tr1::get(args)); +} + +ACTION_TEMPLATE(InvokeArgument, + HAS_1_TEMPLATE_PARAMS(int, k), + AND_1_VALUE_PARAMS(p0)) { + return internal::CallableHelper::Call( + ::std::tr1::get(args), p0); +} + +ACTION_TEMPLATE(InvokeArgument, + HAS_1_TEMPLATE_PARAMS(int, k), + AND_2_VALUE_PARAMS(p0, p1)) { + return internal::CallableHelper::Call( + ::std::tr1::get(args), p0, p1); +} + +ACTION_TEMPLATE(InvokeArgument, + HAS_1_TEMPLATE_PARAMS(int, k), + AND_3_VALUE_PARAMS(p0, p1, p2)) { + return internal::CallableHelper::Call( + ::std::tr1::get(args), p0, p1, p2); +} + +ACTION_TEMPLATE(InvokeArgument, + HAS_1_TEMPLATE_PARAMS(int, k), + AND_4_VALUE_PARAMS(p0, p1, p2, p3)) { + return internal::CallableHelper::Call( + ::std::tr1::get(args), p0, p1, p2, p3); +} + +ACTION_TEMPLATE(InvokeArgument, + HAS_1_TEMPLATE_PARAMS(int, k), + AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4)) { + return internal::CallableHelper::Call( + ::std::tr1::get(args), p0, p1, p2, p3, p4); +} + +ACTION_TEMPLATE(InvokeArgument, + HAS_1_TEMPLATE_PARAMS(int, k), + AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5)) { + return internal::CallableHelper::Call( + ::std::tr1::get(args), p0, p1, p2, p3, p4, p5); +} + +ACTION_TEMPLATE(InvokeArgument, + HAS_1_TEMPLATE_PARAMS(int, k), + AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6)) { + return internal::CallableHelper::Call( + ::std::tr1::get(args), p0, p1, p2, p3, p4, p5, p6); +} + +ACTION_TEMPLATE(InvokeArgument, + HAS_1_TEMPLATE_PARAMS(int, k), + AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7)) { + return internal::CallableHelper::Call( + ::std::tr1::get(args), p0, p1, p2, p3, p4, p5, p6, p7); +} + +ACTION_TEMPLATE(InvokeArgument, + HAS_1_TEMPLATE_PARAMS(int, k), + AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8)) { + return internal::CallableHelper::Call( + ::std::tr1::get(args), p0, p1, p2, p3, p4, p5, p6, p7, p8); +} + 
+ACTION_TEMPLATE(InvokeArgument, + HAS_1_TEMPLATE_PARAMS(int, k), + AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)) { + return internal::CallableHelper::Call( + ::std::tr1::get(args), p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); +} + +// Various overloads for ReturnNew(). +// +// The ReturnNew(a1, a2, ..., a_k) action returns a pointer to a new +// instance of type T, constructed on the heap with constructor arguments +// a1, a2, ..., and a_k. The caller assumes ownership of the returned value. +ACTION_TEMPLATE(ReturnNew, + HAS_1_TEMPLATE_PARAMS(typename, T), + AND_0_VALUE_PARAMS()) { + return new T(); +} + +ACTION_TEMPLATE(ReturnNew, + HAS_1_TEMPLATE_PARAMS(typename, T), + AND_1_VALUE_PARAMS(p0)) { + return new T(p0); +} + +ACTION_TEMPLATE(ReturnNew, + HAS_1_TEMPLATE_PARAMS(typename, T), + AND_2_VALUE_PARAMS(p0, p1)) { + return new T(p0, p1); +} + +ACTION_TEMPLATE(ReturnNew, + HAS_1_TEMPLATE_PARAMS(typename, T), + AND_3_VALUE_PARAMS(p0, p1, p2)) { + return new T(p0, p1, p2); +} + +ACTION_TEMPLATE(ReturnNew, + HAS_1_TEMPLATE_PARAMS(typename, T), + AND_4_VALUE_PARAMS(p0, p1, p2, p3)) { + return new T(p0, p1, p2, p3); +} + +ACTION_TEMPLATE(ReturnNew, + HAS_1_TEMPLATE_PARAMS(typename, T), + AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4)) { + return new T(p0, p1, p2, p3, p4); +} + +ACTION_TEMPLATE(ReturnNew, + HAS_1_TEMPLATE_PARAMS(typename, T), + AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5)) { + return new T(p0, p1, p2, p3, p4, p5); +} + +ACTION_TEMPLATE(ReturnNew, + HAS_1_TEMPLATE_PARAMS(typename, T), + AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6)) { + return new T(p0, p1, p2, p3, p4, p5, p6); +} + +ACTION_TEMPLATE(ReturnNew, + HAS_1_TEMPLATE_PARAMS(typename, T), + AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7)) { + return new T(p0, p1, p2, p3, p4, p5, p6, p7); +} + +ACTION_TEMPLATE(ReturnNew, + HAS_1_TEMPLATE_PARAMS(typename, T), + AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8)) { + return new T(p0, p1, p2, p3, p4, p5, p6, p7, p8); +} + 
+ACTION_TEMPLATE(ReturnNew, + HAS_1_TEMPLATE_PARAMS(typename, T), + AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)) { + return new T(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); +} + +#ifdef _MSC_VER +# pragma warning(pop) +#endif + +} // namespace testing + +#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_ diff --git a/src/gmock/include/gmock/gmock-generated-actions.h.pump b/src/gmock/include/gmock/gmock-generated-actions.h.pump new file mode 100644 index 00000000000..8e2b57352e5 --- /dev/null +++ b/src/gmock/include/gmock/gmock-generated-actions.h.pump @@ -0,0 +1,821 @@ +$$ -*- mode: c++; -*- +$$ This is a Pump source file. Please use Pump to convert it to +$$ gmock-generated-actions.h. +$$ +$var n = 10 $$ The maximum arity we support. +$$}} This meta comment fixes auto-indentation in editors. +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Google Mock - a framework for writing C++ mock classes. +// +// This file implements some commonly used variadic actions. + +#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_ +#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_ + +#include "gmock/gmock-actions.h" +#include "gmock/internal/gmock-port.h" + +namespace testing { +namespace internal { + +// InvokeHelper knows how to unpack an N-tuple and invoke an N-ary +// function or method with the unpacked values, where F is a function +// type that takes N arguments. +template +class InvokeHelper; + + +$range i 0..n +$for i [[ +$range j 1..i +$var types = [[$for j [[, typename A$j]]]] +$var as = [[$for j, [[A$j]]]] +$var args = [[$if i==0 [[]] $else [[ args]]]] +$var import = [[$if i==0 [[]] $else [[ + using ::std::tr1::get; + +]]]] +$var gets = [[$for j, [[get<$(j - 1)>(args)]]]] +template +class InvokeHelper > { + public: + template + static R Invoke(Function function, const ::std::tr1::tuple<$as>&$args) { +$import return function($gets); + } + + template + static R InvokeMethod(Class* obj_ptr, + MethodPtr method_ptr, + const ::std::tr1::tuple<$as>&$args) { +$import return (obj_ptr->*method_ptr)($gets); + } +}; + + +]] +// CallableHelper has static methods for invoking "callables", +// i.e. function pointers and functors. It uses overloading to +// provide a uniform interface for invoking different kinds of +// callables. 
In particular, you can use: +// +// CallableHelper::Call(callable, a1, a2, ..., an) +// +// to invoke an n-ary callable, where R is its return type. If an +// argument, say a2, needs to be passed by reference, you should write +// ByRef(a2) instead of a2 in the above expression. +template +class CallableHelper { + public: + // Calls a nullary callable. + template + static R Call(Function function) { return function(); } + + // Calls a unary callable. + + // We deliberately pass a1 by value instead of const reference here + // in case it is a C-string literal. If we had declared the + // parameter as 'const A1& a1' and write Call(function, "Hi"), the + // compiler would've thought A1 is 'char[3]', which causes trouble + // when you need to copy a value of type A1. By declaring the + // parameter as 'A1 a1', the compiler will correctly infer that A1 + // is 'const char*' when it sees Call(function, "Hi"). + // + // Since this function is defined inline, the compiler can get rid + // of the copying of the arguments. Therefore the performance won't + // be hurt. + template + static R Call(Function function, A1 a1) { return function(a1); } + +$range i 2..n +$for i +[[ +$var arity = [[$if i==2 [[binary]] $elif i==3 [[ternary]] $else [[$i-ary]]]] + + // Calls a $arity callable. + +$range j 1..i +$var typename_As = [[$for j, [[typename A$j]]]] +$var Aas = [[$for j, [[A$j a$j]]]] +$var as = [[$for j, [[a$j]]]] +$var typename_Ts = [[$for j, [[typename T$j]]]] +$var Ts = [[$for j, [[T$j]]]] + template + static R Call(Function function, $Aas) { + return function($as); + } + +]] +}; // class CallableHelper + +// An INTERNAL macro for extracting the type of a tuple field. It's +// subject to change without notice - DO NOT USE IN USER CODE! 
+#define GMOCK_FIELD_(Tuple, N) \ + typename ::std::tr1::tuple_element::type + +$range i 1..n + +// SelectArgs::type is the +// type of an n-ary function whose i-th (1-based) argument type is the +// k{i}-th (0-based) field of ArgumentTuple, which must be a tuple +// type, and whose return type is Result. For example, +// SelectArgs, 0, 3>::type +// is int(bool, long). +// +// SelectArgs::Select(args) +// returns the selected fields (k1, k2, ..., k_n) of args as a tuple. +// For example, +// SelectArgs, 2, 0>::Select( +// ::std::tr1::make_tuple(true, 'a', 2.5)) +// returns ::std::tr1::tuple (2.5, true). +// +// The numbers in list k1, k2, ..., k_n must be >= 0, where n can be +// in the range [0, $n]. Duplicates are allowed and they don't have +// to be in an ascending or descending order. + +template +class SelectArgs { + public: + typedef Result type($for i, [[GMOCK_FIELD_(ArgumentTuple, k$i)]]); + typedef typename Function::ArgumentTuple SelectedArgs; + static SelectedArgs Select(const ArgumentTuple& args) { + using ::std::tr1::get; + return SelectedArgs($for i, [[get(args)]]); + } +}; + + +$for i [[ +$range j 1..n +$range j1 1..i-1 +template +class SelectArgs { + public: + typedef Result type($for j1, [[GMOCK_FIELD_(ArgumentTuple, k$j1)]]); + typedef typename Function::ArgumentTuple SelectedArgs; + static SelectedArgs Select(const ArgumentTuple& [[]] +$if i == 1 [[/* args */]] $else [[args]]) { + using ::std::tr1::get; + return SelectedArgs($for j1, [[get(args)]]); + } +}; + + +]] +#undef GMOCK_FIELD_ + +$var ks = [[$for i, [[k$i]]]] + +// Implements the WithArgs action. 
+template +class WithArgsAction { + public: + explicit WithArgsAction(const InnerAction& action) : action_(action) {} + + template + operator Action() const { return MakeAction(new Impl(action_)); } + + private: + template + class Impl : public ActionInterface { + public: + typedef typename Function::Result Result; + typedef typename Function::ArgumentTuple ArgumentTuple; + + explicit Impl(const InnerAction& action) : action_(action) {} + + virtual Result Perform(const ArgumentTuple& args) { + return action_.Perform(SelectArgs::Select(args)); + } + + private: + typedef typename SelectArgs::type InnerFunctionType; + + Action action_; + }; + + const InnerAction action_; + + GTEST_DISALLOW_ASSIGN_(WithArgsAction); +}; + +// A macro from the ACTION* family (defined later in this file) +// defines an action that can be used in a mock function. Typically, +// these actions only care about a subset of the arguments of the mock +// function. For example, if such an action only uses the second +// argument, it can be used in any mock function that takes >= 2 +// arguments where the type of the second argument is compatible. +// +// Therefore, the action implementation must be prepared to take more +// arguments than it needs. The ExcessiveArg type is used to +// represent those excessive arguments. In order to keep the compiler +// error messages tractable, we define it in the testing namespace +// instead of testing::internal. However, this is an INTERNAL TYPE +// and subject to change without notice, so a user MUST NOT USE THIS +// TYPE DIRECTLY. +struct ExcessiveArg {}; + +// A helper class needed for implementing the ACTION* macros. 
+template +class ActionHelper { + public: +$range i 0..n +$for i + +[[ +$var template = [[$if i==0 [[]] $else [[ +$range j 0..i-1 + template <$for j, [[typename A$j]]> +]]]] +$range j 0..i-1 +$var As = [[$for j, [[A$j]]]] +$var as = [[$for j, [[get<$j>(args)]]]] +$range k 1..n-i +$var eas = [[$for k, [[ExcessiveArg()]]]] +$var arg_list = [[$if (i==0) | (i==n) [[$as$eas]] $else [[$as, $eas]]]] +$template + static Result Perform(Impl* impl, const ::std::tr1::tuple<$As>& args) { + using ::std::tr1::get; + return impl->template gmock_PerformImpl<$As>(args, $arg_list); + } + +]] +}; + +} // namespace internal + +// Various overloads for Invoke(). + +// WithArgs(an_action) creates an action that passes +// the selected arguments of the mock function to an_action and +// performs it. It serves as an adaptor between actions with +// different argument lists. C++ doesn't support default arguments for +// function templates, so we have to overload it. + +$range i 1..n +$for i [[ +$range j 1..i +template <$for j [[int k$j, ]]typename InnerAction> +inline internal::WithArgsAction +WithArgs(const InnerAction& action) { + return internal::WithArgsAction(action); +} + + +]] +// Creates an action that does actions a1, a2, ..., sequentially in +// each invocation. +$range i 2..n +$for i [[ +$range j 2..i +$var types = [[$for j, [[typename Action$j]]]] +$var Aas = [[$for j [[, Action$j a$j]]]] + +template +$range k 1..i-1 + +inline $for k [[internal::DoBothAction]] + +DoAll(Action1 a1$Aas) { +$if i==2 [[ + + return internal::DoBothAction(a1, a2); +]] $else [[ +$range j2 2..i + + return DoAll(a1, DoAll($for j2, [[a$j2]])); +]] + +} + +]] + +} // namespace testing + +// The ACTION* family of macros can be used in a namespace scope to +// define custom actions easily. The syntax: +// +// ACTION(name) { statements; } +// +// will define an action with the given name that executes the +// statements. The value returned by the statements will be used as +// the return value of the action. 
Inside the statements, you can +// refer to the K-th (0-based) argument of the mock function by +// 'argK', and refer to its type by 'argK_type'. For example: +// +// ACTION(IncrementArg1) { +// arg1_type temp = arg1; +// return ++(*temp); +// } +// +// allows you to write +// +// ...WillOnce(IncrementArg1()); +// +// You can also refer to the entire argument tuple and its type by +// 'args' and 'args_type', and refer to the mock function type and its +// return type by 'function_type' and 'return_type'. +// +// Note that you don't need to specify the types of the mock function +// arguments. However rest assured that your code is still type-safe: +// you'll get a compiler error if *arg1 doesn't support the ++ +// operator, or if the type of ++(*arg1) isn't compatible with the +// mock function's return type, for example. +// +// Sometimes you'll want to parameterize the action. For that you can use +// another macro: +// +// ACTION_P(name, param_name) { statements; } +// +// For example: +// +// ACTION_P(Add, n) { return arg0 + n; } +// +// will allow you to write: +// +// ...WillOnce(Add(5)); +// +// Note that you don't need to provide the type of the parameter +// either. If you need to reference the type of a parameter named +// 'foo', you can write 'foo_type'. For example, in the body of +// ACTION_P(Add, n) above, you can write 'n_type' to refer to the type +// of 'n'. +// +// We also provide ACTION_P2, ACTION_P3, ..., up to ACTION_P$n to support +// multi-parameter actions. +// +// For the purpose of typing, you can view +// +// ACTION_Pk(Foo, p1, ..., pk) { ... } +// +// as shorthand for +// +// template +// FooActionPk Foo(p1_type p1, ..., pk_type pk) { ... } +// +// In particular, you can provide the template type arguments +// explicitly when invoking Foo(), as in Foo(5, false); +// although usually you can rely on the compiler to infer the types +// for you automatically. 
You can assign the result of expression +// Foo(p1, ..., pk) to a variable of type FooActionPk. This can be useful when composing actions. +// +// You can also overload actions with different numbers of parameters: +// +// ACTION_P(Plus, a) { ... } +// ACTION_P2(Plus, a, b) { ... } +// +// While it's tempting to always use the ACTION* macros when defining +// a new action, you should also consider implementing ActionInterface +// or using MakePolymorphicAction() instead, especially if you need to +// use the action a lot. While these approaches require more work, +// they give you more control on the types of the mock function +// arguments and the action parameters, which in general leads to +// better compiler error messages that pay off in the long run. They +// also allow overloading actions based on parameter types (as opposed +// to just based on the number of parameters). +// +// CAVEAT: +// +// ACTION*() can only be used in a namespace scope. The reason is +// that C++ doesn't yet allow function-local types to be used to +// instantiate templates. The up-coming C++0x standard will fix this. +// Once that's done, we'll consider supporting using ACTION*() inside +// a function. +// +// MORE INFORMATION: +// +// To learn more about using these macros, please search for 'ACTION' +// on http://code.google.com/p/googlemock/wiki/CookBook. + +$range i 0..n +$range k 0..n-1 + +// An internal macro needed for implementing ACTION*(). +#define GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_\ + const args_type& args GTEST_ATTRIBUTE_UNUSED_ +$for k [[, \ + arg$k[[]]_type arg$k GTEST_ATTRIBUTE_UNUSED_]] + + +// Sometimes you want to give an action explicit template parameters +// that cannot be inferred from its value parameters. ACTION() and +// ACTION_P*() don't support that. ACTION_TEMPLATE() remedies that +// and can be viewed as an extension to ACTION() and ACTION_P*(). 
+// +// The syntax: +// +// ACTION_TEMPLATE(ActionName, +// HAS_m_TEMPLATE_PARAMS(kind1, name1, ..., kind_m, name_m), +// AND_n_VALUE_PARAMS(p1, ..., p_n)) { statements; } +// +// defines an action template that takes m explicit template +// parameters and n value parameters. name_i is the name of the i-th +// template parameter, and kind_i specifies whether it's a typename, +// an integral constant, or a template. p_i is the name of the i-th +// value parameter. +// +// Example: +// +// // DuplicateArg(output) converts the k-th argument of the mock +// // function to type T and copies it to *output. +// ACTION_TEMPLATE(DuplicateArg, +// HAS_2_TEMPLATE_PARAMS(int, k, typename, T), +// AND_1_VALUE_PARAMS(output)) { +// *output = T(std::tr1::get(args)); +// } +// ... +// int n; +// EXPECT_CALL(mock, Foo(_, _)) +// .WillOnce(DuplicateArg<1, unsigned char>(&n)); +// +// To create an instance of an action template, write: +// +// ActionName(v1, ..., v_n) +// +// where the ts are the template arguments and the vs are the value +// arguments. The value argument types are inferred by the compiler. +// If you want to explicitly specify the value argument types, you can +// provide additional template arguments: +// +// ActionName(v1, ..., v_n) +// +// where u_i is the desired type of v_i. +// +// ACTION_TEMPLATE and ACTION/ACTION_P* can be overloaded on the +// number of value parameters, but not on the number of template +// parameters. Without the restriction, the meaning of the following +// is unclear: +// +// OverloadedAction(x); +// +// Are we using a single-template-parameter action where 'bool' refers +// to the type of x, or are we using a two-template-parameter action +// where the compiler is asked to infer the type of x? +// +// Implementation notes: +// +// GMOCK_INTERNAL_*_HAS_m_TEMPLATE_PARAMS and +// GMOCK_INTERNAL_*_AND_n_VALUE_PARAMS are internal macros for +// implementing ACTION_TEMPLATE. 
The main trick we use is to create +// new macro invocations when expanding a macro. For example, we have +// +// #define ACTION_TEMPLATE(name, template_params, value_params) +// ... GMOCK_INTERNAL_DECL_##template_params ... +// +// which causes ACTION_TEMPLATE(..., HAS_1_TEMPLATE_PARAMS(typename, T), ...) +// to expand to +// +// ... GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS(typename, T) ... +// +// Since GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS is a macro, the +// preprocessor will continue to expand it to +// +// ... typename T ... +// +// This technique conforms to the C++ standard and is portable. It +// allows us to implement action templates using O(N) code, where N is +// the maximum number of template/value parameters supported. Without +// using it, we'd have to devote O(N^2) amount of code to implement all +// combinations of m and n. + +// Declares the template parameters. + +$range j 1..n +$for j [[ +$range m 0..j-1 +#define GMOCK_INTERNAL_DECL_HAS_$j[[]] +_TEMPLATE_PARAMS($for m, [[kind$m, name$m]]) $for m, [[kind$m name$m]] + + +]] + +// Lists the template parameters. + +$for j [[ +$range m 0..j-1 +#define GMOCK_INTERNAL_LIST_HAS_$j[[]] +_TEMPLATE_PARAMS($for m, [[kind$m, name$m]]) $for m, [[name$m]] + + +]] + +// Declares the types of value parameters. + +$for i [[ +$range j 0..i-1 +#define GMOCK_INTERNAL_DECL_TYPE_AND_$i[[]] +_VALUE_PARAMS($for j, [[p$j]]) $for j [[, typename p$j##_type]] + + +]] + +// Initializes the value parameters. + +$for i [[ +$range j 0..i-1 +#define GMOCK_INTERNAL_INIT_AND_$i[[]]_VALUE_PARAMS($for j, [[p$j]])\ + ($for j, [[p$j##_type gmock_p$j]])$if i>0 [[ : ]]$for j, [[p$j(gmock_p$j)]] + + +]] + +// Declares the fields for storing the value parameters. + +$for i [[ +$range j 0..i-1 +#define GMOCK_INTERNAL_DEFN_AND_$i[[]] +_VALUE_PARAMS($for j, [[p$j]]) $for j [[p$j##_type p$j; ]] + + +]] + +// Lists the value parameters. 
+ +$for i [[ +$range j 0..i-1 +#define GMOCK_INTERNAL_LIST_AND_$i[[]] +_VALUE_PARAMS($for j, [[p$j]]) $for j, [[p$j]] + + +]] + +// Lists the value parameter types. + +$for i [[ +$range j 0..i-1 +#define GMOCK_INTERNAL_LIST_TYPE_AND_$i[[]] +_VALUE_PARAMS($for j, [[p$j]]) $for j [[, p$j##_type]] + + +]] + +// Declares the value parameters. + +$for i [[ +$range j 0..i-1 +#define GMOCK_INTERNAL_DECL_AND_$i[[]]_VALUE_PARAMS($for j, [[p$j]]) [[]] +$for j, [[p$j##_type p$j]] + + +]] + +// The suffix of the class template implementing the action template. +$for i [[ + + +$range j 0..i-1 +#define GMOCK_INTERNAL_COUNT_AND_$i[[]]_VALUE_PARAMS($for j, [[p$j]]) [[]] +$if i==1 [[P]] $elif i>=2 [[P$i]] +]] + + +// The name of the class template implementing the action template. +#define GMOCK_ACTION_CLASS_(name, value_params)\ + GTEST_CONCAT_TOKEN_(name##Action, GMOCK_INTERNAL_COUNT_##value_params) + +$range k 0..n-1 + +#define ACTION_TEMPLATE(name, template_params, value_params)\ + template \ + class GMOCK_ACTION_CLASS_(name, value_params) {\ + public:\ + GMOCK_ACTION_CLASS_(name, value_params)\ + GMOCK_INTERNAL_INIT_##value_params {}\ + template \ + class gmock_Impl : public ::testing::ActionInterface {\ + public:\ + typedef F function_type;\ + typedef typename ::testing::internal::Function::Result return_type;\ + typedef typename ::testing::internal::Function::ArgumentTuple\ + args_type;\ + explicit gmock_Impl GMOCK_INTERNAL_INIT_##value_params {}\ + virtual return_type Perform(const args_type& args) {\ + return ::testing::internal::ActionHelper::\ + Perform(this, args);\ + }\ + template <$for k, [[typename arg$k[[]]_type]]>\ + return_type gmock_PerformImpl(const args_type& args[[]] +$for k [[, arg$k[[]]_type arg$k]]) const;\ + GMOCK_INTERNAL_DEFN_##value_params\ + private:\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template operator ::testing::Action() const {\ + return ::testing::Action(\ + new gmock_Impl(GMOCK_INTERNAL_LIST_##value_params));\ + }\ + 
GMOCK_INTERNAL_DEFN_##value_params\ + private:\ + GTEST_DISALLOW_ASSIGN_(GMOCK_ACTION_CLASS_(name, value_params));\ + };\ + template \ + inline GMOCK_ACTION_CLASS_(name, value_params)<\ + GMOCK_INTERNAL_LIST_##template_params\ + GMOCK_INTERNAL_LIST_TYPE_##value_params> name(\ + GMOCK_INTERNAL_DECL_##value_params) {\ + return GMOCK_ACTION_CLASS_(name, value_params)<\ + GMOCK_INTERNAL_LIST_##template_params\ + GMOCK_INTERNAL_LIST_TYPE_##value_params>(\ + GMOCK_INTERNAL_LIST_##value_params);\ + }\ + template \ + template \ + template \ + typename ::testing::internal::Function::Result\ + GMOCK_ACTION_CLASS_(name, value_params)<\ + GMOCK_INTERNAL_LIST_##template_params\ + GMOCK_INTERNAL_LIST_TYPE_##value_params>::gmock_Impl::\ + gmock_PerformImpl(\ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +$for i + +[[ +$var template = [[$if i==0 [[]] $else [[ +$range j 0..i-1 + + template <$for j, [[typename p$j##_type]]>\ +]]]] +$var class_name = [[name##Action[[$if i==0 [[]] $elif i==1 [[P]] + $else [[P$i]]]]]] +$range j 0..i-1 +$var ctor_param_list = [[$for j, [[p$j##_type gmock_p$j]]]] +$var param_types_and_names = [[$for j, [[p$j##_type p$j]]]] +$var inits = [[$if i==0 [[]] $else [[ : $for j, [[p$j(gmock_p$j)]]]]]] +$var param_field_decls = [[$for j +[[ + + p$j##_type p$j;\ +]]]] +$var param_field_decls2 = [[$for j +[[ + + p$j##_type p$j;\ +]]]] +$var params = [[$for j, [[p$j]]]] +$var param_types = [[$if i==0 [[]] $else [[<$for j, [[p$j##_type]]>]]]] +$var typename_arg_types = [[$for k, [[typename arg$k[[]]_type]]]] +$var arg_types_and_names = [[$for k, [[arg$k[[]]_type arg$k]]]] +$var macro_name = [[$if i==0 [[ACTION]] $elif i==1 [[ACTION_P]] + $else [[ACTION_P$i]]]] + +#define $macro_name(name$for j [[, p$j]])\$template + class $class_name {\ + public:\ + $class_name($ctor_param_list)$inits {}\ + template \ + class gmock_Impl : public ::testing::ActionInterface {\ + public:\ + typedef F function_type;\ + typedef typename ::testing::internal::Function::Result 
return_type;\ + typedef typename ::testing::internal::Function::ArgumentTuple\ + args_type;\ + [[$if i==1 [[explicit ]]]]gmock_Impl($ctor_param_list)$inits {}\ + virtual return_type Perform(const args_type& args) {\ + return ::testing::internal::ActionHelper::\ + Perform(this, args);\ + }\ + template <$typename_arg_types>\ + return_type gmock_PerformImpl(const args_type& args, [[]] +$arg_types_and_names) const;\$param_field_decls + private:\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template operator ::testing::Action() const {\ + return ::testing::Action(new gmock_Impl($params));\ + }\$param_field_decls2 + private:\ + GTEST_DISALLOW_ASSIGN_($class_name);\ + };\$template + inline $class_name$param_types name($param_types_and_names) {\ + return $class_name$param_types($params);\ + }\$template + template \ + template <$typename_arg_types>\ + typename ::testing::internal::Function::Result\ + $class_name$param_types::gmock_Impl::gmock_PerformImpl(\ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const +]] +$$ } // This meta comment fixes auto-indentation in Emacs. It won't +$$ // show up in the generated code. + + +namespace testing { + +// The ACTION*() macros trigger warning C4100 (unreferenced formal +// parameter) in MSVC with -W4. Unfortunately they cannot be fixed in +// the macro definition, as the warnings are generated when the macro +// is expanded and macro expansion cannot contain #pragma. Therefore +// we suppress them here. +#ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable:4100) +#endif + +// Various overloads for InvokeArgument(). +// +// The InvokeArgument(a1, a2, ..., a_k) action invokes the N-th +// (0-based) argument, which must be a k-ary callable, of the mock +// function, with arguments a1, a2, ..., a_k. +// +// Notes: +// +// 1. The arguments are passed by value by default. If you need to +// pass an argument by reference, wrap it inside ByRef(). 
For +// example, +// +// InvokeArgument<1>(5, string("Hello"), ByRef(foo)) +// +// passes 5 and string("Hello") by value, and passes foo by +// reference. +// +// 2. If the callable takes an argument by reference but ByRef() is +// not used, it will receive the reference to a copy of the value, +// instead of the original value. For example, when the 0-th +// argument of the mock function takes a const string&, the action +// +// InvokeArgument<0>(string("Hello")) +// +// makes a copy of the temporary string("Hello") object and passes a +// reference of the copy, instead of the original temporary object, +// to the callable. This makes it easy for a user to define an +// InvokeArgument action from temporary values and have it performed +// later. + +$range i 0..n +$for i [[ +$range j 0..i-1 + +ACTION_TEMPLATE(InvokeArgument, + HAS_1_TEMPLATE_PARAMS(int, k), + AND_$i[[]]_VALUE_PARAMS($for j, [[p$j]])) { + return internal::CallableHelper::Call( + ::std::tr1::get(args)$for j [[, p$j]]); +} + +]] + +// Various overloads for ReturnNew(). +// +// The ReturnNew(a1, a2, ..., a_k) action returns a pointer to a new +// instance of type T, constructed on the heap with constructor arguments +// a1, a2, ..., and a_k. The caller assumes ownership of the returned value. 
+$range i 0..n +$for i [[ +$range j 0..i-1 +$var ps = [[$for j, [[p$j]]]] + +ACTION_TEMPLATE(ReturnNew, + HAS_1_TEMPLATE_PARAMS(typename, T), + AND_$i[[]]_VALUE_PARAMS($ps)) { + return new T($ps); +} + +]] + +#ifdef _MSC_VER +# pragma warning(pop) +#endif + +} // namespace testing + +#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_ diff --git a/src/gmock/include/gmock/gmock-generated-function-mockers.h b/src/gmock/include/gmock/gmock-generated-function-mockers.h new file mode 100644 index 00000000000..577fd9e9114 --- /dev/null +++ b/src/gmock/include/gmock/gmock-generated-function-mockers.h @@ -0,0 +1,991 @@ +// This file was GENERATED by command: +// pump.py gmock-generated-function-mockers.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Google Mock - a framework for writing C++ mock classes. +// +// This file implements function mockers of various arities. + +#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_ +#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_ + +#include "gmock/gmock-spec-builders.h" +#include "gmock/internal/gmock-internal-utils.h" + +namespace testing { +namespace internal { + +template +class FunctionMockerBase; + +// Note: class FunctionMocker really belongs to the ::testing +// namespace. However if we define it in ::testing, MSVC will +// complain when classes in ::testing::internal declare it as a +// friend class template. To workaround this compiler bug, we define +// FunctionMocker in ::testing::internal and import it into ::testing. +template +class FunctionMocker; + +template +class FunctionMocker : public + internal::FunctionMockerBase { + public: + typedef R F(); + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + MockSpec& With() { + return this->current_spec(); + } + + R Invoke() { + // Even though gcc and MSVC don't enforce it, 'this->' is required + // by the C++ standard [14.6.4] here, as the base class type is + // dependent on the template argument (and thus shouldn't be + // looked into when resolving InvokeWith). 
+ return this->InvokeWith(ArgumentTuple()); + } +}; + +template +class FunctionMocker : public + internal::FunctionMockerBase { + public: + typedef R F(A1); + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + MockSpec& With(const Matcher& m1) { + this->current_spec().SetMatchers(::std::tr1::make_tuple(m1)); + return this->current_spec(); + } + + R Invoke(A1 a1) { + // Even though gcc and MSVC don't enforce it, 'this->' is required + // by the C++ standard [14.6.4] here, as the base class type is + // dependent on the template argument (and thus shouldn't be + // looked into when resolving InvokeWith). + return this->InvokeWith(ArgumentTuple(a1)); + } +}; + +template +class FunctionMocker : public + internal::FunctionMockerBase { + public: + typedef R F(A1, A2); + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + MockSpec& With(const Matcher& m1, const Matcher& m2) { + this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2)); + return this->current_spec(); + } + + R Invoke(A1 a1, A2 a2) { + // Even though gcc and MSVC don't enforce it, 'this->' is required + // by the C++ standard [14.6.4] here, as the base class type is + // dependent on the template argument (and thus shouldn't be + // looked into when resolving InvokeWith). + return this->InvokeWith(ArgumentTuple(a1, a2)); + } +}; + +template +class FunctionMocker : public + internal::FunctionMockerBase { + public: + typedef R F(A1, A2, A3); + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + MockSpec& With(const Matcher& m1, const Matcher& m2, + const Matcher& m3) { + this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3)); + return this->current_spec(); + } + + R Invoke(A1 a1, A2 a2, A3 a3) { + // Even though gcc and MSVC don't enforce it, 'this->' is required + // by the C++ standard [14.6.4] here, as the base class type is + // dependent on the template argument (and thus shouldn't be + // looked into when resolving InvokeWith). 
+ return this->InvokeWith(ArgumentTuple(a1, a2, a3)); + } +}; + +template +class FunctionMocker : public + internal::FunctionMockerBase { + public: + typedef R F(A1, A2, A3, A4); + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + MockSpec& With(const Matcher& m1, const Matcher& m2, + const Matcher& m3, const Matcher& m4) { + this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3, m4)); + return this->current_spec(); + } + + R Invoke(A1 a1, A2 a2, A3 a3, A4 a4) { + // Even though gcc and MSVC don't enforce it, 'this->' is required + // by the C++ standard [14.6.4] here, as the base class type is + // dependent on the template argument (and thus shouldn't be + // looked into when resolving InvokeWith). + return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4)); + } +}; + +template +class FunctionMocker : public + internal::FunctionMockerBase { + public: + typedef R F(A1, A2, A3, A4, A5); + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + MockSpec& With(const Matcher& m1, const Matcher& m2, + const Matcher& m3, const Matcher& m4, const Matcher& m5) { + this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3, m4, + m5)); + return this->current_spec(); + } + + R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5) { + // Even though gcc and MSVC don't enforce it, 'this->' is required + // by the C++ standard [14.6.4] here, as the base class type is + // dependent on the template argument (and thus shouldn't be + // looked into when resolving InvokeWith). 
+ return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5)); + } +}; + +template +class FunctionMocker : public + internal::FunctionMockerBase { + public: + typedef R F(A1, A2, A3, A4, A5, A6); + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + MockSpec& With(const Matcher& m1, const Matcher& m2, + const Matcher& m3, const Matcher& m4, const Matcher& m5, + const Matcher& m6) { + this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3, m4, m5, + m6)); + return this->current_spec(); + } + + R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6) { + // Even though gcc and MSVC don't enforce it, 'this->' is required + // by the C++ standard [14.6.4] here, as the base class type is + // dependent on the template argument (and thus shouldn't be + // looked into when resolving InvokeWith). + return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5, a6)); + } +}; + +template +class FunctionMocker : public + internal::FunctionMockerBase { + public: + typedef R F(A1, A2, A3, A4, A5, A6, A7); + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + MockSpec& With(const Matcher& m1, const Matcher& m2, + const Matcher& m3, const Matcher& m4, const Matcher& m5, + const Matcher& m6, const Matcher& m7) { + this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3, m4, m5, + m6, m7)); + return this->current_spec(); + } + + R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7) { + // Even though gcc and MSVC don't enforce it, 'this->' is required + // by the C++ standard [14.6.4] here, as the base class type is + // dependent on the template argument (and thus shouldn't be + // looked into when resolving InvokeWith). 
+ return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5, a6, a7)); + } +}; + +template +class FunctionMocker : public + internal::FunctionMockerBase { + public: + typedef R F(A1, A2, A3, A4, A5, A6, A7, A8); + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + MockSpec& With(const Matcher& m1, const Matcher& m2, + const Matcher& m3, const Matcher& m4, const Matcher& m5, + const Matcher& m6, const Matcher& m7, const Matcher& m8) { + this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3, m4, m5, + m6, m7, m8)); + return this->current_spec(); + } + + R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8) { + // Even though gcc and MSVC don't enforce it, 'this->' is required + // by the C++ standard [14.6.4] here, as the base class type is + // dependent on the template argument (and thus shouldn't be + // looked into when resolving InvokeWith). + return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5, a6, a7, a8)); + } +}; + +template +class FunctionMocker : public + internal::FunctionMockerBase { + public: + typedef R F(A1, A2, A3, A4, A5, A6, A7, A8, A9); + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + MockSpec& With(const Matcher& m1, const Matcher& m2, + const Matcher& m3, const Matcher& m4, const Matcher& m5, + const Matcher& m6, const Matcher& m7, const Matcher& m8, + const Matcher& m9) { + this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3, m4, m5, + m6, m7, m8, m9)); + return this->current_spec(); + } + + R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8, A9 a9) { + // Even though gcc and MSVC don't enforce it, 'this->' is required + // by the C++ standard [14.6.4] here, as the base class type is + // dependent on the template argument (and thus shouldn't be + // looked into when resolving InvokeWith). 
+ return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5, a6, a7, a8, a9)); + } +}; + +template +class FunctionMocker : public + internal::FunctionMockerBase { + public: + typedef R F(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10); + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + MockSpec& With(const Matcher& m1, const Matcher& m2, + const Matcher& m3, const Matcher& m4, const Matcher& m5, + const Matcher& m6, const Matcher& m7, const Matcher& m8, + const Matcher& m9, const Matcher& m10) { + this->current_spec().SetMatchers(::std::tr1::make_tuple(m1, m2, m3, m4, m5, + m6, m7, m8, m9, m10)); + return this->current_spec(); + } + + R Invoke(A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, A7 a7, A8 a8, A9 a9, + A10 a10) { + // Even though gcc and MSVC don't enforce it, 'this->' is required + // by the C++ standard [14.6.4] here, as the base class type is + // dependent on the template argument (and thus shouldn't be + // looked into when resolving InvokeWith). + return this->InvokeWith(ArgumentTuple(a1, a2, a3, a4, a5, a6, a7, a8, a9, + a10)); + } +}; + +} // namespace internal + +// The style guide prohibits "using" statements in a namespace scope +// inside a header file. However, the FunctionMocker class template +// is meant to be defined in the ::testing namespace. The following +// line is just a trick for working around a bug in MSVC 8.0, which +// cannot handle it if we define FunctionMocker in ::testing. +using internal::FunctionMocker; + +// GMOCK_RESULT_(tn, F) expands to the result type of function type F. +// We define this as a variadic macro in case F contains unprotected +// commas (the same reason that we use variadic macros in other places +// in this file). +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_RESULT_(tn, ...) \ + tn ::testing::internal::Function<__VA_ARGS__>::Result + +// The type of argument N of the given function type. +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! 
+#define GMOCK_ARG_(tn, N, ...) \ + tn ::testing::internal::Function<__VA_ARGS__>::Argument##N + +// The matcher type for argument N of the given function type. +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_MATCHER_(tn, N, ...) \ + const ::testing::Matcher& + +// The variable for mocking the given method. +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_MOCKER_(arity, constness, Method) \ + GTEST_CONCAT_TOKEN_(gmock##constness##arity##_##Method##_, __LINE__) + +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_METHOD0_(tn, constness, ct, Method, ...) \ + GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ + ) constness { \ + GTEST_COMPILE_ASSERT_((::std::tr1::tuple_size< \ + tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \ + == 0), \ + this_method_does_not_take_0_arguments); \ + GMOCK_MOCKER_(0, constness, Method).SetOwnerAndName(this, #Method); \ + return GMOCK_MOCKER_(0, constness, Method).Invoke(); \ + } \ + ::testing::MockSpec<__VA_ARGS__>& \ + gmock_##Method() constness { \ + GMOCK_MOCKER_(0, constness, Method).RegisterOwner(this); \ + return GMOCK_MOCKER_(0, constness, Method).With(); \ + } \ + mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(0, constness, \ + Method) + +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_METHOD1_(tn, constness, ct, Method, ...) 
\ + GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ + GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1) constness { \ + GTEST_COMPILE_ASSERT_((::std::tr1::tuple_size< \ + tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \ + == 1), \ + this_method_does_not_take_1_argument); \ + GMOCK_MOCKER_(1, constness, Method).SetOwnerAndName(this, #Method); \ + return GMOCK_MOCKER_(1, constness, Method).Invoke(gmock_a1); \ + } \ + ::testing::MockSpec<__VA_ARGS__>& \ + gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1) constness { \ + GMOCK_MOCKER_(1, constness, Method).RegisterOwner(this); \ + return GMOCK_MOCKER_(1, constness, Method).With(gmock_a1); \ + } \ + mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(1, constness, \ + Method) + +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_METHOD2_(tn, constness, ct, Method, ...) \ + GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ + GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2) constness { \ + GTEST_COMPILE_ASSERT_((::std::tr1::tuple_size< \ + tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \ + == 2), \ + this_method_does_not_take_2_arguments); \ + GMOCK_MOCKER_(2, constness, Method).SetOwnerAndName(this, #Method); \ + return GMOCK_MOCKER_(2, constness, Method).Invoke(gmock_a1, gmock_a2); \ + } \ + ::testing::MockSpec<__VA_ARGS__>& \ + gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2) constness { \ + GMOCK_MOCKER_(2, constness, Method).RegisterOwner(this); \ + return GMOCK_MOCKER_(2, constness, Method).With(gmock_a1, gmock_a2); \ + } \ + mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(2, constness, \ + Method) + +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_METHOD3_(tn, constness, ct, Method, ...) 
\ + GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ + GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3) constness { \ + GTEST_COMPILE_ASSERT_((::std::tr1::tuple_size< \ + tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \ + == 3), \ + this_method_does_not_take_3_arguments); \ + GMOCK_MOCKER_(3, constness, Method).SetOwnerAndName(this, #Method); \ + return GMOCK_MOCKER_(3, constness, Method).Invoke(gmock_a1, gmock_a2, \ + gmock_a3); \ + } \ + ::testing::MockSpec<__VA_ARGS__>& \ + gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3) constness { \ + GMOCK_MOCKER_(3, constness, Method).RegisterOwner(this); \ + return GMOCK_MOCKER_(3, constness, Method).With(gmock_a1, gmock_a2, \ + gmock_a3); \ + } \ + mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(3, constness, \ + Method) + +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_METHOD4_(tn, constness, ct, Method, ...) 
\ + GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ + GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \ + GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4) constness { \ + GTEST_COMPILE_ASSERT_((::std::tr1::tuple_size< \ + tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \ + == 4), \ + this_method_does_not_take_4_arguments); \ + GMOCK_MOCKER_(4, constness, Method).SetOwnerAndName(this, #Method); \ + return GMOCK_MOCKER_(4, constness, Method).Invoke(gmock_a1, gmock_a2, \ + gmock_a3, gmock_a4); \ + } \ + ::testing::MockSpec<__VA_ARGS__>& \ + gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \ + GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4) constness { \ + GMOCK_MOCKER_(4, constness, Method).RegisterOwner(this); \ + return GMOCK_MOCKER_(4, constness, Method).With(gmock_a1, gmock_a2, \ + gmock_a3, gmock_a4); \ + } \ + mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(4, constness, \ + Method) + +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_METHOD5_(tn, constness, ct, Method, ...) 
\ + GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ + GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \ + GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, \ + GMOCK_ARG_(tn, 5, __VA_ARGS__) gmock_a5) constness { \ + GTEST_COMPILE_ASSERT_((::std::tr1::tuple_size< \ + tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \ + == 5), \ + this_method_does_not_take_5_arguments); \ + GMOCK_MOCKER_(5, constness, Method).SetOwnerAndName(this, #Method); \ + return GMOCK_MOCKER_(5, constness, Method).Invoke(gmock_a1, gmock_a2, \ + gmock_a3, gmock_a4, gmock_a5); \ + } \ + ::testing::MockSpec<__VA_ARGS__>& \ + gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \ + GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \ + GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5) constness { \ + GMOCK_MOCKER_(5, constness, Method).RegisterOwner(this); \ + return GMOCK_MOCKER_(5, constness, Method).With(gmock_a1, gmock_a2, \ + gmock_a3, gmock_a4, gmock_a5); \ + } \ + mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(5, constness, \ + Method) + +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_METHOD6_(tn, constness, ct, Method, ...) 
\ + GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ + GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \ + GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, \ + GMOCK_ARG_(tn, 5, __VA_ARGS__) gmock_a5, \ + GMOCK_ARG_(tn, 6, __VA_ARGS__) gmock_a6) constness { \ + GTEST_COMPILE_ASSERT_((::std::tr1::tuple_size< \ + tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \ + == 6), \ + this_method_does_not_take_6_arguments); \ + GMOCK_MOCKER_(6, constness, Method).SetOwnerAndName(this, #Method); \ + return GMOCK_MOCKER_(6, constness, Method).Invoke(gmock_a1, gmock_a2, \ + gmock_a3, gmock_a4, gmock_a5, gmock_a6); \ + } \ + ::testing::MockSpec<__VA_ARGS__>& \ + gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \ + GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \ + GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5, \ + GMOCK_MATCHER_(tn, 6, __VA_ARGS__) gmock_a6) constness { \ + GMOCK_MOCKER_(6, constness, Method).RegisterOwner(this); \ + return GMOCK_MOCKER_(6, constness, Method).With(gmock_a1, gmock_a2, \ + gmock_a3, gmock_a4, gmock_a5, gmock_a6); \ + } \ + mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(6, constness, \ + Method) + +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_METHOD7_(tn, constness, ct, Method, ...) 
\ + GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ + GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \ + GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, \ + GMOCK_ARG_(tn, 5, __VA_ARGS__) gmock_a5, \ + GMOCK_ARG_(tn, 6, __VA_ARGS__) gmock_a6, \ + GMOCK_ARG_(tn, 7, __VA_ARGS__) gmock_a7) constness { \ + GTEST_COMPILE_ASSERT_((::std::tr1::tuple_size< \ + tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \ + == 7), \ + this_method_does_not_take_7_arguments); \ + GMOCK_MOCKER_(7, constness, Method).SetOwnerAndName(this, #Method); \ + return GMOCK_MOCKER_(7, constness, Method).Invoke(gmock_a1, gmock_a2, \ + gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7); \ + } \ + ::testing::MockSpec<__VA_ARGS__>& \ + gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \ + GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \ + GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5, \ + GMOCK_MATCHER_(tn, 6, __VA_ARGS__) gmock_a6, \ + GMOCK_MATCHER_(tn, 7, __VA_ARGS__) gmock_a7) constness { \ + GMOCK_MOCKER_(7, constness, Method).RegisterOwner(this); \ + return GMOCK_MOCKER_(7, constness, Method).With(gmock_a1, gmock_a2, \ + gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7); \ + } \ + mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(7, constness, \ + Method) + +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_METHOD8_(tn, constness, ct, Method, ...) 
\ + GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ + GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \ + GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, \ + GMOCK_ARG_(tn, 5, __VA_ARGS__) gmock_a5, \ + GMOCK_ARG_(tn, 6, __VA_ARGS__) gmock_a6, \ + GMOCK_ARG_(tn, 7, __VA_ARGS__) gmock_a7, \ + GMOCK_ARG_(tn, 8, __VA_ARGS__) gmock_a8) constness { \ + GTEST_COMPILE_ASSERT_((::std::tr1::tuple_size< \ + tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \ + == 8), \ + this_method_does_not_take_8_arguments); \ + GMOCK_MOCKER_(8, constness, Method).SetOwnerAndName(this, #Method); \ + return GMOCK_MOCKER_(8, constness, Method).Invoke(gmock_a1, gmock_a2, \ + gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8); \ + } \ + ::testing::MockSpec<__VA_ARGS__>& \ + gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \ + GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \ + GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5, \ + GMOCK_MATCHER_(tn, 6, __VA_ARGS__) gmock_a6, \ + GMOCK_MATCHER_(tn, 7, __VA_ARGS__) gmock_a7, \ + GMOCK_MATCHER_(tn, 8, __VA_ARGS__) gmock_a8) constness { \ + GMOCK_MOCKER_(8, constness, Method).RegisterOwner(this); \ + return GMOCK_MOCKER_(8, constness, Method).With(gmock_a1, gmock_a2, \ + gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8); \ + } \ + mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(8, constness, \ + Method) + +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_METHOD9_(tn, constness, ct, Method, ...) 
\ + GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ + GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \ + GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, \ + GMOCK_ARG_(tn, 5, __VA_ARGS__) gmock_a5, \ + GMOCK_ARG_(tn, 6, __VA_ARGS__) gmock_a6, \ + GMOCK_ARG_(tn, 7, __VA_ARGS__) gmock_a7, \ + GMOCK_ARG_(tn, 8, __VA_ARGS__) gmock_a8, \ + GMOCK_ARG_(tn, 9, __VA_ARGS__) gmock_a9) constness { \ + GTEST_COMPILE_ASSERT_((::std::tr1::tuple_size< \ + tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \ + == 9), \ + this_method_does_not_take_9_arguments); \ + GMOCK_MOCKER_(9, constness, Method).SetOwnerAndName(this, #Method); \ + return GMOCK_MOCKER_(9, constness, Method).Invoke(gmock_a1, gmock_a2, \ + gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8, \ + gmock_a9); \ + } \ + ::testing::MockSpec<__VA_ARGS__>& \ + gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \ + GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \ + GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5, \ + GMOCK_MATCHER_(tn, 6, __VA_ARGS__) gmock_a6, \ + GMOCK_MATCHER_(tn, 7, __VA_ARGS__) gmock_a7, \ + GMOCK_MATCHER_(tn, 8, __VA_ARGS__) gmock_a8, \ + GMOCK_MATCHER_(tn, 9, __VA_ARGS__) gmock_a9) constness { \ + GMOCK_MOCKER_(9, constness, Method).RegisterOwner(this); \ + return GMOCK_MOCKER_(9, constness, Method).With(gmock_a1, gmock_a2, \ + gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8, \ + gmock_a9); \ + } \ + mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(9, constness, \ + Method) + +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_METHOD10_(tn, constness, ct, Method, ...) 
\ + GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ + GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_ARG_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \ + GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, \ + GMOCK_ARG_(tn, 5, __VA_ARGS__) gmock_a5, \ + GMOCK_ARG_(tn, 6, __VA_ARGS__) gmock_a6, \ + GMOCK_ARG_(tn, 7, __VA_ARGS__) gmock_a7, \ + GMOCK_ARG_(tn, 8, __VA_ARGS__) gmock_a8, \ + GMOCK_ARG_(tn, 9, __VA_ARGS__) gmock_a9, \ + GMOCK_ARG_(tn, 10, __VA_ARGS__) gmock_a10) constness { \ + GTEST_COMPILE_ASSERT_((::std::tr1::tuple_size< \ + tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value \ + == 10), \ + this_method_does_not_take_10_arguments); \ + GMOCK_MOCKER_(10, constness, Method).SetOwnerAndName(this, #Method); \ + return GMOCK_MOCKER_(10, constness, Method).Invoke(gmock_a1, gmock_a2, \ + gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8, gmock_a9, \ + gmock_a10); \ + } \ + ::testing::MockSpec<__VA_ARGS__>& \ + gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ + GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ + GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \ + GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \ + GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5, \ + GMOCK_MATCHER_(tn, 6, __VA_ARGS__) gmock_a6, \ + GMOCK_MATCHER_(tn, 7, __VA_ARGS__) gmock_a7, \ + GMOCK_MATCHER_(tn, 8, __VA_ARGS__) gmock_a8, \ + GMOCK_MATCHER_(tn, 9, __VA_ARGS__) gmock_a9, \ + GMOCK_MATCHER_(tn, 10, \ + __VA_ARGS__) gmock_a10) constness { \ + GMOCK_MOCKER_(10, constness, Method).RegisterOwner(this); \ + return GMOCK_MOCKER_(10, constness, Method).With(gmock_a1, gmock_a2, \ + gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8, gmock_a9, \ + gmock_a10); \ + } \ + mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(10, constness, \ + Method) + +#define MOCK_METHOD0(m, ...) GMOCK_METHOD0_(, , , m, __VA_ARGS__) +#define MOCK_METHOD1(m, ...) GMOCK_METHOD1_(, , , m, __VA_ARGS__) +#define MOCK_METHOD2(m, ...) 
GMOCK_METHOD2_(, , , m, __VA_ARGS__) +#define MOCK_METHOD3(m, ...) GMOCK_METHOD3_(, , , m, __VA_ARGS__) +#define MOCK_METHOD4(m, ...) GMOCK_METHOD4_(, , , m, __VA_ARGS__) +#define MOCK_METHOD5(m, ...) GMOCK_METHOD5_(, , , m, __VA_ARGS__) +#define MOCK_METHOD6(m, ...) GMOCK_METHOD6_(, , , m, __VA_ARGS__) +#define MOCK_METHOD7(m, ...) GMOCK_METHOD7_(, , , m, __VA_ARGS__) +#define MOCK_METHOD8(m, ...) GMOCK_METHOD8_(, , , m, __VA_ARGS__) +#define MOCK_METHOD9(m, ...) GMOCK_METHOD9_(, , , m, __VA_ARGS__) +#define MOCK_METHOD10(m, ...) GMOCK_METHOD10_(, , , m, __VA_ARGS__) + +#define MOCK_CONST_METHOD0(m, ...) GMOCK_METHOD0_(, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD1(m, ...) GMOCK_METHOD1_(, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD2(m, ...) GMOCK_METHOD2_(, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD3(m, ...) GMOCK_METHOD3_(, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD4(m, ...) GMOCK_METHOD4_(, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD5(m, ...) GMOCK_METHOD5_(, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD6(m, ...) GMOCK_METHOD6_(, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD7(m, ...) GMOCK_METHOD7_(, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD8(m, ...) GMOCK_METHOD8_(, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD9(m, ...) GMOCK_METHOD9_(, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD10(m, ...) GMOCK_METHOD10_(, const, , m, __VA_ARGS__) + +#define MOCK_METHOD0_T(m, ...) GMOCK_METHOD0_(typename, , , m, __VA_ARGS__) +#define MOCK_METHOD1_T(m, ...) GMOCK_METHOD1_(typename, , , m, __VA_ARGS__) +#define MOCK_METHOD2_T(m, ...) GMOCK_METHOD2_(typename, , , m, __VA_ARGS__) +#define MOCK_METHOD3_T(m, ...) GMOCK_METHOD3_(typename, , , m, __VA_ARGS__) +#define MOCK_METHOD4_T(m, ...) GMOCK_METHOD4_(typename, , , m, __VA_ARGS__) +#define MOCK_METHOD5_T(m, ...) GMOCK_METHOD5_(typename, , , m, __VA_ARGS__) +#define MOCK_METHOD6_T(m, ...) 
GMOCK_METHOD6_(typename, , , m, __VA_ARGS__) +#define MOCK_METHOD7_T(m, ...) GMOCK_METHOD7_(typename, , , m, __VA_ARGS__) +#define MOCK_METHOD8_T(m, ...) GMOCK_METHOD8_(typename, , , m, __VA_ARGS__) +#define MOCK_METHOD9_T(m, ...) GMOCK_METHOD9_(typename, , , m, __VA_ARGS__) +#define MOCK_METHOD10_T(m, ...) GMOCK_METHOD10_(typename, , , m, __VA_ARGS__) + +#define MOCK_CONST_METHOD0_T(m, ...) \ + GMOCK_METHOD0_(typename, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD1_T(m, ...) \ + GMOCK_METHOD1_(typename, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD2_T(m, ...) \ + GMOCK_METHOD2_(typename, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD3_T(m, ...) \ + GMOCK_METHOD3_(typename, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD4_T(m, ...) \ + GMOCK_METHOD4_(typename, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD5_T(m, ...) \ + GMOCK_METHOD5_(typename, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD6_T(m, ...) \ + GMOCK_METHOD6_(typename, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD7_T(m, ...) \ + GMOCK_METHOD7_(typename, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD8_T(m, ...) \ + GMOCK_METHOD8_(typename, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD9_T(m, ...) \ + GMOCK_METHOD9_(typename, const, , m, __VA_ARGS__) +#define MOCK_CONST_METHOD10_T(m, ...) \ + GMOCK_METHOD10_(typename, const, , m, __VA_ARGS__) + +#define MOCK_METHOD0_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD0_(, , ct, m, __VA_ARGS__) +#define MOCK_METHOD1_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD1_(, , ct, m, __VA_ARGS__) +#define MOCK_METHOD2_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD2_(, , ct, m, __VA_ARGS__) +#define MOCK_METHOD3_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD3_(, , ct, m, __VA_ARGS__) +#define MOCK_METHOD4_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD4_(, , ct, m, __VA_ARGS__) +#define MOCK_METHOD5_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD5_(, , ct, m, __VA_ARGS__) +#define MOCK_METHOD6_WITH_CALLTYPE(ct, m, ...) 
\ + GMOCK_METHOD6_(, , ct, m, __VA_ARGS__) +#define MOCK_METHOD7_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD7_(, , ct, m, __VA_ARGS__) +#define MOCK_METHOD8_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD8_(, , ct, m, __VA_ARGS__) +#define MOCK_METHOD9_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD9_(, , ct, m, __VA_ARGS__) +#define MOCK_METHOD10_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD10_(, , ct, m, __VA_ARGS__) + +#define MOCK_CONST_METHOD0_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD0_(, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD1_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD1_(, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD2_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD2_(, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD3_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD3_(, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD4_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD4_(, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD5_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD5_(, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD6_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD6_(, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD7_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD7_(, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD8_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD8_(, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD9_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD9_(, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD10_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD10_(, const, ct, m, __VA_ARGS__) + +#define MOCK_METHOD0_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD0_(typename, , ct, m, __VA_ARGS__) +#define MOCK_METHOD1_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD1_(typename, , ct, m, __VA_ARGS__) +#define MOCK_METHOD2_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD2_(typename, , ct, m, __VA_ARGS__) +#define MOCK_METHOD3_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD3_(typename, , ct, m, __VA_ARGS__) +#define MOCK_METHOD4_T_WITH_CALLTYPE(ct, m, ...) 
\ + GMOCK_METHOD4_(typename, , ct, m, __VA_ARGS__) +#define MOCK_METHOD5_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD5_(typename, , ct, m, __VA_ARGS__) +#define MOCK_METHOD6_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD6_(typename, , ct, m, __VA_ARGS__) +#define MOCK_METHOD7_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD7_(typename, , ct, m, __VA_ARGS__) +#define MOCK_METHOD8_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD8_(typename, , ct, m, __VA_ARGS__) +#define MOCK_METHOD9_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD9_(typename, , ct, m, __VA_ARGS__) +#define MOCK_METHOD10_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD10_(typename, , ct, m, __VA_ARGS__) + +#define MOCK_CONST_METHOD0_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD0_(typename, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD1_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD1_(typename, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD2_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD2_(typename, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD3_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD3_(typename, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD4_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD4_(typename, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD5_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD5_(typename, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD6_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD6_(typename, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD7_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD7_(typename, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD8_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD8_(typename, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD9_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD9_(typename, const, ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD10_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD10_(typename, const, ct, m, __VA_ARGS__) + +// A MockFunction class has one mock method whose type is F. 
It is +// useful when you just want your test code to emit some messages and +// have Google Mock verify the right messages are sent (and perhaps at +// the right times). For example, if you are exercising code: +// +// Foo(1); +// Foo(2); +// Foo(3); +// +// and want to verify that Foo(1) and Foo(3) both invoke +// mock.Bar("a"), but Foo(2) doesn't invoke anything, you can write: +// +// TEST(FooTest, InvokesBarCorrectly) { +// MyMock mock; +// MockFunction check; +// { +// InSequence s; +// +// EXPECT_CALL(mock, Bar("a")); +// EXPECT_CALL(check, Call("1")); +// EXPECT_CALL(check, Call("2")); +// EXPECT_CALL(mock, Bar("a")); +// } +// Foo(1); +// check.Call("1"); +// Foo(2); +// check.Call("2"); +// Foo(3); +// } +// +// The expectation spec says that the first Bar("a") must happen +// before check point "1", the second Bar("a") must happen after check +// point "2", and nothing should happen between the two check +// points. The explicit check points make it easy to tell which +// Bar("a") is called by which call to Foo(). 
+template +class MockFunction; + +template +class MockFunction { + public: + MockFunction() {} + + MOCK_METHOD0_T(Call, R()); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction); +}; + +template +class MockFunction { + public: + MockFunction() {} + + MOCK_METHOD1_T(Call, R(A0)); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction); +}; + +template +class MockFunction { + public: + MockFunction() {} + + MOCK_METHOD2_T(Call, R(A0, A1)); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction); +}; + +template +class MockFunction { + public: + MockFunction() {} + + MOCK_METHOD3_T(Call, R(A0, A1, A2)); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction); +}; + +template +class MockFunction { + public: + MockFunction() {} + + MOCK_METHOD4_T(Call, R(A0, A1, A2, A3)); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction); +}; + +template +class MockFunction { + public: + MockFunction() {} + + MOCK_METHOD5_T(Call, R(A0, A1, A2, A3, A4)); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction); +}; + +template +class MockFunction { + public: + MockFunction() {} + + MOCK_METHOD6_T(Call, R(A0, A1, A2, A3, A4, A5)); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction); +}; + +template +class MockFunction { + public: + MockFunction() {} + + MOCK_METHOD7_T(Call, R(A0, A1, A2, A3, A4, A5, A6)); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction); +}; + +template +class MockFunction { + public: + MockFunction() {} + + MOCK_METHOD8_T(Call, R(A0, A1, A2, A3, A4, A5, A6, A7)); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction); +}; + +template +class MockFunction { + public: + MockFunction() {} + + MOCK_METHOD9_T(Call, R(A0, A1, A2, A3, A4, A5, A6, A7, A8)); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction); +}; + +template +class MockFunction { + public: + MockFunction() {} + + MOCK_METHOD10_T(Call, R(A0, A1, A2, A3, A4, A5, A6, A7, A8, A9)); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction); 
+}; + +} // namespace testing + +#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_ diff --git a/src/gmock/include/gmock/gmock-generated-function-mockers.h.pump b/src/gmock/include/gmock/gmock-generated-function-mockers.h.pump new file mode 100644 index 00000000000..f050caf1297 --- /dev/null +++ b/src/gmock/include/gmock/gmock-generated-function-mockers.h.pump @@ -0,0 +1,265 @@ +$$ -*- mode: c++; -*- +$$ This is a Pump source file. Please use Pump to convert it to +$$ gmock-generated-function-mockers.h. +$$ +$var n = 10 $$ The maximum arity we support. +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Google Mock - a framework for writing C++ mock classes. +// +// This file implements function mockers of various arities. + +#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_ +#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_ + +#include "gmock/gmock-spec-builders.h" +#include "gmock/internal/gmock-internal-utils.h" + +namespace testing { +namespace internal { + +template +class FunctionMockerBase; + +// Note: class FunctionMocker really belongs to the ::testing +// namespace. However if we define it in ::testing, MSVC will +// complain when classes in ::testing::internal declare it as a +// friend class template. To workaround this compiler bug, we define +// FunctionMocker in ::testing::internal and import it into ::testing. 
+template +class FunctionMocker; + + +$range i 0..n +$for i [[ +$range j 1..i +$var typename_As = [[$for j [[, typename A$j]]]] +$var As = [[$for j, [[A$j]]]] +$var as = [[$for j, [[a$j]]]] +$var Aas = [[$for j, [[A$j a$j]]]] +$var ms = [[$for j, [[m$j]]]] +$var matchers = [[$for j, [[const Matcher& m$j]]]] +template +class FunctionMocker : public + internal::FunctionMockerBase { + public: + typedef R F($As); + typedef typename internal::Function::ArgumentTuple ArgumentTuple; + + MockSpec& With($matchers) { + +$if i >= 1 [[ + this->current_spec().SetMatchers(::std::tr1::make_tuple($ms)); + +]] + return this->current_spec(); + } + + R Invoke($Aas) { + // Even though gcc and MSVC don't enforce it, 'this->' is required + // by the C++ standard [14.6.4] here, as the base class type is + // dependent on the template argument (and thus shouldn't be + // looked into when resolving InvokeWith). + return this->InvokeWith(ArgumentTuple($as)); + } +}; + + +]] +} // namespace internal + +// The style guide prohibits "using" statements in a namespace scope +// inside a header file. However, the FunctionMocker class template +// is meant to be defined in the ::testing namespace. The following +// line is just a trick for working around a bug in MSVC 8.0, which +// cannot handle it if we define FunctionMocker in ::testing. +using internal::FunctionMocker; + +// GMOCK_RESULT_(tn, F) expands to the result type of function type F. +// We define this as a variadic macro in case F contains unprotected +// commas (the same reason that we use variadic macros in other places +// in this file). +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_RESULT_(tn, ...) \ + tn ::testing::internal::Function<__VA_ARGS__>::Result + +// The type of argument N of the given function type. +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_ARG_(tn, N, ...) 
\ + tn ::testing::internal::Function<__VA_ARGS__>::Argument##N + +// The matcher type for argument N of the given function type. +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_MATCHER_(tn, N, ...) \ + const ::testing::Matcher& + +// The variable for mocking the given method. +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_MOCKER_(arity, constness, Method) \ + GTEST_CONCAT_TOKEN_(gmock##constness##arity##_##Method##_, __LINE__) + + +$for i [[ +$range j 1..i +$var arg_as = [[$for j, \ + [[GMOCK_ARG_(tn, $j, __VA_ARGS__) gmock_a$j]]]] +$var as = [[$for j, [[gmock_a$j]]]] +$var matcher_as = [[$for j, \ + [[GMOCK_MATCHER_(tn, $j, __VA_ARGS__) gmock_a$j]]]] +// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! +#define GMOCK_METHOD$i[[]]_(tn, constness, ct, Method, ...) \ + GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ + $arg_as) constness { \ + GTEST_COMPILE_ASSERT_((::std::tr1::tuple_size< \ + tn ::testing::internal::Function<__VA_ARGS__>::ArgumentTuple>::value == $i), \ + this_method_does_not_take_$i[[]]_argument[[$if i != 1 [[s]]]]); \ + GMOCK_MOCKER_($i, constness, Method).SetOwnerAndName(this, #Method); \ + return GMOCK_MOCKER_($i, constness, Method).Invoke($as); \ + } \ + ::testing::MockSpec<__VA_ARGS__>& \ + gmock_##Method($matcher_as) constness { \ + GMOCK_MOCKER_($i, constness, Method).RegisterOwner(this); \ + return GMOCK_MOCKER_($i, constness, Method).With($as); \ + } \ + mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_($i, constness, Method) + + +]] +$for i [[ +#define MOCK_METHOD$i(m, ...) GMOCK_METHOD$i[[]]_(, , , m, __VA_ARGS__) + +]] + + +$for i [[ +#define MOCK_CONST_METHOD$i(m, ...) GMOCK_METHOD$i[[]]_(, const, , m, __VA_ARGS__) + +]] + + +$for i [[ +#define MOCK_METHOD$i[[]]_T(m, ...) GMOCK_METHOD$i[[]]_(typename, , , m, __VA_ARGS__) + +]] + + +$for i [[ +#define MOCK_CONST_METHOD$i[[]]_T(m, ...) 
\ + GMOCK_METHOD$i[[]]_(typename, const, , m, __VA_ARGS__) + +]] + + +$for i [[ +#define MOCK_METHOD$i[[]]_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD$i[[]]_(, , ct, m, __VA_ARGS__) + +]] + + +$for i [[ +#define MOCK_CONST_METHOD$i[[]]_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD$i[[]]_(, const, ct, m, __VA_ARGS__) + +]] + + +$for i [[ +#define MOCK_METHOD$i[[]]_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD$i[[]]_(typename, , ct, m, __VA_ARGS__) + +]] + + +$for i [[ +#define MOCK_CONST_METHOD$i[[]]_T_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_METHOD$i[[]]_(typename, const, ct, m, __VA_ARGS__) + +]] + +// A MockFunction class has one mock method whose type is F. It is +// useful when you just want your test code to emit some messages and +// have Google Mock verify the right messages are sent (and perhaps at +// the right times). For example, if you are exercising code: +// +// Foo(1); +// Foo(2); +// Foo(3); +// +// and want to verify that Foo(1) and Foo(3) both invoke +// mock.Bar("a"), but Foo(2) doesn't invoke anything, you can write: +// +// TEST(FooTest, InvokesBarCorrectly) { +// MyMock mock; +// MockFunction check; +// { +// InSequence s; +// +// EXPECT_CALL(mock, Bar("a")); +// EXPECT_CALL(check, Call("1")); +// EXPECT_CALL(check, Call("2")); +// EXPECT_CALL(mock, Bar("a")); +// } +// Foo(1); +// check.Call("1"); +// Foo(2); +// check.Call("2"); +// Foo(3); +// } +// +// The expectation spec says that the first Bar("a") must happen +// before check point "1", the second Bar("a") must happen after check +// point "2", and nothing should happen between the two check +// points. The explicit check points make it easy to tell which +// Bar("a") is called by which call to Foo(). 
+template +class MockFunction; + + +$for i [[ +$range j 0..i-1 +template +class MockFunction { + public: + MockFunction() {} + + MOCK_METHOD$i[[]]_T(Call, R($for j, [[A$j]])); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(MockFunction); +}; + + +]] +} // namespace testing + +#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_ diff --git a/src/gmock/include/gmock/gmock-generated-matchers.h b/src/gmock/include/gmock/gmock-generated-matchers.h new file mode 100644 index 00000000000..b4c85715a88 --- /dev/null +++ b/src/gmock/include/gmock/gmock-generated-matchers.h @@ -0,0 +1,2190 @@ +// This file was GENERATED by command: +// pump.py gmock-generated-matchers.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Google Mock - a framework for writing C++ mock classes. +// +// This file implements some commonly used variadic matchers. + +#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_ +#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_ + +#include +#include +#include +#include +#include "gmock/gmock-matchers.h" + +namespace testing { +namespace internal { + +// The type of the i-th (0-based) field of Tuple. +#define GMOCK_FIELD_TYPE_(Tuple, i) \ + typename ::std::tr1::tuple_element::type + +// TupleFields is for selecting fields from a +// tuple of type Tuple. It has two members: +// +// type: a tuple type whose i-th field is the ki-th field of Tuple. +// GetSelectedFields(t): returns fields k0, ..., and kn of t as a tuple. +// +// For example, in class TupleFields, 2, 0>, we have: +// +// type is tuple, and +// GetSelectedFields(make_tuple(true, 'a', 42)) is (42, true). + +template +class TupleFields; + +// This generic version is used when there are 10 selectors. +template +class TupleFields { + public: + typedef ::std::tr1::tuple type; + static type GetSelectedFields(const Tuple& t) { + using ::std::tr1::get; + return type(get(t), get(t), get(t), get(t), get(t), + get(t), get(t), get(t), get(t), get(t)); + } +}; + +// The following specialization is used for 0 ~ 9 selectors. 
+ +template +class TupleFields { + public: + typedef ::std::tr1::tuple<> type; + static type GetSelectedFields(const Tuple& /* t */) { + using ::std::tr1::get; + return type(); + } +}; + +template +class TupleFields { + public: + typedef ::std::tr1::tuple type; + static type GetSelectedFields(const Tuple& t) { + using ::std::tr1::get; + return type(get(t)); + } +}; + +template +class TupleFields { + public: + typedef ::std::tr1::tuple type; + static type GetSelectedFields(const Tuple& t) { + using ::std::tr1::get; + return type(get(t), get(t)); + } +}; + +template +class TupleFields { + public: + typedef ::std::tr1::tuple type; + static type GetSelectedFields(const Tuple& t) { + using ::std::tr1::get; + return type(get(t), get(t), get(t)); + } +}; + +template +class TupleFields { + public: + typedef ::std::tr1::tuple type; + static type GetSelectedFields(const Tuple& t) { + using ::std::tr1::get; + return type(get(t), get(t), get(t), get(t)); + } +}; + +template +class TupleFields { + public: + typedef ::std::tr1::tuple type; + static type GetSelectedFields(const Tuple& t) { + using ::std::tr1::get; + return type(get(t), get(t), get(t), get(t), get(t)); + } +}; + +template +class TupleFields { + public: + typedef ::std::tr1::tuple type; + static type GetSelectedFields(const Tuple& t) { + using ::std::tr1::get; + return type(get(t), get(t), get(t), get(t), get(t), + get(t)); + } +}; + +template +class TupleFields { + public: + typedef ::std::tr1::tuple type; + static type GetSelectedFields(const Tuple& t) { + using ::std::tr1::get; + return type(get(t), get(t), get(t), get(t), get(t), + get(t), get(t)); + } +}; + +template +class TupleFields { + public: + typedef ::std::tr1::tuple type; + static type GetSelectedFields(const Tuple& t) { + using ::std::tr1::get; + return type(get(t), get(t), get(t), get(t), get(t), + get(t), get(t), get(t)); + } +}; + +template +class TupleFields { + public: + typedef ::std::tr1::tuple type; + static type GetSelectedFields(const 
Tuple& t) { + using ::std::tr1::get; + return type(get(t), get(t), get(t), get(t), get(t), + get(t), get(t), get(t), get(t)); + } +}; + +#undef GMOCK_FIELD_TYPE_ + +// Implements the Args() matcher. +template +class ArgsMatcherImpl : public MatcherInterface { + public: + // ArgsTuple may have top-level const or reference modifiers. + typedef GTEST_REMOVE_REFERENCE_AND_CONST_(ArgsTuple) RawArgsTuple; + typedef typename internal::TupleFields::type SelectedArgs; + typedef Matcher MonomorphicInnerMatcher; + + template + explicit ArgsMatcherImpl(const InnerMatcher& inner_matcher) + : inner_matcher_(SafeMatcherCast(inner_matcher)) {} + + virtual bool MatchAndExplain(ArgsTuple args, + MatchResultListener* listener) const { + const SelectedArgs& selected_args = GetSelectedArgs(args); + if (!listener->IsInterested()) + return inner_matcher_.Matches(selected_args); + + PrintIndices(listener->stream()); + *listener << "are " << PrintToString(selected_args); + + StringMatchResultListener inner_listener; + const bool match = inner_matcher_.MatchAndExplain(selected_args, + &inner_listener); + PrintIfNotEmpty(inner_listener.str(), listener->stream()); + return match; + } + + virtual void DescribeTo(::std::ostream* os) const { + *os << "are a tuple "; + PrintIndices(os); + inner_matcher_.DescribeTo(os); + } + + virtual void DescribeNegationTo(::std::ostream* os) const { + *os << "are a tuple "; + PrintIndices(os); + inner_matcher_.DescribeNegationTo(os); + } + + private: + static SelectedArgs GetSelectedArgs(ArgsTuple args) { + return TupleFields::GetSelectedFields(args); + } + + // Prints the indices of the selected fields. 
+ static void PrintIndices(::std::ostream* os) { + *os << "whose fields ("; + const int indices[10] = { k0, k1, k2, k3, k4, k5, k6, k7, k8, k9 }; + for (int i = 0; i < 10; i++) { + if (indices[i] < 0) + break; + + if (i >= 1) + *os << ", "; + + *os << "#" << indices[i]; + } + *os << ") "; + } + + const MonomorphicInnerMatcher inner_matcher_; + + GTEST_DISALLOW_ASSIGN_(ArgsMatcherImpl); +}; + +template +class ArgsMatcher { + public: + explicit ArgsMatcher(const InnerMatcher& inner_matcher) + : inner_matcher_(inner_matcher) {} + + template + operator Matcher() const { + return MakeMatcher(new ArgsMatcherImpl(inner_matcher_)); + } + + private: + const InnerMatcher inner_matcher_; + + GTEST_DISALLOW_ASSIGN_(ArgsMatcher); +}; + +// A set of metafunctions for computing the result type of AllOf. +// AllOf(m1, ..., mN) returns +// AllOfResultN::type. + +// Although AllOf isn't defined for one argument, AllOfResult1 is defined +// to simplify the implementation. +template +struct AllOfResult1 { + typedef M1 type; +}; + +template +struct AllOfResult2 { + typedef BothOfMatcher< + typename AllOfResult1::type, + typename AllOfResult1::type + > type; +}; + +template +struct AllOfResult3 { + typedef BothOfMatcher< + typename AllOfResult1::type, + typename AllOfResult2::type + > type; +}; + +template +struct AllOfResult4 { + typedef BothOfMatcher< + typename AllOfResult2::type, + typename AllOfResult2::type + > type; +}; + +template +struct AllOfResult5 { + typedef BothOfMatcher< + typename AllOfResult2::type, + typename AllOfResult3::type + > type; +}; + +template +struct AllOfResult6 { + typedef BothOfMatcher< + typename AllOfResult3::type, + typename AllOfResult3::type + > type; +}; + +template +struct AllOfResult7 { + typedef BothOfMatcher< + typename AllOfResult3::type, + typename AllOfResult4::type + > type; +}; + +template +struct AllOfResult8 { + typedef BothOfMatcher< + typename AllOfResult4::type, + typename AllOfResult4::type + > type; +}; + +template +struct 
AllOfResult9 { + typedef BothOfMatcher< + typename AllOfResult4::type, + typename AllOfResult5::type + > type; +}; + +template +struct AllOfResult10 { + typedef BothOfMatcher< + typename AllOfResult5::type, + typename AllOfResult5::type + > type; +}; + +// A set of metafunctions for computing the result type of AnyOf. +// AnyOf(m1, ..., mN) returns +// AnyOfResultN::type. + +// Although AnyOf isn't defined for one argument, AnyOfResult1 is defined +// to simplify the implementation. +template +struct AnyOfResult1 { + typedef M1 type; +}; + +template +struct AnyOfResult2 { + typedef EitherOfMatcher< + typename AnyOfResult1::type, + typename AnyOfResult1::type + > type; +}; + +template +struct AnyOfResult3 { + typedef EitherOfMatcher< + typename AnyOfResult1::type, + typename AnyOfResult2::type + > type; +}; + +template +struct AnyOfResult4 { + typedef EitherOfMatcher< + typename AnyOfResult2::type, + typename AnyOfResult2::type + > type; +}; + +template +struct AnyOfResult5 { + typedef EitherOfMatcher< + typename AnyOfResult2::type, + typename AnyOfResult3::type + > type; +}; + +template +struct AnyOfResult6 { + typedef EitherOfMatcher< + typename AnyOfResult3::type, + typename AnyOfResult3::type + > type; +}; + +template +struct AnyOfResult7 { + typedef EitherOfMatcher< + typename AnyOfResult3::type, + typename AnyOfResult4::type + > type; +}; + +template +struct AnyOfResult8 { + typedef EitherOfMatcher< + typename AnyOfResult4::type, + typename AnyOfResult4::type + > type; +}; + +template +struct AnyOfResult9 { + typedef EitherOfMatcher< + typename AnyOfResult4::type, + typename AnyOfResult5::type + > type; +}; + +template +struct AnyOfResult10 { + typedef EitherOfMatcher< + typename AnyOfResult5::type, + typename AnyOfResult5::type + > type; +}; + +} // namespace internal + +// Args(a_matcher) matches a tuple if the selected +// fields of it matches a_matcher. C++ doesn't support default +// arguments for function templates, so we have to overload it. 
+template +inline internal::ArgsMatcher +Args(const InnerMatcher& matcher) { + return internal::ArgsMatcher(matcher); +} + +template +inline internal::ArgsMatcher +Args(const InnerMatcher& matcher) { + return internal::ArgsMatcher(matcher); +} + +template +inline internal::ArgsMatcher +Args(const InnerMatcher& matcher) { + return internal::ArgsMatcher(matcher); +} + +template +inline internal::ArgsMatcher +Args(const InnerMatcher& matcher) { + return internal::ArgsMatcher(matcher); +} + +template +inline internal::ArgsMatcher +Args(const InnerMatcher& matcher) { + return internal::ArgsMatcher(matcher); +} + +template +inline internal::ArgsMatcher +Args(const InnerMatcher& matcher) { + return internal::ArgsMatcher(matcher); +} + +template +inline internal::ArgsMatcher +Args(const InnerMatcher& matcher) { + return internal::ArgsMatcher(matcher); +} + +template +inline internal::ArgsMatcher +Args(const InnerMatcher& matcher) { + return internal::ArgsMatcher(matcher); +} + +template +inline internal::ArgsMatcher +Args(const InnerMatcher& matcher) { + return internal::ArgsMatcher(matcher); +} + +template +inline internal::ArgsMatcher +Args(const InnerMatcher& matcher) { + return internal::ArgsMatcher(matcher); +} + +template +inline internal::ArgsMatcher +Args(const InnerMatcher& matcher) { + return internal::ArgsMatcher(matcher); +} + +// ElementsAre(e_1, e_2, ... e_n) matches an STL-style container with +// n elements, where the i-th element in the container must +// match the i-th argument in the list. Each argument of +// ElementsAre() can be either a value or a matcher. We support up to +// 10 arguments. +// +// The use of DecayArray in the implementation allows ElementsAre() +// to accept string literals, whose type is const char[N], but we +// want to treat them as const char*. +// +// NOTE: Since ElementsAre() cares about the order of the elements, it +// must not be used with containers whose elements's order is +// undefined (e.g. hash_map). 
+ +inline internal::ElementsAreMatcher< + std::tr1::tuple<> > +ElementsAre() { + typedef std::tr1::tuple<> Args; + return internal::ElementsAreMatcher(Args()); +} + +template +inline internal::ElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type> > +ElementsAre(const T1& e1) { + typedef std::tr1::tuple< + typename internal::DecayArray::type> Args; + return internal::ElementsAreMatcher(Args(e1)); +} + +template +inline internal::ElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +ElementsAre(const T1& e1, const T2& e2) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::ElementsAreMatcher(Args(e1, e2)); +} + +template +inline internal::ElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +ElementsAre(const T1& e1, const T2& e2, const T3& e3) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::ElementsAreMatcher(Args(e1, e2, e3)); +} + +template +inline internal::ElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::ElementsAreMatcher(Args(e1, e2, e3, e4)); +} + +template +inline internal::ElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename 
internal::DecayArray::type, + typename internal::DecayArray::type> > +ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4, + const T5& e5) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::ElementsAreMatcher(Args(e1, e2, e3, e4, e5)); +} + +template +inline internal::ElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4, + const T5& e5, const T6& e6) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::ElementsAreMatcher(Args(e1, e2, e3, e4, e5, e6)); +} + +template +inline internal::ElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4, + const T5& e5, const T6& e6, const T7& e7) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return 
internal::ElementsAreMatcher(Args(e1, e2, e3, e4, e5, e6, e7)); +} + +template +inline internal::ElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4, + const T5& e5, const T6& e6, const T7& e7, const T8& e8) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::ElementsAreMatcher(Args(e1, e2, e3, e4, e5, e6, e7, + e8)); +} + +template +inline internal::ElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4, + const T5& e5, const T6& e6, const T7& e7, const T8& e8, const T9& e9) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return 
internal::ElementsAreMatcher(Args(e1, e2, e3, e4, e5, e6, e7, + e8, e9)); +} + +template +inline internal::ElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +ElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4, + const T5& e5, const T6& e6, const T7& e7, const T8& e8, const T9& e9, + const T10& e10) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::ElementsAreMatcher(Args(e1, e2, e3, e4, e5, e6, e7, + e8, e9, e10)); +} + +// UnorderedElementsAre(e_1, e_2, ..., e_n) is an ElementsAre extension +// that matches n elements in any order. We support up to n=10 arguments. 
+ +inline internal::UnorderedElementsAreMatcher< + std::tr1::tuple<> > +UnorderedElementsAre() { + typedef std::tr1::tuple<> Args; + return internal::UnorderedElementsAreMatcher(Args()); +} + +template +inline internal::UnorderedElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type> > +UnorderedElementsAre(const T1& e1) { + typedef std::tr1::tuple< + typename internal::DecayArray::type> Args; + return internal::UnorderedElementsAreMatcher(Args(e1)); +} + +template +inline internal::UnorderedElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +UnorderedElementsAre(const T1& e1, const T2& e2) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::UnorderedElementsAreMatcher(Args(e1, e2)); +} + +template +inline internal::UnorderedElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::UnorderedElementsAreMatcher(Args(e1, e2, e3)); +} + +template +inline internal::UnorderedElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::UnorderedElementsAreMatcher(Args(e1, e2, e3, e4)); +} + +template +inline internal::UnorderedElementsAreMatcher< + 
std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4, + const T5& e5) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::UnorderedElementsAreMatcher(Args(e1, e2, e3, e4, e5)); +} + +template +inline internal::UnorderedElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4, + const T5& e5, const T6& e6) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::UnorderedElementsAreMatcher(Args(e1, e2, e3, e4, e5, + e6)); +} + +template +inline internal::UnorderedElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4, + const T5& e5, const T6& e6, const T7& e7) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename 
internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::UnorderedElementsAreMatcher(Args(e1, e2, e3, e4, e5, + e6, e7)); +} + +template +inline internal::UnorderedElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4, + const T5& e5, const T6& e6, const T7& e7, const T8& e8) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::UnorderedElementsAreMatcher(Args(e1, e2, e3, e4, e5, + e6, e7, e8)); +} + +template +inline internal::UnorderedElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4, + const T5& e5, const T6& e6, const T7& e7, const T8& e8, const T9& e9) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename 
internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::UnorderedElementsAreMatcher(Args(e1, e2, e3, e4, e5, + e6, e7, e8, e9)); +} + +template +inline internal::UnorderedElementsAreMatcher< + std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> > +UnorderedElementsAre(const T1& e1, const T2& e2, const T3& e3, const T4& e4, + const T5& e5, const T6& e6, const T7& e7, const T8& e8, const T9& e9, + const T10& e10) { + typedef std::tr1::tuple< + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type, + typename internal::DecayArray::type> Args; + return internal::UnorderedElementsAreMatcher(Args(e1, e2, e3, e4, e5, + e6, e7, e8, e9, e10)); +} + +// AllOf(m1, m2, ..., mk) matches any value that matches all of the given +// sub-matchers. AllOf is called fully qualified to prevent ADL from firing. 
+ +template +inline typename internal::AllOfResult2::type +AllOf(M1 m1, M2 m2) { + return typename internal::AllOfResult2::type( + m1, + m2); +} + +template +inline typename internal::AllOfResult3::type +AllOf(M1 m1, M2 m2, M3 m3) { + return typename internal::AllOfResult3::type( + m1, + ::testing::AllOf(m2, m3)); +} + +template +inline typename internal::AllOfResult4::type +AllOf(M1 m1, M2 m2, M3 m3, M4 m4) { + return typename internal::AllOfResult4::type( + ::testing::AllOf(m1, m2), + ::testing::AllOf(m3, m4)); +} + +template +inline typename internal::AllOfResult5::type +AllOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5) { + return typename internal::AllOfResult5::type( + ::testing::AllOf(m1, m2), + ::testing::AllOf(m3, m4, m5)); +} + +template +inline typename internal::AllOfResult6::type +AllOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6) { + return typename internal::AllOfResult6::type( + ::testing::AllOf(m1, m2, m3), + ::testing::AllOf(m4, m5, m6)); +} + +template +inline typename internal::AllOfResult7::type +AllOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7) { + return typename internal::AllOfResult7::type( + ::testing::AllOf(m1, m2, m3), + ::testing::AllOf(m4, m5, m6, m7)); +} + +template +inline typename internal::AllOfResult8::type +AllOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7, M8 m8) { + return typename internal::AllOfResult8::type( + ::testing::AllOf(m1, m2, m3, m4), + ::testing::AllOf(m5, m6, m7, m8)); +} + +template +inline typename internal::AllOfResult9::type +AllOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7, M8 m8, M9 m9) { + return typename internal::AllOfResult9::type( + ::testing::AllOf(m1, m2, m3, m4), + ::testing::AllOf(m5, m6, m7, m8, m9)); +} + +template +inline typename internal::AllOfResult10::type +AllOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7, M8 m8, M9 m9, M10 m10) { + return typename internal::AllOfResult10::type( + ::testing::AllOf(m1, m2, m3, m4, m5), + ::testing::AllOf(m6, m7, m8, m9, m10)); +} + +// AnyOf(m1, m2, 
..., mk) matches any value that matches any of the given +// sub-matchers. AnyOf is called fully qualified to prevent ADL from firing. + +template +inline typename internal::AnyOfResult2::type +AnyOf(M1 m1, M2 m2) { + return typename internal::AnyOfResult2::type( + m1, + m2); +} + +template +inline typename internal::AnyOfResult3::type +AnyOf(M1 m1, M2 m2, M3 m3) { + return typename internal::AnyOfResult3::type( + m1, + ::testing::AnyOf(m2, m3)); +} + +template +inline typename internal::AnyOfResult4::type +AnyOf(M1 m1, M2 m2, M3 m3, M4 m4) { + return typename internal::AnyOfResult4::type( + ::testing::AnyOf(m1, m2), + ::testing::AnyOf(m3, m4)); +} + +template +inline typename internal::AnyOfResult5::type +AnyOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5) { + return typename internal::AnyOfResult5::type( + ::testing::AnyOf(m1, m2), + ::testing::AnyOf(m3, m4, m5)); +} + +template +inline typename internal::AnyOfResult6::type +AnyOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6) { + return typename internal::AnyOfResult6::type( + ::testing::AnyOf(m1, m2, m3), + ::testing::AnyOf(m4, m5, m6)); +} + +template +inline typename internal::AnyOfResult7::type +AnyOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7) { + return typename internal::AnyOfResult7::type( + ::testing::AnyOf(m1, m2, m3), + ::testing::AnyOf(m4, m5, m6, m7)); +} + +template +inline typename internal::AnyOfResult8::type +AnyOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7, M8 m8) { + return typename internal::AnyOfResult8::type( + ::testing::AnyOf(m1, m2, m3, m4), + ::testing::AnyOf(m5, m6, m7, m8)); +} + +template +inline typename internal::AnyOfResult9::type +AnyOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7, M8 m8, M9 m9) { + return typename internal::AnyOfResult9::type( + ::testing::AnyOf(m1, m2, m3, m4), + ::testing::AnyOf(m5, m6, m7, m8, m9)); +} + +template +inline typename internal::AnyOfResult10::type +AnyOf(M1 m1, M2 m2, M3 m3, M4 m4, M5 m5, M6 m6, M7 m7, M8 m8, M9 m9, M10 m10) { + return typename 
internal::AnyOfResult10::type( + ::testing::AnyOf(m1, m2, m3, m4, m5), + ::testing::AnyOf(m6, m7, m8, m9, m10)); +} + +} // namespace testing + + +// The MATCHER* family of macros can be used in a namespace scope to +// define custom matchers easily. +// +// Basic Usage +// =========== +// +// The syntax +// +// MATCHER(name, description_string) { statements; } +// +// defines a matcher with the given name that executes the statements, +// which must return a bool to indicate if the match succeeds. Inside +// the statements, you can refer to the value being matched by 'arg', +// and refer to its type by 'arg_type'. +// +// The description string documents what the matcher does, and is used +// to generate the failure message when the match fails. Since a +// MATCHER() is usually defined in a header file shared by multiple +// C++ source files, we require the description to be a C-string +// literal to avoid possible side effects. It can be empty, in which +// case we'll use the sequence of words in the matcher name as the +// description. +// +// For example: +// +// MATCHER(IsEven, "") { return (arg % 2) == 0; } +// +// allows you to write +// +// // Expects mock_foo.Bar(n) to be called where n is even. +// EXPECT_CALL(mock_foo, Bar(IsEven())); +// +// or, +// +// // Verifies that the value of some_expression is even. +// EXPECT_THAT(some_expression, IsEven()); +// +// If the above assertion fails, it will print something like: +// +// Value of: some_expression +// Expected: is even +// Actual: 7 +// +// where the description "is even" is automatically calculated from the +// matcher name IsEven. +// +// Argument Type +// ============= +// +// Note that the type of the value being matched (arg_type) is +// determined by the context in which you use the matcher and is +// supplied to you by the compiler, so you don't need to worry about +// declaring it (nor can you). This allows the matcher to be +// polymorphic. 
For example, IsEven() can be used to match any type +// where the value of "(arg % 2) == 0" can be implicitly converted to +// a bool. In the "Bar(IsEven())" example above, if method Bar() +// takes an int, 'arg_type' will be int; if it takes an unsigned long, +// 'arg_type' will be unsigned long; and so on. +// +// Parameterizing Matchers +// ======================= +// +// Sometimes you'll want to parameterize the matcher. For that you +// can use another macro: +// +// MATCHER_P(name, param_name, description_string) { statements; } +// +// For example: +// +// MATCHER_P(HasAbsoluteValue, value, "") { return abs(arg) == value; } +// +// will allow you to write: +// +// EXPECT_THAT(Blah("a"), HasAbsoluteValue(n)); +// +// which may lead to this message (assuming n is 10): +// +// Value of: Blah("a") +// Expected: has absolute value 10 +// Actual: -9 +// +// Note that both the matcher description and its parameter are +// printed, making the message human-friendly. +// +// In the matcher definition body, you can write 'foo_type' to +// reference the type of a parameter named 'foo'. For example, in the +// body of MATCHER_P(HasAbsoluteValue, value) above, you can write +// 'value_type' to refer to the type of 'value'. +// +// We also provide MATCHER_P2, MATCHER_P3, ..., up to MATCHER_P10 to +// support multi-parameter matchers. +// +// Describing Parameterized Matchers +// ================================= +// +// The last argument to MATCHER*() is a string-typed expression. The +// expression can reference all of the matcher's parameters and a +// special bool-typed variable named 'negation'. When 'negation' is +// false, the expression should evaluate to the matcher's description; +// otherwise it should evaluate to the description of the negation of +// the matcher. For example, +// +// using testing::PrintToString; +// +// MATCHER_P2(InClosedRange, low, hi, +// string(negation ? 
"is not" : "is") + " in range [" + +// PrintToString(low) + ", " + PrintToString(hi) + "]") { +// return low <= arg && arg <= hi; +// } +// ... +// EXPECT_THAT(3, InClosedRange(4, 6)); +// EXPECT_THAT(3, Not(InClosedRange(2, 4))); +// +// would generate two failures that contain the text: +// +// Expected: is in range [4, 6] +// ... +// Expected: is not in range [2, 4] +// +// If you specify "" as the description, the failure message will +// contain the sequence of words in the matcher name followed by the +// parameter values printed as a tuple. For example, +// +// MATCHER_P2(InClosedRange, low, hi, "") { ... } +// ... +// EXPECT_THAT(3, InClosedRange(4, 6)); +// EXPECT_THAT(3, Not(InClosedRange(2, 4))); +// +// would generate two failures that contain the text: +// +// Expected: in closed range (4, 6) +// ... +// Expected: not (in closed range (2, 4)) +// +// Types of Matcher Parameters +// =========================== +// +// For the purpose of typing, you can view +// +// MATCHER_Pk(Foo, p1, ..., pk, description_string) { ... } +// +// as shorthand for +// +// template +// FooMatcherPk +// Foo(p1_type p1, ..., pk_type pk) { ... } +// +// When you write Foo(v1, ..., vk), the compiler infers the types of +// the parameters v1, ..., and vk for you. If you are not happy with +// the result of the type inference, you can specify the types by +// explicitly instantiating the template, as in Foo(5, +// false). As said earlier, you don't get to (or need to) specify +// 'arg_type' as that's determined by the context in which the matcher +// is used. You can assign the result of expression Foo(p1, ..., pk) +// to a variable of type FooMatcherPk. This +// can be useful when composing matchers. +// +// While you can instantiate a matcher template with reference types, +// passing the parameters by pointer usually makes your code more +// readable. 
If, however, you still want to pass a parameter by +// reference, be aware that in the failure message generated by the +// matcher you will see the value of the referenced object but not its +// address. +// +// Explaining Match Results +// ======================== +// +// Sometimes the matcher description alone isn't enough to explain why +// the match has failed or succeeded. For example, when expecting a +// long string, it can be very helpful to also print the diff between +// the expected string and the actual one. To achieve that, you can +// optionally stream additional information to a special variable +// named result_listener, whose type is a pointer to class +// MatchResultListener: +// +// MATCHER_P(EqualsLongString, str, "") { +// if (arg == str) return true; +// +// *result_listener << "the difference: " +/// << DiffStrings(str, arg); +// return false; +// } +// +// Overloading Matchers +// ==================== +// +// You can overload matchers with different numbers of parameters: +// +// MATCHER_P(Blah, a, description_string1) { ... } +// MATCHER_P2(Blah, a, b, description_string2) { ... } +// +// Caveats +// ======= +// +// When defining a new matcher, you should also consider implementing +// MatcherInterface or using MakePolymorphicMatcher(). These +// approaches require more work than the MATCHER* macros, but also +// give you more control on the types of the value being matched and +// the matcher parameters, which may leads to better compiler error +// messages when the matcher is used wrong. They also allow +// overloading matchers based on parameter types (as opposed to just +// based on the number of parameters). +// +// MATCHER*() can only be used in a namespace scope. The reason is +// that C++ doesn't yet allow function-local types to be used to +// instantiate templates. The up-coming C++0x standard will fix this. +// Once that's done, we'll consider supporting using MATCHER*() inside +// a function. 
+// +// More Information +// ================ +// +// To learn more about using these macros, please search for 'MATCHER' +// on http://code.google.com/p/googlemock/wiki/CookBook. + +#define MATCHER(name, description)\ + class name##Matcher {\ + public:\ + template \ + class gmock_Impl : public ::testing::MatcherInterface {\ + public:\ + gmock_Impl()\ + {}\ + virtual bool MatchAndExplain(\ + arg_type arg, ::testing::MatchResultListener* result_listener) const;\ + virtual void DescribeTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(false);\ + }\ + virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(true);\ + }\ + private:\ + ::testing::internal::string FormatDescription(bool negation) const {\ + const ::testing::internal::string gmock_description = (description);\ + if (!gmock_description.empty())\ + return gmock_description;\ + return ::testing::internal::FormatMatcherDescription(\ + negation, #name, \ + ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ + ::std::tr1::tuple<>()));\ + }\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template \ + operator ::testing::Matcher() const {\ + return ::testing::Matcher(\ + new gmock_Impl());\ + }\ + name##Matcher() {\ + }\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##Matcher);\ + };\ + inline name##Matcher name() {\ + return name##Matcher();\ + }\ + template \ + bool name##Matcher::gmock_Impl::MatchAndExplain(\ + arg_type arg, \ + ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ + const + +#define MATCHER_P(name, p0, description)\ + template \ + class name##MatcherP {\ + public:\ + template \ + class gmock_Impl : public ::testing::MatcherInterface {\ + public:\ + explicit gmock_Impl(p0##_type gmock_p0)\ + : p0(gmock_p0) {}\ + virtual bool MatchAndExplain(\ + arg_type arg, ::testing::MatchResultListener* result_listener) const;\ + virtual void DescribeTo(::std::ostream* gmock_os) const {\ + *gmock_os << 
FormatDescription(false);\ + }\ + virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(true);\ + }\ + p0##_type p0;\ + private:\ + ::testing::internal::string FormatDescription(bool negation) const {\ + const ::testing::internal::string gmock_description = (description);\ + if (!gmock_description.empty())\ + return gmock_description;\ + return ::testing::internal::FormatMatcherDescription(\ + negation, #name, \ + ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ + ::std::tr1::tuple(p0)));\ + }\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template \ + operator ::testing::Matcher() const {\ + return ::testing::Matcher(\ + new gmock_Impl(p0));\ + }\ + name##MatcherP(p0##_type gmock_p0) : p0(gmock_p0) {\ + }\ + p0##_type p0;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##MatcherP);\ + };\ + template \ + inline name##MatcherP name(p0##_type p0) {\ + return name##MatcherP(p0);\ + }\ + template \ + template \ + bool name##MatcherP::gmock_Impl::MatchAndExplain(\ + arg_type arg, \ + ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ + const + +#define MATCHER_P2(name, p0, p1, description)\ + template \ + class name##MatcherP2 {\ + public:\ + template \ + class gmock_Impl : public ::testing::MatcherInterface {\ + public:\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1)\ + : p0(gmock_p0), p1(gmock_p1) {}\ + virtual bool MatchAndExplain(\ + arg_type arg, ::testing::MatchResultListener* result_listener) const;\ + virtual void DescribeTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(false);\ + }\ + virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(true);\ + }\ + p0##_type p0;\ + p1##_type p1;\ + private:\ + ::testing::internal::string FormatDescription(bool negation) const {\ + const ::testing::internal::string gmock_description = (description);\ + if (!gmock_description.empty())\ + return gmock_description;\ + return 
::testing::internal::FormatMatcherDescription(\ + negation, #name, \ + ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ + ::std::tr1::tuple(p0, p1)));\ + }\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template \ + operator ::testing::Matcher() const {\ + return ::testing::Matcher(\ + new gmock_Impl(p0, p1));\ + }\ + name##MatcherP2(p0##_type gmock_p0, p1##_type gmock_p1) : p0(gmock_p0), \ + p1(gmock_p1) {\ + }\ + p0##_type p0;\ + p1##_type p1;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##MatcherP2);\ + };\ + template \ + inline name##MatcherP2 name(p0##_type p0, \ + p1##_type p1) {\ + return name##MatcherP2(p0, p1);\ + }\ + template \ + template \ + bool name##MatcherP2::gmock_Impl::MatchAndExplain(\ + arg_type arg, \ + ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ + const + +#define MATCHER_P3(name, p0, p1, p2, description)\ + template \ + class name##MatcherP3 {\ + public:\ + template \ + class gmock_Impl : public ::testing::MatcherInterface {\ + public:\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2)\ + : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2) {}\ + virtual bool MatchAndExplain(\ + arg_type arg, ::testing::MatchResultListener* result_listener) const;\ + virtual void DescribeTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(false);\ + }\ + virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(true);\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + private:\ + ::testing::internal::string FormatDescription(bool negation) const {\ + const ::testing::internal::string gmock_description = (description);\ + if (!gmock_description.empty())\ + return gmock_description;\ + return ::testing::internal::FormatMatcherDescription(\ + negation, #name, \ + ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ + ::std::tr1::tuple(p0, p1, \ + p2)));\ + }\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template \ + operator 
::testing::Matcher() const {\ + return ::testing::Matcher(\ + new gmock_Impl(p0, p1, p2));\ + }\ + name##MatcherP3(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2) {\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##MatcherP3);\ + };\ + template \ + inline name##MatcherP3 name(p0##_type p0, \ + p1##_type p1, p2##_type p2) {\ + return name##MatcherP3(p0, p1, p2);\ + }\ + template \ + template \ + bool name##MatcherP3::gmock_Impl::MatchAndExplain(\ + arg_type arg, \ + ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ + const + +#define MATCHER_P4(name, p0, p1, p2, p3, description)\ + template \ + class name##MatcherP4 {\ + public:\ + template \ + class gmock_Impl : public ::testing::MatcherInterface {\ + public:\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3)\ + : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3) {}\ + virtual bool MatchAndExplain(\ + arg_type arg, ::testing::MatchResultListener* result_listener) const;\ + virtual void DescribeTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(false);\ + }\ + virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(true);\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + private:\ + ::testing::internal::string FormatDescription(bool negation) const {\ + const ::testing::internal::string gmock_description = (description);\ + if (!gmock_description.empty())\ + return gmock_description;\ + return ::testing::internal::FormatMatcherDescription(\ + negation, #name, \ + ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ + ::std::tr1::tuple(p0, p1, p2, p3)));\ + }\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template \ + operator ::testing::Matcher() const {\ + return ::testing::Matcher(\ + new gmock_Impl(p0, p1, p2, p3));\ + }\ + 
name##MatcherP4(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2, p3##_type gmock_p3) : p0(gmock_p0), p1(gmock_p1), \ + p2(gmock_p2), p3(gmock_p3) {\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##MatcherP4);\ + };\ + template \ + inline name##MatcherP4 name(p0##_type p0, p1##_type p1, p2##_type p2, \ + p3##_type p3) {\ + return name##MatcherP4(p0, \ + p1, p2, p3);\ + }\ + template \ + template \ + bool name##MatcherP4::gmock_Impl::MatchAndExplain(\ + arg_type arg, \ + ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ + const + +#define MATCHER_P5(name, p0, p1, p2, p3, p4, description)\ + template \ + class name##MatcherP5 {\ + public:\ + template \ + class gmock_Impl : public ::testing::MatcherInterface {\ + public:\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4)\ + : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \ + p4(gmock_p4) {}\ + virtual bool MatchAndExplain(\ + arg_type arg, ::testing::MatchResultListener* result_listener) const;\ + virtual void DescribeTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(false);\ + }\ + virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(true);\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + private:\ + ::testing::internal::string FormatDescription(bool negation) const {\ + const ::testing::internal::string gmock_description = (description);\ + if (!gmock_description.empty())\ + return gmock_description;\ + return ::testing::internal::FormatMatcherDescription(\ + negation, #name, \ + ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ + ::std::tr1::tuple(p0, p1, p2, p3, p4)));\ + }\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template \ + operator ::testing::Matcher() const {\ + return ::testing::Matcher(\ + new 
gmock_Impl(p0, p1, p2, p3, p4));\ + }\ + name##MatcherP5(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2, p3##_type gmock_p3, \ + p4##_type gmock_p4) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4) {\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##MatcherP5);\ + };\ + template \ + inline name##MatcherP5 name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ + p4##_type p4) {\ + return name##MatcherP5(p0, p1, p2, p3, p4);\ + }\ + template \ + template \ + bool name##MatcherP5::gmock_Impl::MatchAndExplain(\ + arg_type arg, \ + ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ + const + +#define MATCHER_P6(name, p0, p1, p2, p3, p4, p5, description)\ + template \ + class name##MatcherP6 {\ + public:\ + template \ + class gmock_Impl : public ::testing::MatcherInterface {\ + public:\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5)\ + : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \ + p4(gmock_p4), p5(gmock_p5) {}\ + virtual bool MatchAndExplain(\ + arg_type arg, ::testing::MatchResultListener* result_listener) const;\ + virtual void DescribeTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(false);\ + }\ + virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(true);\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + private:\ + ::testing::internal::string FormatDescription(bool negation) const {\ + const ::testing::internal::string gmock_description = (description);\ + if (!gmock_description.empty())\ + return gmock_description;\ + return ::testing::internal::FormatMatcherDescription(\ + negation, #name, \ + ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ + ::std::tr1::tuple(p0, 
p1, p2, p3, p4, p5)));\ + }\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template \ + operator ::testing::Matcher() const {\ + return ::testing::Matcher(\ + new gmock_Impl(p0, p1, p2, p3, p4, p5));\ + }\ + name##MatcherP6(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ + p5##_type gmock_p5) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4), p5(gmock_p5) {\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##MatcherP6);\ + };\ + template \ + inline name##MatcherP6 name(p0##_type p0, p1##_type p1, p2##_type p2, \ + p3##_type p3, p4##_type p4, p5##_type p5) {\ + return name##MatcherP6(p0, p1, p2, p3, p4, p5);\ + }\ + template \ + template \ + bool name##MatcherP6::gmock_Impl::MatchAndExplain(\ + arg_type arg, \ + ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ + const + +#define MATCHER_P7(name, p0, p1, p2, p3, p4, p5, p6, description)\ + template \ + class name##MatcherP7 {\ + public:\ + template \ + class gmock_Impl : public ::testing::MatcherInterface {\ + public:\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6)\ + : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \ + p4(gmock_p4), p5(gmock_p5), p6(gmock_p6) {}\ + virtual bool MatchAndExplain(\ + arg_type arg, ::testing::MatchResultListener* result_listener) const;\ + virtual void DescribeTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(false);\ + }\ + virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(true);\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + p6##_type p6;\ + private:\ + ::testing::internal::string FormatDescription(bool negation) const {\ + 
const ::testing::internal::string gmock_description = (description);\ + if (!gmock_description.empty())\ + return gmock_description;\ + return ::testing::internal::FormatMatcherDescription(\ + negation, #name, \ + ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ + ::std::tr1::tuple(p0, p1, p2, p3, p4, p5, \ + p6)));\ + }\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template \ + operator ::testing::Matcher() const {\ + return ::testing::Matcher(\ + new gmock_Impl(p0, p1, p2, p3, p4, p5, p6));\ + }\ + name##MatcherP7(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ + p5##_type gmock_p5, p6##_type gmock_p6) : p0(gmock_p0), p1(gmock_p1), \ + p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), \ + p6(gmock_p6) {\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + p6##_type p6;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##MatcherP7);\ + };\ + template \ + inline name##MatcherP7 name(p0##_type p0, p1##_type p1, \ + p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \ + p6##_type p6) {\ + return name##MatcherP7(p0, p1, p2, p3, p4, p5, p6);\ + }\ + template \ + template \ + bool name##MatcherP7::gmock_Impl::MatchAndExplain(\ + arg_type arg, \ + ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ + const + +#define MATCHER_P8(name, p0, p1, p2, p3, p4, p5, p6, p7, description)\ + template \ + class name##MatcherP8 {\ + public:\ + template \ + class gmock_Impl : public ::testing::MatcherInterface {\ + public:\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6, p7##_type gmock_p7)\ + : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \ + p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7) {}\ + virtual bool MatchAndExplain(\ + arg_type arg, ::testing::MatchResultListener* result_listener) const;\ + 
virtual void DescribeTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(false);\ + }\ + virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(true);\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + p6##_type p6;\ + p7##_type p7;\ + private:\ + ::testing::internal::string FormatDescription(bool negation) const {\ + const ::testing::internal::string gmock_description = (description);\ + if (!gmock_description.empty())\ + return gmock_description;\ + return ::testing::internal::FormatMatcherDescription(\ + negation, #name, \ + ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ + ::std::tr1::tuple(p0, p1, p2, \ + p3, p4, p5, p6, p7)));\ + }\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template \ + operator ::testing::Matcher() const {\ + return ::testing::Matcher(\ + new gmock_Impl(p0, p1, p2, p3, p4, p5, p6, p7));\ + }\ + name##MatcherP8(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ + p5##_type gmock_p5, p6##_type gmock_p6, \ + p7##_type gmock_p7) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \ + p7(gmock_p7) {\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + p6##_type p6;\ + p7##_type p7;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##MatcherP8);\ + };\ + template \ + inline name##MatcherP8 name(p0##_type p0, \ + p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \ + p6##_type p6, p7##_type p7) {\ + return name##MatcherP8(p0, p1, p2, p3, p4, p5, \ + p6, p7);\ + }\ + template \ + template \ + bool name##MatcherP8::gmock_Impl::MatchAndExplain(\ + arg_type arg, \ + ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ + const + +#define MATCHER_P9(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, description)\ + template \ + 
class name##MatcherP9 {\ + public:\ + template \ + class gmock_Impl : public ::testing::MatcherInterface {\ + public:\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8)\ + : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \ + p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \ + p8(gmock_p8) {}\ + virtual bool MatchAndExplain(\ + arg_type arg, ::testing::MatchResultListener* result_listener) const;\ + virtual void DescribeTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(false);\ + }\ + virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(true);\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + p6##_type p6;\ + p7##_type p7;\ + p8##_type p8;\ + private:\ + ::testing::internal::string FormatDescription(bool negation) const {\ + const ::testing::internal::string gmock_description = (description);\ + if (!gmock_description.empty())\ + return gmock_description;\ + return ::testing::internal::FormatMatcherDescription(\ + negation, #name, \ + ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ + ::std::tr1::tuple(p0, p1, p2, p3, p4, p5, p6, p7, p8)));\ + }\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template \ + operator ::testing::Matcher() const {\ + return ::testing::Matcher(\ + new gmock_Impl(p0, p1, p2, p3, p4, p5, p6, p7, p8));\ + }\ + name##MatcherP9(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ + p5##_type gmock_p5, p6##_type gmock_p6, p7##_type gmock_p7, \ + p8##_type gmock_p8) : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), \ + p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \ + p8(gmock_p8) {\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ 
+ p5##_type p5;\ + p6##_type p6;\ + p7##_type p7;\ + p8##_type p8;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##MatcherP9);\ + };\ + template \ + inline name##MatcherP9 name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ + p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, \ + p8##_type p8) {\ + return name##MatcherP9(p0, p1, p2, \ + p3, p4, p5, p6, p7, p8);\ + }\ + template \ + template \ + bool name##MatcherP9::gmock_Impl::MatchAndExplain(\ + arg_type arg, \ + ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ + const + +#define MATCHER_P10(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, description)\ + template \ + class name##MatcherP10 {\ + public:\ + template \ + class gmock_Impl : public ::testing::MatcherInterface {\ + public:\ + gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ + p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ + p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8, \ + p9##_type gmock_p9)\ + : p0(gmock_p0), p1(gmock_p1), p2(gmock_p2), p3(gmock_p3), \ + p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), p7(gmock_p7), \ + p8(gmock_p8), p9(gmock_p9) {}\ + virtual bool MatchAndExplain(\ + arg_type arg, ::testing::MatchResultListener* result_listener) const;\ + virtual void DescribeTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(false);\ + }\ + virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(true);\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + p6##_type p6;\ + p7##_type p7;\ + p8##_type p8;\ + p9##_type p9;\ + private:\ + ::testing::internal::string FormatDescription(bool negation) const {\ + const ::testing::internal::string gmock_description = (description);\ + if (!gmock_description.empty())\ + return gmock_description;\ + return ::testing::internal::FormatMatcherDescription(\ + negation, #name, \ + 
::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ + ::std::tr1::tuple(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)));\ + }\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + template \ + operator ::testing::Matcher() const {\ + return ::testing::Matcher(\ + new gmock_Impl(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9));\ + }\ + name##MatcherP10(p0##_type gmock_p0, p1##_type gmock_p1, \ + p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ + p5##_type gmock_p5, p6##_type gmock_p6, p7##_type gmock_p7, \ + p8##_type gmock_p8, p9##_type gmock_p9) : p0(gmock_p0), p1(gmock_p1), \ + p2(gmock_p2), p3(gmock_p3), p4(gmock_p4), p5(gmock_p5), p6(gmock_p6), \ + p7(gmock_p7), p8(gmock_p8), p9(gmock_p9) {\ + }\ + p0##_type p0;\ + p1##_type p1;\ + p2##_type p2;\ + p3##_type p3;\ + p4##_type p4;\ + p5##_type p5;\ + p6##_type p6;\ + p7##_type p7;\ + p8##_type p8;\ + p9##_type p9;\ + private:\ + GTEST_DISALLOW_ASSIGN_(name##MatcherP10);\ + };\ + template \ + inline name##MatcherP10 name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ + p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8, \ + p9##_type p9) {\ + return name##MatcherP10(p0, \ + p1, p2, p3, p4, p5, p6, p7, p8, p9);\ + }\ + template \ + template \ + bool name##MatcherP10::gmock_Impl::MatchAndExplain(\ + arg_type arg, \ + ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ + const + +#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_ diff --git a/src/gmock/include/gmock/gmock-generated-matchers.h.pump b/src/gmock/include/gmock/gmock-generated-matchers.h.pump new file mode 100644 index 00000000000..af02acbc163 --- /dev/null +++ b/src/gmock/include/gmock/gmock-generated-matchers.h.pump @@ -0,0 +1,674 @@ +$$ -*- mode: c++; -*- +$$ This is a Pump source file. Please use Pump to convert it to +$$ gmock-generated-actions.h. +$$ +$var n = 10 $$ The maximum arity we support. +$$ }} This line fixes auto-indentation of the following code in Emacs. 
+// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Google Mock - a framework for writing C++ mock classes. +// +// This file implements some commonly used variadic matchers. 
+ +#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_ +#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_ + +#include +#include +#include +#include +#include "gmock/gmock-matchers.h" + +namespace testing { +namespace internal { + +$range i 0..n-1 + +// The type of the i-th (0-based) field of Tuple. +#define GMOCK_FIELD_TYPE_(Tuple, i) \ + typename ::std::tr1::tuple_element::type + +// TupleFields is for selecting fields from a +// tuple of type Tuple. It has two members: +// +// type: a tuple type whose i-th field is the ki-th field of Tuple. +// GetSelectedFields(t): returns fields k0, ..., and kn of t as a tuple. +// +// For example, in class TupleFields, 2, 0>, we have: +// +// type is tuple, and +// GetSelectedFields(make_tuple(true, 'a', 42)) is (42, true). + +template +class TupleFields; + +// This generic version is used when there are $n selectors. +template +class TupleFields { + public: + typedef ::std::tr1::tuple<$for i, [[GMOCK_FIELD_TYPE_(Tuple, k$i)]]> type; + static type GetSelectedFields(const Tuple& t) { + using ::std::tr1::get; + return type($for i, [[get(t)]]); + } +}; + +// The following specialization is used for 0 ~ $(n-1) selectors. + +$for i [[ +$$ }}} +$range j 0..i-1 +$range k 0..n-1 + +template +class TupleFields { + public: + typedef ::std::tr1::tuple<$for j, [[GMOCK_FIELD_TYPE_(Tuple, k$j)]]> type; + static type GetSelectedFields(const Tuple& $if i==0 [[/* t */]] $else [[t]]) { + using ::std::tr1::get; + return type($for j, [[get(t)]]); + } +}; + +]] + +#undef GMOCK_FIELD_TYPE_ + +// Implements the Args() matcher. + +$var ks = [[$for i, [[k$i]]]] +template +class ArgsMatcherImpl : public MatcherInterface { + public: + // ArgsTuple may have top-level const or reference modifiers. 
+ typedef GTEST_REMOVE_REFERENCE_AND_CONST_(ArgsTuple) RawArgsTuple; + typedef typename internal::TupleFields::type SelectedArgs; + typedef Matcher MonomorphicInnerMatcher; + + template + explicit ArgsMatcherImpl(const InnerMatcher& inner_matcher) + : inner_matcher_(SafeMatcherCast(inner_matcher)) {} + + virtual bool MatchAndExplain(ArgsTuple args, + MatchResultListener* listener) const { + const SelectedArgs& selected_args = GetSelectedArgs(args); + if (!listener->IsInterested()) + return inner_matcher_.Matches(selected_args); + + PrintIndices(listener->stream()); + *listener << "are " << PrintToString(selected_args); + + StringMatchResultListener inner_listener; + const bool match = inner_matcher_.MatchAndExplain(selected_args, + &inner_listener); + PrintIfNotEmpty(inner_listener.str(), listener->stream()); + return match; + } + + virtual void DescribeTo(::std::ostream* os) const { + *os << "are a tuple "; + PrintIndices(os); + inner_matcher_.DescribeTo(os); + } + + virtual void DescribeNegationTo(::std::ostream* os) const { + *os << "are a tuple "; + PrintIndices(os); + inner_matcher_.DescribeNegationTo(os); + } + + private: + static SelectedArgs GetSelectedArgs(ArgsTuple args) { + return TupleFields::GetSelectedFields(args); + } + + // Prints the indices of the selected fields. 
+ static void PrintIndices(::std::ostream* os) { + *os << "whose fields ("; + const int indices[$n] = { $ks }; + for (int i = 0; i < $n; i++) { + if (indices[i] < 0) + break; + + if (i >= 1) + *os << ", "; + + *os << "#" << indices[i]; + } + *os << ") "; + } + + const MonomorphicInnerMatcher inner_matcher_; + + GTEST_DISALLOW_ASSIGN_(ArgsMatcherImpl); +}; + +template +class ArgsMatcher { + public: + explicit ArgsMatcher(const InnerMatcher& inner_matcher) + : inner_matcher_(inner_matcher) {} + + template + operator Matcher() const { + return MakeMatcher(new ArgsMatcherImpl(inner_matcher_)); + } + + private: + const InnerMatcher inner_matcher_; + + GTEST_DISALLOW_ASSIGN_(ArgsMatcher); +}; + +// A set of metafunctions for computing the result type of AllOf. +// AllOf(m1, ..., mN) returns +// AllOfResultN::type. + +// Although AllOf isn't defined for one argument, AllOfResult1 is defined +// to simplify the implementation. +template +struct AllOfResult1 { + typedef M1 type; +}; + +$range i 1..n + +$range i 2..n +$for i [[ +$range j 2..i +$var m = i/2 +$range k 1..m +$range t m+1..i + +template +struct AllOfResult$i { + typedef BothOfMatcher< + typename AllOfResult$m<$for k, [[M$k]]>::type, + typename AllOfResult$(i-m)<$for t, [[M$t]]>::type + > type; +}; + +]] + +// A set of metafunctions for computing the result type of AnyOf. +// AnyOf(m1, ..., mN) returns +// AnyOfResultN::type. + +// Although AnyOf isn't defined for one argument, AnyOfResult1 is defined +// to simplify the implementation. +template +struct AnyOfResult1 { + typedef M1 type; +}; + +$range i 1..n + +$range i 2..n +$for i [[ +$range j 2..i +$var m = i/2 +$range k 1..m +$range t m+1..i + +template +struct AnyOfResult$i { + typedef EitherOfMatcher< + typename AnyOfResult$m<$for k, [[M$k]]>::type, + typename AnyOfResult$(i-m)<$for t, [[M$t]]>::type + > type; +}; + +]] + +} // namespace internal + +// Args(a_matcher) matches a tuple if the selected +// fields of it matches a_matcher. 
C++ doesn't support default +// arguments for function templates, so we have to overload it. + +$range i 0..n +$for i [[ +$range j 1..i +template <$for j [[int k$j, ]]typename InnerMatcher> +inline internal::ArgsMatcher +Args(const InnerMatcher& matcher) { + return internal::ArgsMatcher(matcher); +} + + +]] +// ElementsAre(e_1, e_2, ... e_n) matches an STL-style container with +// n elements, where the i-th element in the container must +// match the i-th argument in the list. Each argument of +// ElementsAre() can be either a value or a matcher. We support up to +// $n arguments. +// +// The use of DecayArray in the implementation allows ElementsAre() +// to accept string literals, whose type is const char[N], but we +// want to treat them as const char*. +// +// NOTE: Since ElementsAre() cares about the order of the elements, it +// must not be used with containers whose elements's order is +// undefined (e.g. hash_map). + +$range i 0..n +$for i [[ + +$range j 1..i + +$if i>0 [[ + +template <$for j, [[typename T$j]]> +]] + +inline internal::ElementsAreMatcher< + std::tr1::tuple< +$for j, [[ + + typename internal::DecayArray::type]]> > +ElementsAre($for j, [[const T$j& e$j]]) { + typedef std::tr1::tuple< +$for j, [[ + + typename internal::DecayArray::type]]> Args; + return internal::ElementsAreMatcher(Args($for j, [[e$j]])); +} + +]] + +// UnorderedElementsAre(e_1, e_2, ..., e_n) is an ElementsAre extension +// that matches n elements in any order. We support up to n=$n arguments. 
+ +$range i 0..n +$for i [[ + +$range j 1..i + +$if i>0 [[ + +template <$for j, [[typename T$j]]> +]] + +inline internal::UnorderedElementsAreMatcher< + std::tr1::tuple< +$for j, [[ + + typename internal::DecayArray::type]]> > +UnorderedElementsAre($for j, [[const T$j& e$j]]) { + typedef std::tr1::tuple< +$for j, [[ + + typename internal::DecayArray::type]]> Args; + return internal::UnorderedElementsAreMatcher(Args($for j, [[e$j]])); +} + +]] + +// AllOf(m1, m2, ..., mk) matches any value that matches all of the given +// sub-matchers. AllOf is called fully qualified to prevent ADL from firing. + +$range i 2..n +$for i [[ +$range j 1..i +$var m = i/2 +$range k 1..m +$range t m+1..i + +template <$for j, [[typename M$j]]> +inline typename internal::AllOfResult$i<$for j, [[M$j]]>::type +AllOf($for j, [[M$j m$j]]) { + return typename internal::AllOfResult$i<$for j, [[M$j]]>::type( + $if m == 1 [[m1]] $else [[::testing::AllOf($for k, [[m$k]])]], + $if m+1 == i [[m$i]] $else [[::testing::AllOf($for t, [[m$t]])]]); +} + +]] + +// AnyOf(m1, m2, ..., mk) matches any value that matches any of the given +// sub-matchers. AnyOf is called fully qualified to prevent ADL from firing. + +$range i 2..n +$for i [[ +$range j 1..i +$var m = i/2 +$range k 1..m +$range t m+1..i + +template <$for j, [[typename M$j]]> +inline typename internal::AnyOfResult$i<$for j, [[M$j]]>::type +AnyOf($for j, [[M$j m$j]]) { + return typename internal::AnyOfResult$i<$for j, [[M$j]]>::type( + $if m == 1 [[m1]] $else [[::testing::AnyOf($for k, [[m$k]])]], + $if m+1 == i [[m$i]] $else [[::testing::AnyOf($for t, [[m$t]])]]); +} + +]] + +} // namespace testing +$$ } // This Pump meta comment fixes auto-indentation in Emacs. It will not +$$ // show up in the generated code. + + +// The MATCHER* family of macros can be used in a namespace scope to +// define custom matchers easily. 
+// +// Basic Usage +// =========== +// +// The syntax +// +// MATCHER(name, description_string) { statements; } +// +// defines a matcher with the given name that executes the statements, +// which must return a bool to indicate if the match succeeds. Inside +// the statements, you can refer to the value being matched by 'arg', +// and refer to its type by 'arg_type'. +// +// The description string documents what the matcher does, and is used +// to generate the failure message when the match fails. Since a +// MATCHER() is usually defined in a header file shared by multiple +// C++ source files, we require the description to be a C-string +// literal to avoid possible side effects. It can be empty, in which +// case we'll use the sequence of words in the matcher name as the +// description. +// +// For example: +// +// MATCHER(IsEven, "") { return (arg % 2) == 0; } +// +// allows you to write +// +// // Expects mock_foo.Bar(n) to be called where n is even. +// EXPECT_CALL(mock_foo, Bar(IsEven())); +// +// or, +// +// // Verifies that the value of some_expression is even. +// EXPECT_THAT(some_expression, IsEven()); +// +// If the above assertion fails, it will print something like: +// +// Value of: some_expression +// Expected: is even +// Actual: 7 +// +// where the description "is even" is automatically calculated from the +// matcher name IsEven. +// +// Argument Type +// ============= +// +// Note that the type of the value being matched (arg_type) is +// determined by the context in which you use the matcher and is +// supplied to you by the compiler, so you don't need to worry about +// declaring it (nor can you). This allows the matcher to be +// polymorphic. For example, IsEven() can be used to match any type +// where the value of "(arg % 2) == 0" can be implicitly converted to +// a bool. 
In the "Bar(IsEven())" example above, if method Bar() +// takes an int, 'arg_type' will be int; if it takes an unsigned long, +// 'arg_type' will be unsigned long; and so on. +// +// Parameterizing Matchers +// ======================= +// +// Sometimes you'll want to parameterize the matcher. For that you +// can use another macro: +// +// MATCHER_P(name, param_name, description_string) { statements; } +// +// For example: +// +// MATCHER_P(HasAbsoluteValue, value, "") { return abs(arg) == value; } +// +// will allow you to write: +// +// EXPECT_THAT(Blah("a"), HasAbsoluteValue(n)); +// +// which may lead to this message (assuming n is 10): +// +// Value of: Blah("a") +// Expected: has absolute value 10 +// Actual: -9 +// +// Note that both the matcher description and its parameter are +// printed, making the message human-friendly. +// +// In the matcher definition body, you can write 'foo_type' to +// reference the type of a parameter named 'foo'. For example, in the +// body of MATCHER_P(HasAbsoluteValue, value) above, you can write +// 'value_type' to refer to the type of 'value'. +// +// We also provide MATCHER_P2, MATCHER_P3, ..., up to MATCHER_P$n to +// support multi-parameter matchers. +// +// Describing Parameterized Matchers +// ================================= +// +// The last argument to MATCHER*() is a string-typed expression. The +// expression can reference all of the matcher's parameters and a +// special bool-typed variable named 'negation'. When 'negation' is +// false, the expression should evaluate to the matcher's description; +// otherwise it should evaluate to the description of the negation of +// the matcher. For example, +// +// using testing::PrintToString; +// +// MATCHER_P2(InClosedRange, low, hi, +// string(negation ? "is not" : "is") + " in range [" + +// PrintToString(low) + ", " + PrintToString(hi) + "]") { +// return low <= arg && arg <= hi; +// } +// ... 
+// EXPECT_THAT(3, InClosedRange(4, 6)); +// EXPECT_THAT(3, Not(InClosedRange(2, 4))); +// +// would generate two failures that contain the text: +// +// Expected: is in range [4, 6] +// ... +// Expected: is not in range [2, 4] +// +// If you specify "" as the description, the failure message will +// contain the sequence of words in the matcher name followed by the +// parameter values printed as a tuple. For example, +// +// MATCHER_P2(InClosedRange, low, hi, "") { ... } +// ... +// EXPECT_THAT(3, InClosedRange(4, 6)); +// EXPECT_THAT(3, Not(InClosedRange(2, 4))); +// +// would generate two failures that contain the text: +// +// Expected: in closed range (4, 6) +// ... +// Expected: not (in closed range (2, 4)) +// +// Types of Matcher Parameters +// =========================== +// +// For the purpose of typing, you can view +// +// MATCHER_Pk(Foo, p1, ..., pk, description_string) { ... } +// +// as shorthand for +// +// template +// FooMatcherPk +// Foo(p1_type p1, ..., pk_type pk) { ... } +// +// When you write Foo(v1, ..., vk), the compiler infers the types of +// the parameters v1, ..., and vk for you. If you are not happy with +// the result of the type inference, you can specify the types by +// explicitly instantiating the template, as in Foo(5, +// false). As said earlier, you don't get to (or need to) specify +// 'arg_type' as that's determined by the context in which the matcher +// is used. You can assign the result of expression Foo(p1, ..., pk) +// to a variable of type FooMatcherPk. This +// can be useful when composing matchers. +// +// While you can instantiate a matcher template with reference types, +// passing the parameters by pointer usually makes your code more +// readable. If, however, you still want to pass a parameter by +// reference, be aware that in the failure message generated by the +// matcher you will see the value of the referenced object but not its +// address. 
+// +// Explaining Match Results +// ======================== +// +// Sometimes the matcher description alone isn't enough to explain why +// the match has failed or succeeded. For example, when expecting a +// long string, it can be very helpful to also print the diff between +// the expected string and the actual one. To achieve that, you can +// optionally stream additional information to a special variable +// named result_listener, whose type is a pointer to class +// MatchResultListener: +// +// MATCHER_P(EqualsLongString, str, "") { +// if (arg == str) return true; +// +// *result_listener << "the difference: " +/// << DiffStrings(str, arg); +// return false; +// } +// +// Overloading Matchers +// ==================== +// +// You can overload matchers with different numbers of parameters: +// +// MATCHER_P(Blah, a, description_string1) { ... } +// MATCHER_P2(Blah, a, b, description_string2) { ... } +// +// Caveats +// ======= +// +// When defining a new matcher, you should also consider implementing +// MatcherInterface or using MakePolymorphicMatcher(). These +// approaches require more work than the MATCHER* macros, but also +// give you more control on the types of the value being matched and +// the matcher parameters, which may leads to better compiler error +// messages when the matcher is used wrong. They also allow +// overloading matchers based on parameter types (as opposed to just +// based on the number of parameters). +// +// MATCHER*() can only be used in a namespace scope. The reason is +// that C++ doesn't yet allow function-local types to be used to +// instantiate templates. The up-coming C++0x standard will fix this. +// Once that's done, we'll consider supporting using MATCHER*() inside +// a function. +// +// More Information +// ================ +// +// To learn more about using these macros, please search for 'MATCHER' +// on http://code.google.com/p/googlemock/wiki/CookBook. 
+ +$range i 0..n +$for i + +[[ +$var macro_name = [[$if i==0 [[MATCHER]] $elif i==1 [[MATCHER_P]] + $else [[MATCHER_P$i]]]] +$var class_name = [[name##Matcher[[$if i==0 [[]] $elif i==1 [[P]] + $else [[P$i]]]]]] +$range j 0..i-1 +$var template = [[$if i==0 [[]] $else [[ + + template <$for j, [[typename p$j##_type]]>\ +]]]] +$var ctor_param_list = [[$for j, [[p$j##_type gmock_p$j]]]] +$var impl_ctor_param_list = [[$for j, [[p$j##_type gmock_p$j]]]] +$var impl_inits = [[$if i==0 [[]] $else [[ : $for j, [[p$j(gmock_p$j)]]]]]] +$var inits = [[$if i==0 [[]] $else [[ : $for j, [[p$j(gmock_p$j)]]]]]] +$var params = [[$for j, [[p$j]]]] +$var param_types = [[$if i==0 [[]] $else [[<$for j, [[p$j##_type]]>]]]] +$var param_types_and_names = [[$for j, [[p$j##_type p$j]]]] +$var param_field_decls = [[$for j +[[ + + p$j##_type p$j;\ +]]]] +$var param_field_decls2 = [[$for j +[[ + + p$j##_type p$j;\ +]]]] + +#define $macro_name(name$for j [[, p$j]], description)\$template + class $class_name {\ + public:\ + template \ + class gmock_Impl : public ::testing::MatcherInterface {\ + public:\ + [[$if i==1 [[explicit ]]]]gmock_Impl($impl_ctor_param_list)\ + $impl_inits {}\ + virtual bool MatchAndExplain(\ + arg_type arg, ::testing::MatchResultListener* result_listener) const;\ + virtual void DescribeTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(false);\ + }\ + virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ + *gmock_os << FormatDescription(true);\ + }\$param_field_decls + private:\ + ::testing::internal::string FormatDescription(bool negation) const {\ + const ::testing::internal::string gmock_description = (description);\ + if (!gmock_description.empty())\ + return gmock_description;\ + return ::testing::internal::FormatMatcherDescription(\ + negation, #name, \ + ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ + ::std::tr1::tuple<$for j, [[p$j##_type]]>($for j, [[p$j]])));\ + }\ + GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ + };\ + 
template \ + operator ::testing::Matcher() const {\ + return ::testing::Matcher(\ + new gmock_Impl($params));\ + }\ + $class_name($ctor_param_list)$inits {\ + }\$param_field_decls2 + private:\ + GTEST_DISALLOW_ASSIGN_($class_name);\ + };\$template + inline $class_name$param_types name($param_types_and_names) {\ + return $class_name$param_types($params);\ + }\$template + template \ + bool $class_name$param_types::gmock_Impl::MatchAndExplain(\ + arg_type arg, \ + ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ + const +]] + + +#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_ diff --git a/src/gmock/include/gmock/gmock-generated-nice-strict.h b/src/gmock/include/gmock/gmock-generated-nice-strict.h new file mode 100644 index 00000000000..4095f4d5bc7 --- /dev/null +++ b/src/gmock/include/gmock/gmock-generated-nice-strict.h @@ -0,0 +1,397 @@ +// This file was GENERATED by command: +// pump.py gmock-generated-nice-strict.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Implements class templates NiceMock, NaggyMock, and StrictMock. +// +// Given a mock class MockFoo that is created using Google Mock, +// NiceMock is a subclass of MockFoo that allows +// uninteresting calls (i.e. calls to mock methods that have no +// EXPECT_CALL specs), NaggyMock is a subclass of MockFoo +// that prints a warning when an uninteresting call occurs, and +// StrictMock is a subclass of MockFoo that treats all +// uninteresting calls as errors. +// +// Currently a mock is naggy by default, so MockFoo and +// NaggyMock behave like the same. However, we will soon +// switch the default behavior of mocks to be nice, as that in general +// leads to more maintainable tests. When that happens, MockFoo will +// stop behaving like NaggyMock and start behaving like +// NiceMock. +// +// NiceMock, NaggyMock, and StrictMock "inherit" the constructors of +// their respective base class, with up-to 10 arguments. Therefore +// you can write NiceMock(5, "a") to construct a nice mock +// where MockFoo has a constructor that accepts (int, const char*), +// for example. 
+// +// A known limitation is that NiceMock, NaggyMock, +// and StrictMock only works for mock methods defined using +// the MOCK_METHOD* family of macros DIRECTLY in the MockFoo class. +// If a mock method is defined in a base class of MockFoo, the "nice" +// or "strict" modifier may not affect it, depending on the compiler. +// In particular, nesting NiceMock, NaggyMock, and StrictMock is NOT +// supported. +// +// Another known limitation is that the constructors of the base mock +// cannot have arguments passed by non-const reference, which are +// banned by the Google C++ style guide anyway. + +#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_NICE_STRICT_H_ +#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_NICE_STRICT_H_ + +#include "gmock/gmock-spec-builders.h" +#include "gmock/internal/gmock-port.h" + +namespace testing { + +template +class NiceMock : public MockClass { + public: + // We don't factor out the constructor body to a common method, as + // we have to avoid a possible clash with members of MockClass. + NiceMock() { + ::testing::Mock::AllowUninterestingCalls( + internal::ImplicitCast_(this)); + } + + // C++ doesn't (yet) allow inheritance of constructors, so we have + // to define it for each arity. 
+ template + explicit NiceMock(const A1& a1) : MockClass(a1) { + ::testing::Mock::AllowUninterestingCalls( + internal::ImplicitCast_(this)); + } + template + NiceMock(const A1& a1, const A2& a2) : MockClass(a1, a2) { + ::testing::Mock::AllowUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NiceMock(const A1& a1, const A2& a2, const A3& a3) : MockClass(a1, a2, a3) { + ::testing::Mock::AllowUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NiceMock(const A1& a1, const A2& a2, const A3& a3, + const A4& a4) : MockClass(a1, a2, a3, a4) { + ::testing::Mock::AllowUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5) : MockClass(a1, a2, a3, a4, a5) { + ::testing::Mock::AllowUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5, const A6& a6) : MockClass(a1, a2, a3, a4, a5, a6) { + ::testing::Mock::AllowUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5, const A6& a6, const A7& a7) : MockClass(a1, a2, a3, a4, a5, + a6, a7) { + ::testing::Mock::AllowUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5, const A6& a6, const A7& a7, const A8& a8) : MockClass(a1, + a2, a3, a4, a5, a6, a7, a8) { + ::testing::Mock::AllowUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5, const A6& a6, const A7& a7, const A8& a8, + const A9& a9) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9) { + ::testing::Mock::AllowUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NiceMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + 
const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9, + const A10& a10) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) { + ::testing::Mock::AllowUninterestingCalls( + internal::ImplicitCast_(this)); + } + + virtual ~NiceMock() { + ::testing::Mock::UnregisterCallReaction( + internal::ImplicitCast_(this)); + } + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(NiceMock); +}; + +template +class NaggyMock : public MockClass { + public: + // We don't factor out the constructor body to a common method, as + // we have to avoid a possible clash with members of MockClass. + NaggyMock() { + ::testing::Mock::WarnUninterestingCalls( + internal::ImplicitCast_(this)); + } + + // C++ doesn't (yet) allow inheritance of constructors, so we have + // to define it for each arity. + template + explicit NaggyMock(const A1& a1) : MockClass(a1) { + ::testing::Mock::WarnUninterestingCalls( + internal::ImplicitCast_(this)); + } + template + NaggyMock(const A1& a1, const A2& a2) : MockClass(a1, a2) { + ::testing::Mock::WarnUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NaggyMock(const A1& a1, const A2& a2, const A3& a3) : MockClass(a1, a2, a3) { + ::testing::Mock::WarnUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NaggyMock(const A1& a1, const A2& a2, const A3& a3, + const A4& a4) : MockClass(a1, a2, a3, a4) { + ::testing::Mock::WarnUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NaggyMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5) : MockClass(a1, a2, a3, a4, a5) { + ::testing::Mock::WarnUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NaggyMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5, const A6& a6) : MockClass(a1, a2, a3, a4, a5, a6) { + ::testing::Mock::WarnUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NaggyMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5, 
const A6& a6, const A7& a7) : MockClass(a1, a2, a3, a4, a5, + a6, a7) { + ::testing::Mock::WarnUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NaggyMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5, const A6& a6, const A7& a7, const A8& a8) : MockClass(a1, + a2, a3, a4, a5, a6, a7, a8) { + ::testing::Mock::WarnUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NaggyMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5, const A6& a6, const A7& a7, const A8& a8, + const A9& a9) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9) { + ::testing::Mock::WarnUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + NaggyMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9, + const A10& a10) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) { + ::testing::Mock::WarnUninterestingCalls( + internal::ImplicitCast_(this)); + } + + virtual ~NaggyMock() { + ::testing::Mock::UnregisterCallReaction( + internal::ImplicitCast_(this)); + } + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(NaggyMock); +}; + +template +class StrictMock : public MockClass { + public: + // We don't factor out the constructor body to a common method, as + // we have to avoid a possible clash with members of MockClass. + StrictMock() { + ::testing::Mock::FailUninterestingCalls( + internal::ImplicitCast_(this)); + } + + // C++ doesn't (yet) allow inheritance of constructors, so we have + // to define it for each arity. 
+ template + explicit StrictMock(const A1& a1) : MockClass(a1) { + ::testing::Mock::FailUninterestingCalls( + internal::ImplicitCast_(this)); + } + template + StrictMock(const A1& a1, const A2& a2) : MockClass(a1, a2) { + ::testing::Mock::FailUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + StrictMock(const A1& a1, const A2& a2, const A3& a3) : MockClass(a1, a2, a3) { + ::testing::Mock::FailUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + StrictMock(const A1& a1, const A2& a2, const A3& a3, + const A4& a4) : MockClass(a1, a2, a3, a4) { + ::testing::Mock::FailUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5) : MockClass(a1, a2, a3, a4, a5) { + ::testing::Mock::FailUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5, const A6& a6) : MockClass(a1, a2, a3, a4, a5, a6) { + ::testing::Mock::FailUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5, const A6& a6, const A7& a7) : MockClass(a1, a2, a3, a4, a5, + a6, a7) { + ::testing::Mock::FailUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5, const A6& a6, const A7& a7, const A8& a8) : MockClass(a1, + a2, a3, a4, a5, a6, a7, a8) { + ::testing::Mock::FailUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& a4, + const A5& a5, const A6& a6, const A7& a7, const A8& a8, + const A9& a9) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9) { + ::testing::Mock::FailUninterestingCalls( + internal::ImplicitCast_(this)); + } + + template + StrictMock(const A1& a1, const A2& a2, const A3& a3, const A4& 
a4, + const A5& a5, const A6& a6, const A7& a7, const A8& a8, const A9& a9, + const A10& a10) : MockClass(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10) { + ::testing::Mock::FailUninterestingCalls( + internal::ImplicitCast_(this)); + } + + virtual ~StrictMock() { + ::testing::Mock::UnregisterCallReaction( + internal::ImplicitCast_(this)); + } + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(StrictMock); +}; + +// The following specializations catch some (relatively more common) +// user errors of nesting nice and strict mocks. They do NOT catch +// all possible errors. + +// These specializations are declared but not defined, as NiceMock, +// NaggyMock, and StrictMock cannot be nested. + +template +class NiceMock >; +template +class NiceMock >; +template +class NiceMock >; + +template +class NaggyMock >; +template +class NaggyMock >; +template +class NaggyMock >; + +template +class StrictMock >; +template +class StrictMock >; +template +class StrictMock >; + +} // namespace testing + +#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_NICE_STRICT_H_ diff --git a/src/gmock/include/gmock/gmock-generated-nice-strict.h.pump b/src/gmock/include/gmock/gmock-generated-nice-strict.h.pump new file mode 100644 index 00000000000..3ee1ce7f309 --- /dev/null +++ b/src/gmock/include/gmock/gmock-generated-nice-strict.h.pump @@ -0,0 +1,161 @@ +$$ -*- mode: c++; -*- +$$ This is a Pump source file. Please use Pump to convert it to +$$ gmock-generated-nice-strict.h. +$$ +$var n = 10 $$ The maximum arity we support. +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Implements class templates NiceMock, NaggyMock, and StrictMock. +// +// Given a mock class MockFoo that is created using Google Mock, +// NiceMock is a subclass of MockFoo that allows +// uninteresting calls (i.e. calls to mock methods that have no +// EXPECT_CALL specs), NaggyMock is a subclass of MockFoo +// that prints a warning when an uninteresting call occurs, and +// StrictMock is a subclass of MockFoo that treats all +// uninteresting calls as errors. +// +// Currently a mock is naggy by default, so MockFoo and +// NaggyMock behave like the same. However, we will soon +// switch the default behavior of mocks to be nice, as that in general +// leads to more maintainable tests. 
When that happens, MockFoo will +// stop behaving like NaggyMock and start behaving like +// NiceMock. +// +// NiceMock, NaggyMock, and StrictMock "inherit" the constructors of +// their respective base class, with up-to $n arguments. Therefore +// you can write NiceMock(5, "a") to construct a nice mock +// where MockFoo has a constructor that accepts (int, const char*), +// for example. +// +// A known limitation is that NiceMock, NaggyMock, +// and StrictMock only works for mock methods defined using +// the MOCK_METHOD* family of macros DIRECTLY in the MockFoo class. +// If a mock method is defined in a base class of MockFoo, the "nice" +// or "strict" modifier may not affect it, depending on the compiler. +// In particular, nesting NiceMock, NaggyMock, and StrictMock is NOT +// supported. +// +// Another known limitation is that the constructors of the base mock +// cannot have arguments passed by non-const reference, which are +// banned by the Google C++ style guide anyway. + +#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_NICE_STRICT_H_ +#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_NICE_STRICT_H_ + +#include "gmock/gmock-spec-builders.h" +#include "gmock/internal/gmock-port.h" + +namespace testing { + +$range kind 0..2 +$for kind [[ + +$var clazz=[[$if kind==0 [[NiceMock]] + $elif kind==1 [[NaggyMock]] + $else [[StrictMock]]]] + +$var method=[[$if kind==0 [[AllowUninterestingCalls]] + $elif kind==1 [[WarnUninterestingCalls]] + $else [[FailUninterestingCalls]]]] + +template +class $clazz : public MockClass { + public: + // We don't factor out the constructor body to a common method, as + // we have to avoid a possible clash with members of MockClass. + $clazz() { + ::testing::Mock::$method( + internal::ImplicitCast_(this)); + } + + // C++ doesn't (yet) allow inheritance of constructors, so we have + // to define it for each arity. 
+ template + explicit $clazz(const A1& a1) : MockClass(a1) { + ::testing::Mock::$method( + internal::ImplicitCast_(this)); + } + +$range i 2..n +$for i [[ +$range j 1..i + template <$for j, [[typename A$j]]> + $clazz($for j, [[const A$j& a$j]]) : MockClass($for j, [[a$j]]) { + ::testing::Mock::$method( + internal::ImplicitCast_(this)); + } + + +]] + virtual ~$clazz() { + ::testing::Mock::UnregisterCallReaction( + internal::ImplicitCast_(this)); + } + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_($clazz); +}; + +]] + +// The following specializations catch some (relatively more common) +// user errors of nesting nice and strict mocks. They do NOT catch +// all possible errors. + +// These specializations are declared but not defined, as NiceMock, +// NaggyMock, and StrictMock cannot be nested. + +template +class NiceMock >; +template +class NiceMock >; +template +class NiceMock >; + +template +class NaggyMock >; +template +class NaggyMock >; +template +class NaggyMock >; + +template +class StrictMock >; +template +class StrictMock >; +template +class StrictMock >; + +} // namespace testing + +#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_NICE_STRICT_H_ diff --git a/src/gmock/include/gmock/gmock-matchers.h b/src/gmock/include/gmock/gmock-matchers.h new file mode 100644 index 00000000000..44055c93556 --- /dev/null +++ b/src/gmock/include/gmock/gmock-matchers.h @@ -0,0 +1,3986 @@ +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Google Mock - a framework for writing C++ mock classes. +// +// This file implements some commonly used argument matchers. More +// matchers can be defined by the user implementing the +// MatcherInterface interface if necessary. + +#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_ +#define GMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_ + +#include +#include +#include +#include +#include // NOLINT +#include +#include +#include +#include + +#include "gmock/internal/gmock-internal-utils.h" +#include "gmock/internal/gmock-port.h" +#include "gtest/gtest.h" + +#if GTEST_LANG_CXX11 +#include // NOLINT -- must be after gtest.h +#endif + +namespace testing { + +// To implement a matcher Foo for type T, define: +// 1. a class FooMatcherImpl that implements the +// MatcherInterface interface, and +// 2. a factory function that creates a Matcher object from a +// FooMatcherImpl*. 
+// +// The two-level delegation design makes it possible to allow a user +// to write "v" instead of "Eq(v)" where a Matcher is expected, which +// is impossible if we pass matchers by pointers. It also eases +// ownership management as Matcher objects can now be copied like +// plain values. + +// MatchResultListener is an abstract class. Its << operator can be +// used by a matcher to explain why a value matches or doesn't match. +// +// TODO(wan@google.com): add method +// bool InterestedInWhy(bool result) const; +// to indicate whether the listener is interested in why the match +// result is 'result'. +class MatchResultListener { + public: + // Creates a listener object with the given underlying ostream. The + // listener does not own the ostream, and does not dereference it + // in the constructor or destructor. + explicit MatchResultListener(::std::ostream* os) : stream_(os) {} + virtual ~MatchResultListener() = 0; // Makes this class abstract. + + // Streams x to the underlying ostream; does nothing if the ostream + // is NULL. + template + MatchResultListener& operator<<(const T& x) { + if (stream_ != NULL) + *stream_ << x; + return *this; + } + + // Returns the underlying ostream. + ::std::ostream* stream() { return stream_; } + + // Returns true iff the listener is interested in an explanation of + // the match result. A matcher's MatchAndExplain() method can use + // this information to avoid generating the explanation when no one + // intends to hear it. + bool IsInterested() const { return stream_ != NULL; } + + private: + ::std::ostream* const stream_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(MatchResultListener); +}; + +inline MatchResultListener::~MatchResultListener() { +} + +// An instance of a subclass of this knows how to describe itself as a +// matcher. +class MatcherDescriberInterface { + public: + virtual ~MatcherDescriberInterface() {} + + // Describes this matcher to an ostream. 
The function should print + // a verb phrase that describes the property a value matching this + // matcher should have. The subject of the verb phrase is the value + // being matched. For example, the DescribeTo() method of the Gt(7) + // matcher prints "is greater than 7". + virtual void DescribeTo(::std::ostream* os) const = 0; + + // Describes the negation of this matcher to an ostream. For + // example, if the description of this matcher is "is greater than + // 7", the negated description could be "is not greater than 7". + // You are not required to override this when implementing + // MatcherInterface, but it is highly advised so that your matcher + // can produce good error messages. + virtual void DescribeNegationTo(::std::ostream* os) const { + *os << "not ("; + DescribeTo(os); + *os << ")"; + } +}; + +// The implementation of a matcher. +template +class MatcherInterface : public MatcherDescriberInterface { + public: + // Returns true iff the matcher matches x; also explains the match + // result to 'listener' if necessary (see the next paragraph), in + // the form of a non-restrictive relative clause ("which ...", + // "whose ...", etc) that describes x. For example, the + // MatchAndExplain() method of the Pointee(...) matcher should + // generate an explanation like "which points to ...". + // + // Implementations of MatchAndExplain() should add an explanation of + // the match result *if and only if* they can provide additional + // information that's not already present (or not obvious) in the + // print-out of x and the matcher's description. Whether the match + // succeeds is not a factor in deciding whether an explanation is + // needed, as sometimes the caller needs to print a failure message + // when the match succeeds (e.g. when the matcher is used inside + // Not()). 
+ // + // For example, a "has at least 10 elements" matcher should explain + // what the actual element count is, regardless of the match result, + // as it is useful information to the reader; on the other hand, an + // "is empty" matcher probably only needs to explain what the actual + // size is when the match fails, as it's redundant to say that the + // size is 0 when the value is already known to be empty. + // + // You should override this method when defining a new matcher. + // + // It's the responsibility of the caller (Google Mock) to guarantee + // that 'listener' is not NULL. This helps to simplify a matcher's + // implementation when it doesn't care about the performance, as it + // can talk to 'listener' without checking its validity first. + // However, in order to implement dummy listeners efficiently, + // listener->stream() may be NULL. + virtual bool MatchAndExplain(T x, MatchResultListener* listener) const = 0; + + // Inherits these methods from MatcherDescriberInterface: + // virtual void DescribeTo(::std::ostream* os) const = 0; + // virtual void DescribeNegationTo(::std::ostream* os) const; +}; + +// A match result listener that stores the explanation in a string. +class StringMatchResultListener : public MatchResultListener { + public: + StringMatchResultListener() : MatchResultListener(&ss_) {} + + // Returns the explanation accumulated so far. + internal::string str() const { return ss_.str(); } + + // Clears the explanation accumulated so far. + void Clear() { ss_.str(""); } + + private: + ::std::stringstream ss_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(StringMatchResultListener); +}; + +namespace internal { + +// A match result listener that ignores the explanation. 
+class DummyMatchResultListener : public MatchResultListener { + public: + DummyMatchResultListener() : MatchResultListener(NULL) {} + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(DummyMatchResultListener); +}; + +// A match result listener that forwards the explanation to a given +// ostream. The difference between this and MatchResultListener is +// that the former is concrete. +class StreamMatchResultListener : public MatchResultListener { + public: + explicit StreamMatchResultListener(::std::ostream* os) + : MatchResultListener(os) {} + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamMatchResultListener); +}; + +// An internal class for implementing Matcher, which will derive +// from it. We put functionalities common to all Matcher +// specializations here to avoid code duplication. +template +class MatcherBase { + public: + // Returns true iff the matcher matches x; also explains the match + // result to 'listener'. + bool MatchAndExplain(T x, MatchResultListener* listener) const { + return impl_->MatchAndExplain(x, listener); + } + + // Returns true iff this matcher matches x. + bool Matches(T x) const { + DummyMatchResultListener dummy; + return MatchAndExplain(x, &dummy); + } + + // Describes this matcher to an ostream. + void DescribeTo(::std::ostream* os) const { impl_->DescribeTo(os); } + + // Describes the negation of this matcher to an ostream. + void DescribeNegationTo(::std::ostream* os) const { + impl_->DescribeNegationTo(os); + } + + // Explains why x matches, or doesn't match, the matcher. + void ExplainMatchResultTo(T x, ::std::ostream* os) const { + StreamMatchResultListener listener(os); + MatchAndExplain(x, &listener); + } + + // Returns the describer for this matcher object; retains ownership + // of the describer, which is only guaranteed to be alive when + // this matcher object is alive. 
+ const MatcherDescriberInterface* GetDescriber() const { + return impl_.get(); + } + + protected: + MatcherBase() {} + + // Constructs a matcher from its implementation. + explicit MatcherBase(const MatcherInterface* impl) + : impl_(impl) {} + + virtual ~MatcherBase() {} + + private: + // shared_ptr (util/gtl/shared_ptr.h) and linked_ptr have similar + // interfaces. The former dynamically allocates a chunk of memory + // to hold the reference count, while the latter tracks all + // references using a circular linked list without allocating + // memory. It has been observed that linked_ptr performs better in + // typical scenarios. However, shared_ptr can out-perform + // linked_ptr when there are many more uses of the copy constructor + // than the default constructor. + // + // If performance becomes a problem, we should see if using + // shared_ptr helps. + ::testing::internal::linked_ptr > impl_; +}; + +} // namespace internal + +// A Matcher is a copyable and IMMUTABLE (except by assignment) +// object that can check whether a value of type T matches. The +// implementation of Matcher is just a linked_ptr to const +// MatcherInterface, so copying is fairly cheap. Don't inherit +// from Matcher! +template +class Matcher : public internal::MatcherBase { + public: + // Constructs a null matcher. Needed for storing Matcher objects in STL + // containers. A default-constructed matcher is not yet initialized. You + // cannot use it until a valid value has been assigned to it. + Matcher() {} + + // Constructs a matcher from its implementation. + explicit Matcher(const MatcherInterface* impl) + : internal::MatcherBase(impl) {} + + // Implicit constructor here allows people to write + // EXPECT_CALL(foo, Bar(5)) instead of EXPECT_CALL(foo, Bar(Eq(5))) sometimes + Matcher(T value); // NOLINT +}; + +// The following two specializations allow the user to write str +// instead of Eq(str) and "foo" instead of Eq("foo") when a string +// matcher is expected. 
+template <> +class GTEST_API_ Matcher + : public internal::MatcherBase { + public: + Matcher() {} + + explicit Matcher(const MatcherInterface* impl) + : internal::MatcherBase(impl) {} + + // Allows the user to write str instead of Eq(str) sometimes, where + // str is a string object. + Matcher(const internal::string& s); // NOLINT + + // Allows the user to write "foo" instead of Eq("foo") sometimes. + Matcher(const char* s); // NOLINT +}; + +template <> +class GTEST_API_ Matcher + : public internal::MatcherBase { + public: + Matcher() {} + + explicit Matcher(const MatcherInterface* impl) + : internal::MatcherBase(impl) {} + + // Allows the user to write str instead of Eq(str) sometimes, where + // str is a string object. + Matcher(const internal::string& s); // NOLINT + + // Allows the user to write "foo" instead of Eq("foo") sometimes. + Matcher(const char* s); // NOLINT +}; + +#if GTEST_HAS_STRING_PIECE_ +// The following two specializations allow the user to write str +// instead of Eq(str) and "foo" instead of Eq("foo") when a StringPiece +// matcher is expected. +template <> +class GTEST_API_ Matcher + : public internal::MatcherBase { + public: + Matcher() {} + + explicit Matcher(const MatcherInterface* impl) + : internal::MatcherBase(impl) {} + + // Allows the user to write str instead of Eq(str) sometimes, where + // str is a string object. + Matcher(const internal::string& s); // NOLINT + + // Allows the user to write "foo" instead of Eq("foo") sometimes. + Matcher(const char* s); // NOLINT + + // Allows the user to pass StringPieces directly. + Matcher(StringPiece s); // NOLINT +}; + +template <> +class GTEST_API_ Matcher + : public internal::MatcherBase { + public: + Matcher() {} + + explicit Matcher(const MatcherInterface* impl) + : internal::MatcherBase(impl) {} + + // Allows the user to write str instead of Eq(str) sometimes, where + // str is a string object. 
+ Matcher(const internal::string& s); // NOLINT + + // Allows the user to write "foo" instead of Eq("foo") sometimes. + Matcher(const char* s); // NOLINT + + // Allows the user to pass StringPieces directly. + Matcher(StringPiece s); // NOLINT +}; +#endif // GTEST_HAS_STRING_PIECE_ + +// The PolymorphicMatcher class template makes it easy to implement a +// polymorphic matcher (i.e. a matcher that can match values of more +// than one type, e.g. Eq(n) and NotNull()). +// +// To define a polymorphic matcher, a user should provide an Impl +// class that has a DescribeTo() method and a DescribeNegationTo() +// method, and define a member function (or member function template) +// +// bool MatchAndExplain(const Value& value, +// MatchResultListener* listener) const; +// +// See the definition of NotNull() for a complete example. +template +class PolymorphicMatcher { + public: + explicit PolymorphicMatcher(const Impl& an_impl) : impl_(an_impl) {} + + // Returns a mutable reference to the underlying matcher + // implementation object. + Impl& mutable_impl() { return impl_; } + + // Returns an immutable reference to the underlying matcher + // implementation object. + const Impl& impl() const { return impl_; } + + template + operator Matcher() const { + return Matcher(new MonomorphicImpl(impl_)); + } + + private: + template + class MonomorphicImpl : public MatcherInterface { + public: + explicit MonomorphicImpl(const Impl& impl) : impl_(impl) {} + + virtual void DescribeTo(::std::ostream* os) const { + impl_.DescribeTo(os); + } + + virtual void DescribeNegationTo(::std::ostream* os) const { + impl_.DescribeNegationTo(os); + } + + virtual bool MatchAndExplain(T x, MatchResultListener* listener) const { + return impl_.MatchAndExplain(x, listener); + } + + private: + const Impl impl_; + + GTEST_DISALLOW_ASSIGN_(MonomorphicImpl); + }; + + Impl impl_; + + GTEST_DISALLOW_ASSIGN_(PolymorphicMatcher); +}; + +// Creates a matcher from its implementation. 
This is easier to use +// than the Matcher constructor as it doesn't require you to +// explicitly write the template argument, e.g. +// +// MakeMatcher(foo); +// vs +// Matcher(foo); +template +inline Matcher MakeMatcher(const MatcherInterface* impl) { + return Matcher(impl); +} + +// Creates a polymorphic matcher from its implementation. This is +// easier to use than the PolymorphicMatcher constructor as it +// doesn't require you to explicitly write the template argument, e.g. +// +// MakePolymorphicMatcher(foo); +// vs +// PolymorphicMatcher(foo); +template +inline PolymorphicMatcher MakePolymorphicMatcher(const Impl& impl) { + return PolymorphicMatcher(impl); +} + +// Anything inside the 'internal' namespace IS INTERNAL IMPLEMENTATION +// and MUST NOT BE USED IN USER CODE!!! +namespace internal { + +// The MatcherCastImpl class template is a helper for implementing +// MatcherCast(). We need this helper in order to partially +// specialize the implementation of MatcherCast() (C++ allows +// class/struct templates to be partially specialized, but not +// function templates.). + +// This general version is used when MatcherCast()'s argument is a +// polymorphic matcher (i.e. something that can be converted to a +// Matcher but is not one yet; for example, Eq(value)) or a value (for +// example, "hello"). +template +class MatcherCastImpl { + public: + static Matcher Cast(M polymorphic_matcher_or_value) { + // M can be a polymorhic matcher, in which case we want to use + // its conversion operator to create Matcher. Or it can be a value + // that should be passed to the Matcher's constructor. + // + // We can't call Matcher(polymorphic_matcher_or_value) when M is a + // polymorphic matcher because it'll be ambiguous if T has an implicit + // constructor from M (this usually happens when T has an implicit + // constructor from any type). 
+ // + // It won't work to unconditionally implict_cast + // polymorphic_matcher_or_value to Matcher because it won't trigger + // a user-defined conversion from M to T if one exists (assuming M is + // a value). + return CastImpl( + polymorphic_matcher_or_value, + BooleanConstant< + internal::ImplicitlyConvertible >::value>()); + } + + private: + static Matcher CastImpl(M value, BooleanConstant) { + // M can't be implicitly converted to Matcher, so M isn't a polymorphic + // matcher. It must be a value then. Use direct initialization to create + // a matcher. + return Matcher(ImplicitCast_(value)); + } + + static Matcher CastImpl(M polymorphic_matcher_or_value, + BooleanConstant) { + // M is implicitly convertible to Matcher, which means that either + // M is a polymorhpic matcher or Matcher has an implicit constructor + // from M. In both cases using the implicit conversion will produce a + // matcher. + // + // Even if T has an implicit constructor from M, it won't be called because + // creating Matcher would require a chain of two user-defined conversions + // (first to create T from M and then to create Matcher from T). + return polymorphic_matcher_or_value; + } +}; + +// This more specialized version is used when MatcherCast()'s argument +// is already a Matcher. This only compiles when type T can be +// statically converted to type U. +template +class MatcherCastImpl > { + public: + static Matcher Cast(const Matcher& source_matcher) { + return Matcher(new Impl(source_matcher)); + } + + private: + class Impl : public MatcherInterface { + public: + explicit Impl(const Matcher& source_matcher) + : source_matcher_(source_matcher) {} + + // We delegate the matching logic to the source matcher. 
+ virtual bool MatchAndExplain(T x, MatchResultListener* listener) const { + return source_matcher_.MatchAndExplain(static_cast(x), listener); + } + + virtual void DescribeTo(::std::ostream* os) const { + source_matcher_.DescribeTo(os); + } + + virtual void DescribeNegationTo(::std::ostream* os) const { + source_matcher_.DescribeNegationTo(os); + } + + private: + const Matcher source_matcher_; + + GTEST_DISALLOW_ASSIGN_(Impl); + }; +}; + +// This even more specialized version is used for efficiently casting +// a matcher to its own type. +template +class MatcherCastImpl > { + public: + static Matcher Cast(const Matcher& matcher) { return matcher; } +}; + +} // namespace internal + +// In order to be safe and clear, casting between different matcher +// types is done explicitly via MatcherCast(m), which takes a +// matcher m and returns a Matcher. It compiles only when T can be +// statically converted to the argument type of m. +template +inline Matcher MatcherCast(M matcher) { + return internal::MatcherCastImpl::Cast(matcher); +} + +// Implements SafeMatcherCast(). +// +// We use an intermediate class to do the actual safe casting as Nokia's +// Symbian compiler cannot decide between +// template ... (M) and +// template ... (const Matcher&) +// for function templates but can for member function templates. +template +class SafeMatcherCastImpl { + public: + // This overload handles polymorphic matchers and values only since + // monomorphic matchers are handled by the next one. + template + static inline Matcher Cast(M polymorphic_matcher_or_value) { + return internal::MatcherCastImpl::Cast(polymorphic_matcher_or_value); + } + + // This overload handles monomorphic matchers. + // + // In general, if type T can be implicitly converted to type U, we can + // safely convert a Matcher to a Matcher (i.e. Matcher is + // contravariant): just keep a copy of the original Matcher, convert the + // argument from type T to U, and then pass it to the underlying Matcher. 
+ // The only exception is when U is a reference and T is not, as the + // underlying Matcher may be interested in the argument's address, which + // is not preserved in the conversion from T to U. + template + static inline Matcher Cast(const Matcher& matcher) { + // Enforce that T can be implicitly converted to U. + GTEST_COMPILE_ASSERT_((internal::ImplicitlyConvertible::value), + T_must_be_implicitly_convertible_to_U); + // Enforce that we are not converting a non-reference type T to a reference + // type U. + GTEST_COMPILE_ASSERT_( + internal::is_reference::value || !internal::is_reference::value, + cannot_convert_non_referentce_arg_to_reference); + // In case both T and U are arithmetic types, enforce that the + // conversion is not lossy. + typedef GTEST_REMOVE_REFERENCE_AND_CONST_(T) RawT; + typedef GTEST_REMOVE_REFERENCE_AND_CONST_(U) RawU; + const bool kTIsOther = GMOCK_KIND_OF_(RawT) == internal::kOther; + const bool kUIsOther = GMOCK_KIND_OF_(RawU) == internal::kOther; + GTEST_COMPILE_ASSERT_( + kTIsOther || kUIsOther || + (internal::LosslessArithmeticConvertible::value), + conversion_of_arithmetic_types_must_be_lossless); + return MatcherCast(matcher); + } +}; + +template +inline Matcher SafeMatcherCast(const M& polymorphic_matcher) { + return SafeMatcherCastImpl::Cast(polymorphic_matcher); +} + +// A() returns a matcher that matches any value of type T. +template +Matcher A(); + +// Anything inside the 'internal' namespace IS INTERNAL IMPLEMENTATION +// and MUST NOT BE USED IN USER CODE!!! +namespace internal { + +// If the explanation is not empty, prints it to the ostream. +inline void PrintIfNotEmpty(const internal::string& explanation, + ::std::ostream* os) { + if (explanation != "" && os != NULL) { + *os << ", " << explanation; + } +} + +// Returns true if the given type name is easy to read by a human. +// This is used to decide whether printing the type of a value might +// be helpful. 
+inline bool IsReadableTypeName(const string& type_name) { + // We consider a type name readable if it's short or doesn't contain + // a template or function type. + return (type_name.length() <= 20 || + type_name.find_first_of("<(") == string::npos); +} + +// Matches the value against the given matcher, prints the value and explains +// the match result to the listener. Returns the match result. +// 'listener' must not be NULL. +// Value cannot be passed by const reference, because some matchers take a +// non-const argument. +template +bool MatchPrintAndExplain(Value& value, const Matcher& matcher, + MatchResultListener* listener) { + if (!listener->IsInterested()) { + // If the listener is not interested, we do not need to construct the + // inner explanation. + return matcher.Matches(value); + } + + StringMatchResultListener inner_listener; + const bool match = matcher.MatchAndExplain(value, &inner_listener); + + UniversalPrint(value, listener->stream()); +#if GTEST_HAS_RTTI + const string& type_name = GetTypeName(); + if (IsReadableTypeName(type_name)) + *listener->stream() << " (of type " << type_name << ")"; +#endif + PrintIfNotEmpty(inner_listener.str(), listener->stream()); + + return match; +} + +// An internal helper class for doing compile-time loop on a tuple's +// fields. +template +class TuplePrefix { + public: + // TuplePrefix::Matches(matcher_tuple, value_tuple) returns true + // iff the first N fields of matcher_tuple matches the first N + // fields of value_tuple, respectively. + template + static bool Matches(const MatcherTuple& matcher_tuple, + const ValueTuple& value_tuple) { + using ::std::tr1::get; + return TuplePrefix::Matches(matcher_tuple, value_tuple) + && get(matcher_tuple).Matches(get(value_tuple)); + } + + // TuplePrefix::ExplainMatchFailuresTo(matchers, values, os) + // describes failures in matching the first N fields of matchers + // against the first N fields of values. If there is no failure, + // nothing will be streamed to os. 
+ template + static void ExplainMatchFailuresTo(const MatcherTuple& matchers, + const ValueTuple& values, + ::std::ostream* os) { + using ::std::tr1::tuple_element; + using ::std::tr1::get; + + // First, describes failures in the first N - 1 fields. + TuplePrefix::ExplainMatchFailuresTo(matchers, values, os); + + // Then describes the failure (if any) in the (N - 1)-th (0-based) + // field. + typename tuple_element::type matcher = + get(matchers); + typedef typename tuple_element::type Value; + Value value = get(values); + StringMatchResultListener listener; + if (!matcher.MatchAndExplain(value, &listener)) { + // TODO(wan): include in the message the name of the parameter + // as used in MOCK_METHOD*() when possible. + *os << " Expected arg #" << N - 1 << ": "; + get(matchers).DescribeTo(os); + *os << "\n Actual: "; + // We remove the reference in type Value to prevent the + // universal printer from printing the address of value, which + // isn't interesting to the user most of the time. The + // matcher's MatchAndExplain() method handles the case when + // the address is interesting. + internal::UniversalPrint(value, os); + PrintIfNotEmpty(listener.str(), os); + *os << "\n"; + } + } +}; + +// The base case. +template <> +class TuplePrefix<0> { + public: + template + static bool Matches(const MatcherTuple& /* matcher_tuple */, + const ValueTuple& /* value_tuple */) { + return true; + } + + template + static void ExplainMatchFailuresTo(const MatcherTuple& /* matchers */, + const ValueTuple& /* values */, + ::std::ostream* /* os */) {} +}; + +// TupleMatches(matcher_tuple, value_tuple) returns true iff all +// matchers in matcher_tuple match the corresponding fields in +// value_tuple. It is a compiler error if matcher_tuple and +// value_tuple have different number of fields or incompatible field +// types. 
+template +bool TupleMatches(const MatcherTuple& matcher_tuple, + const ValueTuple& value_tuple) { + using ::std::tr1::tuple_size; + // Makes sure that matcher_tuple and value_tuple have the same + // number of fields. + GTEST_COMPILE_ASSERT_(tuple_size::value == + tuple_size::value, + matcher_and_value_have_different_numbers_of_fields); + return TuplePrefix::value>:: + Matches(matcher_tuple, value_tuple); +} + +// Describes failures in matching matchers against values. If there +// is no failure, nothing will be streamed to os. +template +void ExplainMatchFailureTupleTo(const MatcherTuple& matchers, + const ValueTuple& values, + ::std::ostream* os) { + using ::std::tr1::tuple_size; + TuplePrefix::value>::ExplainMatchFailuresTo( + matchers, values, os); +} + +// TransformTupleValues and its helper. +// +// TransformTupleValuesHelper hides the internal machinery that +// TransformTupleValues uses to implement a tuple traversal. +template +class TransformTupleValuesHelper { + private: + typedef typename ::std::tr1::tuple_size TupleSize; + + public: + // For each member of tuple 't', taken in order, evaluates '*out++ = f(t)'. + // Returns the final value of 'out' in case the caller needs it. + static OutIter Run(Func f, const Tuple& t, OutIter out) { + return IterateOverTuple()(f, t, out); + } + + private: + template + struct IterateOverTuple { + OutIter operator() (Func f, const Tup& t, OutIter out) const { + *out++ = f(::std::tr1::get(t)); + return IterateOverTuple()(f, t, out); + } + }; + template + struct IterateOverTuple { + OutIter operator() (Func /* f */, const Tup& /* t */, OutIter out) const { + return out; + } + }; +}; + +// Successively invokes 'f(element)' on each element of the tuple 't', +// appending each result to the 'out' iterator. Returns the final value +// of 'out'. +template +OutIter TransformTupleValues(Func f, const Tuple& t, OutIter out) { + return TransformTupleValuesHelper::Run(f, t, out); +} + +// Implements A(). 
+template +class AnyMatcherImpl : public MatcherInterface { + public: + virtual bool MatchAndExplain( + T /* x */, MatchResultListener* /* listener */) const { return true; } + virtual void DescribeTo(::std::ostream* os) const { *os << "is anything"; } + virtual void DescribeNegationTo(::std::ostream* os) const { + // This is mostly for completeness' safe, as it's not very useful + // to write Not(A()). However we cannot completely rule out + // such a possibility, and it doesn't hurt to be prepared. + *os << "never matches"; + } +}; + +// Implements _, a matcher that matches any value of any +// type. This is a polymorphic matcher, so we need a template type +// conversion operator to make it appearing as a Matcher for any +// type T. +class AnythingMatcher { + public: + template + operator Matcher() const { return A(); } +}; + +// Implements a matcher that compares a given value with a +// pre-supplied value using one of the ==, <=, <, etc, operators. The +// two values being compared don't have to have the same type. +// +// The matcher defined here is polymorphic (for example, Eq(5) can be +// used to match an int, a short, a double, etc). Therefore we use +// a template type conversion operator in the implementation. +// +// We define this as a macro in order to eliminate duplicated source +// code. +// +// The following template definition assumes that the Rhs parameter is +// a "bare" type (i.e. neither 'const T' nor 'T&'). 
+#define GMOCK_IMPLEMENT_COMPARISON_MATCHER_( \ + name, op, relation, negated_relation) \ + template class name##Matcher { \ + public: \ + explicit name##Matcher(const Rhs& rhs) : rhs_(rhs) {} \ + template \ + operator Matcher() const { \ + return MakeMatcher(new Impl(rhs_)); \ + } \ + private: \ + template \ + class Impl : public MatcherInterface { \ + public: \ + explicit Impl(const Rhs& rhs) : rhs_(rhs) {} \ + virtual bool MatchAndExplain(\ + Lhs lhs, MatchResultListener* /* listener */) const { \ + return lhs op rhs_; \ + } \ + virtual void DescribeTo(::std::ostream* os) const { \ + *os << relation " "; \ + UniversalPrint(rhs_, os); \ + } \ + virtual void DescribeNegationTo(::std::ostream* os) const { \ + *os << negated_relation " "; \ + UniversalPrint(rhs_, os); \ + } \ + private: \ + Rhs rhs_; \ + GTEST_DISALLOW_ASSIGN_(Impl); \ + }; \ + Rhs rhs_; \ + GTEST_DISALLOW_ASSIGN_(name##Matcher); \ + } + +// Implements Eq(v), Ge(v), Gt(v), Le(v), Lt(v), and Ne(v) +// respectively. +GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Eq, ==, "is equal to", "isn't equal to"); +GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Ge, >=, "is >=", "isn't >="); +GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Gt, >, "is >", "isn't >"); +GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Le, <=, "is <=", "isn't <="); +GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Lt, <, "is <", "isn't <"); +GMOCK_IMPLEMENT_COMPARISON_MATCHER_(Ne, !=, "isn't equal to", "is equal to"); + +#undef GMOCK_IMPLEMENT_COMPARISON_MATCHER_ + +// Implements the polymorphic IsNull() matcher, which matches any raw or smart +// pointer that is NULL. 
+class IsNullMatcher { + public: + template + bool MatchAndExplain(const Pointer& p, + MatchResultListener* /* listener */) const { + return GetRawPointer(p) == NULL; + } + + void DescribeTo(::std::ostream* os) const { *os << "is NULL"; } + void DescribeNegationTo(::std::ostream* os) const { + *os << "isn't NULL"; + } +}; + +// Implements the polymorphic NotNull() matcher, which matches any raw or smart +// pointer that is not NULL. +class NotNullMatcher { + public: + template + bool MatchAndExplain(const Pointer& p, + MatchResultListener* /* listener */) const { + return GetRawPointer(p) != NULL; + } + + void DescribeTo(::std::ostream* os) const { *os << "isn't NULL"; } + void DescribeNegationTo(::std::ostream* os) const { + *os << "is NULL"; + } +}; + +// Ref(variable) matches any argument that is a reference to +// 'variable'. This matcher is polymorphic as it can match any +// super type of the type of 'variable'. +// +// The RefMatcher template class implements Ref(variable). It can +// only be instantiated with a reference type. This prevents a user +// from mistakenly using Ref(x) to match a non-reference function +// argument. For example, the following will righteously cause a +// compiler error: +// +// int n; +// Matcher m1 = Ref(n); // This won't compile. +// Matcher m2 = Ref(n); // This will compile. +template +class RefMatcher; + +template +class RefMatcher { + // Google Mock is a generic framework and thus needs to support + // mocking any function types, including those that take non-const + // reference arguments. Therefore the template parameter T (and + // Super below) can be instantiated to either a const type or a + // non-const type. + public: + // RefMatcher() takes a T& instead of const T&, as we want the + // compiler to catch using Ref(const_value) as a matcher for a + // non-const reference. 
+ explicit RefMatcher(T& x) : object_(x) {} // NOLINT + + template + operator Matcher() const { + // By passing object_ (type T&) to Impl(), which expects a Super&, + // we make sure that Super is a super type of T. In particular, + // this catches using Ref(const_value) as a matcher for a + // non-const reference, as you cannot implicitly convert a const + // reference to a non-const reference. + return MakeMatcher(new Impl(object_)); + } + + private: + template + class Impl : public MatcherInterface { + public: + explicit Impl(Super& x) : object_(x) {} // NOLINT + + // MatchAndExplain() takes a Super& (as opposed to const Super&) + // in order to match the interface MatcherInterface. + virtual bool MatchAndExplain( + Super& x, MatchResultListener* listener) const { + *listener << "which is located @" << static_cast(&x); + return &x == &object_; + } + + virtual void DescribeTo(::std::ostream* os) const { + *os << "references the variable "; + UniversalPrinter::Print(object_, os); + } + + virtual void DescribeNegationTo(::std::ostream* os) const { + *os << "does not reference the variable "; + UniversalPrinter::Print(object_, os); + } + + private: + const Super& object_; + + GTEST_DISALLOW_ASSIGN_(Impl); + }; + + T& object_; + + GTEST_DISALLOW_ASSIGN_(RefMatcher); +}; + +// Polymorphic helper functions for narrow and wide string matchers. +inline bool CaseInsensitiveCStringEquals(const char* lhs, const char* rhs) { + return String::CaseInsensitiveCStringEquals(lhs, rhs); +} + +inline bool CaseInsensitiveCStringEquals(const wchar_t* lhs, + const wchar_t* rhs) { + return String::CaseInsensitiveWideCStringEquals(lhs, rhs); +} + +// String comparison for narrow or wide strings that can have embedded NUL +// characters. +template +bool CaseInsensitiveStringEquals(const StringType& s1, + const StringType& s2) { + // Are the heads equal? + if (!CaseInsensitiveCStringEquals(s1.c_str(), s2.c_str())) { + return false; + } + + // Skip the equal heads. 
+ const typename StringType::value_type nul = 0; + const size_t i1 = s1.find(nul), i2 = s2.find(nul); + + // Are we at the end of either s1 or s2? + if (i1 == StringType::npos || i2 == StringType::npos) { + return i1 == i2; + } + + // Are the tails equal? + return CaseInsensitiveStringEquals(s1.substr(i1 + 1), s2.substr(i2 + 1)); +} + +// String matchers. + +// Implements equality-based string matchers like StrEq, StrCaseNe, and etc. +template +class StrEqualityMatcher { + public: + StrEqualityMatcher(const StringType& str, bool expect_eq, + bool case_sensitive) + : string_(str), expect_eq_(expect_eq), case_sensitive_(case_sensitive) {} + + // Accepts pointer types, particularly: + // const char* + // char* + // const wchar_t* + // wchar_t* + template + bool MatchAndExplain(CharType* s, MatchResultListener* listener) const { + if (s == NULL) { + return !expect_eq_; + } + return MatchAndExplain(StringType(s), listener); + } + + // Matches anything that can convert to StringType. + // + // This is a template, not just a plain function with const StringType&, + // because StringPiece has some interfering non-explicit constructors. + template + bool MatchAndExplain(const MatcheeStringType& s, + MatchResultListener* /* listener */) const { + const StringType& s2(s); + const bool eq = case_sensitive_ ? s2 == string_ : + CaseInsensitiveStringEquals(s2, string_); + return expect_eq_ == eq; + } + + void DescribeTo(::std::ostream* os) const { + DescribeToHelper(expect_eq_, os); + } + + void DescribeNegationTo(::std::ostream* os) const { + DescribeToHelper(!expect_eq_, os); + } + + private: + void DescribeToHelper(bool expect_eq, ::std::ostream* os) const { + *os << (expect_eq ? 
"is " : "isn't "); + *os << "equal to "; + if (!case_sensitive_) { + *os << "(ignoring case) "; + } + UniversalPrint(string_, os); + } + + const StringType string_; + const bool expect_eq_; + const bool case_sensitive_; + + GTEST_DISALLOW_ASSIGN_(StrEqualityMatcher); +}; + +// Implements the polymorphic HasSubstr(substring) matcher, which +// can be used as a Matcher as long as T can be converted to a +// string. +template +class HasSubstrMatcher { + public: + explicit HasSubstrMatcher(const StringType& substring) + : substring_(substring) {} + + // Accepts pointer types, particularly: + // const char* + // char* + // const wchar_t* + // wchar_t* + template + bool MatchAndExplain(CharType* s, MatchResultListener* listener) const { + return s != NULL && MatchAndExplain(StringType(s), listener); + } + + // Matches anything that can convert to StringType. + // + // This is a template, not just a plain function with const StringType&, + // because StringPiece has some interfering non-explicit constructors. + template + bool MatchAndExplain(const MatcheeStringType& s, + MatchResultListener* /* listener */) const { + const StringType& s2(s); + return s2.find(substring_) != StringType::npos; + } + + // Describes what this matcher matches. + void DescribeTo(::std::ostream* os) const { + *os << "has substring "; + UniversalPrint(substring_, os); + } + + void DescribeNegationTo(::std::ostream* os) const { + *os << "has no substring "; + UniversalPrint(substring_, os); + } + + private: + const StringType substring_; + + GTEST_DISALLOW_ASSIGN_(HasSubstrMatcher); +}; + +// Implements the polymorphic StartsWith(substring) matcher, which +// can be used as a Matcher as long as T can be converted to a +// string. 
+template +class StartsWithMatcher { + public: + explicit StartsWithMatcher(const StringType& prefix) : prefix_(prefix) { + } + + // Accepts pointer types, particularly: + // const char* + // char* + // const wchar_t* + // wchar_t* + template + bool MatchAndExplain(CharType* s, MatchResultListener* listener) const { + return s != NULL && MatchAndExplain(StringType(s), listener); + } + + // Matches anything that can convert to StringType. + // + // This is a template, not just a plain function with const StringType&, + // because StringPiece has some interfering non-explicit constructors. + template + bool MatchAndExplain(const MatcheeStringType& s, + MatchResultListener* /* listener */) const { + const StringType& s2(s); + return s2.length() >= prefix_.length() && + s2.substr(0, prefix_.length()) == prefix_; + } + + void DescribeTo(::std::ostream* os) const { + *os << "starts with "; + UniversalPrint(prefix_, os); + } + + void DescribeNegationTo(::std::ostream* os) const { + *os << "doesn't start with "; + UniversalPrint(prefix_, os); + } + + private: + const StringType prefix_; + + GTEST_DISALLOW_ASSIGN_(StartsWithMatcher); +}; + +// Implements the polymorphic EndsWith(substring) matcher, which +// can be used as a Matcher as long as T can be converted to a +// string. +template +class EndsWithMatcher { + public: + explicit EndsWithMatcher(const StringType& suffix) : suffix_(suffix) {} + + // Accepts pointer types, particularly: + // const char* + // char* + // const wchar_t* + // wchar_t* + template + bool MatchAndExplain(CharType* s, MatchResultListener* listener) const { + return s != NULL && MatchAndExplain(StringType(s), listener); + } + + // Matches anything that can convert to StringType. + // + // This is a template, not just a plain function with const StringType&, + // because StringPiece has some interfering non-explicit constructors. 
+ template + bool MatchAndExplain(const MatcheeStringType& s, + MatchResultListener* /* listener */) const { + const StringType& s2(s); + return s2.length() >= suffix_.length() && + s2.substr(s2.length() - suffix_.length()) == suffix_; + } + + void DescribeTo(::std::ostream* os) const { + *os << "ends with "; + UniversalPrint(suffix_, os); + } + + void DescribeNegationTo(::std::ostream* os) const { + *os << "doesn't end with "; + UniversalPrint(suffix_, os); + } + + private: + const StringType suffix_; + + GTEST_DISALLOW_ASSIGN_(EndsWithMatcher); +}; + +// Implements polymorphic matchers MatchesRegex(regex) and +// ContainsRegex(regex), which can be used as a Matcher as long as +// T can be converted to a string. +class MatchesRegexMatcher { + public: + MatchesRegexMatcher(const RE* regex, bool full_match) + : regex_(regex), full_match_(full_match) {} + + // Accepts pointer types, particularly: + // const char* + // char* + // const wchar_t* + // wchar_t* + template + bool MatchAndExplain(CharType* s, MatchResultListener* listener) const { + return s != NULL && MatchAndExplain(internal::string(s), listener); + } + + // Matches anything that can convert to internal::string. + // + // This is a template, not just a plain function with const internal::string&, + // because StringPiece has some interfering non-explicit constructors. + template + bool MatchAndExplain(const MatcheeStringType& s, + MatchResultListener* /* listener */) const { + const internal::string& s2(s); + return full_match_ ? RE::FullMatch(s2, *regex_) : + RE::PartialMatch(s2, *regex_); + } + + void DescribeTo(::std::ostream* os) const { + *os << (full_match_ ? "matches" : "contains") + << " regular expression "; + UniversalPrinter::Print(regex_->pattern(), os); + } + + void DescribeNegationTo(::std::ostream* os) const { + *os << "doesn't " << (full_match_ ? 
"match" : "contain") + << " regular expression "; + UniversalPrinter::Print(regex_->pattern(), os); + } + + private: + const internal::linked_ptr regex_; + const bool full_match_; + + GTEST_DISALLOW_ASSIGN_(MatchesRegexMatcher); +}; + +// Implements a matcher that compares the two fields of a 2-tuple +// using one of the ==, <=, <, etc, operators. The two fields being +// compared don't have to have the same type. +// +// The matcher defined here is polymorphic (for example, Eq() can be +// used to match a tuple, a tuple, +// etc). Therefore we use a template type conversion operator in the +// implementation. +// +// We define this as a macro in order to eliminate duplicated source +// code. +#define GMOCK_IMPLEMENT_COMPARISON2_MATCHER_(name, op, relation) \ + class name##2Matcher { \ + public: \ + template \ + operator Matcher< ::std::tr1::tuple >() const { \ + return MakeMatcher(new Impl< ::std::tr1::tuple >); \ + } \ + template \ + operator Matcher&>() const { \ + return MakeMatcher(new Impl&>); \ + } \ + private: \ + template \ + class Impl : public MatcherInterface { \ + public: \ + virtual bool MatchAndExplain( \ + Tuple args, \ + MatchResultListener* /* listener */) const { \ + return ::std::tr1::get<0>(args) op ::std::tr1::get<1>(args); \ + } \ + virtual void DescribeTo(::std::ostream* os) const { \ + *os << "are " relation; \ + } \ + virtual void DescribeNegationTo(::std::ostream* os) const { \ + *os << "aren't " relation; \ + } \ + }; \ + } + +// Implements Eq(), Ge(), Gt(), Le(), Lt(), and Ne() respectively. 
+GMOCK_IMPLEMENT_COMPARISON2_MATCHER_(Eq, ==, "an equal pair"); +GMOCK_IMPLEMENT_COMPARISON2_MATCHER_( + Ge, >=, "a pair where the first >= the second"); +GMOCK_IMPLEMENT_COMPARISON2_MATCHER_( + Gt, >, "a pair where the first > the second"); +GMOCK_IMPLEMENT_COMPARISON2_MATCHER_( + Le, <=, "a pair where the first <= the second"); +GMOCK_IMPLEMENT_COMPARISON2_MATCHER_( + Lt, <, "a pair where the first < the second"); +GMOCK_IMPLEMENT_COMPARISON2_MATCHER_(Ne, !=, "an unequal pair"); + +#undef GMOCK_IMPLEMENT_COMPARISON2_MATCHER_ + +// Implements the Not(...) matcher for a particular argument type T. +// We do not nest it inside the NotMatcher class template, as that +// will prevent different instantiations of NotMatcher from sharing +// the same NotMatcherImpl class. +template +class NotMatcherImpl : public MatcherInterface { + public: + explicit NotMatcherImpl(const Matcher& matcher) + : matcher_(matcher) {} + + virtual bool MatchAndExplain(T x, MatchResultListener* listener) const { + return !matcher_.MatchAndExplain(x, listener); + } + + virtual void DescribeTo(::std::ostream* os) const { + matcher_.DescribeNegationTo(os); + } + + virtual void DescribeNegationTo(::std::ostream* os) const { + matcher_.DescribeTo(os); + } + + private: + const Matcher matcher_; + + GTEST_DISALLOW_ASSIGN_(NotMatcherImpl); +}; + +// Implements the Not(m) matcher, which matches a value that doesn't +// match matcher m. +template +class NotMatcher { + public: + explicit NotMatcher(InnerMatcher matcher) : matcher_(matcher) {} + + // This template type conversion operator allows Not(m) to be used + // to match any type m can match. + template + operator Matcher() const { + return Matcher(new NotMatcherImpl(SafeMatcherCast(matcher_))); + } + + private: + InnerMatcher matcher_; + + GTEST_DISALLOW_ASSIGN_(NotMatcher); +}; + +// Implements the AllOf(m1, m2) matcher for a particular argument type +// T. 
We do not nest it inside the BothOfMatcher class template, as +// that will prevent different instantiations of BothOfMatcher from +// sharing the same BothOfMatcherImpl class. +template +class BothOfMatcherImpl : public MatcherInterface { + public: + BothOfMatcherImpl(const Matcher& matcher1, const Matcher& matcher2) + : matcher1_(matcher1), matcher2_(matcher2) {} + + virtual void DescribeTo(::std::ostream* os) const { + *os << "("; + matcher1_.DescribeTo(os); + *os << ") and ("; + matcher2_.DescribeTo(os); + *os << ")"; + } + + virtual void DescribeNegationTo(::std::ostream* os) const { + *os << "("; + matcher1_.DescribeNegationTo(os); + *os << ") or ("; + matcher2_.DescribeNegationTo(os); + *os << ")"; + } + + virtual bool MatchAndExplain(T x, MatchResultListener* listener) const { + // If either matcher1_ or matcher2_ doesn't match x, we only need + // to explain why one of them fails. + StringMatchResultListener listener1; + if (!matcher1_.MatchAndExplain(x, &listener1)) { + *listener << listener1.str(); + return false; + } + + StringMatchResultListener listener2; + if (!matcher2_.MatchAndExplain(x, &listener2)) { + *listener << listener2.str(); + return false; + } + + // Otherwise we need to explain why *both* of them match. + const internal::string s1 = listener1.str(); + const internal::string s2 = listener2.str(); + + if (s1 == "") { + *listener << s2; + } else { + *listener << s1; + if (s2 != "") { + *listener << ", and " << s2; + } + } + return true; + } + + private: + const Matcher matcher1_; + const Matcher matcher2_; + + GTEST_DISALLOW_ASSIGN_(BothOfMatcherImpl); +}; + +#if GTEST_LANG_CXX11 +// MatcherList provides mechanisms for storing a variable number of matchers in +// a list structure (ListType) and creating a combining matcher from such a +// list. +// The template is defined recursively using the following template paramters: +// * kSize is the length of the MatcherList. +// * Head is the type of the first matcher of the list. 
+// * Tail denotes the types of the remaining matchers of the list. +template +struct MatcherList { + typedef MatcherList MatcherListTail; + typedef ::std::pair ListType; + + // BuildList stores variadic type values in a nested pair structure. + // Example: + // MatcherList<3, int, string, float>::BuildList(5, "foo", 2.0) will return + // the corresponding result of type pair>. + static ListType BuildList(const Head& matcher, const Tail&... tail) { + return ListType(matcher, MatcherListTail::BuildList(tail...)); + } + + // CreateMatcher creates a Matcher from a given list of matchers (built + // by BuildList()). CombiningMatcher is used to combine the matchers of the + // list. CombiningMatcher must implement MatcherInterface and have a + // constructor taking two Matchers as input. + template class CombiningMatcher> + static Matcher CreateMatcher(const ListType& matchers) { + return Matcher(new CombiningMatcher( + SafeMatcherCast(matchers.first), + MatcherListTail::template CreateMatcher( + matchers.second))); + } +}; + +// The following defines the base case for the recursive definition of +// MatcherList. +template +struct MatcherList<2, Matcher1, Matcher2> { + typedef ::std::pair ListType; + + static ListType BuildList(const Matcher1& matcher1, + const Matcher2& matcher2) { + return ::std::pair(matcher1, matcher2); + } + + template class CombiningMatcher> + static Matcher CreateMatcher(const ListType& matchers) { + return Matcher(new CombiningMatcher( + SafeMatcherCast(matchers.first), + SafeMatcherCast(matchers.second))); + } +}; + +// VariadicMatcher is used for the variadic implementation of +// AllOf(m_1, m_2, ...) and AnyOf(m_1, m_2, ...). +// CombiningMatcher is used to recursively combine the provided matchers +// (of type Args...). +template