tiny-cnn -> tiny-dnn
All file names, CMake paths, and namespaces are renamed to tiny-dnn.
nyanp committed Aug 18, 2016
1 parent 792f4d4 commit 4313935
Showing 144 changed files with 1,471 additions and 1,098 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
@@ -89,4 +89,4 @@ before_script:
-DBUILD_TESTS=$BUILD_TESTS .;
fi

-script: make -j2 && ./test/tiny_cnn_test
+script: make -j2 && ./test/tiny_dnn_test
34 changes: 17 additions & 17 deletions CMakeLists.txt
@@ -8,7 +8,7 @@ cmake_minimum_required(VERSION 3.2 FATAL_ERROR)
# Set variables:
# * PROJECT_NAME
# * PROJECT_VERSION
-project(tiny_cnn VERSION 0.1.0 LANGUAGES C CXX)
+project(tiny_dnn VERSION 0.1.0 LANGUAGES C CXX)

#####
# Enables link_directories() treat paths relative
@@ -39,14 +39,14 @@ endif(NOT CMAKE_BUILD_TYPE)

####
# Define user options
-option(USE_SSE "Build tiny-cnn with SSE library support" ON)
-option(USE_AVX "Build tiny-cnn with AVX library support" ON)
-option(USE_AVX2 "Build tiny-cnn with AVX2 library support" OFF)
-option(USE_TBB "Build tiny-cnn with TBB library support" OFF)
-option(USE_OMP "Build tiny-cnn with OMP library support" OFF)
-option(USE_NNPACK "Build tiny-cnn with NNPACK library support" OFF)
-option(USE_OPENCV "Build tiny-cnn with OpenCV library support" OFF)
-option(USE_OPENCL "Build tiny-cnn with OpenCL library support" OFF)
+option(USE_SSE "Build tiny-dnn with SSE library support" ON)
+option(USE_AVX "Build tiny-dnn with AVX library support" ON)
+option(USE_AVX2 "Build tiny-dnn with AVX2 library support" OFF)
+option(USE_TBB "Build tiny-dnn with TBB library support" OFF)
+option(USE_OMP "Build tiny-dnn with OMP library support" OFF)
+option(USE_NNPACK "Build tiny-dnn with NNPACK library support" OFF)
+option(USE_OPENCV "Build tiny-dnn with OpenCV library support" OFF)
+option(USE_OPENCL "Build tiny-dnn with OpenCL library support" OFF)

option(BUILD_TESTS "Set to ON to build tests" OFF)
option(BUILD_EXAMPLES "Set to ON to build examples" OFF)
@@ -56,7 +56,7 @@ option(BUILD_DOCS "Set to ON to build documentation" OFF)
# Create the library target

set(project_library_target_name ${PROJECT_NAME})
-set(PACKAGE_NAME TinyCNN)
+set(PACKAGE_NAME TinyDNN)

add_library(${project_library_target_name} INTERFACE)

@@ -66,18 +66,18 @@ add_library(${project_library_target_name} INTERFACE)
# Using cmake scripts and modules
list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/Modules)

-# Tiny-cnn provides a couple of multithreading solutions.
+# Tiny-dnn provides a couple of multithreading solutions.
# The user can specify to use Intel Threading Building Blocks (TBB)
# or Open Multi-Processing (OpenMP) as a backend for multi threading
-# processing. In case that none of this libraries are required, tiny-cnn
+# processing. In case that none of this libraries are required, tiny-dnn
# will use the standard C++11 Thread support library.

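An editorial aside (not part of this commit): since only one threading backend can be active at a time, a quick way to confirm which one a given build selected is to key off the CNN_USE_TBB / CNN_USE_OMP compile definitions that the CMake branches below add; a minimal sketch:

```cpp
// Report the multithreading backend compiled into a tiny-dnn build.
// CNN_USE_TBB / CNN_USE_OMP are the compile definitions added by the
// CMake logic below; if neither is defined, tiny-dnn falls back to the
// standard C++11 thread support library.
#include <iostream>

int main() {
#if defined(CNN_USE_TBB)
    std::cout << "multithreading backend: Intel TBB\n";
#elif defined(CNN_USE_OMP)
    std::cout << "multithreading backend: OpenMP\n";
#else
    std::cout << "multithreading backend: C++11 std::thread\n";
#endif
    return 0;
}
```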
# Find Intel Threading Building Blocks (TBB)
find_package(TBB QUIET)
if(USE_TBB AND TBB_FOUND)
message(STATUS "Found Intel TBB: ${TBB_INCLUDE_DIR}")
# In case that TBB is found we force to disable OpenMP since
-# tiny-cnn does not support mutiple multithreading backends.
+# tiny-dnn does not support mutiple multithreading backends.
set(USE_OMP OFF)
#TODO: add definitions in configure
add_definitions(-DCNN_USE_TBB)
@@ -97,7 +97,7 @@ find_package(OpenMP QUIET)
if(USE_OMP AND OPENMP_FOUND)
message(STATUS "Found OpenMP")
# In case that OMP is found we force to disable Intel TBB since
-# tiny-cnn does not support mutiple multithreading backends.
+# tiny-dnn does not support mutiple multithreading backends.
set(USE_TBB OFF)
add_definitions(-DCNN_USE_OMP)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
@@ -216,9 +216,9 @@ set(CMAKE_CXX_FLAGS_DEBUG "${EXTRA_C_FLAGS_DEBUG}")

####
# Write the config.h
-# TODO: replace for tiny_cnn/config.h
-# configure_file(cmake/Templates/tinycnn_config.h.in
-# "${PROJECT_BINARY_DIR}/tinycnn_config.h")
+# TODO: replace for tiny_dnn/config.h
+# configure_file(cmake/Templates/tinydnn_config.h.in
+# "${PROJECT_BINARY_DIR}/tinydnn_config.h")

####
# Setup the cmake config files
6 changes: 3 additions & 3 deletions Dockerfile
@@ -37,8 +37,8 @@ RUN apt-get install ninja-build && \
python ./configure.py && \
ninja

-# Download tiny-cnn
+# Download tiny-dnn
RUN cd /software && \
-git clone https://github.com/nyanp/tiny-cnn.git && \
-cd /software/tiny-cnn && \
+git clone https://github.com/tiny-dnn/tiny-dnn.git && \
+cd /software/tiny-dnn && \
git submodule update --init
2 changes: 1 addition & 1 deletion LICENSE
@@ -26,7 +26,7 @@ modification, are permitted provided that the following conditions are met:
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

-* Neither the name of tiny-cnn nor the names of its
+* Neither the name of tiny-dnn nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

32 changes: 16 additions & 16 deletions README.md
@@ -21,16 +21,16 @@ tiny-dnn is a C++11 implementation of deep learning. It is suitable for deep lea
Check out the [documentation](doc/readme.md) for more info.

## What's New
-- 2016/8/7 tiny-cnn is now moved to organization account, and rename into tiny-dnn :)
-- 2016/7/27 [tiny-cnn v0.1.1 released!](https://github.com/tiny-dnn/tiny-dnn/releases/tag/v0.1.1)
+- 2016/8/7 tiny-dnn is now moved to organization account, and rename into tiny-dnn :)
+- 2016/7/27 [tiny-dnn v0.1.1 released!](https://github.com/tiny-dnn/tiny-dnn/releases/tag/v0.1.1)

## Features
- reasonably fast, without GPU
- with TBB threading and SSE/AVX vectorization
- 98.8% accuracy on MNIST in 13 minutes training (@Core i7-3520M)
- portable & header-only
- Run anywhere as long as you have a compiler which supports C++11
-- Just include tiny_cnn.h and write your model in C++. There is nothing to install.
+- Just include tiny_dnn.h and write your model in C++. There is nothing to install.
- easy to integrate with real applications
- no output to stdout/stderr
- a constant throughput (simple parallelization model, no garbage collection)
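An editorial illustration of that header-only workflow (not part of this diff; the toy single-layer model uses the activation-templated fully_connected_layer API that appears in the construction examples below):

```cpp
#include "tiny_dnn/tiny_dnn.h"

int main() {
    using namespace tiny_dnn;
    using namespace tiny_dnn::activation;

    // Nothing to install: include the single header and describe the model.
    network<sequential> net;
    net << fully_connected_layer<tan_h>(32 * 32, 10);

    // Forward pass on a dummy 32x32 input.
    vec_t in(32 * 32, 0.0);
    vec_t out = net.predict(in);
    return 0;
}
```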
@@ -139,13 +139,13 @@ You can edit include/config.h to customize default behavior.
construct convolutional neural networks

```cpp
-#include "tiny_cnn/tiny_cnn.h"
-using namespace tiny_cnn;
-using namespace tiny_cnn::activation;
-using namespace tiny_cnn::layers;
+#include "tiny_dnn/tiny_dnn.h"
+using namespace tiny_dnn;
+using namespace tiny_dnn::activation;
+using namespace tiny_dnn::layers;

void construct_cnn() {
-    using namespace tiny_cnn;
+    using namespace tiny_dnn;

network<sequential> net;

@@ -183,10 +183,10 @@ void construct_cnn() {
construct multi-layer perceptron(mlp)
```cpp
-#include "tiny_cnn/tiny_cnn.h"
-using namespace tiny_cnn;
-using namespace tiny_cnn::activation;
-using namespace tiny_cnn::layers;
+#include "tiny_dnn/tiny_dnn.h"
+using namespace tiny_dnn;
+using namespace tiny_dnn::activation;
+using namespace tiny_dnn::layers;
void construct_mlp() {
network<sequential> net;
@@ -202,9 +202,9 @@ void construct_mlp() {
another way to construct mlp

```cpp
-#include "tiny_cnn/tiny_cnn.h"
-using namespace tiny_cnn;
-using namespace tiny_cnn::activation;
+#include "tiny_dnn/tiny_dnn.h"
+using namespace tiny_dnn;
+using namespace tiny_dnn::activation;

void construct_mlp() {
auto mynet = make_mlp<tan_h>({ 32 * 32, 300, 10 });
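// Editorial sketch, not in the original diff: the constructed mlp can be
// exercised right away. This assumes make_mlp returns the network by value
// and that predict() takes a vec_t, matching the other README examples;
// the 32 * 32 input length matches the first layer size given above.
vec_t input(32 * 32, 0.0);
vec_t output = mynet.predict(input);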
@@ -237,4 +237,4 @@ The BSD 3-Clause License
## Mailing list
google group for questions and discussions:
-https://groups.google.com/forum/#!forum/tiny-cnn-users
+https://groups.google.com/forum/#!forum/tiny-dnn-users
4 changes: 2 additions & 2 deletions appveyor.yml
@@ -28,7 +28,7 @@ before_build:

after_build:
- cmd: 'cd C:\projects\build\test\Release'
-- cmd: tiny_cnn_test.exe
+- cmd: tiny_dnn_test.exe

build:
-project: C:\projects\build\tiny_cnn.sln
+project: C:\projects\build\tiny_dnn.sln
@@ -1,4 +1,4 @@
-set(PACKAGE_VERSION "@TINYCNN_VERSION@")
+set(PACKAGE_VERSION "@TINYDNN_VERSION@")

# Check whether the requested PACKAGE_FIND_VERSION is compatible
if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}")
Expand Down
@@ -1,67 +1,67 @@
-# FindTinyCNN
+# FindTinyDNN
# -----------
#
-# Find TinyCNN include dirs and libraries
+# Find TinyDNN include dirs and libraries
#
# Use this module by invoking find_package with the form:
#
-# find_package(TinyCNN
+# find_package(TinyDNN
# [version] [EXACT] # Minimum or EXACT version e.g. 0.1.0
-# [REQUIRED] # Fail with error if TinyCNN is not found
+# [REQUIRED] # Fail with error if TinyDNN is not found
# )
#
# This module finds headers and requested component libraries OR a CMake
-# package configuration file provided by a "TinyCNN CMake" build. For the
-# latter case skip to the "TinyCNN CMake" section below. For the former
+# package configuration file provided by a "TinyDNN CMake" build. For the
+# latter case skip to the "TinyDNN CMake" section below. For the former
# case results are reported in variables::
#
-# TinyCNN_FOUND - True if headers and requested libraries were found
-# TinyCNN_INCLUDE_DIRS - TinyCNN include directories
-# TinyCNN_LIBRARY_DIRS - Link directories for TinyCNN libraries
-# TinyCNN_LIBRARIES - TinyCNN third-party libraries to be linked
-# TinyCNN_VERSION - Version string appended to library filenames
-# TinyCNN_MAJOR_VERSION - TinyCNN major version number (X in X.y.z)
-# TinyCNN_MINOR_VERSION - TinyCNN minor version number (Y in x.Y.z)
-# TinyCNN_SUBMINOR_VERSION - TinyCNN subminor version number (Z in x.y.Z)
+# TinyDNN_FOUND - True if headers and requested libraries were found
+# TinyDNN_INCLUDE_DIRS - TinyDNN include directories
+# TinyDNN_LIBRARY_DIRS - Link directories for TinyDNN libraries
+# TinyDNN_LIBRARIES - TinyDNN third-party libraries to be linked
+# TinyDNN_VERSION - Version string appended to library filenames
+# TinyDNN_MAJOR_VERSION - TinyDNN major version number (X in X.y.z)
+# TinyDNN_MINOR_VERSION - TinyDNN minor version number (Y in x.Y.z)
+# TinyDNN_SUBMINOR_VERSION - TinyDNN subminor version number (Z in x.y.Z)
#
# The following :prop_tgt:`IMPORTED` targets are also defined::
#
-# TinyCNN::tiny_cnn - Target for header-only dependencies
-# (TinyCNN include directory)
+# TinyDNN::tiny_cnn - Target for header-only dependencies
+# (TinyDNN include directory)
#
-# TinyCNN comes in many variants encoded in their file name.
+# TinyDNN comes in many variants encoded in their file name.
# Users or projects may tell this module which variant to find by
# setting variables::
#
-# TinyCNN_USE_TBB - Set to ON to use the Intel Threading Building
+# TinyDNN_USE_TBB - Set to ON to use the Intel Threading Building
# Blocks (TBB) libraries. Default is OFF.
-# TinyCNN_USE_OMP - Set to ON to use of the Open Multi-Processing
+# TinyDNN_USE_OMP - Set to ON to use of the Open Multi-Processing
# (OpenMP) libraries. Default is OFF.
-# TinyCNN_USE_SSE - Set to OFF to use the Streaming SIMD Extension
+# TinyDNN_USE_SSE - Set to OFF to use the Streaming SIMD Extension
# (SSE) instructions libraries. Default is ON.
-# TinyCNN_USE_AVX - Set to OFF to use the Advanced Vector Extensions
+# TinyDNN_USE_AVX - Set to OFF to use the Advanced Vector Extensions
# (AVX) libraries). Default is ON.
-# TinyCNN_USE_AVX2 - Set to ON to use the Advanced Vector Extensions 2
+# TinyDNN_USE_AVX2 - Set to ON to use the Advanced Vector Extensions 2
# (AVX2) libraries). Default is OFF.
-# TinyCNN_USE_NNPACK - Set to ON to use the Acceleration package
+# TinyDNN_USE_NNPACK - Set to ON to use the Acceleration package
# for neural networks on multi-core CPUs.
#
-# Example to find TinyCNN headers only::
+# Example to find TinyDNN headers only::
#
-# find_package(TinyCNN 0.1.0)
-# if(TinyCNN_FOUND)
+# find_package(TinyDNN 0.1.0)
+# if(TinyDNN_FOUND)
# add_executable(foo foo.cc)
-# target_link_libraries(foo TinyCNN::tiny_cnn)
+# target_link_libraries(foo TinyDNN::tiny_cnn)
# endif()
#
-# Example to find TinyCNN headers and some *static* libraries::
+# Example to find TinyDNN headers and some *static* libraries::
#
-# set(TinyCNN_USE_TBB ON) # only find static libs
+# set(TinyDNN_USE_TBB ON) # only find static libs
# set(TInyCNN_USE_AVX2 ON)
-# find_package(TinyCNN 0.1.0)
-# if(TinyCNN_FOUND)
+# find_package(TinyDNN 0.1.0)
+# if(TinyDNN_FOUND)
# add_executable(foo foo.cc)
-# target_link_libraries(foo TinyCNN::tiny_cnn ${TinyCNN_LIBRARIES})
+# target_link_libraries(foo TinyDNN::tiny_cnn ${TinyDNN_LIBRARIES})
# endif()
#
##################################################################
@@ -100,7 +100,7 @@ find_package(TBB QUIET)
if(@PACKAGE_NAME@_USE_TBB AND TBB_FOUND)
message(STATUS "Found Intel TBB: ${TBB_INCLUDE_DIR}")
# In case that TBB is found we force to disable OpenMP since
-# tiny-cnn does not support mutiple multithreading backends.
+# tiny-dnn does not support mutiple multithreading backends.
set(@PACKAGE_NAME@_USE_OMP OFF)
#TODO: add definitions in configure
add_definitions(-DCNN_USE_TBB)
@@ -120,7 +120,7 @@ find_package(OpenMP QUIET)
if(@PACKAGE_NAME@_USE_OMP AND OPENMP_FOUND)
message(STATUS "Found OpenMP")
# In case that OMP is found we force to disable Intel TBB since
-# tiny-cnn does not support mutiple multithreading backends.
+# tiny-dnn does not support mutiple multithreading backends.
set(@PACKAGE_NAME@_USE_TBB OFF)
add_definitions(-DCNN_USE_OMP)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
@@ -1,5 +1,5 @@
-#ifndef TINYCNN_CONFIG_HPP_
-#define TINYCNN_CONFIG_HPP_
+#ifndef TINYDNN_CONFIG_HPP_
+#define TINYDNN_CONFIG_HPP_

/* Define if you want to use intel TBB library */
#cmakedefine CNN_USE_TBB
@@ -16,4 +16,4 @@
/* Define to enable avx vectorization */
#cmakedefine CNN_USE_AVX

-#endif // TINYCNN_CONFIG_HPP_
+#endif // TINYDNN_CONFIG_HPP_
10 changes: 5 additions & 5 deletions examples/CMakeLists.txt
@@ -44,7 +44,7 @@ target_link_libraries(example_cifar_train

find_package(Protobuf QUIET)
if(PROTOBUF_FOUND)
-set(proto_file "${CMAKE_CURRENT_SOURCE_DIR}/tiny_cnn/io/caffe/caffe.pb.cc")
+set(proto_file "${CMAKE_CURRENT_SOURCE_DIR}/tiny_dnn/io/caffe/caffe.pb.cc")
if(EXISTS ${proto_file})
add_executable(example_caffe_converter
caffe_converter/caffe_converter.cpp ${proto_file})
@@ -55,12 +55,12 @@ if(PROTOBUF_FOUND)
# and should be installed separately as in: sudo apt-get install protobuf-compiler
elseif(EXISTS ${PROTOBUF_PROTOC_EXECUTABLE})
message(STATUS "Found PROTOBUF Compiler: ${PROTOBUF_PROTOC_EXECUTABLE}")
-if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/tiny_cnn/io/caffe/caffe.proto)
+if(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/tiny_dnn/io/caffe/caffe.proto)
PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS
-${CMAKE_CURRENT_SOURCE_DIR}/tiny_cnn/io/caffe/caffe.proto)
+${CMAKE_CURRENT_SOURCE_DIR}/tiny_dnn/io/caffe/caffe.proto)
if(EXISTS ${PROTO_HDRS})
-file(COPY ${PROTO_SRCS} DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}/tiny_cnn/io/caffe")
-file(COPY ${PROTO_HDRS} DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}/tiny_cnn/io/caffe")
+file(COPY ${PROTO_SRCS} DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}/tiny_dnn/io/caffe")
+file(COPY ${PROTO_HDRS} DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}/tiny_dnn/io/caffe")
endif()
add_executable(example_caffe_converter
caffe_converter/caffe_converter.cpp
4 changes: 2 additions & 2 deletions examples/benchmarks/main.cpp
@@ -28,9 +28,9 @@

#include <iostream>

-#include "tiny_cnn/tiny_cnn.h"
+#include "tiny_dnn/tiny_dnn.h"

-using namespace tiny_cnn;
+using namespace tiny_dnn;
using namespace std;

int main(int argc, char** argv) {
6 changes: 3 additions & 3 deletions examples/caffe_converter/readme.md
@@ -1,5 +1,5 @@
-# Import Caffe Model to tiny-cnn
-tiny-cnn can import Caffe's trained models.
+# Import Caffe Model to tiny-dnn
+tiny-dnn can import Caffe's trained models.

## Prerequisites for this example
- Google protobuf
@@ -31,4 +31,4 @@ In the [pre-trained CaffeNet](https://github.com/BVLC/caffe/tree/master/examples
```

## Restrictions
-- tiny-cnn's converter only supports single input/single output network without branch.
+- tiny-dnn's converter only supports single input/single output network without branch.
… (the remaining changed files are not shown)
