[caffe-contrib] 52/362: Imported Upstream version 0.9999~rc2+git20150730+7f7085439c

Zhou Mo <cdluminate-guest@moszumanska.debian.org>
Tue May 3 09:24:13 UTC 2016


This is an automated email from the git hooks/post-receive script.

cdluminate-guest pushed a commit to branch master
in repository caffe-contrib.

commit e33f69724a99d42b3691f12803a72ac31d1e7954
Author: Zhou Mo <cdluminate@gmail.com>
Date:   Thu Jul 30 01:35:08 2015 +0000

    Imported Upstream version 0.9999~rc2+git20150730+7f7085439c
---
 .travis.yml                                    | 13 +++++--
 CMakeLists.txt                                 |  4 ++
 Makefile                                       |  4 +-
 cmake/Dependencies.cmake                       |  9 +++--
 docs/install_apt.md                            |  8 ++--
 docs/install_yum.md                            |  4 +-
 docs/installation.md                           |  2 +-
 docs/tutorial/interfaces.md                    | 14 +++----
 docs/tutorial/layers.md                        |  2 +-
 docs/tutorial/net_layer_blob.md                |  2 +-
 examples/cifar10/readme.md                     |  3 +-
 examples/feature_extraction/readme.md          |  2 +-
 examples/imagenet/readme.md                    |  2 +-
 include/caffe/layer.hpp                        |  2 +-
 include/caffe/loss_layers.hpp                  |  8 ++--
 python/caffe/test/test_net.py                  |  7 ++--
 python/caffe/test/test_net_spec.py             |  2 +-
 python/caffe/test/test_python_layer.py         |  5 ++-
 python/caffe/test/test_solver.py               |  7 ++--
 scripts/travis/travis_build_and_test.sh        | 11 +++++-
 scripts/travis/travis_install.sh               | 52 +++++++++++++++++++-------
 scripts/travis/travis_setup_makefile_config.sh |  4 +-
 src/caffe/layer_factory.cpp                    |  5 +++
 src/caffe/layers/absval_layer.cu               |  1 -
 24 files changed, 114 insertions(+), 59 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 955aa8c..b920a93 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -6,24 +6,31 @@ env:
     - WITH_CUDA=false WITH_CMAKE=true
     - WITH_CUDA=true WITH_CMAKE=false
     - WITH_CUDA=true WITH_CMAKE=true
+    - WITH_CUDA=false WITH_CMAKE=true PYTHON_VERSION=3
 
 language: cpp
 
 # Cache Ubuntu apt packages.
-cache: apt
+cache:
+  apt: true
+  directories:
+  - /home/travis/miniconda
+  - /home/travis/miniconda2
+  - /home/travis/miniconda3
 
 compiler: gcc
 
 before_install:
   - export NUM_THREADS=4
   - export SCRIPTS=./scripts/travis
+  - export CONDA_DIR="/home/travis/miniconda$PYTHON_VERSION"
 
 install:
   - sudo -E $SCRIPTS/travis_install.sh
 
 before_script:
-  - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib:/usr/local/cuda/lib64
-  - export PATH=/home/travis/miniconda/bin:$PATH
+  - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib:/usr/local/cuda/lib64:$CONDA_DIR/lib
+  - export PATH=$CONDA_DIR/bin:$PATH
   - if ! $WITH_CMAKE; then $SCRIPTS/travis_setup_makefile_config.sh; fi
 
 script: $SCRIPTS/travis_build_and_test.sh
diff --git a/CMakeLists.txt b/CMakeLists.txt
index e202350..ef599b6 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -62,6 +62,10 @@ add_subdirectory(docs)
 # ---[ Linter target
 add_custom_target(lint COMMAND ${CMAKE_COMMAND} -P ${PROJECT_SOURCE_DIR}/cmake/lint.cmake)
 
+# ---[ pytest target
+add_custom_target(pytest COMMAND python${python_version} -m unittest discover -s caffe/test WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/python)
+add_dependencies(pytest pycaffe)
+
 # ---[ Configuration summary
 caffe_print_configuration_summary()
 
diff --git a/Makefile b/Makefile
index e4e66df..05b783a 100644
--- a/Makefile
+++ b/Makefile
@@ -228,7 +228,7 @@ ifeq ($(LINUX), 1)
 	CXX ?= /usr/bin/g++
 	GCCVERSION := $(shell $(CXX) -dumpversion | cut -f1,2 -d.)
 	# older versions of gcc are too dumb to build boost with -Wuninitialized
-	ifeq ($(shell echo $(GCCVERSION) \< 4.6 | bc), 1)
+	ifeq ($(shell echo | awk '{exit $(GCCVERSION) < 4.6;}'), 1)
 		WARNINGS += -Wno-uninitialized
 	endif
 	# boost::thread is reasonably called boost_thread (compare OS X)
@@ -243,7 +243,7 @@ ifeq ($(OSX), 1)
 	CXX := /usr/bin/clang++
 	ifneq ($(CPU_ONLY), 1)
 		CUDA_VERSION := $(shell $(CUDA_DIR)/bin/nvcc -V | grep -o 'release \d' | grep -o '\d')
-		ifeq ($(shell echo $(CUDA_VERSION) \< 7.0 | bc), 1)
+		ifeq ($(shell echo | awk '{exit $(CUDA_VERSION) < 7.0;}'), 1)
 			CXXFLAGS += -stdlib=libstdc++
 			LINKFLAGS += -stdlib=libstdc++
 		endif
diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
index 7cae5c9..7c86dd5 100644
--- a/cmake/Dependencies.cmake
+++ b/cmake/Dependencies.cmake
@@ -106,14 +106,15 @@ if(BUILD_python)
     
     while(NOT "${version}" STREQUAL "" AND NOT Boost_PYTHON_FOUND)
       STRING( REGEX REPLACE "([0-9.]+).[0-9]+" "\\1" version ${version} )
-      STRING( REGEX MATCHALL "([0-9.]+).[0-9]+" has_more_version ${version} )
-      if("${has_more_version}" STREQUAL "")
-        break()
-      endif()
       
       STRING( REPLACE "." "" boost_py_version ${version} )
       find_package(Boost 1.46 COMPONENTS "python-py${boost_py_version}")
       set(Boost_PYTHON_FOUND ${Boost_PYTHON-PY${boost_py_version}_FOUND})
+      
+      STRING( REGEX MATCHALL "([0-9.]+).[0-9]+" has_more_version ${version} )
+      if("${has_more_version}" STREQUAL "")
+        break()
+      endif()
     endwhile()
     if(NOT Boost_PYTHON_FOUND)
       find_package(Boost 1.46 COMPONENTS python)
diff --git a/docs/install_apt.md b/docs/install_apt.md
index 0fa205a..2976e3c 100644
--- a/docs/install_apt.md
+++ b/docs/install_apt.md
@@ -6,7 +6,7 @@ title: Installation: Ubuntu
 
 **General dependencies**
 
-    sudo apt-get install libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libhdf5-serial-dev
+    sudo apt-get install libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libhdf5-serial-dev protobuf-compiler
     sudo apt-get install --no-install-recommends libboost-all-dev
 
 **CUDA**: Install via the NVIDIA package instead of `apt-get` to be certain of the library and driver versions.
@@ -21,7 +21,7 @@ This can be skipped for CPU-only installation.
 
 Everything is packaged in 14.04.
 
-    sudo apt-get install libgflags-dev libgoogle-glog-dev liblmdb-dev protobuf-compiler
+    sudo apt-get install libgflags-dev libgoogle-glog-dev liblmdb-dev
 
 **Remaining dependencies, 12.04**
 
@@ -41,8 +41,8 @@ These dependencies need manual installation in 12.04.
     export CXXFLAGS="-fPIC" && cmake .. && make VERBOSE=1
     make && make install
     # lmdb
-    git clone https://gitorious.org/mdb/mdb.git
-    cd mdb/libraries/liblmdb
+    git clone https://github.com/LMDB/lmdb
+    cd lmdb/libraries/liblmdb
     make && make install
 
 Note that glog does not compile with the most recent gflags version (2.1), so until that is resolved you will need to build glog first.
diff --git a/docs/install_yum.md b/docs/install_yum.md
index 478e7d9..2104912 100644
--- a/docs/install_yum.md
+++ b/docs/install_yum.md
@@ -28,8 +28,8 @@ title: Installation: RHEL / Fedora / CentOS
     export CXXFLAGS="-fPIC" && cmake .. && make VERBOSE=1
     make && make install
     # lmdb
-    git clone git://gitorious.org/mdb/mdb.git
-    cd mdb/libraries/liblmdb
+    git clone https://github.com/LMDB/lmdb
+    cd lmdb/libraries/liblmdb
     make && make install
 
 Note that glog does not compile with the most recent gflags version (2.1), so until that is resolved you will need to build glog first.
diff --git a/docs/installation.md b/docs/installation.md
index 144e6a3..d535c6d 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -75,7 +75,7 @@ To import the `caffe` Python module after completing the installation, add the m
 
 Install MATLAB, and make sure that its `mex` is in your `$PATH`.
 
-*Caffe's MATLAB interface works with versions 2014a/b, 2013a/b, and 2012b.*
+*Caffe's MATLAB interface works with versions 2015a, 2014a/b, 2013a/b, and 2012b.*
 
 #### Windows
 
diff --git a/docs/tutorial/interfaces.md b/docs/tutorial/interfaces.md
index 1296331..4060294 100644
--- a/docs/tutorial/interfaces.md
+++ b/docs/tutorial/interfaces.md
@@ -11,8 +11,8 @@ The command line interface -- cmdcaffe -- is the `caffe` tool for model training
 
 **Training**: `caffe train` learns models from scratch, resumes learning from saved snapshots, and fine-tunes models to new data and tasks:
 
-* All training requires a solver configuration through the `-solver solver.prototxt` argument. 
-* Resuming requires the `-snapshot model_iter_1000.solverstate` argument to load the solver snapshot. 
+* All training requires a solver configuration through the `-solver solver.prototxt` argument.
+* Resuming requires the `-snapshot model_iter_1000.solverstate` argument to load the solver snapshot.
 * Fine-tuning requires the `-weights model.caffemodel` argument for the model initialization.
 
 For example, you can run:
@@ -31,8 +31,7 @@ For a full example of fine-tuning, see examples/finetuning_on_flickr_style, but
 
 **Testing**: `caffe test` scores models by running them in the test phase and reports the net output as its score. The net architecture must be properly defined to output an accuracy measure or loss. Each per-batch score is reported, and the grand average is reported last.
 
-    #
-    # score the learned LeNet model on the validation set as defined in the 
+    # score the learned LeNet model on the validation set as defined in the
     # model architecture lenet_train_test.prototxt
     caffe test -model examples/mnist/lenet_train_test.prototxt -weights examples/mnist/lenet_iter_10000.caffemodel -gpu 0 -iterations 100
 
@@ -63,7 +62,8 @@ The Python interface -- pycaffe -- is the `caffe` module and its scripts in caff
 
 Tutorial IPython notebooks are found in caffe/examples: do `ipython notebook caffe/examples` to try them. For developer reference, docstrings can be found throughout the code.
 
-Compile pycaffe by `make pycaffe`. The module dir caffe/python/caffe should be installed in your PYTHONPATH for `import caffe`.
+Compile pycaffe by `make pycaffe`.
+Add the module directory to your `$PYTHONPATH` by `export PYTHONPATH=/path/to/caffe/python:$PYTHONPATH` or the like for `import caffe`.
 
 ## MATLAB
 
@@ -182,7 +182,7 @@ To get a layer's type (string):
 #### Forward and backward
 
 Forward pass can be done using `net.forward` or `net.forward_prefilled`. Function `net.forward` takes in a cell array of N-D arrays containing data of input blob(s) and outputs a cell array containing data from output blob(s). Function `net.forward_prefilled` uses existing data in input blob(s) during forward pass, takes no input and produces no output. After creating some data for input blobs like `data = rand(net.blobs('data').shape);` you can run
-    
+
     res = net.forward({data});
     prob = res{1};
 
@@ -202,7 +202,7 @@ Or
     net.blobs('prob').set_diff(prob_diff);
     net.backward_prefilled();
     data_diff = net.blobs('data').get_diff();
-    
+
 **However, the backward computation above doesn't get correct results, because Caffe decides that the network does not need backward computation. To get correct backward results, you need to set `'force_backward: true'` in your network prototxt.**
 
 After performing forward or backward pass, you can also get the data or diff in internal blobs. For example, to extract pool5 features after forward pass:
diff --git a/docs/tutorial/layers.md b/docs/tutorial/layers.md
index 806374e..eabc792 100644
--- a/docs/tutorial/layers.md
+++ b/docs/tutorial/layers.md
@@ -213,7 +213,7 @@ Given an input value x, The `ReLU` layer computes the output as x if x > 0 and n
 * Layer type: `Sigmoid`
 * CPU implementation: `./src/caffe/layers/sigmoid_layer.cpp`
 * CUDA GPU implementation: `./src/caffe/layers/sigmoid_layer.cu`
-* Sample (as seen in `./examples/imagenet/mnist_autoencoder.prototxt`)
+* Sample (as seen in `./examples/mnist/mnist_autoencoder.prototxt`)
 
       layer {
         name: "encode1neuron"
diff --git a/docs/tutorial/net_layer_blob.md b/docs/tutorial/net_layer_blob.md
index e8b7bd3..d6df737 100644
--- a/docs/tutorial/net_layer_blob.md
+++ b/docs/tutorial/net_layer_blob.md
@@ -19,7 +19,7 @@ Blobs conceal the computational and mental overhead of mixed CPU/GPU operation b
 
 The conventional blob dimensions for batches of image data are number N x channel K x height H x width W. Blob memory is row-major in layout, so the last / rightmost dimension changes fastest. For example, in a 4D blob, the value at index (n, k, h, w) is physically located at index ((n * K + k) * H + h) * W + w.
 
-- Number / N is the batch size of the data. Batch processing achieves better throughput for communication and device processing. For an ImageNet training batch of 256 images B = 256.
+- Number / N is the batch size of the data. Batch processing achieves better throughput for communication and device processing. For an ImageNet training batch of 256 images N = 256.
 - Channel / K is the feature dimension e.g. for RGB images K = 3.
 
 Note that although many blobs in Caffe examples are 4D with axes for image applications, it is totally valid to use blobs for non-image applications. For example, if you simply need fully-connected networks like the conventional multi-layer perceptron, use 2D blobs (shape (N, D)) and call the InnerProductLayer (which we will cover soon).
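
As a quick sanity check of the row-major index formula above, here is a minimal NumPy sketch (illustrative only; the blob shape and indices are made-up values):

    import numpy as np

    # Hypothetical blob shape N x K x H x W, filled with 0..N*K*H*W-1 in C order,
    # which matches the row-major layout described above.
    N, K, H, W = 2, 3, 4, 5
    blob = np.arange(N * K * H * W).reshape(N, K, H, W)

    # The doc's formula: the value at (n, k, h, w) lives at flat index
    # ((n * K + k) * H + h) * W + w.
    n, k, h, w = 1, 2, 3, 4
    flat_index = ((n * K + k) * H + h) * W + w
    assert blob[n, k, h, w] == blob.ravel()[flat_index]
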
diff --git a/examples/cifar10/readme.md b/examples/cifar10/readme.md
index 4a95cee..5d8d81e 100644
--- a/examples/cifar10/readme.md
+++ b/examples/cifar10/readme.md
@@ -22,9 +22,8 @@ Prepare the Dataset
 
 You will first need to download and convert the data format from the [CIFAR-10 website](http://www.cs.toronto.edu/~kriz/cifar.html). To do this, simply run the following commands:
 
-    cd $CAFFE_ROOT/data/cifar10
-    ./get_cifar10.sh
     cd $CAFFE_ROOT
+    ./data/cifar10/get_cifar10.sh
     ./examples/cifar10/create_cifar10.sh
 
 If it complains that `wget` or `gunzip` is not installed, you need to install it. After running the script there should be the dataset, `./cifar10-leveldb`, and the dataset image mean `./mean.binaryproto`.
diff --git a/examples/feature_extraction/readme.md b/examples/feature_extraction/readme.md
index 6c8917e..a980b8b 100644
--- a/examples/feature_extraction/readme.md
+++ b/examples/feature_extraction/readme.md
@@ -10,7 +10,7 @@ Extracting Features
 ===================
 
 In this tutorial, we will extract features using a pre-trained model with the included C++ utility.
-Note that we recommend using the Python interface for this task, as for example in the [filter visualization example](http://nbviewer.ipython.org/github/BVLC/caffe/blob/master/examples/filter_visualization.ipynb).
+Note that we recommend using the Python interface for this task, as for example in the [filter visualization example](http://nbviewer.ipython.org/github/BVLC/caffe/blob/master/examples/00-classification.ipynb).
 
 Follow instructions for [installing Caffe](../../installation.html) and run `scripts/download_model_binary.py models/bvlc_reference_caffenet` from caffe root directory.
 If you need detailed information about the tools below, please consult their source code, in which additional documentation is usually provided.
diff --git a/examples/imagenet/readme.md b/examples/imagenet/readme.md
index b1ebfaf..65174d6 100644
--- a/examples/imagenet/readme.md
+++ b/examples/imagenet/readme.md
@@ -102,4 +102,4 @@ Hope you liked this recipe!
 Many researchers have gone further since the ILSVRC 2012 challenge, changing the network architecture and/or fine-tuning the various parameters in the network to address new data and tasks.
 **Caffe lets you explore different network choices more easily by simply writing different prototxt files** - isn't that exciting?
 
-And since now you have a trained network, check out how to use it with the Python interface for [classifying ImageNet](http://nbviewer.ipython.org/github/BVLC/caffe/blob/master/examples/classification.ipynb).
+And since now you have a trained network, check out how to use it with the Python interface for [classifying ImageNet](http://nbviewer.ipython.org/github/BVLC/caffe/blob/master/examples/00-classification.ipynb).
diff --git a/include/caffe/layer.hpp b/include/caffe/layer.hpp
index 8f924a7..e2eba19 100644
--- a/include/caffe/layer.hpp
+++ b/include/caffe/layer.hpp
@@ -139,7 +139,7 @@ class Layer {
    * (Backward_cpu or Backward_gpu) to compute the bottom blob diffs given the
    * top blob diffs.
    *
-   * Your layer should implement Forward_cpu and (optionally) Forward_gpu.
+   * Your layer should implement Backward_cpu and (optionally) Backward_gpu.
    */
   inline void Backward(const vector<Blob<Dtype>*>& top,
       const vector<bool>& propagate_down,
diff --git a/include/caffe/loss_layers.hpp b/include/caffe/loss_layers.hpp
index 86c3424..5282663 100644
--- a/include/caffe/loss_layers.hpp
+++ b/include/caffe/loss_layers.hpp
@@ -128,9 +128,9 @@ class LossLayer : public Layer<Dtype> {
 /**
  * @brief Computes the contrastive loss @f$
  *          E = \frac{1}{2N} \sum\limits_{n=1}^N \left(y\right) d +
- *              \left(1-y\right) \max \left(margin-d, 0\right)
+ *              \left(1-y\right) \max \left(margin-d, 0\right)^2
  *          @f$ where @f$
- *          d = \left| \left| a_n - b_n \right| \right|_2^2 @f$. This can be
+ *          d = \left| \left| a_n - b_n \right| \right|_2 @f$. This can be
  *          used to train siamese networks.
  *
  * @param bottom input Blob vector (length 3)
@@ -144,9 +144,9 @@ class LossLayer : public Layer<Dtype> {
  *   -# @f$ (1 \times 1 \times 1 \times 1) @f$
  *      the computed contrastive loss: @f$ E =
  *          \frac{1}{2N} \sum\limits_{n=1}^N \left(y\right) d +
- *          \left(1-y\right) \max \left(margin-d, 0\right)
+ *          \left(1-y\right) \max \left(margin-d, 0\right)^2
  *          @f$ where @f$
- *          d = \left| \left| a_n - b_n \right| \right|_2^2 @f$.
+ *          d = \left| \left| a_n - b_n \right| \right|_2 @f$.
  * This can be used to train siamese networks.
  */
 template <typename Dtype>
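
For reference, a minimal NumPy sketch of the loss exactly as the corrected comment above states it (illustrative only; this mirrors the documented formula, not Caffe's actual layer implementation):

    import numpy as np

    def contrastive_loss(a, b, y, margin=1.0):
        # E = 1/(2N) * sum_n [ y_n * d_n + (1 - y_n) * max(margin - d_n, 0)^2 ]
        # with d_n = ||a_n - b_n||_2 (plain Euclidean distance, not squared).
        # a, b: (N, D) float arrays; y: (N,) array of 1 (similar) / 0 (dissimilar).
        d = np.linalg.norm(a - b, axis=1)
        per_pair = y * d + (1 - y) * np.maximum(margin - d, 0) ** 2
        return per_pair.sum() / (2.0 * a.shape[0])
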
diff --git a/python/caffe/test/test_net.py b/python/caffe/test/test_net.py
index cc36747..aad828a 100644
--- a/python/caffe/test/test_net.py
+++ b/python/caffe/test/test_net.py
@@ -2,6 +2,7 @@ import unittest
 import tempfile
 import os
 import numpy as np
+import six
 
 import caffe
 
@@ -10,7 +11,7 @@ def simple_net_file(num_output):
     """Make a simple net prototxt, based on test_net.cpp, returning the name
     of the (temporary) file."""
 
-    f = tempfile.NamedTemporaryFile(delete=False)
+    f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
     f.write("""name: 'testnet' force_backward: true
     layer { type: 'DummyData' name: 'data' top: 'data' top: 'label'
       dummy_data_param { num: 5 channels: 2 height: 3 width: 4
@@ -47,7 +48,7 @@ class TestNet(unittest.TestCase):
     def test_memory(self):
         """Check that holding onto blob data beyond the life of a Net is OK"""
 
-        params = sum(map(list, self.net.params.itervalues()), [])
+        params = sum(map(list, six.itervalues(self.net.params)), [])
         blobs = self.net.blobs.values()
         del self.net
 
@@ -67,7 +68,7 @@ class TestNet(unittest.TestCase):
         self.assertEqual(self.net.outputs, ['loss'])
 
     def test_save_and_read(self):
-        f = tempfile.NamedTemporaryFile(delete=False)
+        f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
         f.close()
         self.net.save(f.name)
         net_file = simple_net_file(self.num_output)
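
The two changes that recur throughout these tests are both Python 3 fixes; a minimal standalone sketch of the behavior they address (illustrative, with made-up data):

    import tempfile
    import six

    # mode='w+' opens the file in text mode, so f.write(str) works on both
    # Python 2 and 3; with the default mode 'w+b', Python 3 raises TypeError
    # when given a str.
    with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
        f.write("name: 'testnet'")

    # dict.itervalues() is gone in Python 3; six.itervalues maps to
    # itervalues() on Python 2 and to an iterator over values() on Python 3.
    blobs = {'data': [5, 2, 3, 4], 'loss': [1]}
    assert sum(len(v) for v in six.itervalues(blobs)) == 5
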
diff --git a/python/caffe/test/test_net_spec.py b/python/caffe/test/test_net_spec.py
index 65b73b9..909a101 100644
--- a/python/caffe/test/test_net_spec.py
+++ b/python/caffe/test/test_net_spec.py
@@ -43,7 +43,7 @@ def anon_lenet(batch_size):
 
 class TestNetSpec(unittest.TestCase):
     def load_net(self, net_proto):
-        f = tempfile.NamedTemporaryFile(delete=False)
+        f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
         f.write(str(net_proto))
         f.close()
         return caffe.Net(f.name, caffe.TEST)
diff --git a/python/caffe/test/test_python_layer.py b/python/caffe/test/test_python_layer.py
index 6fba491..f41e283 100644
--- a/python/caffe/test/test_python_layer.py
+++ b/python/caffe/test/test_python_layer.py
@@ -1,6 +1,7 @@
 import unittest
 import tempfile
 import os
+import six
 
 import caffe
 
@@ -22,7 +23,7 @@ class SimpleLayer(caffe.Layer):
 
 
 def python_net_file():
-    with tempfile.NamedTemporaryFile(delete=False) as f:
+    with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
         f.write("""name: 'pythonnet' force_backward: true
         input: 'data' input_shape { dim: 10 dim: 9 dim: 8 }
         layer { type: 'Python' name: 'one' bottom: 'data' top: 'one'
@@ -58,6 +59,6 @@ class TestPythonLayer(unittest.TestCase):
         s = 4
         self.net.blobs['data'].reshape(s, s, s, s)
         self.net.forward()
-        for blob in self.net.blobs.itervalues():
+        for blob in six.itervalues(self.net.blobs):
             for d in blob.data.shape:
                 self.assertEqual(s, d)
diff --git a/python/caffe/test/test_solver.py b/python/caffe/test/test_solver.py
index 09b974d..9cfc10d 100644
--- a/python/caffe/test/test_solver.py
+++ b/python/caffe/test/test_solver.py
@@ -2,6 +2,7 @@ import unittest
 import tempfile
 import os
 import numpy as np
+import six
 
 import caffe
 from test_net import simple_net_file
@@ -11,7 +12,7 @@ class TestSolver(unittest.TestCase):
     def setUp(self):
         self.num_output = 13
         net_f = simple_net_file(self.num_output)
-        f = tempfile.NamedTemporaryFile(delete=False)
+        f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
         f.write("""net: '""" + net_f + """'
         test_iter: 10 test_interval: 10 base_lr: 0.01 momentum: 0.9
         weight_decay: 0.0005 lr_policy: 'inv' gamma: 0.0001 power: 0.75
@@ -45,8 +46,8 @@ class TestSolver(unittest.TestCase):
 
         total = 0
         for net in nets:
-            for ps in net.params.itervalues():
+            for ps in six.itervalues(net.params):
                 for p in ps:
                     total += p.data.sum() + p.diff.sum()
-            for bl in net.blobs.itervalues():
+            for bl in six.itervalues(net.blobs):
                 total += bl.data.sum() + bl.diff.sum()
diff --git a/scripts/travis/travis_build_and_test.sh b/scripts/travis/travis_build_and_test.sh
index 8ff63f3..9ba737e 100755
--- a/scripts/travis/travis_build_and_test.sh
+++ b/scripts/travis/travis_build_and_test.sh
@@ -7,8 +7,17 @@ MAKE="make --jobs=$NUM_THREADS --keep-going"
 if $WITH_CMAKE; then
   mkdir build
   cd build
-  cmake -DBUILD_python=ON -DCMAKE_BUILD_TYPE=Release -DCPU_ONLY=ON ..
+  CPU_ONLY=" -DCPU_ONLY=ON"
+  if ! $WITH_CUDA; then
+    CPU_ONLY=" -DCPU_ONLY=OFF"
+  fi
+  PYTHON_ARGS=""
+  if [ "$PYTHON_VERSION" = "3" ]; then
+    PYTHON_ARGS="$PYTHON_ARGS -Dpython_version=3 -DBOOST_LIBRARYDIR=$CONDA_DIR/lib/"
+  fi
+  cmake -DBUILD_python=ON -DCMAKE_BUILD_TYPE=Release $CPU_ONLY $PYTHON_ARGS -DCMAKE_INCLUDE_PATH="$CONDA_DIR/include/" -DCMAKE_LIBRARY_PATH="$CONDA_DIR/lib/" ..
   $MAKE
+  $MAKE pytest
   if ! $WITH_CUDA; then
     $MAKE runtest
     $MAKE lint
diff --git a/scripts/travis/travis_install.sh b/scripts/travis/travis_install.sh
index b6e6f6c..d6c6e22 100755
--- a/scripts/travis/travis_install.sh
+++ b/scripts/travis/travis_install.sh
@@ -4,7 +4,6 @@
 set -e
 
 MAKE="make --jobs=$NUM_THREADS"
-
 # Install apt packages where the Ubuntu 12.04 default and ppa works for Caffe
 
 # This ppa is for gflags and glog
@@ -12,9 +11,8 @@ add-apt-repository -y ppa:tuleu/precise-backports
 apt-get -y update
 apt-get install \
     wget git curl \
-    python-dev python-numpy \
+    python-dev python-numpy python3-dev \
     libleveldb-dev libsnappy-dev libopencv-dev \
-    libboost-dev libboost-system-dev libboost-python-dev libboost-thread-dev \
     libprotobuf-dev protobuf-compiler \
     libatlas-dev libatlas-base-dev \
     libhdf5-serial-dev libgflags-dev libgoogle-glog-dev \
@@ -24,9 +22,10 @@ apt-get install \
 # if needed.  By default, Aptitude in Ubuntu 12.04 installs CMake 2.8.7, but
 # Caffe requires a minimum CMake version of 2.8.8.
 if $WITH_CMAKE; then
-  add-apt-repository -y ppa:ubuntu-sdk-team/ppa
-  apt-get -y update
-  apt-get -y install cmake
+  # cmake 3 will make sure that the python interpreter and libraries match
+  wget http://www.cmake.org/files/v3.2/cmake-3.2.3-Linux-x86_64.sh -O cmake3.sh
+  chmod +x cmake3.sh
+  ./cmake3.sh --prefix=/usr/ --skip-license --exclude-subdir
 fi
 
 # Install CUDA, if needed
@@ -60,10 +59,37 @@ rm -f $LMDB_FILE
 
 # Install the Python runtime dependencies via miniconda (this is much faster
 # than using pip for everything).
-wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh
-chmod +x miniconda.sh
-./miniconda.sh -b
-export PATH=/home/travis/miniconda/bin:$PATH
-conda update --yes conda
-conda install --yes numpy scipy matplotlib scikit-image pip
-pip install protobuf
+export PATH=$CONDA_DIR/bin:$PATH
+if [ ! -d $CONDA_DIR ]; then
+	if [ "$PYTHON_VERSION" = "3" ]; then
+		wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh
+	else
+		wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh
+	fi
+	chmod +x miniconda.sh
+	./miniconda.sh -b -p $CONDA_DIR
+	
+	conda update --yes conda
+	conda install --yes numpy scipy matplotlib scikit-image pip
+	# Let conda install boost (so that boost_python matches)
+	conda install --yes -c https://conda.binstar.org/menpo boost=1.56.0
+fi
+
+# install protobuf 3 (just use the miniconda3 directory to avoid having to set up the path again)
+if [ "$PYTHON_VERSION" = "3" ] && [ ! -e "$CONDA_DIR/bin/protoc" ]; then
+	pushd .
+	wget https://github.com/google/protobuf/archive/v3.0.0-alpha-3.1.tar.gz -O protobuf-3.tar.gz
+	tar -C /tmp -xzvf protobuf-3.tar.gz
+	cd /tmp/protobuf-3*/
+	./autogen.sh
+	./configure --prefix=$CONDA_DIR
+	$MAKE
+	$MAKE install
+	popd
+fi
+
+if [ "$PYTHON_VERSION" -eq "3" ]; then
+	pip install --pre protobuf
+else
+	pip install protobuf
+fi
diff --git a/scripts/travis/travis_setup_makefile_config.sh b/scripts/travis/travis_setup_makefile_config.sh
index ba32626..1440be2 100755
--- a/scripts/travis/travis_setup_makefile_config.sh
+++ b/scripts/travis/travis_setup_makefile_config.sh
@@ -12,7 +12,9 @@ if $WITH_CUDA; then
 fi
 
 cat << 'EOF' >> Makefile.config
-ANACONDA_HOME := $(HOME)/miniconda
+# Travis' nvcc doesn't like newer boost versions
+NVCCFLAGS := -Xcudafe --diag_suppress=cc_clobber_ignored -Xcudafe --diag_suppress=useless_using_declaration -Xcudafe --diag_suppress=set_but_not_used
+ANACONDA_HOME := $(CONDA_DIR)
 PYTHON_INCLUDE := $(ANACONDA_HOME)/include \
 		$(ANACONDA_HOME)/include/python2.7 \
 		$(ANACONDA_HOME)/lib/python2.7/site-packages/numpy/core/include
diff --git a/src/caffe/layer_factory.cpp b/src/caffe/layer_factory.cpp
index d6a1cac..926c7d8 100644
--- a/src/caffe/layer_factory.cpp
+++ b/src/caffe/layer_factory.cpp
@@ -1,3 +1,8 @@
+// Make sure we include Python.h before any system header
+// to avoid _POSIX_C_SOURCE redefinition
+#ifdef WITH_PYTHON_LAYER
+#include <boost/python.hpp>
+#endif
 #include <string>
 
 #include "caffe/layer.hpp"
diff --git a/src/caffe/layers/absval_layer.cu b/src/caffe/layers/absval_layer.cu
index 91f3c77..bb310e1 100644
--- a/src/caffe/layers/absval_layer.cu
+++ b/src/caffe/layers/absval_layer.cu
@@ -18,7 +18,6 @@ template <typename Dtype>
 void AbsValLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
     const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
   const int count = top[0]->count();
-  const Dtype* top_data = top[0]->gpu_data();
   const Dtype* top_diff = top[0]->gpu_diff();
   if (propagate_down[0]) {
     const Dtype* bottom_data = bottom[0]->gpu_data();

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/caffe-contrib.git


