[lua-torch-torch7] 01/07: New upstream version 0~20170926-g89ede3b

Zhou Mo cdluminate-guest at moszumanska.debian.org
Wed Oct 25 12:16:13 UTC 2017


This is an automated email from the git hooks/post-receive script.

cdluminate-guest pushed a commit to branch master
in repository lua-torch-torch7.

commit 59565e0d8cbe9464a2451f459bb2dca37cd4fa0a
Author: Mo Zhou <cdluminate at gmail.com>
Date:   Wed Oct 25 11:41:50 2017 +0000

    New upstream version 0~20170926-g89ede3b
---
 lib/TH/CMakeLists.txt           |  3 +-
 lib/TH/THAllocator.c            |  7 -----
 lib/TH/THStorage.c              | 14 +++++++--
 lib/TH/generic/THTensorMath.c   | 69 +++++++++++++++++++++++++++++++----------
 lib/TH/generic/THTensorMath.h   |  2 ++
 lib/TH/generic/THTensorRandom.c | 35 ++++++++++++++++++++-
 lib/TH/generic/THTensorRandom.h |  5 +++
 lib/TH/vector/VSX.c             |  4 +--
 8 files changed, 109 insertions(+), 30 deletions(-)

diff --git a/lib/TH/CMakeLists.txt b/lib/TH/CMakeLists.txt
index c9be21a..803dfe1 100644
--- a/lib/TH/CMakeLists.txt
+++ b/lib/TH/CMakeLists.txt
@@ -154,7 +154,7 @@ IF(C_SSE3_FOUND)
   MESSAGE(STATUS "SSE3 Found")
   SET(CMAKE_C_FLAGS "${C_SSE3_FLAGS} -DUSE_SSE3 ${CMAKE_C_FLAGS}")
 ENDIF(C_SSE3_FOUND)
-# we dont set -mavx and -mavx2 flags globally, but only for specific files
+# we don't set -mavx and -mavx2 flags globally, but only for specific files
 # however, we want to enable the AVX codepaths, so we still need to
 # add USE_AVX and USE_AVX2 macro defines
 IF(C_AVX_FOUND)
@@ -473,7 +473,6 @@ IF (BLAS_FOUND AND BLAS_INSTALL_LIBRARIES)
     Install_Required_Library(${BLAS_openblas_LIBRARY})
     Install_Required_Library("${libpath}/libquadmath")
     Install_Required_Library("${libpath}/libgfortran")
-    Install_Required_Library("${libpath}/libquadmath")
     Install_Required_Library("${libpath}/libgcc")
   ENDIF()
 ENDIF()
diff --git a/lib/TH/THAllocator.c b/lib/TH/THAllocator.c
index e69b3cc..51ac69b 100644
--- a/lib/TH/THAllocator.c
+++ b/lib/TH/THAllocator.c
@@ -105,10 +105,6 @@ void THMapAllocatorContext_free(THMapAllocatorContext *ctx)
 
 static void *_map_alloc(void* ctx_, ptrdiff_t size)
 {
-  if (size == 0) {
-    return NULL;
-  }
-
   THMapAllocatorContext *ctx = ctx_;
   void *data = NULL;
 
@@ -336,9 +332,6 @@ static void *THMapAllocator_realloc(void* ctx, void* ptr, ptrdiff_t size) {
 }
 
 static void THMapAllocator_free(void* ctx_, void* data) {
-  if (data == NULL)
-    return;
-
   THMapAllocatorContext *ctx = ctx_;
 
 #ifdef _WIN32
diff --git a/lib/TH/THStorage.c b/lib/TH/THStorage.c
index f6b63f4..9f86eb6 100644
--- a/lib/TH/THStorage.c
+++ b/lib/TH/THStorage.c
@@ -34,11 +34,11 @@ THLongStorage *THLongStorage_newInferSize(THLongStorage *size, ptrdiff_t nElemen
   if (dim_infer != -1) {
     THDescBuff buf = THLongStorage_sizeDesc(size);
     THArgCheck(total_size > 0 && nElement % total_size == 0, 2,
-        "size '%s' is invalid for input of with %td elements", buf.str, nElement);
+        "size '%s' is invalid for input with %td elements", buf.str, nElement);
   } else {
     THDescBuff buf = THLongStorage_sizeDesc(size);
     THArgCheck(nElement == total_size, 2,
-        "size '%s' is invalid for input of with %td elements", buf.str, nElement);
+        "size '%s' is invalid for input with %td elements", buf.str, nElement);
   }
   THLongStorage* copy = THLongStorage_newWithSize(size->size);
   THLongStorage_copy(copy, size);
@@ -132,6 +132,16 @@ int THLongStorage_inferExpandGeometry(long *tensorSizes, long *tensorStrides, lo
     long stride = (dim >= 0) ?
         tensorStrides[dim] : expandedSizesCalc[i + 1] * expandedStridesCalc[i+1];
     long targetSize = THLongStorage_data(sizes)[i];
+    if (targetSize == -1) {
+      if (dim < 0) {
+        THFree(expandedSizesCalc);
+        THFree(expandedStridesCalc);
+        snprintf(error_buffer, buffer_len, "The expanded size of the tensor (%ld) isn't allowed in a leading, non-existing dimension %ld.", targetSize, i);
+        return -1;
+      } else {
+        targetSize = size;
+      }
+    }
     if (size != targetSize) {
       if (size == 1) {
         size = targetSize;
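
The new branch above lets a target size of -1 mean "keep the source tensor's size in this dimension", and rejects -1 for leading dimensions that the expansion would create. A simplified sketch of that rule, using a hypothetical helper that is not part of the patch:

    #include <stdio.h>

    /* Hypothetical helper mirroring the new -1 handling in
     * THLongStorage_inferExpandGeometry: -1 keeps the existing size when the
     * dimension exists in the source tensor (dim >= 0) and is an error for a
     * leading dimension introduced by the expansion (dim < 0). */
    static int resolve_expand_size(long targetSize, long sourceSize,
                                   int dimExists, long *resolved)
    {
      if (targetSize == -1) {
        if (!dimExists)
          return -1;            /* not allowed in a leading, non-existing dimension */
        *resolved = sourceSize; /* keep the source tensor's size */
        return 0;
      }
      *resolved = targetSize;
      return 0;
    }

    int main(void)
    {
      long size;
      /* expanding a {3,1} tensor with sizes {-1, 4}: the -1 resolves to 3 */
      if (resolve_expand_size(-1, 3, 1, &size) == 0)
        printf("dimension 0 expands to %ld\n", size);
      return 0;
    }
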
diff --git a/lib/TH/generic/THTensorMath.c b/lib/TH/generic/THTensorMath.c
index ac099cf..1e62bae 100644
--- a/lib/TH/generic/THTensorMath.c
+++ b/lib/TH/generic/THTensorMath.c
@@ -1595,6 +1595,10 @@ void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int
     THLongTensor_zero(indices_);
 
     if(t->size[dimension] == 1) {
+      if (!keepdim) {
+        THTensor_(squeeze1d)(values_, values_, dimension);
+        THLongTensor_squeeze1d(indices_, indices_, dimension);
+      }
       return;
     }
 
@@ -1671,6 +1675,10 @@ void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int
     THLongTensor_zero(indices_);
 
     if(t->size[dimension] == 1) {
+      if (!keepdim) {
+        THTensor_(squeeze1d)(values_, values_, dimension);
+        THLongTensor_squeeze1d(indices_, indices_, dimension);
+      }
       return;
     }
 
@@ -1919,6 +1927,18 @@ void THTensor_(zeros)(THTensor *r_, THLongStorage *size)
   THTensor_(zero)(r_);
 }
 
+void THTensor_(zerosLike)(THTensor *r_, THTensor *input)
+{
+  THTensor_(resizeAs)(r_, input);
+  THTensor_(zero)(r_);
+}
+
+void THTensor_(onesLike)(THTensor *r_, THTensor *input)
+{
+  THTensor_(resizeAs)(r_, input);
+  THTensor_(fill)(r_, 1);
+}
+
 void THTensor_(ones)(THTensor *r_, THLongStorage *size)
 {
   THTensor_(resize)(r_, size, NULL);
@@ -2836,19 +2856,14 @@ TENSOR_IMPLEMENT_LOGICAL(ne,!=)
     TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data);); \
   }                                                           \
 
-#define LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(NAME, CFUNC)                 \
-  void THTensor_(NAME)(THTensor *r_, THTensor *t, real value)              \
-  {                                                                     \
-    THTensor_(resizeAs)(r_, t);                                         \
-    TH_TENSOR_APPLY2(real, t, real, r_, *r__data = CFUNC(*t_data, value);); \
-  }                                                                     \
-
 #if defined(TH_REAL_IS_LONG)
 LAB_IMPLEMENT_BASIC_FUNCTION(abs,labs)
+LAB_IMPLEMENT_BASIC_FUNCTION(neg,-)
 #endif /* long only part */
 
 #if defined(TH_REAL_IS_SHORT) || defined(TH_REAL_IS_INT)
 LAB_IMPLEMENT_BASIC_FUNCTION(abs,abs)
+LAB_IMPLEMENT_BASIC_FUNCTION(neg,-)
 #endif /* int only part */
 
 #if defined(TH_REAL_IS_BYTE)
@@ -2890,7 +2905,6 @@ LAB_IMPLEMENT_BASIC_FUNCTION(sinh,TH_MATH_NAME(sinh))
 LAB_IMPLEMENT_BASIC_FUNCTION(tan,TH_MATH_NAME(tan))
 LAB_IMPLEMENT_BASIC_FUNCTION(atan,TH_MATH_NAME(atan))
 LAB_IMPLEMENT_BASIC_FUNCTION(tanh,TH_MATH_NAME(tanh))
-LAB_IMPLEMENT_BASIC_FUNCTION_VALUE(pow,TH_MATH_NAME(pow))
 LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,TH_MATH_NAME(sqrt))
 LAB_IMPLEMENT_BASIC_FUNCTION(rsqrt,TH_MATH_NAME(TH_rsqrt))
 LAB_IMPLEMENT_BASIC_FUNCTION(ceil,TH_MATH_NAME(ceil))
@@ -2903,6 +2917,35 @@ LAB_IMPLEMENT_BASIC_FUNCTION(neg,-)
 LAB_IMPLEMENT_BASIC_FUNCTION(cinv, TH_MATH_NAME(1.0) / )
 
 
+void THTensor_(pow)(THTensor *r_, THTensor *t, real value)
+{
+  THTensor_(resizeAs)(r_, t);
+  if(value == 1){
+    THTensor_(copy)(r_, t);
+  }
+  else if(value == 2){
+    THTensor_(cmul)(r_, t, t);
+  }
+  else if(value == 3){
+    TH_TENSOR_APPLY2(real, t, real, r_, *r__data = *t_data * *t_data * *t_data;);
+  }
+  else if(value == 0.5){
+    THTensor_(sqrt)(r_, t);
+  }
+  else if(value == -0.5){
+    THTensor_(rsqrt)(r_, t);
+  }
+  else if(value == -1){
+    THTensor_(cinv)(r_, t);
+  }
+  else if(value == -2){
+    TH_TENSOR_APPLY2(real, t, real, r_, *r__data = TH_MATH_NAME(1.0) / (*t_data * *t_data););
+  }
+  else{
+    TH_TENSOR_APPLY2(real, t, real, r_, *r__data = TH_MATH_NAME(pow)(*t_data, value););
+  }
+}
+
 void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty)
 {
   THTensor_(resizeAs)(r_, tx);
@@ -3158,10 +3201,7 @@ void THTensor_(linspace)(THTensor *r_, real a, real b, long n)
   }
 
   if(n == 1) {
-     TH_TENSOR_APPLY(real, r_,
-             *r__data = a;
-             i++;
-           );
+    THTensor_(set1d)(r_, 0, a);
   } else {
      TH_TENSOR_APPLY(real, r_,
              *r__data = a + i*(b-a)/((real)(n-1));
@@ -3181,10 +3221,7 @@ void THTensor_(logspace)(THTensor *r_, real a, real b, long n)
   }
 
   if(n == 1) {
-    TH_TENSOR_APPLY(real, r_,
-        *r__data = TH_MATH_NAME(pow)(10.0, a);
-        i++;
-        );
+    THTensor_(set1d)(r_, 0, TH_MATH_NAME(pow)(10.0, a));
   } else {
     TH_TENSOR_APPLY(real, r_,
         *r__data = TH_MATH_NAME(pow)(10.0, a + i*(b-a)/((real)(n-1)));
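
The rewritten THTensor_(pow) above special-cases common exponents (1, 2, 3, 0.5, -0.5, -1, -2) so they hit cheaper kernels such as copy, cmul, sqrt, rsqrt and cinv, and falls back to pow() only for arbitrary exponents. A minimal usage sketch, assuming the usual TH generic-macro expansion to THFloatTensor_pow and a standalone C99 program linked against libTH:

    #include <stdio.h>
    #include <TH/TH.h>

    int main(void)
    {
      THFloatTensor *t = THFloatTensor_newWithSize1d(4);
      THFloatTensor *r = THFloatTensor_new();
      for (long i = 0; i < 4; i++)
        THFloatTensor_set1d(t, i, (float)(i + 1));   /* t = {1, 2, 3, 4} */

      /* value == 2 takes the THTensor_(cmul)(r_, t, t) fast path */
      THFloatTensor_pow(r, t, 2);

      for (long i = 0; i < 4; i++)
        printf("%g ", THFloatTensor_get1d(r, i));    /* prints 1 4 9 16 */
      printf("\n");

      THFloatTensor_free(r);
      THFloatTensor_free(t);
      return 0;
    }
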
diff --git a/lib/TH/generic/THTensorMath.h b/lib/TH/generic/THTensorMath.h
index d0963b1..5f38701 100644
--- a/lib/TH/generic/THTensorMath.h
+++ b/lib/TH/generic/THTensorMath.h
@@ -90,7 +90,9 @@ TH_API void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value);
 TH_API void THTensor_(cminValue)(THTensor *r, THTensor *t, real value);
 
 TH_API void THTensor_(zeros)(THTensor *r_, THLongStorage *size);
+TH_API void THTensor_(zerosLike)(THTensor *r_, THTensor *input);
 TH_API void THTensor_(ones)(THTensor *r_, THLongStorage *size);
+TH_API void THTensor_(onesLike)(THTensor *r_, THTensor *input);
 TH_API void THTensor_(diag)(THTensor *r_, THTensor *t, int k);
 TH_API void THTensor_(eye)(THTensor *r_, long n, long m);
 TH_API void THTensor_(arange)(THTensor *r_, accreal xmin, accreal xmax, accreal step);
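
The new zerosLike/onesLike entry points resize the destination to match the input tensor and then fill it with zeros or ones. A short sketch, again assuming the float instantiations THFloatTensor_zerosLike and THFloatTensor_onesLike produced by the generic macros:

    #include <stdio.h>
    #include <TH/TH.h>

    int main(void)
    {
      THFloatTensor *input = THFloatTensor_newWithSize2d(2, 3);
      THFloatTensor *z = THFloatTensor_new();
      THFloatTensor *o = THFloatTensor_new();

      /* z and o take input's 2x3 shape; contents become 0 and 1 respectively */
      THFloatTensor_zerosLike(z, input);
      THFloatTensor_onesLike(o, input);

      printf("z: %ldx%ld  o: %ldx%ld\n",
             THFloatTensor_size(z, 0), THFloatTensor_size(z, 1),
             THFloatTensor_size(o, 0), THFloatTensor_size(o, 1));

      THFloatTensor_free(o);
      THFloatTensor_free(z);
      THFloatTensor_free(input);
      return 0;
    }
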
diff --git a/lib/TH/generic/THTensorRandom.c b/lib/TH/generic/THTensorRandom.c
index 595cfa7..21359a1 100644
--- a/lib/TH/generic/THTensorRandom.c
+++ b/lib/TH/generic/THTensorRandom.c
@@ -23,6 +23,16 @@ void THTensor_(random)(THTensor *self, THGenerator *_generator)
 #endif
 }
 
+void THTensor_(clampedRandom)(THTensor *self, THGenerator *_generator, long min, long max) {
+  THArgCheck(max > min, 2, "max must be greater than min");
+  TH_TENSOR_APPLY(real, self, *self_data = (real)((THRandom_random(_generator) % (max - min)) + min);)
+}
+
+void THTensor_(cappedRandom)(THTensor *self, THGenerator *_generator, long max) {
+  THArgCheck(max > 0, 1, "max must be positive");
+  THTensor_(clampedRandom)(self, _generator, 0, max);
+}
+
 void THTensor_(geometric)(THTensor *self, THGenerator *_generator, double p)
 {
   TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_geometric(_generator, p););
@@ -55,6 +65,29 @@ void THTensor_(normal)(THTensor *self, THGenerator *_generator, double mean, dou
   TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_normal(_generator, mean, stdv););
 }
 
+void THTensor_(normal_means)(THTensor *self, THGenerator *gen, THTensor *means, double stddev)
+{
+  THTensor_(resizeAs)(self, means);
+  THTensor_(normal)(self, gen, 0, stddev);
+  THTensor_(cadd)(self, self, 1, means);
+}
+
+void THTensor_(normal_stddevs)(THTensor *self, THGenerator *gen, double mean, THTensor *stddevs)
+{
+  THTensor_(resizeAs)(self, stddevs);
+  THTensor_(normal)(self, gen, 0, 1);
+  THTensor_(cmul)(self, self, stddevs);
+  THTensor_(add)(self, self, mean);
+}
+
+void THTensor_(normal_means_stddevs)(THTensor *self, THGenerator *gen, THTensor *means, THTensor *stddevs)
+{
+  THTensor_(resizeAs)(self, means);
+  THTensor_(normal)(self, gen, 0, 1);
+  THTensor_(cmul)(self, self, stddevs);
+  THTensor_(cadd)(self, self, 1, means);
+}
+
 void THTensor_(exponential)(THTensor *self, THGenerator *_generator, double lambda)
 {
   TH_TENSOR_APPLY(real, self, *self_data = (real)THRandom_exponential(_generator, lambda););
@@ -114,7 +147,7 @@ void THTensor_(multinomialAliasSetup)(THTensor *probs, THLongTensor *J, THTensor
       THTensor_fastSet1d(J, small, large);
       q_data[large * q->stride[0]] -= 1.0 - THTensor_fastGet1d(q, small);
 
-      if(q_data[large] < 1.0)
+      if(q_data[large * q->stride[0]] < 1.0)
         {
           THTensor_fastSet1d(smaller, small_c-1, large);
           large_c -= 1;
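
The new clampedRandom fills a tensor with integers drawn from [min, max) via (THRandom_random() % (max - min)) + min, and cappedRandom is the same with min fixed at 0. A minimal sketch with a long tensor, assuming the macro-expanded names THLongTensor_clampedRandom and THLongTensor_cappedRandom:

    #include <stdio.h>
    #include <TH/TH.h>

    int main(void)
    {
      THGenerator *gen = THGenerator_new();
      THRandom_manualSeed(gen, 1234);

      THLongTensor *t = THLongTensor_newWithSize1d(8);

      /* eight integers in [5, 15) */
      THLongTensor_clampedRandom(t, gen, 5, 15);

      /* overwrite with integers in [0, 100) */
      THLongTensor_cappedRandom(t, gen, 100);

      for (long i = 0; i < 8; i++)
        printf("%ld ", THLongTensor_get1d(t, i));
      printf("\n");

      THLongTensor_free(t);
      THGenerator_free(gen);
      return 0;
    }
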
diff --git a/lib/TH/generic/THTensorRandom.h b/lib/TH/generic/THTensorRandom.h
index e39d589..145c7d7 100644
--- a/lib/TH/generic/THTensorRandom.h
+++ b/lib/TH/generic/THTensorRandom.h
@@ -3,6 +3,8 @@
 #else
 
 TH_API void THTensor_(random)(THTensor *self, THGenerator *_generator);
+TH_API void THTensor_(clampedRandom)(THTensor *self, THGenerator *_generator, long min, long max);
+TH_API void THTensor_(cappedRandom)(THTensor *self, THGenerator *_generator, long max);
 TH_API void THTensor_(geometric)(THTensor *self, THGenerator *_generator, double p);
 TH_API void THTensor_(bernoulli)(THTensor *self, THGenerator *_generator, double p);
 TH_API void THTensor_(bernoulli_FloatTensor)(THTensor *self, THGenerator *_generator, THFloatTensor *p);
@@ -11,6 +13,9 @@ TH_API void THTensor_(bernoulli_DoubleTensor)(THTensor *self, THGenerator *_gene
 #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
 TH_API void THTensor_(uniform)(THTensor *self, THGenerator *_generator, double a, double b);
 TH_API void THTensor_(normal)(THTensor *self, THGenerator *_generator, double mean, double stdv);
+TH_API void THTensor_(normal_means)(THTensor *self, THGenerator *gen, THTensor *means, double stddev);
+TH_API void THTensor_(normal_stddevs)(THTensor *self, THGenerator *gen, double mean, THTensor *stddevs);
+TH_API void THTensor_(normal_means_stddevs)(THTensor *self, THGenerator *gen, THTensor *means, THTensor *stddevs);
 TH_API void THTensor_(exponential)(THTensor *self, THGenerator *_generator, double lambda);
 TH_API void THTensor_(cauchy)(THTensor *self, THGenerator *_generator, double median, double sigma);
 TH_API void THTensor_(logNormal)(THTensor *self, THGenerator *_generator, double mean, double stdv);
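
The normal_means / normal_stddevs / normal_means_stddevs variants let the mean and/or standard deviation be supplied per element as a tensor: the destination is resized to match, filled with a normal draw, then scaled by stddevs and/or shifted by means. A sketch of the per-element-means case, assuming the float instantiation THFloatTensor_normal_means:

    #include <stdio.h>
    #include <TH/TH.h>

    int main(void)
    {
      THGenerator *gen = THGenerator_new();

      THFloatTensor *means = THFloatTensor_newWithSize1d(5);
      THFloatTensor_fill(means, 10.0f);

      THFloatTensor *out = THFloatTensor_new();

      /* out is resized to means' shape, filled with N(0, 0.5^2) noise,
         then shifted element-wise by means */
      THFloatTensor_normal_means(out, gen, means, 0.5);

      printf("sample mean: %g (should be near 10)\n", THFloatTensor_meanall(out));

      THFloatTensor_free(out);
      THFloatTensor_free(means);
      THGenerator_free(gen);
      return 0;
    }
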
diff --git a/lib/TH/vector/VSX.c b/lib/TH/vector/VSX.c
index ce5bb38..f01718c 100644
--- a/lib/TH/vector/VSX.c
+++ b/lib/TH/vector/VSX.c
@@ -1345,7 +1345,7 @@ static void THFloatVector_divs_VSX(float *y, const float*x, const float c, const
 //	TODO
 //
 //
-//    Finished runnning all tests. All tests PASSED.
+//    Finished running all tests. All tests PASSED.
 //
 //------------------------------------------------
 #ifdef RUN_VSX_TESTS
@@ -2509,7 +2509,7 @@ int main()
 
 
 
-    printf("Finished runnning all tests. All tests PASSED.\n");
+    printf("Finished running all tests. All tests PASSED.\n");
     return 0;
 }
 

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/lua-torch-torch7.git


